code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
import six
from six.moves import builtins
_property = builtins.property
_tuple = builtins.tuple
from operator import itemgetter as _itemgetter
class TocPageset(tuple):
    """TocPageset(label, pages)

    Immutable named 2-tuple (namedtuple-style generated code) pairing
    a text label with the list of TOC pages grouped under it."""

    __slots__ = ()
    _fields = ('label', 'pages')

    def __new__(_cls, label, pages):
        'Create new instance of TocPageset(label, pages)'
        return tuple.__new__(_cls, (label, pages))

    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new TocPageset object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 2:
            raise TypeError('Expected 2 arguments, got %d' % len(result))
        return result

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + '(label=%r, pages=%r)' % self

    def _asdict(self):
        'Return a new OrderedDict which maps field names to their values'
        # BUG FIX: OrderedDict was referenced without ever being
        # imported in this module, so _asdict() raised NameError when
        # called. Import it locally.
        from collections import OrderedDict
        return OrderedDict(zip(self._fields, self))

    __dict__ = property(_asdict)

    def _replace(_self, **kwds):
        'Return a new TocPageset object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, ('label', 'pages'), _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % list(kwds))
        return result

    def __getnewargs__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return tuple(self)

    label = property(_itemgetter(0), doc='Alias for field number 0')
    pages = property(_itemgetter(1), doc='Alias for field number 1')
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import datetime
import ast
import logging
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import six
from six.moves import configparser
class LayeredConfig(object):
    """Provide unified access to nested configuration parameters
    specified by default settings in code, config file (using, for
    now, .ini-file style syntax) and command line parameters. Also
    enable persistence to a ini file when parameters are
    programmatically changed."""

    def __init__(self, defaults=None, inifile=None, commandline=None, cascade=False):
        """Initialize the config object from a bunch of sources.

        :param defaults: A dict with configuration keys and values. If
                         any values are dicts, these are turned into
                         nested config objects.
        :type defaults: dict
        :param inifile: The name of a ini-style configuration
                        file. Any subsections in this file are turned
                        into nested config objects.
        :type inifile: string
        :param commandline: The contents of sys.argv, or something
                            similar. Any long-style parameters are
                            turned into configuration values, and
                            parameters with hyphens are turned into
                            nested config objects
                            (i.e. ``--module-parameter=foo`` results
                            in self.module.parameter == "foo".
        :type commandline: list
        :param cascade: If a configuration key is not found,
                        search parent config object.
        :type cascade: bool
        """
        self._cascade = cascade
        self._subsections = OrderedDict()
        self._defaults = OrderedDict()
        self._load_defaults(defaults)
        self._inifile = OrderedDict()
        self._load_inifile(inifile)
        self._inifilename = inifile
        self._inifile_dirty = False
        self._commandline = OrderedDict()
        self._load_commandline(commandline)
        self._parent = None
        self._sectionkey = None

    def _has(self, name):
        # True iff ``name`` resolves through any configuration layer
        # (or, with cascade, through an ancestor config object).
        try:
            getattr(self, name)
            return True
        except AttributeError:
            # BUG FIX: __getattribute__ signals a missing key with
            # AttributeError (see below); this used to catch
            # ValueError, so missing keys escaped as exceptions
            # instead of returning False.
            return False

    def write(self):
        """Persist programmatically-changed values back to the ini file.

        A no-op unless the root config object was created with an ini
        file and some value has actually been changed."""
        root = self
        while root._parent:
            root = root._parent
        if root._inifile_dirty:
            with open(root._inifilename, "w") as fp:
                root._configparser.write(fp)

    def __iter__(self):
        # Yield all known keys. NOTE(review): a key present in
        # several layers is yielded once per layer; callers appear to
        # tolerate duplicates, so that behaviour is preserved.
        l = []
        # l.extend(self._subsections.keys())
        l.extend(self._commandline.keys())
        l.extend(self._inifile.keys())
        l.extend(self._defaults.keys())
        if self._cascade and self._parent:
            l.extend(list(self._parent))
        for k in l:
            yield k

    def __getattribute__(self, name):
        # Layer priority: subsections, then commandline, then inifile,
        # then defaults. With cascade enabled, each layer also
        # consults the ancestor chain before falling through to the
        # next (lower-priority) layer.
        if name.startswith("_") or name == "write":
            return object.__getattribute__(self, name)
        if name in self._subsections:
            return self._subsections[name]
        if name in self._commandline:
            return self._commandline[name]
        if self._cascade:
            current = self._parent
            while current:
                if name in current._commandline:
                    return current._commandline[name]
                current = current._parent
        if name in self._inifile:
            return self._inifile[name]
        if self._cascade:
            current = self._parent
            while current:
                if name in current._inifile:
                    return current._inifile[name]
                current = current._parent
        # A bare class object in defaults is a type declaration for
        # _type_value, never a value -- skip it here.
        if name in self._defaults and not isinstance(self._defaults[name], type):
            return self._defaults[name]
        if self._cascade:
            current = self._parent
            while current:
                if name in current._defaults and not isinstance(current._defaults[name], type):
                    return current._defaults[name]
                current = current._parent
        raise AttributeError("Configuration key %s doesn't exist" % name)

    def __setattr__(self, name, value):
        if name.startswith("_"):
            object.__setattr__(self, name, value)
            return
        # First make sure that the higher-priority
        # commandline-derived data doesn't shadow the new value.
        if name in self._commandline:
            del self._commandline[name]
        # then update our internal representation
        if name not in self._inifile:
            self._inifile[name] = None
        if value != self._inifile[name]:
            self._inifile[name] = value
            root = self
            while root._parent:
                root = root._parent
            if root._inifilename:
                root._inifile_dirty = True
        # and finally update our associated cfgparser object so that we later
        # can write() out the inifile. This last part is a bit complicated as
        # we need to find the root LayeredConfig object where the cfgparser
        # object resides.
        root = self
        sectionkeys = []
        while root._parent:
            sectionkeys.append(root._sectionkey)
            root = root._parent
        branch = root._configparser
        if branch:
            section = "".join(sectionkeys)  # doesn't really work with more than 1 level
            root._configparser.set(section, name, str(value))
        else:
            # If there is no _configparser object, then this
            # LayeredConfig instance was created without one. There is
            # no way to persist configuration values, so we're done
            pass

    def _load_defaults(self, defaults):
        # dict values become nested LayeredConfig subsections; all
        # other values (including bare classes used as type
        # declarations) go into the defaults layer.
        if not defaults:
            return
        for (k, v) in defaults.items():
            if isinstance(v, dict):
                self._subsections[k] = LayeredConfig(
                    defaults=v, cascade=self._cascade)
                self._subsections[k]._sectionkey = k
                self._subsections[k]._parent = self
            else:
                self._defaults[k] = v

    def _load_inifile(self, inifilename):
        # Parse the ini file (if given and present) into the inifile
        # layer. Keys in the "__root__" section become top-level keys;
        # other sections become nested subsections.
        if not inifilename:
            self._configparser = None
            return
        if not os.path.exists(inifilename):
            # logging.warn is a deprecated alias for logging.warning
            logging.warning("INI file %s does not exist" % inifilename)
            self._configparser = None
            return
        self._configparser = configparser.ConfigParser(dict_type=OrderedDict)
        self._configparser.read(inifilename)
        if self._configparser.has_section('__root__'):
            self._load_inifile_section('__root__')
        # NOTE(review): sections() includes '__root__' as well, so a
        # '__root__' subsection object is also created -- preserved
        # as-is, confirm whether that is intended.
        for sectionkey in self._configparser.sections():
            # Do we have a LayeredConfig object for sectionkey already?
            if sectionkey not in self._subsections:
                self._subsections[
                    sectionkey] = LayeredConfig(cascade=self._cascade)
                self._subsections[sectionkey]._sectionkey = sectionkey
                self._subsections[sectionkey]._parent = self
            if self._subsections[sectionkey]._configparser is None:
                self._subsections[
                    sectionkey]._configparser = self._configparser
            self._subsections[sectionkey]._load_inifile_section(sectionkey)

    def _type_value(self, key, value):
        """Find appropriate method/class constructor to convert a
        string value to the correct type IF we know the correct
        type."""
        def boolconvert(value):
            return value == "True"

        def listconvert(value):
            # this function is called with both string representations
            # of entire lists and simple (unquoted) strings. The
            # ast.literal_eval handles the first case, and if the
            # value can't be parsed as a python expression, it is
            # returned verbatim (not wrapped in a list, for reasons)
            try:
                return ast.literal_eval(value)
            except (SyntaxError, ValueError):
                return value

        def datetimeconvert(value):
            return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")

        # FIXME: Probably needs special handling for
        # most types besides int and float (eg. bool("False") !=
        # False, datetime.datetime("2012-09-14 12:06:00") won't
        # work, etc
        if key in self._defaults:
            if type(self._defaults[key]) == type:
                # a bare class in defaults declares the wanted type
                t = self._defaults[key]
            else:
                # otherwise infer the type from the default value
                t = type(self._defaults[key])
        else:
            t = str
        if t == bool:
            t = boolconvert
        elif t == list:
            t = listconvert
        elif t == datetime.datetime:
            t = datetimeconvert
        return t(value)

    def _load_inifile_section(self, sectionname):
        for (key, value) in self._configparser.items(sectionname):
            self._inifile[key] = self._type_value(key, value)

    # For now: only support long arguments with = separating the parameter and the value, ie
    # "./foo.py --longarg=value" works, "./foo.py --longarg value" or even
    # "./foo.py --longarg = value" doesn't work.
    def _load_commandline(self, commandline):
        if not commandline:
            return
        for arg in commandline:
            if arg.startswith("--"):
                if "=" in arg:
                    (param, value) = arg.split("=", 1)
                else:
                    (param, value) = (arg, True)  # assume bool, not str
                # '--param' => ['param']
                # '--module-param' => ['module','param']
                # Note: Options may not contain hyphens (ie they can't
                # be called "parse-force") since this clashes with hyphen
                # as the sectionkey separator.
                parts = param[2:].split("-")
                self._load_commandline_part(parts, value)

    def _load_commandline_part(self, parts, value):
        if len(parts) == 1:
            key = parts[0]
            if type(value) != bool:  # bools are created implicitly for value-less options
                value = self._type_value(key, value)
            # create a new value, or append to an existing list?
            if key in self._commandline:
                if not isinstance(self._commandline[key], list):
                    self._commandline[key] = [self._commandline[key]]
                self._commandline[key].append(value)
            else:
                self._commandline[key] = value
        else:
            (sectionkey) = parts[0]
            if sectionkey not in self._subsections:
                self._subsections[
                    sectionkey] = LayeredConfig(cascade=self._cascade)
                self._subsections[sectionkey]._sectionkey = sectionkey
                self._subsections[sectionkey]._parent = self
            self._subsections[sectionkey]._load_commandline_part(parts[1:], value)
| Python |
import six
from six.moves import builtins
_property = builtins.property
_tuple = builtins.tuple
from operator import itemgetter as _itemgetter
class TocCriteria(tuple):
    """Represents a particular way of organizing the documents in a
    repository, for the purpose of generating a table of contents for
    those documents.

    :param binding: The variable name (binding) for that same
                    predicate in the sparql results retrieved from the
                    query constructed by toc_query.
    :param label: A text label for the entire set of toc for this criteria.
    :param pagetitle: ...
    :param selector: a callable that groups documents (eg. first
                     letter, year-part of date, etc).
    :param key: a callable used for sorting documents
    """

    __slots__ = ()
    _fields = ('binding', 'label', 'pagetitle', 'selector', 'key')

    def __new__(_cls, binding, label, pagetitle, selector, key):
        'Create new instance of TocCriteria(binding, label, pagetitle, selector, key)'
        return tuple.__new__(_cls, (binding, label, pagetitle, selector, key))

    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new TocCriteria object from a sequence or iterable'
        result = new(cls, iterable)
        # BUG FIX: this class has 5 fields, but the length check (and
        # the error message) still said 4 -- a leftover from whatever
        # namedtuple it was copied from -- so _make() raised TypeError
        # for every *correct* 5-element iterable.
        if len(result) != 5:
            raise TypeError('Expected 5 arguments, got %d' % len(result))
        return result

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + '(binding=%r, label=%r, pagetitle=%r, selector=%r, key=%r)' % self

    def _asdict(self):
        'Return a new OrderedDict which maps field names to their values'
        # BUG FIX: OrderedDict was referenced without ever being
        # imported in this module; import it locally.
        from collections import OrderedDict
        return OrderedDict(zip(self._fields, self))

    __dict__ = property(_asdict)

    def _replace(_self, **kwds):
        'Return a new TocCriteria object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, ('binding', 'label', 'pagetitle', 'selector', 'key'), _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % list(kwds))
        return result

    def __getnewargs__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return tuple(self)

    binding = property(_itemgetter(0), doc='Alias for field number 0')
    label = property(_itemgetter(1), doc='Alias for field number 1')
    pagetitle = property(_itemgetter(2), doc='Alias for field number 2')
    selector = property(_itemgetter(3), doc='Alias for field number 3')
    key = property(_itemgetter(4), doc='Alias for field number 4')
| Python |
from contextlib import contextmanager
import shutil
import os
from tempfile import NamedTemporaryFile
import filecmp
import six
if six.PY3:
from urllib.parse import quote, unquote
else:
from urllib import quote, unquote # NOQA
from ferenda import util
from ferenda import errors
class DocumentStore(object):
    """
    Unifies handling of reading and writing of various data files
    during the download, parse and generate stages.

    :param datadir: The root directory (including docrepo path
                    segment) where files are stored.
    :type datadir: str
    :param downloaded_suffix: File suffix for the main source document
                              format. Determines the suffix of
                              downloaded files.
    :type downloaded_suffix: str
    :param storage_policy: Some repositories have documents in several
                           formats, documents split amongst several
                           files or embedded resources. If
                           ``storage_policy`` is set to ``dir``, then
                           each document gets its own directory (the
                           default filename being ``index`` +suffix),
                           otherwise each doc gets stored as a file in
                           a directory with other files. Affects
                           :py:meth:`~ferenda.DocumentStore.path`
                           (and therefore all other ``*_path``
                           methods)
    :type storage_policy: str
    """

    def __init__(self, datadir, downloaded_suffix=".html", storage_policy="file"):
        self.datadir = datadir  # docrepo.datadir + docrepo.alias
        self.downloaded_suffix = downloaded_suffix
        self.storage_policy = storage_policy

    @contextmanager
    def _open(self, filename, mode):
        """Open ``filename`` as a context manager. In write mode, the
        writing goes to a temp file which replaces the target on exit
        only if the content actually changed (keeps mtimes stable for
        unchanged files)."""
        if "w" in mode:
            fp = NamedTemporaryFile(mode, delete=False)
            fp.realname = filename
            try:
                yield fp
            finally:
                tempname = fp.name
                fp.close()
                if not os.path.exists(filename) or not filecmp.cmp(tempname, filename):
                    util.ensure_dir(filename)
                    shutil.move(tempname, filename)
                else:
                    os.unlink(tempname)
        else:
            if "a" in mode and not os.path.exists(filename):
                util.ensure_dir(filename)
            fp = open(filename, mode)
            # BUG FIX: the file object was never closed in read/append
            # mode; close it when the with-block exits.
            try:
                yield fp
            finally:
                fp.close()

    @contextmanager
    def open(self, basefile, maindir, suffix, mode="r", version=None, attachment=None):
        """Context manager that opens the file identified by the given
        path components.

        BUG FIX: this used to unconditionally run the write-mode
        temp-file/replace dance regardless of ``mode``, so opening
        with mode="r" yielded an empty temp file and then *overwrote*
        the real file with it on exit. Delegating to
        :py:meth:`_open` handles both read and write modes correctly.
        """
        filename = self.path(basefile, maindir, suffix, version, attachment)
        with self._open(filename, mode) as fp:
            yield fp

    def path(self, basefile, maindir, suffix, version=None, attachment=None):
        """
        Calculate the full filesystem path for the given basefile and
        stage of processing.

        Example:

        >>> d = DocumentStore(datadir="/tmp/base")
        >>> realsep = os.sep
        >>> os.sep = "/"
        >>> d.path('123/a', 'parsed', '.xhtml')
        '/tmp/base/parsed/123/a.xhtml'
        >>> d.storage_policy = "dir"
        >>> d.path('123/a', 'parsed', '.xhtml')
        '/tmp/base/parsed/123/a/index.xhtml'
        >>> d.path('123/a', 'downloaded', None, 'r4711', 'appendix.txt')
        '/tmp/base/archive/downloaded/123/a/r4711/appendix.txt'
        >>> os.sep = realsep

        :param basefile: The basefile for which to calculate the path
        :type basefile: str
        :param maindir: The processing stage directory (normally ``downloaded``, ``parsed``, or ``generated``)
        :type maindir: str
        :param suffix: The file extension including period (i.e. ``.txt``, not ``txt``)
        :type suffix: str
        :param version: Optional, the archived version id
        :type version: str
        :param attachment: Optional. Any associated file needed by the main file. Requires that ``storage_policy`` is set to ``dir``. ``suffix`` is ignored if this parameter is used.
        :type attachment: str
        :returns: The full filesystem path
        :rtype: str
        """
        pathfrag = self.basefile_to_pathfrag(basefile)
        if version:
            v_pathfrag = self.basefile_to_pathfrag(version)
            segments = [self.datadir,
                        'archive', maindir, pathfrag, v_pathfrag]
        else:
            segments = [self.datadir, maindir, pathfrag]
        if self.storage_policy == "dir":
            if attachment:
                # attachment names must be plain file names, not paths
                for illegal in ':/':
                    if illegal in attachment:
                        raise errors.AttachmentNameError(
                            "Char '%s' in attachment name '%s' not allowed" %
                            (illegal, attachment))
                segments.append(attachment)
            else:
                segments.append("index" + suffix)
        else:
            if attachment is not None:
                raise errors.AttachmentPolicyError(
                    "Can't add attachments (name %s) if "
                    "self.storage_policy != 'dir'" % attachment)
            segments[-1] += suffix
        unixpath = "/".join(segments)
        if os.sep == "/":
            return unixpath
        else:
            return unixpath.replace("/", os.sep)

    def list_basefiles_for(self, action, basedir=None):
        """Get all available basefiles that can be used for the
        specified action.

        :param action: The action for which to get available
                       basefiles (``parse``, ``relate``, ``generate``
                       or ``news``)
        :type action: str
        :param basedir: The base directory in which to search for
                        available files. If not provided, defaults to
                        ``self.datadir``.
        :type basedir: str
        :returns: All available basefiles
        :rtype: generator
        """
        if not basedir:
            basedir = self.datadir
        directory = None
        if action == "parse":
            directory = os.path.sep.join((basedir, "downloaded"))
            if self.storage_policy == "dir":
                # If each document is stored in a separate directory,
                # there are usually other auxiliary files (attachments
                # and whatnot) in that directory as well. Make sure we
                # only yield a single file from each directory. By
                # convention, the main file is called index.html,
                # index.pdf or whatever.
                suffix = "index" + self.downloaded_suffix
            else:
                suffix = self.downloaded_suffix
        elif action == "relate":
            directory = os.path.sep.join((basedir, "distilled"))
            suffix = ".rdf"
        elif action == "generate":
            directory = os.path.sep.join((basedir, "parsed"))
            suffix = ".xhtml"
        elif action == "news":
            directory = os.path.sep.join((basedir, "entries"))
            suffix = ".json"
        # FIXME: fake action, needed for get_status. replace with
        # something more elegant
        elif action in ("_postgenerate",):
            # BUG FIX: ("_postgenerate") without the comma is just a
            # parenthesized string, so the membership test was a
            # substring check rather than a tuple membership test.
            directory = os.path.sep.join((basedir, "generated"))
            suffix = ".html"
        if not directory:
            raise ValueError("No directory calculated for action %s" % action)
        if not os.path.exists(directory):
            return
        for x in util.list_dirs(directory, suffix, reverse=True):
            # ignore empty files placed by download (which may
            # have done that in order to avoid trying to
            # re-download nonexistent resources)
            if os.path.exists(x) and os.path.getsize(x) > 0:
                # get a pathfrag from full path; with "dir" policy the
                # extra +1 also strips the separator before "index"
                suffixlen = len(suffix) if self.storage_policy == "file" else len(suffix) + 1
                x = x[len(directory) + 1:-suffixlen]
                yield self.pathfrag_to_basefile(x)

    def list_versions(self, basefile, action=None):
        """Get all archived versions of a given basefile.

        :param basefile: The basefile to list archived versions for
        :type basefile: str
        :param action: The type of file to look for (either
                       ``downloaded``, ``parsed`` or ``generated``. If
                       ``None``, look for all types.
        :type action: str
        :returns: All available versions for that basefile
        :rtype: generator
        """
        if action:
            assert action in ('downloaded', 'parsed', 'generated'), "Action %s invalid" % action
            actions = (action,)
        else:
            actions = ('downloaded', 'parsed', 'generated')
        basedir = self.datadir
        # BUG FIX: the basefile must be *encoded* into a path fragment
        # (basefile_to_pathfrag); the inverse (decoding) function was
        # called here, producing wrong directories for any basefile
        # containing encoded characters.
        pathfrag = self.basefile_to_pathfrag(basefile)
        yielded_basefiles = []
        for action in actions:
            directory = os.sep.join((basedir, "archive",
                                     action, pathfrag))
            if not os.path.exists(directory):
                continue
            for x in util.list_dirs(directory, reverse=False):
                if os.path.exists(x):
                    # /datadir/base/archive/downloaded/basefile/version.html
                    # => version.html
                    x = x[len(directory) + 1:]
                    if self.storage_policy == "dir":
                        # version/index.html => version
                        x = os.sep.join(x.split(os.sep)[:-1])
                    else:
                        # version.html => version
                        x = os.path.splitext(x)[0]
                    if os.sep in x:
                        # we didn't find an archived file for
                        # basefile, instead we found an archived file
                        # for another basefile that startswith our
                        # basefile (eg '123' and '123/a', and we found
                        # '123/a/4.html')
                        continue
                    basefile = self.pathfrag_to_basefile(x)
                    if basefile not in yielded_basefiles:
                        yielded_basefiles.append(basefile)
                        yield basefile

    def list_attachments(self, basefile, action, version=None):
        """Get all attachments for a basefile in a specified state.

        :param action: The state (type of file) to look for (either
                       ``downloaded``, ``parsed`` or ``generated``. If
                       ``None``, look for all types.
        :type action: str
        :param basefile: The basefile to list attachments for
        :type basefile: str
        :param version: The version of the basefile to list attachments for. If None, list attachments for the current version.
        :type version: str
        :returns: All available attachments for the basefile
        :rtype: generator
        """
        basedir = self.datadir
        # BUG FIX: encode the basefile/version into path fragments
        # with basefile_to_pathfrag; the decoding function
        # (pathfrag_to_basefile) was mistakenly used for both.
        pathfrag = self.basefile_to_pathfrag(basefile)
        if version:
            v_pathfrag = self.basefile_to_pathfrag(version)
            directory = os.sep.join((basedir, "archive", action, pathfrag, v_pathfrag))
        else:
            directory = os.sep.join((basedir, action, pathfrag))
        # FIXME: Similar map exists in list_basefiles_for and in other
        # places throughout the code. Should subclasses be able to
        # control suffixes beyond the simple self.downloaded_suffix
        # mechanism?
        suffixmap = {'downloaded': self.downloaded_suffix,
                     'parsed': '.xhtml',
                     'generated': '.html'}
        mainfile = "index" + suffixmap[action]
        for x in util.list_dirs(directory, reverse=False):
            # /datadir/base/downloaded/basefile/attachment.txt => attachment.txt
            x = x[len(directory) + 1:]
            if x != mainfile:
                yield x

    def basefile_to_pathfrag(self, basefile):
        """Given a basefile, returns a string that can safely be used
        as a fragment of the path for any representation of that
        file. The default implementation recognizes a number of
        characters that are unsafe to use in file names and replaces
        them with HTTP percent-style encoding.

        Example:

        >>> d = DocumentStore("/tmp")
        >>> realsep = os.sep
        >>> os.sep = "/"
        >>> d.basefile_to_pathfrag('1998:204')
        '1998/%3A204'
        >>> os.sep = realsep

        If you wish to override how document files are stored in
        directories, you can override this method, but you should make
        sure to also override
        :py:meth:`~ferenda.DocumentStore.pathfrag_to_basefile` to
        work as the inverse of this method.

        :param basefile: The basefile to encode
        :type basefile: str
        :returns: The encoded path fragment
        :rtype: str
        """
        return quote(basefile,
                     safe='/;@&=+,').replace('%', os.sep + '%')

    def pathfrag_to_basefile(self, pathfrag):
        """Does the inverse of
        :py:meth:`~ferenda.DocumentStore.basefile_to_pathfrag`,
        that is, converts a fragment of a file path into the
        corresponding basefile.

        :param pathfrag: The path fragment to decode
        :type pathfrag: str
        :returns: The resulting basefile
        :rtype: str
        """
        if os.sep == "\\":
            pathfrag = pathfrag.replace("\\", "/")
        return unquote(pathfrag.replace('/%', '%'))

    def archive(self, basefile, version):
        """Moves the current version of a document to an archive. All
        files related to the document are moved (downloaded, parsed,
        generated files and any existing attachment files).

        :param basefile: The basefile of the document to archive
        :type basefile: str
        :param version: The version id to archive under
        :type version: str
        """
        for meth in (self.downloaded_path, self.documententry_path,
                     self.parsed_path, self.distilled_path,
                     self.annotation_path, self.generated_path):
            # FIXME: what about intermediate? Ignore them as they
            # should be able to be regenerated at any time?
            src = meth(basefile)
            dest = meth(basefile, version)
            if self.storage_policy == "dir":
                # with "dir" policy, move the whole document directory
                src = os.path.dirname(src)
                dest = os.path.dirname(dest)
            if not os.path.exists(src):
                continue
            if os.path.exists(dest):
                raise errors.ArchivingError(
                    "Archive destination %s for basefile %s version %s already exists!" %
                    (dest, basefile, version))
            util.ensure_dir(dest)
            shutil.move(src, dest)

    def downloaded_path(self, basefile, version=None, attachment=None):
        """Get the full path for the downloaded file for the given
        basefile (and optionally archived version and/or attachment
        filename).

        :param basefile: The basefile for which to calculate the path
        :type basefile: str
        :param version: Optional. The archived version id
        :type version: str
        :param attachment: Optional. Any associated file needed by the main file.
        :type attachment: str
        :returns: The full filesystem path
        :rtype: str
        """
        return self.path(basefile, 'downloaded',
                         self.downloaded_suffix, version, attachment)

    def open_downloaded(self, basefile, mode="r", version=None, attachment=None):
        # Context manager for the downloaded file -- see _open for the
        # write-mode temp-file semantics.
        filename = self.downloaded_path(basefile, version, attachment)
        return self._open(filename, mode)

    def documententry_path(self, basefile, version=None):
        """Get the full path for the documententry file for the given
        basefile (and optionally archived version).

        :param basefile: The basefile for which to calculate the path
        :type basefile: str
        :param version: Optional. The archived version id
        :type version: str
        :returns: The full filesystem path
        :rtype: str
        """
        return self.path(basefile, 'entries', '.json', version)

    def parsed_path(self, basefile, version=None, attachment=None):
        """Get the full path for the parsed file for the given
        basefile.

        :param basefile: The basefile for which to calculate the path
        :type basefile: str
        :param version: Optional. The archived version id
        :type version: str
        :param attachment: Optional. Any associated file needed by the
                           main file (created by
                           :py:meth:`~ferenda.DocumentStore.parse`)
        :type attachment: str
        :returns: The full filesystem path
        :rtype: str
        """
        return self.path(basefile, 'parsed', '.xhtml',
                         version, attachment)

    def open_parsed(self, basefile, mode="r", version=None, attachment=None):
        # Context manager for the parsed XHTML file.
        filename = self.parsed_path(basefile, version, attachment)
        return self._open(filename, mode)

    def distilled_path(self, basefile, version=None):
        """Get the full path for the distilled RDF/XML file for the given
        basefile.

        :param basefile: The basefile for which to calculate the path
        :type basefile: str
        :param version: Optional. The archived version id
        :type version: str
        :returns: The full filesystem path
        :rtype: str
        """
        return self.path(basefile, 'distilled', '.rdf',
                         version)

    def open_distilled(self, basefile, mode="r", version=None):
        # Context manager for the distilled RDF/XML file.
        filename = self.distilled_path(basefile, version)
        return self._open(filename, mode)

    def generated_path(self, basefile, version=None, attachment=None):
        """Get the full path for the generated file for the given
        basefile (and optionally archived version and/or attachment
        filename).

        :param basefile: The basefile for which to calculate the path
        :type basefile: str
        :param version: Optional. The archived version id
        :type version: str
        :param attachment: Optional. Any associated file needed by the main file.
        :type attachment: str
        :returns: The full filesystem path
        :rtype: str
        """
        return self.path(basefile, 'generated', '.html',
                         version, attachment)

    def open_generated(self, basefile, mode="r", version=None, attachment=None):
        # Context manager for the generated HTML file.
        filename = self.generated_path(basefile, version, attachment)
        return self._open(filename, mode)

    def annotation_path(self, basefile, version=None):
        """Get the full path for the annotation file for the given
        basefile (and optionally archived version).

        :param basefile: The basefile for which to calculate the path
        :type basefile: str
        :param version: Optional. The archived version id
        :type version: str
        :returns: The full filesystem path
        :rtype: str
        """
        return self.path(basefile, 'annotations', '.grit.xml',
                         version)

    def open_annotation(self, basefile, mode="r", version=None):
        # Context manager for the annotation (GRIT XML) file.
        filename = self.annotation_path(basefile, version)
        return self._open(filename, mode)

    def dependencies_path(self, basefile):
        # Path to the dependency-tracking text file for basefile.
        return self.path(basefile, 'deps', '.txt')

    def open_dependencies(self, basefile, mode="r"):
        # Context manager for the dependency-tracking file.
        filename = self.dependencies_path(basefile)
        return self._open(filename, mode)

    def atom_path(self, basefile):
        # Path to the atom feed file for basefile.
        return self.path(basefile, 'feed', '.atom')
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
# if on python 2.6
from ordereddict import OrderedDict
from collections import defaultdict
from operator import attrgetter, itemgetter
from datetime import datetime
from pprint import pprint
from tempfile import mkstemp
from io import BytesIO
from itertools import islice
from wsgiref.handlers import format_date_time as format_http_date
import codecs
import logging
import logging.handlers
import os
import re
import sys
import time
import calendar
import filecmp
# 3rd party
import pkg_resources
from lxml import etree
from lxml.builder import ElementMaker
from lxml.etree import XSLT
import lxml.html
from rdflib import Graph
from rdflib import Literal, Namespace, URIRef
import bs4
import requests
import requests.exceptions
import six
from six import text_type as str
if six.PY3:
from urllib.parse import quote, unquote
else:
from urllib import quote, unquote # NOQA
# mine
from ferenda import util, errors, decorators
from ferenda import Describer, LayeredConfig, TripleStore, FulltextIndex, Document, DocumentEntry, NewsCriteria, TocCriteria, TocPageset, TocPage, DocumentStore
from ferenda.elements import AbstractElement, serialize, Body, Nav, Link, Section, Subsection, Subsubsection, Heading, UnorderedList, ListItem, Preformatted, Paragraph
from ferenda.elements.html import elements_from_soup
from ferenda.thirdparty import patch
__version__ = (1, 6)
__author__ = "Staffan Malmgren <staffan@tomtebo.org>"
class DocumentRepository(object):
"""Base class for downloading, parsing and generating HTML versions of a repository of documents.
Start building your application by subclassing this class, and
then override methods in order to customize the downloading,
parsing and generation behaviour.
:param \*\*kwargs: Any named argument overrides any
similarly-named configuration file parameter.
Example:
>>> class MyRepo(DocumentRepository):
... alias="myrepo"
...
>>> d = MyRepo(datadir="/tmp/ferenda")
>>> d.store.downloaded_path("mybasefile").replace(os.sep,'/') == '/tmp/ferenda/myrepo/downloaded/mybasefile.html'
True
"""
# There are seven main entry points into the module, with the
# following principal call chains:
#
# download
# do_download
# download_single
# downloaded_path
# download_if_needed
# remote_url
# download_update_entry
# parse
# parsed_path
# soup_from_basefile
# parse_from_soup
# render_xhtml
#
# relate
#
# generate
# generated_file
# prep_annotation_file
# graph_to_annotation_file
#
# toc
# toc_query
# toc_criteria
# toc_predicates
# toc_item
#
# news
# news_selections
# news_selection
# news_get_entry
#
# frontpage_content
################
# general class properties
# FIXME: Duplicated in documentstore -- how do we unify?
downloaded_suffix = ".html"
"""File suffix for the main document format. Determines the suffix
of downloaded files."""
# FIXME: Duplicated in documentstore -- how do we unify?
storage_policy = "file"
"""Some repositories have documents in several formats, documents
split amongst several files or embedded resources. If
``storage_policy`` is set to ``dir``, then each document gets its own
directory (the default filename being ``index`` +suffix),
otherwise each doc gets stored as a file in a directory with other
files. Affects
:py:meth:`~ferenda.DocumentRepository.path` (and therefore
all other ``*_path`` methods)"""
alias = "base"
"""A short name for the class, used by the command line
``ferenda-build.py`` tool. Also determines where to store
downloaded, parsed and generated files. When you subclass
:py:class:`~ferenda.DocumentRepository` you *must* override
this."""
namespaces = ['rdf','rdfs','xsd','dct','skos','foaf','xhv','owl','prov','bibo']
"""The namespaces that are included in the XHTML and RDF files
generated by :py:meth:`~ferenda.DocumentRepository.parse`. This
can be a list of strings, in which case the strings are assumed to
be well-known prefixes to established namespaces, or a list of
(prefix, namespace) tuples. All well-known prefixes are available
in :py:data:`ferenda.util.ns`."""
################
# download() related class properties
start_url = "http://example.org/"
"""The main entry page for the remote web store of documents. May
be a list of documents, a search form or whatever. If it's
something more complicated than a simple list of documents, you
need to override :py:meth:`~ferenda.DocumentRepository.download`
in order to tell which documents are to be downloaded."""
document_url_template = "http://example.org/docs/%(basefile)s.html"
"""A string template for creating URLs for individual documents on
the remote web server. Directly used by
:py:meth:`~ferenda.DocumentRepository.remote_url` and indirectly
by :py:meth:`~ferenda.DocumentRepository.download_single`."""
document_url_regex = "http://example.org/docs/(?P<basefile>\w+).html"
"""A regex that matches URLs for individual documents -- the
reverse of what
:py:data:`~ferenda.DocumentRepository.document_url_template` is
used for. Used by
:py:meth:`~ferenda.DocumentRepository.download()` to find suitable
links if :py:data:`~ferenda.DocumentRepository.basefile_regex`
doesn't match. Must define the named group ``basefile`` using the
``(?P<basefile>...)`` syntax"""
# matches "ID: foo/123" or "ID: Bar:Baz/Quux" but not "ID: Foo bar"
basefile_regex = "^ID: ?(?P<basefile>[\w\d\:\/]+)$"
"""A regex for matching document names in link text, as used by
:py:meth:`~ferenda.DocumentRepository.download()`. Must define a
named group ``basefile``, just like
:py:data:`~ferenda.DocumentRepository.document_url_template`."""
################
# parse() specific class properties
rdf_type = Namespace(util.ns['foaf'])['Document']
"""The RDF type of the documents you are handling (expressed as a
:py:class:`rdflib.term.URIRef` object)."""
source_encoding = "utf-8"
"""The character set that the source HTML documents use (if
applicable)."""
lang = "en"
"""The language which the source documents are assumed to be
written in (unless otherwise specified), and the language which
output document should use."""
# css selectors, handled by BeautifulSoup's select() method
parse_content_selector = "body"
"""CSS selector used to select the main part of the document
content by the default
:py:meth:`~ferenda.DocumentRepository.parse` implementation."""
parse_filter_selectors = ["script"]
"""CSS selectors used to filter/remove certain parts of the
document content by the default
:py:meth:`~ferenda.DocumentRepository.parse` implementation."""
################
# generate() specific class properties
xslt_template = "res/xsl/generic.xsl"
"""A template used by
:py:meth:`~ferenda.DocumentRepository.generate` to transform the
XML file into browser-ready HTML. If your document type is
complex, you might want to override this (and write your own XSLT
transform). You should include ``base.xslt`` in that template,
though."""
sparql_annotations = "res/sparql/annotations.rq"
"""A template for annotations."""
documentstore_class = DocumentStore
def __init__(self, **kwargs):
    """See class docstring for constructor doc."""
    # Layer configuration: defaults defined in code, overridden by
    # any keyword arguments supplied by the caller
    codedefaults = self.get_default_options()
    defaults = util.merge_dict_recursive(codedefaults, kwargs)
    self.config = LayeredConfig(defaults=defaults)
    # FIXME: Make it possible to provide an alternative (subclass
    # etc) to DocumentStore
    self.store = self.documentstore_class(self.config.datadir + os.sep + self.alias)
    # should documentstore have a connection to self, ie
    # self.store = DocumentStore(basedir, self) ?
    self.store.downloaded_suffix = self.downloaded_suffix
    self.store.storage_policy = self.storage_policy
    logname = self.alias
    # alternatively (nonambigious and helpful for debugging, but verbose)
    # logname = self.__class__.__module__+"."+self.__class__.__name__
    self.log = self._setup_logger(logname)
    # Build a prefix -> rdflib Namespace mapping from self.namespaces;
    # entries may be (prefix, uri) tuples or bare prefix strings
    self.ns = {}
    for ns in self.namespaces:
        if isinstance(ns,tuple):
            prefix,uri = ns
            self.ns[prefix]=Namespace(uri)
        else:
            prefix = ns
            # assume that any standalone prefix is well known
            self.ns[prefix] = Namespace(util.ns[prefix])
def get_default_options(self):
    """Return the built-in default configuration for this docrepo.

    These values can be overridden by a configuration file, or by
    named arguments to
    :py:meth:`~ferenda.DocumentRepository.__init__`. See
    :ref:`configuration` for a list of standard configuration
    properties (your subclass is free to define and use additional
    configuration properties).

    :returns: default configuration properties
    :rtype: dict
    """
    defaults = {}
    # general processing behaviour
    defaults.update({'loglevel': 'INFO',
                     'datadir': 'data',
                     'patchdir': 'patches',
                     'force': False,
                     'parseforce': False,
                     'generateforce': False,
                     'fsmdebug': False,
                     'refresh': False,
                     'lastdownload': None,
                     'conditionalget': True,
                     'url': 'http://localhost:8000/',
                     'fulltextindex': False,
                     'useragent': 'ferenda-bot'})
    # triple store and fulltext index settings
    defaults.update({'storetype': 'SQLITE',
                     'storelocation': 'data/ferenda.sqlite',
                     'storerepository': 'ferenda',
                     'indexlocation': 'data/whooshindex'})
    # static resources referenced by generated HTML
    defaults.update({'combineresources': False,
                     'cssfiles': ['http://fonts.googleapis.com/css?family=Raleway:200,100',
                                  'res/css/normalize.css',
                                  'res/css/main.css',
                                  'res/css/ferenda.css'],
                     'jsfiles': ['res/js/jquery-1.9.0.js',
                                 'res/js/modernizr-2.6.2-respond-1.1.0.min.js',
                                 'res/js/ferenda.js']})
    return defaults
def list_basefiles_for(self, action, basedir=None):
    """List every available basefile usable for the specified action.

    :param action: The action for which to get available basefiles
                   (``parse``, ``relate``, ``generate`` or ``news``)
    :type action: str
    :param basedir: The base directory in which to search for available
                    files. If not provided, defaults to ``self.config.datadir``.
    :type basedir: str
    :returns: All available basefiles
    :rtype: generator
    """
    # The document store does all the actual work
    store = self.store
    return store.list_basefiles_for(action, basedir)
@classmethod
def setup(cls, action, config):
    """Runs before any of the _all methods starts executing"""
    # dispatch to eg parse_all_setup if the subclass defines it
    callback = getattr(cls, action + "_all_setup", None)
    if callback is not None:
        return callback(config)
@classmethod
def teardown(cls, action, config):
    """Runs after any of the _all methods has finished executing"""
    # dispatch to eg parse_all_teardown if the subclass defines it
    callback = getattr(cls, action + "_all_teardown", None)
    if callback is not None:
        return callback(config)
def get_archive_version(self, basefile):
    """Get a version identifier for the current version of the
    document identified by ``basefile``.

    The default implementation simply increments the most recent
    archived version identifier, starting at "1". If versions in
    your docrepo are normally identified in some other way (such as
    SCM revision numbers, dates or similar) you should override this
    method to return those identifiers.

    :param basefile: The basefile of the document to archive
    :type basefile: str
    :returns: The version identifier for the current version of
              the document.
    :rtype: str
    """
    # count the already-archived versions; the new one is count + 1
    existing = sum(1 for _ in self.store.list_versions(basefile))
    return str(existing + 1)
def context(self):
    """The context URI under which RDF statements should be stored.

    :returns: The context URI
    :rtype: str
    """
    return "http://example.org/ctx/{0}".format(self.alias)
def qualified_class_name(self):
    """The qualified class name of this class

    :returns: class name (e.g. ``ferenda.DocumentRepository``)
    :rtype: str
    """
    cls = self.__class__
    return "%s.%s" % (cls.__module__, cls.__name__)
def canonical_uri(self, basefile):
    """The canonical URI for the document identified by ``basefile``.

    :returns: The canonical URI
    :rtype: str
    """
    # NB: there might not be a 1:1 mapping between documents/basefiles
    # and URIs, and in some cases the canonical URI cannot be known
    # without actually parse()ing the document -- open design issues.
    return "{0}res/{1}/{2}".format(self.config.url, self.alias, basefile)
def basefile_from_uri(self, uri):
    """The reverse of canonical_uri. Returns None if the uri doesn't map to a basefile in this repo."""
    prefix = self.config.url + "res/"
    if not uri.startswith(prefix):
        return None
    remainder = uri[len(prefix):]
    if "/" not in remainder:
        return None
    candidate_alias, basefile = remainder.split("/", 1)
    return basefile if candidate_alias == self.alias else None
################################################################
#
# STEP 1: Download documents from the web
#
################################################################
@decorators.action
def download(self, basefile=None):
    """Downloads all documents from a remote web service.

    The default generic implementation assumes that all documents
    are linked from a single page (which has the url of
    :py:data:`~ferenda.DocumentRepository.start_url`), that they
    all have URLs matching the
    :py:data:`~ferenda.DocumentRepository.document_url_regex` or
    that the link text is always equal to basefile (as determined
    by :py:data:`~ferenda.DocumentRepository.basefile_regex`). If
    these assumptions don't hold, you need to override this
    method.

    If you do override it, your download method should read and set the
    ``lastdownload`` parameter to either the datetime of the last
    download or any other module-specific string (id number or
    similar).

    You should also read the ``refresh`` parameter. If it is
    ``True`` (the default), then you should call
    :py:meth:`~ferenda.DocumentRepository.download_single` for
    every basefile you encounter, even though they may already
    exist in some form on
    disk. :py:meth:`~ferenda.DocumentRepository.download_single`
    will normally be using conditional GET to see if there is a
    newer version available.

    See :ref:`implementing-download` for more details.

    :returns: True if any document was downloaded, False otherwise.
    :rtype: bool
    """
    # Single-document mode: only possible when basefile -> url is computable
    if basefile:
        if self.document_url_template:
            return self.download_single(basefile)
        else:
            raise ValueError("Downloading single basefile '%s' not supported (no way to convert basefile to url)" % basefile)
    lastdownload = self.config.lastdownload
    if lastdownload:
        self.log.debug("download: Last download was at %s" % lastdownload)
    else:
        self.log.debug("download: Starting full download")
    # NOTE: This very generic implementation of download has no
    # use for lastdownload, as all the documents it can find are
    # the one linked from the start page. Therefore it's not used
    # for anything else than a diagnostic tool.
    refresh = self.config.refresh
    if refresh:
        self.log.debug("download: Refreshing all downloaded files")
    else:
        self.log.debug("download: Not re-downloading downloaded files")
    self.log.debug("Starting at %s" % self.start_url)
    # url_regex = self.document_url.replace("%s", "(.*)")
    updated = False
    # self.browser.open(self.start_url)
    resp = requests.get(self.start_url)
    # make all links absolute so download_get_basefiles sees full urls
    tree = lxml.html.document_fromstring(resp.text)
    tree.make_links_absolute(self.start_url, resolve_base_href=True)
    # honor an upper bound on the number of documents, either from
    # config or from the environment (useful when testing)
    if 'downloadmax' in self.config or 'FERENDA_DOWNLOADMAX' in os.environ:
        if 'downloadmax' in self.config:
            maxdoc = int(self.config.downloadmax)
        else:
            maxdoc = int(os.environ['FERENDA_DOWNLOADMAX'])
        self.log.info("Only downloading max %s documents" % maxdoc)
        links = islice(self.download_get_basefiles(tree.iterlinks()), maxdoc)
    else:
        links = self.download_get_basefiles(tree.iterlinks())
    for (basefile, link) in links:
        # skip documents already on disk unless refresh is requested
        if (refresh or
                (not os.path.exists(self.store.downloaded_path(basefile)))):
            ret = self.download_single(basefile, link)
            updated = updated or ret
    # NOTE(review): assumes "from datetime import datetime" at module
    # level (ie datetime is the class, not the module) -- confirm
    self.config.lastdownload = datetime.now()
    return updated
def download_get_basefiles(self, source):
    """Yield (basefile, url) pairs for every document link found in
    ``source`` (an iterable of ``(element, attribute, link, pos)``
    tuples, as produced by lxml's ``iterlinks()``).

    Matching is a two step process: first the link text is examined
    against :py:data:`basefile_regex`; if that doesn't match, the
    link url itself is examined against
    :py:data:`document_url_regex`.

    :param source: iterable of (element, attribute, link, pos) tuples
    :returns: generator of (basefile, link) tuples
    """
    for (element, attribute, link, pos) in source:
        # Step 1: examine link text to see if basefile_regex matches
        # (run the regex once, not twice as the previous code did)
        if self.basefile_regex and element.text:
            m = re.search(self.basefile_regex, element.text)
            if m:
                yield (m.group("basefile"), link)
                continue
        # Step 2: examine link url against document_url_regex
        if self.document_url_regex:
            m = re.match(self.document_url_regex, link)
            if m:
                yield (m.group("basefile"), link)
def download_single(self, basefile, url=None):
    """Downloads the document from the web (unless explicitly
    specified, the URL to download is determined by
    :py:meth:`~ferenda.DocumentRepository.document_url` combined
    with basefile, the location on disk is determined by the
    function
    :py:meth:`~ferenda.DocumentRepository.downloaded_path()`).

    If the document exists on disk, but the version on the web is
    unchanged (determined using a conditional GET), the file on disk
    is left unchanged (i.e. the timestamp is not modified).

    :param basefile: The basefile of the document to download
    :param url: The URL to download (optional)
    :type basefile: string
    :type url: string or None
    :returns: True if the document was downloaded and stored on
              disk, False if the file on disk was not updated.
    """
    if url is None:
        url = self.remote_url(basefile)
    updated = False
    created = False
    checked = True  # NOTE(review): always True in this implementation
    filename = self.store.downloaded_path(basefile)
    created = not os.path.exists(filename)
    # util.print_open_fds()
    if self.download_if_needed(url, basefile):
        if created:
            self.log.info("%s: downloaded from %s" % (basefile, url))
        else:
            self.log.info(
                "%s: downloaded new version from %s" % (basefile, url))
        updated = True
    else:
        self.log.debug("%s: exists and is unchanged" % basefile)
    # record provenance information (when the document was first
    # fetched, last changed and last checked) in the document entry
    entry = DocumentEntry(self.store.documententry_path(basefile))
    now = datetime.now()
    entry.orig_url = url
    if created:
        entry.orig_created = now
    if updated:
        entry.orig_updated = now
    if checked:
        entry.orig_checked = now
    entry.save()
    return updated
def _addheaders(self, filename=None):
headers = {"User-agent": self.config.useragent}
if filename:
if os.path.exists(filename + ".etag"):
headers["If-none-match"] = util.readfile(filename + ".etag")
elif os.path.exists(filename):
stamp = os.stat(filename).st_mtime
headers["If-modified-since"] = format_http_date(stamp)
return headers
def download_if_needed(self, url, basefile, archive=True, filename=None):
    """Downloads a remote resource to a local file. If a different
    version is already in place, archive that old version.

    :param url: The url to download
    :type url: str
    :param basefile: The basefile of the document to download
    :type basefile: str
    :param archive: Whether to archive existing older versions of
                    the document, or just delete the previously
                    downloaded file.
    :type archive: bool
    :param filename: The filename to download to. If not provided, the filename is derived from the supplied basefile
    :type filename: str
    :returns: True if the local file was updated (and archived),
              False otherwise.
    :rtype: bool
    """
    if not filename:
        filename = self.store.downloaded_path(basefile)
    if self.config.conditionalget:
        # sets if-none-match or if-modified-since (in that order) headers
        headers = self._addheaders(filename)
    else:
        headers = self._addheaders()
    # We only need the unique temp path; close the OS-level handle
    # that mkstemp opened right away (it is re-opened below)
    fileno, tmpfile = mkstemp()
    fp = os.fdopen(fileno)
    fp.close()
    # Since this part, containing the actual HTTP request call, is
    # called repeatedly, we take extra precautions in the event of
    # temporary network failures etc. Try 5 times with 1 second
    # pause inbetween before giving up.
    fetched = False
    remaining_attempts = 5
    try:
        while (not fetched) and (remaining_attempts > 0):
            try:
                response = requests.get(url, headers=headers)
                fetched = True
            except requests.exceptions.ConnectionError as e:
                self.log.warning("Failed to fetch %s: error %s (%s remaining attempts)" % (url, e, remaining_attempts))
                # close session in hope that this rectifies things
                s = requests.Session()
                s.close()
                remaining_attempts -= 1
                time.sleep(1)
        if not fetched:
            self.log.error("Failed to fetch %s, giving up" % url)
            return False
    # handles other errors except ConnectionError
    except requests.exceptions.RequestException as e:
        self.log.error("Failed to fetch %s: error %s" % (url, e))
        raise e
    if response.status_code == 304:
        self.log.debug("%s: 304 Not modified" % url)
        return False  # ie not updated
    elif response.status_code > 400:
        # NOTE(review): status 400 itself is not raised here ("> 400",
        # not ">= 400") -- confirm whether that is intentional
        response.raise_for_status()
    with open(tmpfile, "wb") as fp:
        fp.write(response.content)
    if not os.path.exists(filename):
        util.robust_rename(tmpfile, filename)
        updated = True
    elif not filecmp.cmp(tmpfile, filename, shallow=False):
        # contents changed: archive the old version before replacing
        version = self.get_archive_version(basefile)
        self.store.archive(basefile, version)
        util.robust_rename(tmpfile, filename)
        updated = True
    else:
        updated = False
    if updated:
        # OK we have a new file in place. Now examine the
        # headers to find if we should change file
        # modification time (last-modified) and/or create a
        # .etag file (etag)
        # BUGFIX: use .get() -- plain indexing raised KeyError
        # whenever the server didn't send these optional headers
        if response.headers.get("last-modified"):
            mtime = calendar.timegm(util.parse_rfc822_date(
                response.headers["last-modified"]).timetuple())
            # FIXME: set a orig_lastmodified on DocumentEntry
            os.utime(filename, (time.time(), mtime))
        # FIXME: set this on DocumentEntry (orig_etag) instead
        # of writing a separate file
        if response.headers.get("etag"):
            with open(filename + ".etag", "w") as fp:
                fp.write(response.headers["etag"])
    return updated
def remote_url(self, basefile):
    """Get the URL of the source document at its remote location,
    unless the source document is fetched by other means or if it
    cannot be computed from basefile only. The default
    implementation interpolates the (url-quoted) basefile into
    :py:data:`~ferenda.DocumentRepository.document_url_template`.

    Example:

    >>> d = DocumentRepository()
    >>> d.remote_url("123/a") == 'http://example.org/docs/123/a.html'
    True
    >>> d.document_url_template = "http://mysite.org/archive/%(basefile)s/"
    >>> d.remote_url("123/a") == 'http://mysite.org/archive/123/a/'
    True

    :param basefile: The basefile of the source document
    :type basefile: str
    :returns: The remote url where the document can be fetched, or ``None``.
    :rtype: str
    """
    substitutions = {'basefile': quote(basefile)}
    return self.document_url_template % substitutions
def generic_url(self, basefile, maindir, suffix):
    """
    Analogous to :py:meth:`ferenda.DocumentStore.path`, calculate
    the full local url for the given basefile and stage of
    processing.

    :param basefile: The basefile for which to calculate the local url
    :type basefile: str
    :param maindir: The processing stage directory (normally
                    ``downloaded``, ``parsed``, or ``generated``)
    :type maindir: str
    :param suffix: The file extension including period (i.e. ``.txt``,
                   not ``txt``)
    :type suffix: str
    :returns: The local url
    :rtype: str
    """
    relpath = "/".join([self.alias, maindir, basefile]) + suffix
    return self.config.url + relpath
def downloaded_url(self, basefile):
    """Get the full local url for the downloaded file for the
    given basefile.

    :param basefile: The basefile for which to calculate the local url
    :type basefile: str
    :returns: The local url
    :rtype: str
    """
    suffix = self.downloaded_suffix
    return self.generic_url(basefile, 'downloaded', suffix)
################################################################
#
# STEP 2: Parse the downloaded data into a structured XML document
# with RDFa metadata.
#
################################################################
@classmethod
def parse_all_setup(cls, config):
    """
    Runs any action needed prior to parsing all documents in a
    docrepo. The default implementation does nothing.

    .. note::

       This is a classmethod for now (and that's why a config
       object is passsed as an argument), but might change to a
       instance method.
    """
    # intentionally a no-op; subclasses override as needed
    pass
@classmethod
def parse_all_teardown(cls, config):
    """
    Runs any cleanup action needed after parsing all documents in
    a docrepo. The default implementation does nothing.

    .. note::

       Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
       this might change to a instance method.
    """
    # intentionally a no-op; subclasses override as needed
    pass
@decorators.action
@decorators.managedparsing
def parse(self, doc):
    """Parse downloaded documents into structured XML and RDF.

    It will also save the same RDF statements in a separate
    RDF/XML file.

    You will need to provide your own parsing logic, but often
    it's easier to just override parse_from_soup (assuming your
    indata is in a HTML format parseable by BeautifulSoup) and let
    the base class read and write the files.

    If your data is not in a HTML format, or BeautifulSoup is not
    an appropriate parser to use, override this method.

    :param doc: The document object to fill in.
    :type doc: ferenda.Document
    """
    # Three steps: load the downloaded HTML, extract document-level
    # metadata into doc.meta, then convert the content into doc.body
    soup = self.soup_from_basefile(doc.basefile)
    self.parse_metadata_from_soup(soup, doc)
    self.parse_document_from_soup(soup, doc)
def patch_if_needed(self, basefile, text):
    """Apply a local patch to ``text`` if one exists for ``basefile``.

    Patch files are looked up in a separate patch store rooted at
    ``self.config.patchdir``.

    :param basefile: The basefile of the document being patched
    :type basefile: str
    :param text: The document text to (possibly) patch (must be
                 utf-8 encodable)
    :type text: str
    :returns: a (text, description) tuple; description is None when
              no patch was applied
    :rtype: tuple
    """
    # 1. do we have a patch?
    patchstore = self.documentstore_class(self.config.patchdir + os.sep + self.alias)
    patchpath = patchstore.path(basefile, "patches", ".patch")
    descpath = patchstore.path(basefile, "patches", ".desc")
    if os.path.exists(patchpath):
        # 4. make sure error msgs from the patch modules are
        # available if we fail.
        from io import StringIO
        pbuf = StringIO()
        plog = logging.getLogger('ferenda.thirdparty.patch')
        plog.setLevel(logging.WARNING)
        plog.addHandler(logging.StreamHandler(pbuf))
        # 2. read and parse it
        with open(patchpath) as fp:
            ps = patch.PatchSet()
            success = ps.parse(fp)
        if not success:
            raise errors.PatchError("Patch %s couldn't be parsed: %s" % (patchpath, pbuf.getvalue()))
        assert len(ps.items) == 1
        # 3. Create a temporary file with the file to be patched
        # open tmpfile
        fileno, tmpfile = mkstemp()
        # NOTE(review): tmpfile is never removed afterwards --
        # possible temp-file accumulation; confirm whether intended
        fp = os.fdopen(fileno, "wb")
        # dump text to tmpfile
        fp.write(text.encode("utf-8"))  # assume that patches are also in utf-8
        fp.close()
        ps.items[0].source = tmpfile
        # 5. now do the patching
        success = ps.apply()
        if not success:
            raise errors.PatchError("Patch %s failed: %s" % (patchpath, pbuf.getvalue()))
        else:
            # 6. Finally get a patch description
            if ps.items[0].hunks[0].desc:
                desc = ps.items[0].hunks[0].desc
            elif os.path.exists(descpath):
                desc = util.readfile(descpath)
            else:
                desc = "(No patch description available)"
            return util.readfile(tmpfile), desc
    else:
        return (text, None)
def parse_metadata_from_soup(self, soup, doc):
    """
    Given a BeautifulSoup document, retrieve all document-level
    metadata from it and put it into the given ``doc`` object's
    ``meta`` property.

    .. note::

       The default implementation sets ``rdf:type``,
       ``dct:title``, ``dct:identifier`` and
       ``prov:wasGeneratedBy`` properties in ``doc.meta``, as well
       as setting the language of the document in ``doc.lang``.

    :param soup: A parsed document
    :type soup: bs4.BeautifulSoup
    :param doc: Our document
    :type doc: ferenda.Document
    :returns: None
    """
    # set rdf:type and dct:identifier of document automatically?
    # set title and other simple things
    # Default language unless we can find out from source doc?
    # Check html/@xml:lang || html/@lang
    root = soup.find('html')
    if root:
        try:
            doc.lang = root['xml:lang']
        except KeyError:
            try:
                doc.lang = root['lang']
            except KeyError:
                # neither attribute present -- fall back to repo default
                doc.lang = self.lang
    else:
        doc.lang = self.lang
    try:
        title = soup.find('title').string
    except AttributeError:
        # no <title> element at all -- find() returned None
        title = None
    # create document-level metadata
    d = Describer(doc.meta, doc.uri)
    d.rdftype(self.rdf_type)
    if title:
        d.value(self.ns['dct'].title, Literal(title, lang=doc.lang))
    d.value(self.ns['dct'].identifier, doc.basefile)
    d.value(self.ns['prov'].wasGeneratedBy, self.qualified_class_name())
def parse_document_from_soup(self, soup, doc):
    """
    Given a BeautifulSoup document, convert it into the provided
    ``doc`` object's ``body`` property as suitable
    :py:mod:`ferenda.elements` objects.

    .. note::

       The default implementation respects
       :py:data:`~ferenda.DocumentRepository.parse_content_selector`
       and
       :py:data:`~ferenda.DocumentRepository.parse_filter_selectors`.

    :param soup: A parsed document
    :type soup: bs4.BeautifulSoup
    :param doc: Our document
    :type doc: ferenda.Document
    :returns: None
    """
    soups = soup.select(self.parse_content_selector)
    if len(soups) == 0:
        raise errors.ParseError("%s: parse_content_selector %r matches nothing" %
                                (doc.basefile, self.parse_content_selector))
    if len(soups) > 1:
        # FIX: log.warn is a deprecated alias for log.warning
        self.log.warning("%s: parse_content_selector %r matches more than one tag" %
                         (doc.basefile, self.parse_content_selector))
    soup = soups[0]
    # strip unwanted parts (scripts etc) before conversion
    for filter_selector in self.parse_filter_selectors:
        for tag in soup.select(filter_selector):
            # tag.decompose()
            tag.extract()  # decompose fails on some trees
    doc.body = elements_from_soup(soup)
def soup_from_basefile(self, basefile, encoding='utf-8', parser='lxml'):
    """
    Load the downloaded document for basefile into a BeautifulSoup object

    :param basefile: The basefile for the downloaded document to parse
    :type basefile: str
    :param encoding: The encoding of the downloaded document
    :type encoding: str
    :returns: The parsed document
    :rtype: bs4.BeautifulSoup

    .. note::

       Helper function. You probably don't need to override it.
    """
    path = self.store.downloaded_path(basefile)
    # undecodable bytes are replaced rather than raising an error
    with codecs.open(path, encoding=encoding, errors='replace') as fp:
        markup = fp.read()
    return bs4.BeautifulSoup(markup, parser)
def make_document(self, basefile=None):
    """
    Create a :py:class:`~ferenda.Document` object with basic
    initialized fields.

    .. note::

       Helper method used by the
       :py:func:`~ferenda.decorators.makedocument` decorator.

    :param basefile: The basefile for the document
    :type basefile: str
    :rtype: ferenda.Document
    """
    doc = Document()
    # (previously doc.basefile was assigned twice; once is enough)
    doc.basefile = basefile
    doc.meta = self.make_graph()
    if basefile:
        doc.uri = self.canonical_uri(basefile)
    return doc
def make_graph(self):
    """
    Initialize a rdflib Graph object with proper namespace prefix
    bindings (as determined by
    :py:data:`~ferenda.DocumentRepository.namespaces`)

    :rtype: rdflib.Graph
    """
    graph = Graph()
    # bind every configured prefix so serializations are readable
    for prefix, uri in self.ns.items():
        graph.bind(prefix, uri)
    return graph
def create_external_resources(self, doc):
    """Optionally create external files that go together with the
    parsed file (stylesheets, images, etc).

    The default implementation does nothing.

    :param doc: The document
    :type doc: ferenda.Document
    """
    # no-op by default; subclasses override when needed
    pass
def list_external_resources(self, basefile):
    """Return a list of external files that parse (through
    create_external_resources or otherwise) has created.

    The default implementation returns an empty list.

    .. note::

       This is probably obsoleted by
       :py:meth:`~ferenda.DocumentRepository.list_attachments`

    :param doc: The document to list external files for
    :type doc: ferenda.Document
    :returns: External files created by :py:meth:`~ferenda.DocumentRepository.parse`
    :rtype: list
    """
    # nothing by default; subclasses report their own resources
    return []
def render_xhtml(self, doc, outfile):
    """Renders the parsed object structure as a XHTML file with
    RDFa attributes (also returns the same XHTML as a string).

    :param doc: The document to render
    :type doc: ferenda.Document
    :param outfile: The file name for the XHTML document
    :type outfile: str
    :returns: The XHTML document
    :rtype: str
    """
    XML_LANG = "{http://www.w3.org/XML/1998/namespace}lang"

    def render_head(g, uri):
        # Render the doc-level metadata graph g as <head> children:
        # <title> for dct:title, <link> for URIRef objects, <meta>
        # for literals
        children = []
        # we sort to get a predictable order (by predicate, then by object)
        for (subj, pred, obj) in sorted(g, key=lambda t:(t[1],t[2])):
            # skip (but log) triples that aren't about this document
            if str(subj) != uri and str(obj) != uri:
                self.log.warning("%s != %s" % (subj, uri))
                continue
            if g.qname(pred) == "dct:title":
                if obj.language:
                    children.append(
                        E.title({'property': 'dct:title', }, str(obj)))
                else:
                    # explicitly empty xml:lang for language-less titles
                    children.append(E.title({'property':
                                             'dct:title', XML_LANG: ""}, str(obj)))
            elif isinstance(obj, URIRef) and str(subj) == uri:
                children.append(E.link({'rel': g.qname(pred),
                                        'href': str(obj)}))
            elif isinstance(obj, URIRef):
                # document is the object of the triple: reverse link
                children.append(E.link({'rev': g.qname(pred),
                                        'href': str(subj)}))
            elif obj.datatype:
                children.append(E.meta({'property': g.qname(pred),
                                        'datatype': g.qname(obj.datatype),
                                        'content': str(obj)}))
            elif obj.language:
                children.append(E.meta({'property': g.qname(pred),
                                        XML_LANG: obj.language,
                                        'content': str(obj)}))
            else:
                # plain literal without language
                children.append(E.meta({'property': g.qname(pred),
                                        'content': str(obj),
                                        XML_LANG: ''}))
        return E.head({'about': uri}, *children)
    # our namespace prefixes, plus XHTML as the default namespace
    nsmap = {None: "http://www.w3.org/1999/xhtml"}
    for prefix, namespace in self.ns.items():
        nsmap[prefix] = str(namespace)
    E = ElementMaker(namespace="http://www.w3.org/1999/xhtml",
                     nsmap=nsmap)
    headcontent = render_head(doc.meta, doc.uri)
    bodycontent = doc.body.as_xhtml(doc.uri)
    if doc.lang:
        htmlattrs = {XML_LANG: doc.lang}
    else:
        htmlattrs = {}
    xhtmldoc = E.html(
        htmlattrs,
        headcontent,
        bodycontent,
    )
    doctype = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" '
               '"http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">')
    res = etree.tostring(xhtmldoc,
                         pretty_print=True,
                         xml_declaration=True,
                         encoding='utf-8',
                         doctype=doctype)
    # write via a tempfile so outfile is only touched when its
    # content actually changed (preserves timestamps)
    fileno, tmpfile = mkstemp()
    fp = os.fdopen(fileno)
    fp.close()
    with open(tmpfile, "wb") as fp:
        fp.write(res)
    util.replace_if_different(tmpfile, outfile)
    return res
def parsed_url(self, basefile):
    """Get the full local url for the parsed file for the
    given basefile.

    :param basefile: The basefile for which to calculate the local url
    :type basefile: str
    :returns: The local url
    :rtype: str
    """
    # parsed documents are always stored as .xhtml
    return self.generic_url(basefile, 'parsed', '.xhtml')
def distilled_url(self, basefile):
    """Get the full local url for the distilled RDF/XML file for the
    given basefile.

    :param basefile: The basefile for which to calculate the local url
    :type basefile: str
    :returns: The local url
    :rtype: str
    """
    # distilled metadata is always stored as .rdf
    return self.generic_url(basefile, 'distilled', '.rdf')
################################################################
#
# STEP 3: Extract and store the RDF data
#
################################################################
@classmethod
def relate_all_setup(cls, config):
    """Runs any cleanup action needed prior to relating all documents in
    a docrepo. The default implementation clears the corresponsing
    context (see :py:meth:`~ferenda.DocumentRepository.context`)
    in the triple store.

    .. note::

       Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
       this might change to a instance method.

    Returns False if no relation needs to be done (as determined
    by the timestamp on the dump nt file)
    """
    # FIXME: duplicate of code in context() (which is not a classmethod)
    context = "http://example.org/ctx/%s" % (cls.alias)
    # FIXME: this blows away the entire triplestore content for a
    # particular context, making it impossible to update just some
    # data. One way would be to check the timestamp on dump.nt,
    # and if it's newer than all distilled files (how to get them,
    # given that this is a classmethod?), don't clear the
    # triplestore (and don't run any relate() method)
    docstore = DocumentStore(config.datadir + os.sep + cls.alias)
    dump = docstore.path("dump", "distilled", ".nt")
    if not config.force:  # check if we need to work at all
        distilled = []
        for basefile in docstore.list_basefiles_for("relate"):
            distilled.append(docstore.distilled_path(basefile))
        # skip relation entirely if the dump is newer than every
        # distilled file
        if util.outfile_is_newer(distilled,dump):
            return False
    store = TripleStore(
        config.storelocation, config.storerepository, context, config.storetype)
    log = cls._setup_logger(cls.alias)
    log.info("Clearing context %s at repository %s" % (
        context, config.storerepository))
    store.clear()
    # we can't clear the whoosh index in the same way as one index
    # contains documents from all repos. But we need to be able to
    # clear it from time to time, maybe with a clear/setup method
    # in manager? Or fulltextindex maybe could have a clear method
    # that removes all documents for a particular repo?
    return True
@classmethod
def relate_all_teardown(cls, config):
    """
    Runs any cleanup action needed after relating all documents in
    a docrepo. The default implementation dumps all triples in this
    docrepo's context to an ntriples file.

    .. note::

       Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
       this might change to a instance method.
    """
    # FIXME: should use context(), but that's a instancemethod
    context = "http://example.org/ctx/%s" % (cls.alias)
    store = TripleStore(
        config.storelocation, config.storerepository, context, config.storetype)
    docstore = DocumentStore(config.datadir + os.sep + cls.alias)
    dump = docstore.path("dump", "distilled", ".nt")
    log = cls._setup_logger(cls.alias)
    log.info("Dumping triples from context %s at repository %s to %s" % (
        context, config.storerepository, dump))
    # the dump acts as a timestamp marker for relate_all_setup
    util.ensure_dir(dump)
    store.get_serialized_file(dump, format="nt")
def relate(self, basefile, otherrepos=None):
    """Run all relation tasks (triple insertion, dependency
    registration and, if configured, fulltext indexing) for the
    given document.

    :param basefile: The basefile of the document to relate
    :type basefile: str
    :param otherrepos: Other instantiated docrepos to examine for
                       dependencies. Defaults to just this repo.
    :type otherrepos: list
    """
    self.relate_triples(basefile)
    # FIXME: How should we pass in (or create) a list of
    # instantiated repositories? When using API, the caller must
    # be able to create and pass the list, eg repos=[] as method
    # signature. When using manager, we'll probably have to do some
    # specialcase in the run() method (like for
    # makeresources/runserver, but different -- in particular, one
    # instance of every registered repo should be created).
    # BUGFIX: the default used to be a mutable [] which was then
    # mutated with append(self) -- a shared-default bug that made
    # the default grow across calls. Use a None sentinel instead.
    if otherrepos is None:
        otherrepos = []
    # When no other repos are given, we still provide self as one repo.
    if not otherrepos:
        otherrepos = [self]
    self.relate_dependencies(basefile, otherrepos)
    if self.config.fulltextindex:
        self.relate_fulltext(basefile)
def _get_triplestore(self):
    # Lazily create and cache the TripleStore connection on first use
    try:
        return self._triplestore
    except AttributeError:
        cfg = self.config
        self._triplestore = TripleStore(cfg.storelocation,
                                        cfg.storerepository,
                                        self.context(),
                                        cfg.storetype)
        return self._triplestore
def relate_triples(self, basefile):
    """Insert the (previously distilled) RDF statements into the
    triple store.

    :param basefile: The basefile for the document containing the
                     RDF statements.
    :type basefile: str
    :returns: None
    """
    with util.logtime(self.log.debug,
                      "%(basefile)s: Added %(rdffile)s to %(triplestore)s in %(elapsed).3f sec",
                      {'basefile': basefile,
                       'rdffile': self.store.distilled_path(basefile),
                       'triplestore': self.config.storelocation}):
        # BUGFIX: use a context manager so the file handle is closed
        # (the previous open(...).read() leaked the file object)
        with open(self.store.distilled_path(basefile)) as fp:
            data = fp.read()
        self._get_triplestore().add_serialized(data, format="xml")
def _get_fulltext_indexer(self, batchoptimize=False):
    # Lazily create and cache the fulltext indexer on first use
    if not hasattr(self, '_fulltextindexer'):
        self._fulltextindexer = FulltextIndex(self.config.indexlocation)
    indexer = self._fulltextindexer
    # when processing all documents, switch to batch writing
    # (checked on every call, as in the original)
    if hasattr(self.config, 'all'):
        indexer._batchwriter = True
    return indexer
def relate_dependencies(self, basefile, repos=[]):
    """Register dependencies between this document and any document
    (in any of ``repos``) that it references via URIRefs in its
    distilled RDF graph.

    :param basefile: The basefile of the document to examine
    :type basefile: str
    :param repos: Repository instances whose documents may be referenced
    :type repos: list
    :returns: the number of dependencies registered
    :rtype: int
    """
    values = {'basefile':basefile,
              'deps':0}
    with util.logtime(self.log.debug,
                      "%(basefile)s: Registered %(deps)s dependencies in %(elapsed).3f sec",
                      values):
        with self.store.open_distilled(basefile) as fp:
            g = Graph().parse(fp, format="xml")
        for (s,p,o) in g:
            # for each URIRef in graph
            if isinstance(o,URIRef):
                for repo in repos:
                    # find out if any docrepo can handle it
                    dep_basefile = repo.basefile_from_uri(str(o))
                    if dep_basefile:
                        # if so, add to that repo's dependencyfile
                        repo.add_dependency(dep_basefile,
                                            self.store.parsed_path(basefile))
                        values['deps'] += 1
    return values['deps']
def add_dependency(self, basefile, dependencyfile):
    """Record that ``dependencyfile`` depends on the document
    identified by ``basefile``, unless that dependency is already
    recorded.

    :returns: True if the dependency was added, False if it was
              already present
    :rtype: bool
    """
    deppath = self.store.dependencies_path(basefile)
    present = False
    if os.path.exists(deppath):
        # scan the existing dependency file for an exact match
        with self.store.open_dependencies(basefile) as fp:
            present = any(line.strip() == dependencyfile for line in fp)
    if present:
        return False
    with self.store.open_dependencies(basefile, "a") as fp:
        fp.write(dependencyfile + "\n")
    return True
def relate_fulltext(self,basefile):
    """Index the text of the document into fulltext index.

    :param basefile: The basefile for the document to be indexed.
    :type basefile: str
    :returns: None
    """
    values = {'basefile':basefile,
              'resources':0,
              'words':0}
    with util.logtime(self.log.debug,
                      "%(basefile)s: Added %(resources)s resources (%(words)s words) to fulltext index in %(elapsed).3f s", values):
        indexer = self._get_fulltext_indexer()
        tree = etree.parse(self.store.parsed_path(basefile))
        g = Graph()
        desc = Describer(g.parse(self.store.distilled_path(basefile)))
        dct = self.ns['dct']
        # index every element carrying an @about attribute (ie every
        # addressable resource), except the XHTML <head>
        for resource in tree.findall(".//*[@about]"):
            if resource.tag == "{http://www.w3.org/1999/xhtml}head":
                continue
            about = resource.get('about')
            desc.about(about)
            plaintext = self._extract_plaintext(resource)
            # title/identifier are optional in the distilled graph
            l = desc.getvalues(dct.title)
            title = str(l[0]) if l else None
            l = desc.getvalues(dct.identifier)
            identifier = str(l[0]) if l else None
            indexer.update(uri=about,
                           repo=self.alias,
                           basefile=basefile,
                           title=title,
                           identifier=identifier,
                           text=plaintext)
            values['resources'] += 1
            values['words'] += len(plaintext.split())
        indexer.commit()  # NB: Destroys indexer._writer
def _extract_plaintext(self,node):
# helper to extract any text from a elementtree node,
# excluding subnodes that are resources themselves (ie they
# have an @about node)
plaintext = node.text if node.text else ""
for subnode in node:
if not subnode.get('about'):
plaintext += self._extract_plaintext(subnode)
if node.tail:
plaintext += node.tail
# append trailing space for block-level elements (including
# <br>, <img> and some others that formally are inline
# elements)
trailspace = "" if node.tag in ("a" "b","i","span") else " "
return plaintext.strip()+trailspace
################################################################
#
# STEP 4: Generate browser-ready HTML with navigation panels,
# information about related documents and so on.
#
################################################################
@classmethod
def generate_all_setup(cls, config):
"""
Runs any action needed prior to generating all documents in a
docrepo. The default implementation does nothing.
.. note::
Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
this might change to a instance method.
"""
pass
@classmethod
def generate_all_teardown(cls, config):
"""
Runs any cleanup action needed after generating all documents
in a docrepo. The default implementation does nothing.
.. note::
Like :py:meth:`~ferenda.DocumentRepository.parse_all_setup`
this might change to a instance method.
"""
pass
    @decorators.action
    def generate(self, basefile):
        """Generate a browser-ready HTML file from structured XML and RDF.

        Uses the XML and RDF files constructed by
        :py:meth:`ferenda.DocumentRepository.parse`.

        The generation is done by XSLT, and normally you won't need to
        override this, but you might want to provide your own xslt
        file and set
        :py:data:`ferenda.DocumentRepository.xslt_template` to the
        name of that file.

        If you want to generate your browser-ready HTML by any other
        means than XSLT, you should override this method.

        :param basefile: The basefile for which to generate HTML
        :type basefile: str
        :returns: None
        """
        with util.logtime(self.log.info, "%(basefile)s OK (%(elapsed).3f sec)",
                          {'basefile': basefile}):
            infile = self.store.parsed_path(basefile)
            annotations = self.store.annotation_path(basefile)
            # the dependency file lists everything that, when changed,
            # should trigger a re-generate of this document
            if os.path.exists(self.store.dependencies_path(basefile)):
                dependencies = util.readfile(self.store.dependencies_path(basefile)).split("\n")
            else:
                dependencies = []
            dependencies.extend((infile,annotations))
            outfile = self.store.generated_path(basefile)
            force = (self.config.force or
                     self.config.generateforce)
            if not force and util.outfile_is_newer(dependencies, outfile):
                self.log.debug("%s: Skipped", basefile)
                return
            self.log.debug("%s: Starting", basefile)
            # extract the XSLT templates to a real directory (needed
            # when running from a zipped egg)
            xsltdir = self.setup_transform_templates(os.path.dirname(self.xslt_template))
            xsltfile = xsltdir + os.sep + os.path.basename(self.xslt_template)
            with util.logtime(self.log.debug,
                              "%(basefile)s get_transform_configuration in %(elapsed).3f sec",
                              {'basefile': basefile}):
                params = self.get_transform_configuration(xsltdir,outfile)
                assert 'configurationfile' in params
            # The actual function code
            with util.logtime(self.log.debug,
                              "%(basefile)s prep_annotation_file in %(elapsed).3f sec",
                              {'basefile': basefile}):
                annotation_file = self.prep_annotation_file(basefile)
            if annotation_file:
                relpath = os.path.relpath(annotation_file,
                                          os.path.dirname(xsltfile))
                # NOTE: Even on Win32, lxml needs to have this path using
                # unix separators, i.e. / instead of the native \
                relpath = relpath.replace("\\","/")
                params['annotationfile'] = XSLT.strparam(relpath)
            with util.logtime(self.log.debug,
                              "%(basefile)s transform_html %(elapsed).3f",
                              {'basefile': basefile}):
                self.transform_html(xsltfile, infile, outfile, params)
            # At this point, outfile may appear untouched if it already
            # existed and wasn't actually changed. But this will cause the
            # above outfile_is_newer check to fail next time around. Also,
            # the docentry.updated parameter will be inconsistent with the
            # timestamp on the file. What to do?
            os.utime(outfile, None) # update access/modified timestamp
            # record publication/update timestamps in the document entry
            # NOTE(review): assumes `datetime` here is the class (i.e.
            # "from datetime import datetime" at file top) -- verify
            now = datetime.now()
            docentry = DocumentEntry(self.store.documententry_path(basefile))
            if not docentry.published:
                docentry.published = now
            docentry.updated = now
            docentry.save()
def transform_html(self, stylesheet, infile, outfile,
parameters={},
format=True,
xinclude=False):
"""Creates browser-ready HTML5 from a basic XHTML+RDFa file
using a XSLT transform.
:param stylesheet: the filename of the XSLT stylesheet to use
:type stylesheet: string
:param infile: The filename of the basic XHTML+RDFa file to be
transformed
:type infile: string
:param outfile: The filename of the created HTML5 file
:type outfile: string
:param parameters: Any parameters passed to the XSLT stylesheet (see
:py:meth:`~ferenda.DocumentRepository.get_transform_configuration`)
:type parameters: dict
:param format: Whether to format/indent the resulting outfile
:type format: bool
:param xinclude: Whether to process xinlude directives in the infile
:type xinclude: bool
:returns: True if the transform resulted in a new or updated
outfile, False if the result was identical
to the previously existing outfile.
:rtype: bool
"""
assert not xinclude, "xinclude not supported yet"
# Open the XSLT stylesheet, either as a normal file
# (user-provided) or a package resource (ferenda built-in)
# FIXME: load-path mechanism (cf manager.makeresources())?
if os.path.exists(stylesheet):
fp = open(stylesheet)
elif pkg_resources.resource_exists('ferenda',stylesheet): # prefix stylesheet with 'res/xsl'?
fp = pkg_resources.resource_stream('ferenda',stylesheet)
else:
raise ValueError("Stylesheet %s not found" % stylesheet)
parser = etree.XMLParser(remove_blank_text=format)
xsltree = etree.parse(fp,parser)
fp.close()
transform = etree.XSLT(xsltree)
with open(infile) as fp:
intree = etree.parse(fp,parser)
try:
outtree = transform(intree,**parameters)
except etree.XSLTApplyError as e:
raise errors.TransformError(str(e.error_log))
if len(transform.error_log) > 0:
raise errors.TransformError(str(transform.error_log))
res = etree.tostring(outtree,pretty_print=format).strip()
if format:
bytefp = BytesIO(res)
parser = etree.XMLParser(remove_blank_text=True)
res = etree.tostring(etree.parse(bytefp,parser),pretty_print=True)
fileno, tmpfile = mkstemp()
fp = os.fdopen(fileno)
fp.close()
if res.startswith(b"<remove-this-tag>"):
res = b"<!DOCTYPE html>\n"+res[17:-19].strip()
with open(tmpfile,"wb") as fp:
fp.write(res)
util.ensure_dir(outfile)
return util.replace_if_different(tmpfile,outfile)
# xsltpath = os.path.join(os.curdir,'../ferenda',self.xslt_template)
    def get_transform_configuration(self, xsltdir, outfile=None):
        """
        Set up a dict of parameters pointing to the configuration XML
        file needed for XSLT transform.

        .. note::

           Maybe this should be an internal method.

        :param xsltdir: path to the directory where the root xslt file is stored
        :type xsltdir: str
        :param outfile: path to the planned file resulting from the XSLT transform
        :type outfile: str
        :returns: The path to the resources.xml file, wrapped through
                  lxml.etree.XSLT.strparam and put in a a dict
        :rtype: dict
        """
        assert os.path.isdir(xsltdir), "%s does not exist (or is not a directory)" % xsltdir
        params = {}
        conffile = os.sep.join([self.config.datadir,'rsrc','resources.xml'])
        if os.path.exists(conffile):
            if outfile:
                # We should maybe also detect if stylesheet[@href] and
                # script[@src] point correctly, and if not, create a
                # new version of configurationfile where os.relpath
                # has been applied to them.
                tree = etree.parse(conffile)
                assert outfile.startswith(self.config.datadir), "outfile %s not under datadir %s" % (outfile, self.config.datadir)
                # "datadir/foo/bar/baz.html" -> "foo/bar"
                # "/var/folders/sy/r4f/T/tmpcRojl/foo/bar/baz.html" -> "foo/bar"
                relative_outfile = outfile[len(self.config.datadir)+1:]
                if os.sep in relative_outfile:
                    outdir = relative_outfile.rsplit(os.sep,1)[0]
                else:
                    outdir = ""
                # rewrite stylesheet/script references so they are
                # relative to the outfile's directory (unless they
                # are absolute http(s) URLs)
                for node in tree.findall("stylesheets/link"):
                    if not (re.match("https?://", node.get('href'))):
                        node.set('href', os.path.relpath(node.get('href'),outdir).replace(os.sep,"/"))
                for node in tree.findall("javascripts/script"):
                    if not (re.match("https?://", node.get('src'))):
                        node.set('src', os.path.relpath(node.get('src'),outdir).replace(os.sep,"/"))
                # the number of ".." segments tells how deep below
                # the datadir root the outfile sits; cache a
                # depth-specific copy of the configuration file
                depth = tree.find("stylesheets/link").get('href').count('..')
                if depth > 0:
                    # create a new file
                    (base, ext) = os.path.splitext(conffile)
                    modfile = base + ("-depth-%s" % depth) + ext
                    if not util.outfile_is_newer([conffile], modfile):
                        tree.write(modfile)
                    conffile = modfile
            # lxml wants the path with unix separators, even on win32
            relpath = os.path.relpath(conffile,xsltdir).replace(os.sep,"/")
            params['configurationfile'] = XSLT.strparam(relpath)
            # params['configurationfile_plain'] = relpath
            # params['configurationfile'] = relpath
        return params
_transform_resourcedir=None
def setup_transform_templates(self, xsltdir):
"""Unpack/extract all XSLT files and other resources needed to
for the XSLT transform, if needed (which is the case if
ferenda is distributed as an egg, i.e. all files are contained
in a zip file).
:param xsltdir: path to the directory where the root xslt file is stored
:type xsltdir: str
:returns: The path to extracted files
:rtype: str
"""
# Unpack/extract all the files (NB: if not using zip/eggs just
# return existing filesystem path)
if not self._transform_resourcedir:
for f in pkg_resources.resource_listdir('ferenda',xsltdir):
p = pkg_resources.resource_filename('ferenda', xsltdir+"/"+f)
# print("extracted %s/%s to %s" % (xsltdir,f,p))
self._transform_resourcedir = os.path.dirname(p)
return self._transform_resourcedir
def prep_annotation_file(self, basefile):
"""Helper function used by :py:meth:`generate` -- prepares a RDF/XML file
containing statements that in some way annotates the
information found in the document that generate handles, like
URI/title of other documents that refers to this one.
.. note::
This does not yet have a generic implementation.
:param basefile: The basefile for which to collect annotating
statements.
:type basefile: str
:returns: The full path to the prepared RDF/XML file
:rtype: str
"""
graph = self.construct_annotations(self.canonical_uri(basefile))
if graph:
with self.store.open_annotation(basefile,"w") as fp:
fp.write(self.graph_to_annotation_file(graph))
return self.store.annotation_path(basefile)
def construct_annotations(self, uri):
query_template = self.sparql_annotations
# if self.config.storetype in ("SLEEPYCAT", "SQLITE"):
# query_template = "%s.sparql10%s" % os.path.splitext(query_template)
if os.path.exists(query_template):
fp = open(query_template)
elif pkg_resources.resource_exists('ferenda',query_template):
fp = pkg_resources.resource_stream('ferenda',query_template)
else:
raise ValueError("query template %s not found" % query_template)
params = {'uri': uri}
sq = fp.read().decode('utf-8') % params
fp.close()
if self.config.storelocation:
store = TripleStore(self.config.storelocation,
self.config.storerepository,
None, # self.context(),
self.config.storetype)
res = store.construct(sq)
if self.config.storetype in ("SLEEPYCAT", "SQLITE"):
store.graph.close()
return res
# helper for the prep_annotation_file helper -- it expects a
# RDFLib graph, and returns a XML string in Grit format
def graph_to_annotation_file(self, graph):
"""Converts a RDFLib graph into a XML file with the same
statements, ordered using the Grit format
(https://code.google.com/p/oort/wiki/Grit) for easier XSLT
inclusion.
:param graph: The graph to convert
:type graph: rdflib.Graph
:returns: A serialized XML document with the RDF statements
:rtype: str
"""
fp = BytesIO(graph.serialize(format="xml"))
intree = etree.parse(fp)
stylesheet = "res/xsl/rdfxml-grit.xsl"
if os.path.exists(stylesheet):
fp = open(stylesheet)
elif pkg_resources.resource_exists('ferenda',stylesheet): # prefix stylesheet with 'res/xsl'?
fp = pkg_resources.resource_stream('ferenda',stylesheet)
else:
raise ValueError("Stylesheet %s not found" % stylesheet)
transform = etree.XSLT(etree.parse(fp))
resulttree = transform(intree)
res = etree.tostring(resulttree,pretty_print=format)
return res.decode('utf-8')
def generated_url(self, basefile):
"""Get the full local url for the generated file for the
given basefile.
:param basefile: The basefile for which to calculate the local url
:type basefile: str
:returns: The local url
:rtype: str
"""
return self.generic_url(basefile, 'generated', '.html')
################################################################
#
# STEP 5: Generate HTML pages for a TOC of a all documents, news
# pages of new/updated documents, and other odds'n ends.
#
################################################################
# toc
# toc_select
# toc_query
# (toc_predicates ?)
# toc_criteria
# toc_predicates
# toc_selector
# toc_pagesets
# (selectors)
# toc_select_for_pages <-- where most of the the magic happens
# (selectors)
# toc_item
# toc_generate_pages
# toc_generate_page
#
def toc(self):
"""Creates a set of pages that together acts as a table of
contents for all documents in the repository. For smaller
repositories a single page might be enough, but for
repositoriees with a few hundred documents or more, there will
usually be one page for all documents starting with A,
starting with B, and so on. There might be different ways of
browseing/drilling down, i.e. both by title, publication year,
keyword and so on.
The default implementation calls
:py:meth:`~ferenda.DocumentRepository.toc_select` to get all
data from the triple store,
:py:meth:`~ferenda.DocumentRepository.toc_criteria` to find
out the criteria for ordering,
:py:meth:`~ferenda.DocumentRepository.toc_pagesets` to
calculate the total set of TOC html files,
:py:meth:`~ferenda.DocumentRepository.toc_select_for_pages` to
create a list of documents for each TOC html file, and finally
:py:meth:`~ferenda.DocumentRepository.toc_generate_pages` to
create the HTML files. The default implemention assumes that
documents have a title (in the form of a ``dct:title``
property) and a publication date (in the form of a
``dct:issued`` property).
You can override any of these methods to customize any part of
the toc generation process. Often overriding :py:meth:`~ferenda.DocumentRepository.toc_criteria` to
specify other document properties will be sufficient."""
data = self.toc_select(self.context())
criteria = self.toc_criteria(self.toc_predicates())
pagesets = self.toc_pagesets(data,criteria)
pagecontent = self.toc_select_for_pages(data, pagesets, criteria)
self.toc_generate_pages(pagecontent,pagesets)
self.toc_generate_first_page(pagecontent,pagesets)
def toc_select(self,context=None):
"""Select all data from the triple store needed to make up all
TOC pages.
:param context: The context (named graph) to restrict the query to.
If None, search entire triplestore.
:type context: str
:returns: The results of the query, as python objects
:rtype: set of dicts"""
store = TripleStore(self.config.storelocation,
self.config.storerepository,
None, # self.context(),
self.config.storetype)
if self.config.storetype in ('SQLITE','SLEEPYCAT'):
store.context = context
sq = self.toc_query(None)
else:
sq = self.toc_query(context)
self.log.debug("toc: querying:\n%s" % sq)
res = store.select(sq, "python")
store.close()
return res
def toc_query(self,context=None):
"""Constructs a SPARQL SELECT query that fetches all
information needed to construct the complete set of TOC pages
in the form of a single list of result rows.
Override this method if you need to customize the query.
:param context: The context (named graph) to which to limit
the query. If None, query the entire
triplestore.
:type context: str
:returns: The SPARQL query
:rtype: str
Example:
>>> d = DocumentRepository()
>>> expected = 'PREFIX dct:<http://purl.org/dc/terms/> SELECT DISTINCT ?uri ?title ?issued FROM <http://example.org/ctx/base> WHERE {?uri dct:title ?title . ?uri dct:issued ?issued . }'
>>> d.toc_query("http://example.org/ctx/base") == expected
True
"""
# FIXME: create query from self.toc_criteria
from_graph = ""
if context:
from_graph = "FROM <%s>" % context
# FIXME: load from res/sparql/toc.sq instead
return """PREFIX dct:<http://purl.org/dc/terms/> SELECT DISTINCT ?uri ?title ?issued %s WHERE {?uri dct:title ?title . ?uri dct:issued ?issued . }""" % from_graph
def toc_criteria(self, predicates=None):
"""Create the criteria used to organize the documents in the
repository into different pagesets.
:param predicates: The :py:class:`~rdflib.term.URIRef` terms to use as base for criteria
:type predicates: list
:returns: :py:class:`~ferenda.sources.documentsource.TocCriteria`
objects, each representing a particular way of organizing the
documents, and each corresponding to a TocPageset object (constructed
by :py:meth:`~ferenda.sources.DocumentRepository.toc_pagesets`)
:rtype: list
"""
criteria = []
for predicate in predicates:
# make an appropriate selector etc. a proper implementation
# would look at the ontology of the predicate, take a look
# at the range for that DataProperty and select an
# appropriate selector.
if predicate == self.ns['dct']['issued']: # date property
selector = lambda x: x['issued'][:4]
key = selector
label = 'Sorted by publication year'
pagetitle = 'Documents published in %s'
else:
# selector and key for proper title sort
# (eg. disregarding leading "the", not counting
# spaces) -- really stretching the limit on what can
# be comfortably done with lambdas...
selector = lambda x: x['title'][4].lower() if x['title'].lower().startswith("the ") else x['title'][0].lower()
key = lambda x: "".join((x['title'][4:] if x['title'].lower().startswith("the ") else x['title']).lower().split())
label = label='Sorted by ' + util.uri_leaf(predicate)
pagetitle = 'Documents starting with "%s"'
criteria.append(TocCriteria(binding=util.uri_leaf(predicate).lower(),
label=label,
pagetitle=pagetitle,
selector=selector,
key=key))
return criteria
def toc_predicates(self):
"""Return a list of predicates (as
:py:class:`~rdflib.term.URIRef` objects that each should be
used to organize a table of contents of documents in this
docrepo).
Is used by toc_criteria, must match results from sparql query
in toc_query."""
return [self.ns['dct']['title'], self.ns['dct']['issued']]
def toc_pagesets(self, data, criteria):
"""Calculate the set of needed TOC pages based on the result rows
:param data: list of dicts, each dict containing metadata about a single document
:param criteria: list of TocCriteria objects
:returns: The link text, page title and base file for each needed
TOC page, structured by selection criteria.
:rtype: 3-dimensional named tuple
Example:
>>> d = DocumentRepository()
>>> rows = [{'uri':'http://ex.org/1','title':'Abc','issued':'2009-04-02'},
... {'uri':'http://ex.org/2','title':'Abcd','issued':'2010-06-30'},
... {'uri':'http://ex.org/3','title':'Dfg','issued':'2010-08-01'}]
>>> from operator import itemgetter
>>> criteria = (TocCriteria(binding='title',
... label='By title',
... pagetitle='Documents starting with "%s"',
... selector=lambda x: x['title'][0].lower(),
... key=itemgetter('title')),
... TocCriteria(binding='issued',
... label='By publication year',
... pagetitle='Documents published in %s',
... selector=lambda x: x['issued'][:4],
... key=itemgetter('issued')))
>>> # Note: you can get a suitable tuple of TocCriteria
>>> # objects by calling toc_criteria() as well
>>> pagesets=d.toc_pagesets(rows,criteria)
>>> pagesets[0].label == 'By title'
True
>>> pagesets[0].pages[0] == TocPage(linktext='a', title='Documents starting with "a"', basefile='title/a')
True
>>> pagesets[0].pages[0].linktext == 'a'
True
>>> pagesets[0].pages[0].title == 'Documents starting with "a"'
True
>>> pagesets[0].pages[0].basefile == 'title/a'
True
>>> pagesets[1].label == 'By publication year'
True
>>> pagesets[1].pages[0] == TocPage(linktext='2009', title='Documents published in 2009', basefile='issued/2009')
True
"""
res = []
for criterion in criteria:
pageset = TocPageset(label=criterion.label,pages=[])
selector_values = {}
selector = criterion.selector
binding = criterion.binding
for row in data:
selector_values[selector(row)] = True
for value in sorted(list(selector_values.keys())):
pageset.pages.append(TocPage(linktext=value,
title=criterion.pagetitle % value,
basefile=binding + "/" + value))
res.append(pageset)
return res
def toc_select_for_pages(self, data, pagesets, criteria):
"""Go through all data rows (each row representing a document)
and, for each toc page, select those documents that are to
appear in a particular page.
Example:
>>> d = DocumentRepository()
>>> rows = [{'uri':'http://ex.org/1','title':'Abc','issued':'2009-04-02'},
... {'uri':'http://ex.org/2','title':'Abcd','issued':'2010-06-30'},
... {'uri':'http://ex.org/3','title':'Dfg','issued':'2010-08-01'}]
>>> from rdflib import Namespace
>>> dct = Namespace("http://purl.org/dc/terms/")
>>> criteria = d.toc_criteria([dct.title,dct.issued])
>>> pagesets=d.toc_pagesets(rows,criteria)
>>> expected={'title/a':[[Link('Abc',uri='http://ex.org/1')],
... [Link('Abcd',uri='http://ex.org/2')]],
... 'title/d':[[Link('Dfg',uri='http://ex.org/3')]],
... 'issued/2009':[[Link('Abc',uri='http://ex.org/1')]],
... 'issued/2010':[[Link('Abcd',uri='http://ex.org/2')],
... [Link('Dfg',uri='http://ex.org/3')]]}
>>> d.toc_select_for_pages(rows, pagesets, criteria) == expected
True
:param data: x
:param pagesets: y
:param criteria: z
:returns: mapping between toc basefile and documentlist for that basefile
:rtype: dict
"""
# to 1-dimensional dict (odict?): {basefile: [list-of-Elements]}
res = {}
for pageset, criterion in zip(pagesets,criteria):
documents = defaultdict(list)
for row in data:
key = criterion.selector(row)
# documents[key].append(self.toc_item(criterion.binding,row))
documents[key].append(row)
for key in documents.keys():
# find appropriate page in pageset and read it's basefile
for page in pageset.pages:
if page.linktext == key:
s = sorted(documents[key],
key=criterion.key)
res[page.basefile] = [self.toc_item(criterion.binding, row) for row in s]
return res
def toc_item(self, binding, row):
"""Returns a formatted version of row, using Element objects"""
# default impl always just a simple link with title as link text
return [Link(row['title'], # yes, ignore binding
uri=row['uri'])]
# pagecontent -> documentlists?
def toc_generate_pages(self, pagecontent, pagesets):
paths = []
for basefile,documents in pagecontent.items():
paths.append(self.toc_generate_page(basefile, documents, pagesets))
return paths
def toc_generate_first_page(self, pagecontent, pagesets):
(basefile, documents) = sorted(pagecontent.items(), key=itemgetter(0))[0]
return self.toc_generate_page(basefile, documents, pagesets, "index")
    def toc_generate_page(self, basefile, documentlist, pagesets, effective_basefile=None):
        """Generate a single TOC page (a XHTML intermediate plus the
        browser-ready HTML) for *basefile*, including navigation links
        to every other TOC page.

        :param basefile: the TOC page to generate (eg. "title/a")
        :param documentlist: the formatted rows to list on this page
        :param pagesets: all TocPageset objects, used for navigation
        :param effective_basefile: if given, write the page under this
                                   name instead (used for the TOC
                                   index page)
        :returns: the path to the generated HTML file
        """
        if effective_basefile == None:
            effective_basefile = basefile
        outfile = self.store.path(effective_basefile, 'toc', '.html')
        tmpfile = self.store.path(effective_basefile, 'toc', '.xhtml')
        doc = self.make_document(basefile)
        doc.uri = self.context()+"/"+basefile
        d = Describer(doc.meta,doc.uri)
        # Build nested navigation lists: one sublist per pageset, one
        # item per page. The current page is rendered as plain text,
        # all other pages as links.
        nav = UnorderedList(role='navigation')
        for pageset in pagesets:
            sublist = UnorderedList()
            for page in pageset.pages:
                if page.basefile == basefile:
                    # NOTE(review): 'title' is only bound here; if no
                    # page in any pageset matches basefile, the uses
                    # of 'title' below raise NameError -- confirm
                    # callers always pass a known basefile
                    title = page.title
                    sublist.append(ListItem([page.linktext]))
                else:
                    # FIXME: less hardcoded strategy plz
                    path = self.store.path(page.basefile, 'toc','.html')
                    href = os.path.relpath(path,os.path.dirname(outfile)).replace(os.sep,"/")
                    sublist.append(ListItem([Link(str(page.linktext),href=href)]))
            nav.append(ListItem([Paragraph(pageset.label),sublist]))
        d.value(self.ns['dct'].title, title)
        # Consider other strategies; definition lists with
        # subheadings, orderedlists, tables...
        ul = UnorderedList([ListItem(x) for x in documentlist],role='main')
        doc.body = Body([nav,
                         Heading([title]),
                         ul
                         ])
        self.log.debug("Rendering XHTML to %s" % tmpfile)
        self.render_xhtml(doc, tmpfile)
        # only re-run the (comparatively expensive) XSLT step when the
        # intermediate file actually changed
        if not util.outfile_is_newer([tmpfile],outfile):
            # Prepare a browser-ready HTML page using generic.xsl
            self.log.debug("Transforming HTML to %s" % outfile)
            # configure params
            xsltfile = "res/xsl/toc.xsl"
            xsltdir = self.setup_transform_templates(os.path.dirname(xsltfile))
            params = self.get_transform_configuration(xsltdir,outfile)
            self.transform_html("res/xsl/toc.xsl",
                                tmpfile, outfile, params)
            self.log.info("Created %s" % outfile)
            return outfile
        # if we didn't actually create an outfile:
        return outfile
def news(self):
criteria = self.news_criteria()
data = self.news_entries() # Generator of DocumentEntry objects
for entry in data:
for criterion in criteria:
if criterion.selector(entry):
criterion.entries.append(entry)
for criterion in criteria:
# should reverse=True be configurable? For datetime
# properties it makes sense to use most recent first, but
# maybe other cases?
entries = sorted(criterion.entries, key=criterion.key, reverse=True)
self.log.info("feed %s: %s entries" % (criterion.basefile, len(entries)))
self.news_write_atom(entries,
criterion.feedtitle,
criterion.basefile)
outfile = self.store.path(criterion.basefile, 'feed', '.html')
xsltdir = self.setup_transform_templates(os.path.dirname("res/xsl/atom.xsl"))
params = self.get_transform_configuration(xsltdir,outfile)
self.transform_html("res/xsl/atom.xsl",
self.store.atom_path(criterion.basefile),
outfile,
params)
def news_criteria(self):
"""Returns a list of NewsCriteria objects."""
return [NewsCriteria('main','New and updated documents')]
    def news_entries(self):
        """Yield a DocumentEntry object for every published document in
        this docrepo, with id/url/title/summary and content/metadata
        links filled in from the distilled RDF metadata."""
        republish_original = False
        # If we just republish eg. the original PDF file and don't
        # attempt to parse/enrich the document
        # NOTE(review): 'directory' is computed but never used below
        directory = os.path.sep.join((self.config.datadir, self.alias, "entries"))
        for basefile in self.list_basefiles_for("news"):
            path = self.store.documententry_path(basefile)
            entry = DocumentEntry(path)
            if not entry.published:
                # not published -> shouldn't be in feed
                continue
            # fill in missing id/url from the repo's URI scheme
            if not entry.id:
                entry.id = self.canonical_uri(basefile)
            if not entry.url:
                entry.url = self.generated_url(basefile)
            if not os.path.exists(self.store.distilled_path(basefile)):
                self.log.warn("%s: No distilled file at %s, skipping" % (basefile,
                                                                        self.store.distilled_path(basefile)))
                continue
            # title and summary come from the distilled RDF metadata
            g = Graph()
            g.parse(self.store.distilled_path(basefile))
            desc = Describer(g,entry.id)
            dct = self.ns['dct']
            if not entry.title:
                try:
                    entry.title = desc.getvalue(dct.title)
                except KeyError: # no dct:title -- not so good
                    self.log.warn("%s: No title available" % basefile)
                    entry.title = entry.id
            try:
                entry.summary = desc.getvalue(dct.abstract)
            except KeyError: # no dct:abstract -- that's OK
                pass
            # 4: Set links to RDF metadata and document content
            entry.set_link(self.store.distilled_path(basefile),
                           self.distilled_url(basefile))
            if (republish_original):
                entry.set_content(self.store.downloaded_path(basefile),
                                  self.downloaded_url(basefile))
            else:
                # the parsed (machine reprocessable) version. The
                # browser-ready version is referenced with the <link>
                # element, separate from the set_link <link>
                entry.set_content(self.store.parsed_path(basefile),
                                  self.parsed_url(basefile))
            yield entry
    def news_write_atom(self, entries, title, basefile, archivesize=1000):
        """Given a list of Atom entry-like objects, including links to RDF
        and PDF files (if applicable), create a rinfo-compatible Atom feed,
        optionally splitting into archives.

        :param entries: the feed entries, assumed sorted newest first
        :type entries: list
        :param title: the title of the feed
        :type title: str
        :param basefile: the basefile of the feed (determines the file names)
        :type basefile: str
        :param archivesize: the maximum number of entries per archive file
        :type archivesize: int
        :returns: paths of the written feed files, main feed first
        :rtype: list
        """
        # This nested func does most of heavy lifting, the main
        # function code only sets up basic constants and splits the
        # entries list into appropriate chunks
        def write_file(entries,suffix="",prevarchive=None, nextarchive=None):
            # print("Called w suffix=%s, prevarchive=%s, nextarchive=%s" % (suffix, prevarchive, nextarchive))
            feedfile = self.store.path(basefile+suffix, 'feed', '.atom')
            nsmap = {None:'http://www.w3.org/2005/Atom',
                     'le':'http://purl.org/atompub/link-extensions/1.0'}
            E = ElementMaker(nsmap=nsmap)
            # the feed-level updated timestamp is that of the most
            # recently updated entry
            updated = max(entries,key=attrgetter('updated')).updated
            contents = [E.id(feedid),
                        E.title(title),
                        E.updated(util.rfc_3339_timestamp(updated)),
                        E.author(
                            E.name("Ferenda"),
                            E.email("info@example.org"),
                            E.uri(self.config.url)
                        ),
                        E.link({'rel':'self', 'href':feedurl})]
            # prev-archive/next-archive links implement paged feeds
            if prevarchive:
                contents.append(E.link({'rel':'prev-archive',
                                        'href':prevarchive}))
            if nextarchive:
                contents.append(E.link({'rel':'next-archive',
                                        'href':nextarchive}))
            for entry in entries:
                entrynodes=[E.title(entry.title),
                            E.summary(str(entry.summary)),
                            E.id(entry.id),
                            E.published(util.rfc_3339_timestamp(entry.published)),
                            E.updated(util.rfc_3339_timestamp(entry.updated)),
                            E.link({'href':util.relurl(entry.url, feedurl)})]
                # alternate link: the RDF metadata for the entry
                if entry.link:
                    node = E.link({'rel':'alternate',
                                   'href':util.relurl(entry.link['href'],
                                                      feedurl),
                                   'type':entry.link['type'],
                                   'length':str(entry.link['length']),
                                   'hash':entry.link['hash']})
                    entrynodes.append(node)
                # inline content (xhtml markup) ...
                if entry.content and entry.content['markup']:
                    node = E.content({'type':'xhtml',
                                      'href':util.relurl(entry.content['href'],
                                                         feedurl),
                                      'type':entry.content['type'],
                                      'length':entry.content['length'],
                                      'hash':entry.content['hash']},
                                     etree.XML(entry.content['markup']))
                    entrynodes.append(node)
                # ... or out-of-line content (src attribute)
                if entry.content and entry.content['src']:
                    node = E.content({'src':util.relurl(entry.content['src'],
                                                        feedurl),
                                      'type':entry.content['type'],
                                      'hash':entry.content['hash']})
                    entrynodes.append(node)
                contents.append(E.entry(*list(entrynodes)))
            feed = E.feed(*contents)
            res = etree.tostring(feed,
                                 pretty_print=True,
                                 xml_declaration=True,
                                 encoding='utf-8')
            # mkstemp returns an open fd; close it via fdopen before
            # re-opening the tmp file for writing
            fileno, tmpfile = mkstemp()
            fp = os.fdopen(fileno)
            fp.close()
            # tmpfile = mkstemp()[1]
            with open(tmpfile, "wb") as fp:
                fp.write(res)
            # only touch feedfile if the contents actually changed
            util.replace_if_different(tmpfile, feedfile)
            return feedfile
        assert isinstance(entries,list), 'entries should be a list, not %s' % type(entries)
        feedurl = self.generic_url(basefile, 'feed', '.atom')
        # not sure abt this - should be uri of dataset?
        feedid = feedurl
        # assume entries are sorted newest first
        # could be simplified with more_itertools.chunked?
        # Split off archives of `archivesize` entries from the *end*
        # (oldest part) of the list, as long as at least 2*archivesize
        # entries remain; the remainder becomes the main feed.
        cnt = 0
        res = []
        # print("chunking...")
        while len(entries) >= archivesize*2:
            cnt += 1
            archiveentries = entries[-archivesize:]
            entries[:] = entries[:-archivesize]
            if cnt > 1:
                prev = "%s-archive-%s.atom" % (basefile,cnt-1)
            else:
                prev = None
            if len(entries) < archivesize*2:
                next = "%s.atom" % basefile
            else:
                next = "%s-archive-%s.atom" % (basefile,cnt+1)
            # NOTE(review): redundant chained assignment (kept as-is)
            suffix = suffix='-archive-%s'%cnt
            res.append(write_file(archiveentries,suffix=suffix,
                                  prevarchive=prev,
                                  nextarchive=next))
        res.insert(0,write_file(entries,
                                prevarchive="%s-archive-%s.atom" % (basefile, cnt)))
        return res
def frontpage_content(self, primary=False):
"""If the module wants to provide any particular content on
the frontpage, it can do so by returning a XHTML fragment (in
text form) here. If primary is true, the caller wants the
module to take primary responsibility for the frontpage
content. If primary is false, the caller only expects a
smaller amount of content (like a smaller presentation of the
repository and the document it contains)."""
g = self.make_graph()
qname = g.qname(self.rdf_type)
return ("<h2>Module %s</h2><p>Handles %s documents. "
"Contains %s published documents.</p>"
% (self.alias, qname,
len(list(self.list_basefiles_for("_postgenerate")))))
# @manager.action
def status(self, basefile=None, samplesize=3):
print("Status for document repository '%s' (%s)" % (self.alias, getattr(self.config,'class')))
s = self.get_status()
for step in s.keys(): # odict
exists = s[step]['exists']
todo = s[step]['todo']
exists_sample = ", ".join(exists[:samplesize])
exists_more = len(exists) - samplesize
todo_sample = ", ".join(todo[:samplesize])
todo_more = len(todo) - samplesize
if not exists_sample:
exists_sample = "None"
if exists_more > 0:
exists_more_label = ".. (%s more)" % exists_more
else:
exists_more_label = ""
if todo_more > 0:
todo_more_label = ".. (%s more)" % todo_more
else:
todo_more_label = ""
if step == 'download':
print(" download: %s.%s" % (exists_sample, exists_more_label))
else:
if todo_sample:
print(" %s: %s.%s Todo: %s.%s" % (step, exists_sample, exists_more_label,
todo_sample, todo_more_label))
else:
print(" %s: %s.%s" % (step, exists_sample, exists_more_label))
# alias and classname
# $ ./ferenda-build.py w3c status
# Status for document repository 'w3c' (w3cstandards.W3Cstandards)
# downloaded: rdb-direct-mapping r2rml ... (141 more)
# parsed: None (143 needs parsing)
# generated: None (143 needs generating)
def get_status(self):
# for step in ('download', 'parse', 'generate')
# basefiles[step] = list_basefiles_for(step)
# pathfunc = downloaded_path|parsed_path|generated_path
# physicals[step] = [pathfunc(x) for x in basefiles[step]]
# compare physical['parse'][idx] with physical['downloaded'][idx]
# if older or nonexistent:
# todo[step].append()
status = OrderedDict()
# download
exists = []
todo = []
for basefile in self.list_basefiles_for("parse"):
exists.append(basefile)
# no point in trying to append
status['download'] = {'exists':exists,
'todo':todo}
# parse
exists = []
todo = []
for basefile in self.list_basefiles_for("parse"):
dependency = self.store.downloaded_path(basefile)
target = self.store.parsed_path(basefile)
if os.path.exists(target):
exists.append(basefile)
# Note: duplication of (part of) parseifneeded logic
if not util.outfile_is_newer([dependency],target):
todo.append(basefile)
status['parse'] = {'exists':exists,
'todo':todo}
# generated
exists = []
todo = []
for basefile in self.list_basefiles_for("generate"):
dependency = self.store.parsed_path(basefile)
target = self.store.generated_path(basefile)
if os.path.exists(target):
exists.append(basefile)
# Note: duplication (see above)
if not util.outfile_is_newer([dependency],target):
todo.append(basefile)
status['generated'] = {'exists':exists,
'todo':todo}
return status
def tabs(self):
"""Get the navigation menu segment(s) provided by this docrepo
Returns a list of tuples, where each tuple will be rendered
as a tab in the main UI. First element of the tuple is the
link text, and the second is the link destination. Normally, a
module will only return a single tab.
:returns: List of tuples
"""
if self.rdf_type == self.ns['foaf'].Document:
return [(self.alias, "/" + self.alias + "/")]
else:
return [(util.uri_leaf(str(self.rdf_type)), "/" + self.alias + "/")]
# FIXME: This is conceptually similar to basefile_from_uri (given
# either a URI or a PATH_INFO, find out if self handles the
# document/resource pointed out by same), we should perhaps unify
# them somehow?
def http_handle(self, environ):
if environ['PATH_INFO'].count("/") > 2:
null, res, alias, basefile = environ['PATH_INFO'].split("/", 3)
if (alias == self.alias):
# we SHOULD be able to handle this -- maybe provide
# apologetic message about this if we can't?
genpath = self.store.generated_path(basefile)
if os.path.exists(genpath):
return (open(genpath, 'rb'),
os.path.getsize(genpath),
"text/html")
return (None, None, None)
@staticmethod
def _setup_logger(logname):
log = logging.getLogger(logname)
if log.handlers == []:
if hasattr(logging,'NullHandler'):
log.addHandler(logging.NullHandler())
else:
# py26 compatibility
class NullHandler(logging.Handler):
def emit(self, record):
pass
log.addHandler(NullHandler())
return log
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
import logging
from rdflib import Graph
from ferenda import TextReader, TripleStore
from ferenda.elements import serialize
class Devel(object):
    """This module acts as a docrepo (and as such is easily callable
    from ``ferenda-manager.py``), but contains various tool commands
    that is useful for developing and debugging your own docrepo classes."""

    alias = "devel"

    # Don't document this -- just needed for ferenda.manager compatibility.
    # Declared @staticmethod: the original def took no self parameter, so
    # calling it on an instance raised TypeError.
    @staticmethod
    def get_default_options():
        return {}

    def dumprdf(self, filename, format="turtle"):
        """Extract all RDF data from a parsed file and dump it to
        stdout.

        :param filename: Full path of the parsed XHTML+RDFa file.
        :type filename: string
        :param format: The serialization format for RDF data (same as for :py:meth:`rdflib.graph.Graph.serialize`)
        :type format: string
        """
        g = Graph()
        g.parse(filename, format="rdfa")
        # At least the turtle serializer creates UTF-8 data. Fix this!
        print((g.serialize(None, format=format).decode("utf-8")))

    def dumpstore(self, format="turtle"):
        """Extract all RDF data from the system triplestore and dump
        it to stdout using the specified format.

        :param format: The serialization format for RDF data (same as
                       for :py:meth:`ferenda.TripleStore.get_serialized`).
        :type format: string
        """
        store = TripleStore(self.config.storelocation,
                            self.config.storerepository,
                            None,
                            self.config.storetype)
        print(store.get_serialized(format=format).decode('utf-8'))

    def testlog(self):
        """Logs a series of messages at various levels, to test that
        your client code logging configuration behaves as
        expectedly."""
        log = logging.getLogger(__name__)
        log.critical('Log message at CRITICAL level')
        log.error('Log message at ERROR level')
        log.warn('Log message at WARN level')
        log.info('Log message at INFO level')
        log.debug('Log message at DEBUG level')
        sub = logging.getLogger(__name__+'.sublogger')
        sub.critical('Sublog message at CRITICAL level')
        sub.error('Sublog message at ERROR level')
        sub.warn('Sublog message at WARN level')
        sub.info('Sublog message at INFO level')
        sub.debug('Sublog message at DEBUG level')

    def mkpatch(self, alias, basefile):
        """Create a patch file from intermediate files.

        :param alias: Docrepo alias
        :type alias: string
        :param basefile: The basefile for the document to patch
        :type basefile: string

        .. note::

           This is currently broken.
        """
        # Local imports for names the original referenced without
        # importing anywhere (part of why this command was broken).
        import codecs
        from tempfile import mktemp
        from shutil import copy2
        # NOTE(review): assumed import path for the util helpers -- verify
        from ferenda import util
        coding = 'utf-8' if sys.stdin.encoding == 'UTF-8' else 'iso-8859-1'
        myargs = [arg.decode(coding) for arg in sys.argv]
        # ask for description and place it alongside
        # copy the modified file to a safe place
        file_to_patch = myargs[1].replace("\\", "/")  # normalize
        tmpfile = mktemp()
        copy2(file_to_patch, tmpfile)
        # Run SFSParser._extractSFST() (and place the file in the correct location)
        # or DVParser.word_to_docbook()
        if "/sfs/intermediate/" in file_to_patch:
            source = "sfs"
            basefile = file_to_patch.split("/sfs/intermediate/")[1]
            import SFS
            p = SFS.SFSParser()
            sourcefile = file_to_patch.replace("/intermediate/", "/downloaded/sfst/").replace(".txt", ".html")
            print(("source %s, basefile %s, sourcefile %s" % (
                source, basefile, sourcefile)))
            plaintext = p._extractSFST([sourcefile])
            f = codecs.open(file_to_patch, "w", 'iso-8859-1')
            f.write(plaintext + "\n")
            f.close()
            print(("Wrote %s bytes to %s" % (len(plaintext), file_to_patch)))
        elif "/dv/intermediate/docbook/" in file_to_patch:
            source = "dv"
            basefile = file_to_patch.split("/dv/intermediate/docbook/")[1]
            import DV
            p = DV.DVParser()
            sourcefile = file_to_patch.replace(
                "/docbook/", "/word/").replace(".xml", ".doc")
            print(("source %r, basefile %r, sourcefile %r" % (
                source, basefile, sourcefile)))
            os.remove(file_to_patch)
            p.word_to_docbook(sourcefile, file_to_patch)
        elif "/dv/intermediate/ooxml/" in file_to_patch:
            source = "dv"
            basefile = file_to_patch.split("/dv/intermediate/ooxml/")[1]
            import DV
            p = DV.DVParser()
            sourcefile = file_to_patch.replace(
                "/ooxml/", "/word/").replace(".xml", ".docx")
            print(("source %r, basefile %r, sourcefile %r" % (
                source, basefile, sourcefile)))
            os.remove(file_to_patch)
            p.word_to_ooxml(sourcefile, file_to_patch)
        else:
            # Previously fell through with `source` undefined and crashed
            # with NameError below; fail with a clear message instead.
            raise ValueError("Don't know how to create a patch for %s" %
                             file_to_patch)
        # calculate place in patch tree
        patchfile = "patches/%s/%s.patch" % (
            source, os.path.splitext(basefile)[0])
        util.ensure_dir(patchfile)
        # run diff on the original and the modified file, placing the patch right in the patch tree
        cmd = "diff -u %s %s > %s" % (file_to_patch, tmpfile, patchfile)
        print(("Running %r" % cmd))
        (ret, stdout, stderr) = util.runcmd(cmd)
        if os.stat(patchfile).st_size == 0:
            print("FAIL: Patchfile is empty")
            os.remove(patchfile)
        else:
            if sys.platform == "win32":
                os.system("unix2dos %s" % patchfile)
            print(("Created patch file %r" % patchfile))
            print("Please give a description of the patch")
            patchdesc = sys.stdin.readline().decode('cp850')
            fp = codecs.open(
                patchfile.replace(".patch", ".desc"), "w", 'utf-8')
            fp.write(patchdesc)
            fp.close()

    def parsestring(self, string, citationpattern, uriformatter=None):
        """Parse a string using a named citationpattern and print
        parse tree and optionally formatted uri(s) on stdout.

        :param string: The text to parse
        :type string: string
        :param citationpattern: The fully qualified name of a citationpattern
        :type citationpattern: string
        :param uriformatter: The fully qualified name of a uriformatter
        :type uriformatter: string

        .. note::

           This is not implemented yet
        """
        raise NotImplementedError

    def fsmparse(self, functionname, source):
        """Parse a list of text chunks using a named fsm parser and
        output the parse tree and final result to stdout.

        :param functionname: A function that returns a configured
                             :py:class:`~ferenda.FSMParser`
        :type functionname: string
        :param source: A file containing the text chunks, separated
                       by double newlines
        :type source: string

        .. note::

           The ``functionname`` parameter currently has no effect
           (``ferenda.sources.tech.rfc.RFC.get_parser()`` is always
           used)
        """
        # fixme: do magic import() dance
        print("parsefunc %s (really ferenda.sources.tech.rfc.RFC.get_parser()), source %s)" % (functionname,source))
        import ferenda.sources.tech.rfc
        parser = ferenda.sources.tech.rfc.RFC.get_parser()
        parser.debug = True
        tr = TextReader(source)
        b = parser.parse(tr.getiterator(tr.readparagraph))
        print(serialize(b))

    def queryindex(self, string):
        """Query the system fulltext index and print the hits.

        :param string: The query string
        :type string: string
        """
        # NOTE(review): FulltextIndex was never imported at module level;
        # assumed to be exported by the ferenda package -- verify.
        from ferenda import FulltextIndex
        index = FulltextIndex(self.config.indexlocation)
        rows = index.query(string)
        for row in rows:
            # The original format string had three %s placeholders but only
            # two values, which raised TypeError on every hit.
            print("%s (%s)" % (row['identifier'], row['about']))
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rdflib import Graph
# Don't create instances of Document directly. Create them through
# DocumentRepository.make_document in order to properly initialize the
# .meta RDFLib Graph object.
class Document(object):
    """Container for one parsed document: RDF metadata in ``.meta``,
    content in ``.body``, plus identifying attributes. Create instances
    through DocumentRepository.make_document (not directly) so the
    ``.meta`` graph gets properly initialized."""

    def __init__(self):
        self.meta = Graph()
        self.body = []
        # Identifying attributes, filled in by the repository
        self.uri = self.lang = self.basefile = None
| Python |
#!/usr/bin/env python
from setuptools import setup, find_packages
import os,sys
# seems this code is run for example when pip install runs? In that
# case, these files don't exist.
# Long description shown on PyPI; empty when building without docs.
if os.path.exists("README.txt"):
    with open("README.txt") as fp:
        longdesc = fp.read()
else:
    longdesc = ""
# License text passed to setup() below; likewise optional.
if os.path.exists("LICENSE.txt"):
    with open("LICENSE.txt") as fp:
        license = fp.read()
else:
    license = ""
# FIXME: We'd like to install rdflib-sqlalchemy and (the py3
# compatible branch of) html5lib, but these aren't available from
# pypi, only from git repos (see requirements.py3.txt). Is this
# possible?
# Runtime dependencies common to python 2 and 3.
install_requires = ['beautifulsoup4 >= 4.2.0',
                    'jsmin >= 2.0.2',
                    'lxml >= 3.2.0',
                    'rdflib >= 4.0.1',
                    'html5lib >= 1.0b1',
                    'rdfextras >= 0.4',
                    'requests >= 1.2.0',
                    'Whoosh >= 2.4.1',
                    'six >= 1.2.0']
# Version-dependent dependencies.
if sys.version_info < (3,0,0):
    install_requires.append('pyparsing==1.5.7')
    # not py3 compatible, but not essential either
    install_requires.append('SimpleParse >= 2.1.1')
else:
    # lastest version 2.0.0 is not py2 compatible
    install_requires.append('pyparsing')
if sys.version_info < (2,7,0):
    install_requires.append('ordereddict >= 1.1')
# Extra packages needed only to run the test suite on older pythons.
tests_require = []
if sys.version_info < (3,3,0):
    tests_require.append('mock >= 1.0.0')
if sys.version_info < (2,7,0):
    tests_require.append('unittest2 >= 0.5.1')
# to get version
import ferenda
setup(name='ferenda',
      version=ferenda.__version__,
      description='Transform unstructured document collections to structured Linked Data',
      long_description=longdesc,
      author='Staffan Malmgren',
      author_email='staffan.malmgren@gmail.com',
      url='http://lagen.nu/ferenda/',
      license=license,
      install_requires=install_requires,
      tests_require=tests_require,
      # Installs the ferenda-setup command-line tool.
      entry_points = {
          'console_scripts':['ferenda-setup = ferenda.manager:setup']
      },
      packages=find_packages(exclude=('test', 'docs')),
      # package_dir = {'ferenda':'ferenda'},
      # package_data = {'ferenda':['res/css/*.css', 'res/js/*.js', 'res/xsl/*.xsl']},
      include_package_data = True,
      zip_safe = False,
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Environment :: Web Environment',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
          'Topic :: Text Processing',
          'Topic :: Text Processing :: Markup :: XML'
      ]
)
| Python |
#!/usr/bin/env python
# Bootstrap script: creates a new ferenda project, or with -preflight
# only checks that external dependencies (e.g. the triple store) are up.
import sys
import os
from ferenda import manager
if len(sys.argv) > 1 and sys.argv[1] == '-preflight':
    manager.preflight_check('http://localhost:8080/openrdf-sesame')
elif len(sys.argv) > 1 and sys.argv[1] == '-force':
    # The original read sys.argv[1] here without a length guard, so
    # running the script with no arguments crashed with IndexError.
    # NOTE(review): the slice drops argv[0] (the script name), leaving
    # '-force' as the new argv[0] -- kept as-is, but verify this is what
    # manager.setup expects.
    sys.argv = sys.argv[1:]
    manager.setup(force=True)
else:
    manager.setup()
| Python |
from ferenda import elements, FSMParser
def is_section(parser):
    """Recognize a two-line chunk whose second line is a row of "=" of
    exactly the same length as the first line (an underlined heading)."""
    chunk_lines = parser.reader.peek().split("\n")
    if len(chunk_lines) != 2:
        return False
    title, underline = chunk_lines
    # equality with "=" * len(title) also guarantees equal lengths
    return underline == "=" * len(title)
def is_preformatted(parser):
    """Recognize a chunk where every line is indented (starts with a
    space), which marks it as preformatted text."""
    chunk = parser.reader.peek()
    return all(line.startswith(" ") for line in chunk.split("\n"))
def is_paragraph(parser):
    # Catch-all recognizer: any chunk not claimed by an earlier
    # recognizer counts as a plain paragraph.
    return True
def make_body(parser):
    """Create the root Body element and recursively parse its children."""
    return parser.make_children(elements.Body())
def make_section(parser):
    """Consume a heading chunk (title + underline), create a Section
    titled by its first line, and parse the section's children."""
    heading = parser.reader.next().split("\n")[0]
    section = elements.Section(title=heading)
    return parser.make_children(section)
def make_paragraph(parser):
    """Consume one chunk and wrap it in a Paragraph element."""
    chunk = parser.reader.next()
    return elements.Paragraph([chunk])
def make_preformatted(parser):
    """Consume one chunk and wrap it in a Preformatted element."""
    chunk = parser.reader.next()
    return elements.Preformatted([chunk])
# Transition table: (current state, recognizer) -> (constructor, new state).
# A None new-state keeps the current state; (False, None) means "stop
# collecting children and fall back to the previous state" -- used when a
# new section starts while we are still inside one.
transitions = {("body", is_section): (make_section, "section"),
               ("section", is_paragraph): (make_paragraph, None),
               ("section", is_preformatted): (make_preformatted, None),
               ("section", is_section): (False, None)}
text = """First section
=============
This is a regular paragraph. It will not be matched by is_section
(unlike the above chunk) or is_preformatted (unlike the below chunk),
but by the catch-all is_paragraph. The recognizers are run in the
order specified by FSMParser.set_transitions().
This is a preformatted section.
It could be used for source code,
+-------------------+
| line drawings |
+-------------------+
or what have you.
Second section
==============
The above new section implicitly closed the first section which we
were in. This was made explicit by the last transition rule, which
stated that any time a section is encountered while in the "section"
state, we should not create any more children (False) but instead
return to our previous state (which in this case is "body", but for a
more complex language could be any number of states)."""
# Wire up the parser; recognizer order doubles as matching priority.
p = FSMParser()
p.set_recognizers(is_section, is_preformatted, is_paragraph)
p.set_transitions(transitions)
# Chunks are paragraphs separated by blank lines; start in state "body".
body = p.parse(text.split("\n\n"), "body", make_body)
print(elements.serialize(body))
| Python |
from ferenda.sources.tech import RFC, W3Standards
from ferenda.manager import makeresources, frontpage, runserver, setup_logger
from ferenda.errors import DocumentRemovedError, ParseError, FSMStateError
# End-to-end example: download, parse, relate, generate and serve two
# repositories of internet standards documents.
# Configuration shared by both docrepos (see ferenda.manager for options).
config = {'datadir':'netstandards/exampledata',
          'loglevel':'DEBUG',
          'force':False,
          'storetype':'SQLITE',
          'storelocation':'netstandards/exampledata/netstandards.sqlite',
          'storerepository':'netstandards',
          'downloadmax': 50 }
setup_logger(level='DEBUG')
# Set up two document repositories
docrepos = RFC(**config), W3Standards(**config)
for docrepo in docrepos:
    # Download a bunch of documents
    docrepo.download()
    # Parse all downloaded documents
    for basefile in docrepo.list_basefiles_for("parse"):
        try:
            docrepo.parse(basefile)
        except (DocumentRemovedError, ParseError, FSMStateError):
            pass # just go on
    # Index the text content and metadata of all parsed documents
    for basefile in docrepo.list_basefiles_for("relate"):
        docrepo.relate(basefile, docrepos)
# Prepare various assets for web site navigation
makeresources(docrepos,
              resourcedir="netstandards/exampledata/rsrc",
              sitename="Netstandards",
              sitedescription="A repository of internet standard documents")
# Relate for all repos must run before generate for any repo
for docrepo in docrepos:
    # Generate static HTML files from the parsed documents,
    # with back- and forward links between them, etc.
    for basefile in docrepo.list_basefiles_for("generate"):
        docrepo.generate(basefile)
    # Generate a table of contents of all available documents
    docrepo.toc()
    # Generate feeds of new and updated documents, in HTML and Atom flavors
    docrepo.news()
# Create a frontpage for the entire site
frontpage(docrepos,path="netstandards/exampledata/index.html")
# Start WSGI app at http://localhost:8000/ with navigation,
# document viewing, search and API
runserver(docrepos, port=8000, documentroot="netstandards/exampledata")
| Python |
from ferenda.sources import DocumentRepository
class W3CStandards(DocumentRepository):
    """Example docrepo handling W3C Recommendation (TR) documents."""
    alias = "w3c"
    start_url = "http://www.w3.org/TR/tr-status-all"
    # The original line ended with a stray "." after the string literal,
    # which was a syntax error; also use a raw string for the regex.
    document_url_regex = r"http://www.w3.org/TR/(?P<year>\d{4})/REC-(?P<basefile>.*)-(?P<date>\d+)"
    parse_content_selector = "body"
    parse_filter_selectors = ["div.toc", "div.head"]

    def parse_metadata_from_soup(self, soup, doc):
        """Extract title, abstract, publication date and editors from the
        document HTML into doc.meta."""
        import re
        # NOTE(review): assumed import locations -- verify against ferenda
        from ferenda import Describer, util
        d = Describer(doc.meta, doc.uri)
        d.value(self.predicate("dct:title"), soup.find("title"), lang=doc.lang)
        # "class" is a reserved word in Python; BeautifulSoup spells the
        # keyword argument class_ (the original was a syntax error).
        d.value(self.predicate("dct:abstract"), soup.find(class_="abstract"),
                lang=doc.lang)
        # The tag name must be a string; the original passed the undefined
        # bare name h2.
        datestr = soup.find("h2", "W3C Recommendation ")
        # re.search takes (pattern, string) -- the original had the
        # arguments swapped and wrote w+ where \w+ was meant.
        date = re.search(r"(\d+ \w+ \d{4})", datestr)  # e.g. "07 May 2012"
        # "predictate" was a typo for predicate
        d.value(self.predicate("dct:published"), util.rfc_date_to_datetime(date))
        for editor in soup.find("dt", text="Editors").find_siblings("dd"):
            editor_name = editor.split(", ")[0]
            # The closing parenthesis was misplaced: editor_name is the
            # value argument to d.value, not to self.predicate.
            d.value(self.predicate("dct:editor"), editor_name)
| Python |
from ferenda.sources import DocumentRepository
class ExampleDocrepo(DocumentRepository):
    """Shows two equivalent ways of attaching metadata to a parsed
    document: the raw RDFLib API and the Describer convenience wrapper.
    (The second parse_metadata_from_soup definition overrides the first;
    both are kept for illustration.)"""

    # Basic way, using RDFLib API
    def parse_metadata_from_soup(self, soup, doc):
        from rdflib import Namespace, Literal, URIRef, RDF
        title = "My Document title" # or find it using the BeautifulSoup object passed
        authors = ["Fred Bloggs", "Joe Shmoe"] # ditto
        identifier = "Docno 2013:4711"
        pubdate = datetime.datetime(2013,1,6,10,8,0) # note that python types can be used
        # Set up commonly used namespaces
        RDF = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
        DCT = Namespace('http://purl.org/dc/terms/')
        PROV = Namespace('http://www.w3.org/ns/prov-o/')
        # Start setting metadata:
        # Mandatory - describe what type of thing this is. self.rdf_type
        # defaults to foaf:Document, but can be overridden by your
        # subclass
        doc.meta.add((URIRef(doc.uri), RDF['type'], self.rdf_type))
        # Optional - Make a note on what code generated this data
        doc.meta.add((URIRef(doc.uri), PROV['wasGeneratedBy'], Literal(self.qualified_class_name())))
        # Everything else is also optional, although dct:title is strongly
        # recommended. (The next two add() calls were each missing a
        # closing parenthesis, which was a syntax error.)
        doc.meta.add((URIRef(doc.uri), DCT['identifier'], Literal(identifier)))
        # Note that we specify the language of the title.
        doc.meta.add((URIRef(doc.uri), DCT['title'], Literal(title, lang=doc.lang)))
        # Multiple values can be set for a specific metadata property
        for author in authors:
            doc.meta.add((URIRef(doc.uri), DCT['author'], Literal(author)))

    # Simpler way, using the Describer wrapper
    def parse_metadata_from_soup(self, soup, doc):
        # The original had class-level "from rdflib Literal" (missing the
        # import keyword -- a syntax error); class-namespace imports are
        # also invisible inside methods, so import locally like the first
        # variant does.
        from rdflib import Literal
        from ferenda import Describer
        title = "My Document title"
        authors = ["Fred Bloggs", "Joe Shmoe"]
        identifier = "Docno 2013:4711"
        pubdate = datetime.datetime(2013,1,6,10,8,0)
        d = Describer(doc.meta, doc.uri)
        d.rdftype(self.rdf_type)
        d.value(self.ns['prov'].wasGeneratedBy, self.qualified_class_name())
        d.value(self.ns['dct'].title, Literal(title, lang=doc.lang))
        d.value(self.ns['dct'].identifier, identifier)
        for author in authors:
            d.value(self.ns['dct'].author, author)
| Python |
from ferenda.sources import DocumentRepository
class RFCs(DocumentRepository):
    """Example docrepo that downloads RFCs by scraping the IETF
    plaintext index."""
    alias = "rfc"
    start_url = "http://www.ietf.org/download/rfc-index.txt"
    # The basefile group must actually capture the RFC number; the
    # original empty group (?P<basefile>) could only match "rfc.txt".
    document_url_regex = r"http://tools.ietf.org/rfc/rfc(?P<basefile>\d+).txt"

    @recordlastdownload
    def download(self):
        """Fetch the RFC index and download every RFC listed in it."""
        self.log.debug("download: Start at %s" % self.start_url)
        indextext = requests.get(self.start_url).text
        reader = TextReader(ustring=indextext) # see TextReader class
        for p in reader.getiterator(reader.readparagraph):
            # Index entries start with a zero-padded four-digit RFC number
            if re.match(r"^(\d{4}) ", p):
                if "Not Issued." not in p: # Skip RFCs we know don't exist
                    basefile = str(int(p[:4])) # eg. '0822' -> '822'
                    self.download_single(basefile)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os,sys
import codecs
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import six
from ferenda import CitationParser
import ferenda.citationpatterns
from ferenda.testutil import file_parametrize
class ParametricBase(unittest.TestCase):
    # Subclasses override this with the citationpattern under test
    parser = ferenda.citationpatterns.url

    def parametric_test(self, filename):
        """Parse the text in *filename* with self.parser and compare the
        XML-serialized result against the matching .result file."""
        with codecs.open(filename,encoding="utf-8") as fp:
            testdata = fp.read()
        cp = CitationParser(self.parser)
        nodes = cp.parse_string(testdata)
        got = []
        for node in nodes:
            if isinstance(node,six.text_type):
                # Unmatched text between citations
                got.append(node.strip())
            else:
                # A matched citation: (text, parse result) pair
                (text,result) = node
                got.append(result.asXML().strip())
        wantfile = os.path.splitext(filename)[0] + ".result"
        if os.path.exists(wantfile):
            with open(wantfile) as fp:
                want = [x.strip() for x in fp.read().split("\n\n")]
        else:
            # Help whoever writes the missing .result file by showing what
            # the parser currently produces. (The original printed the
            # undefined name "compare", crashing with NameError instead of
            # failing cleanly.)
            print("\nparse_string() returns:")
            print("\n\n".join(got))
            self.fail("%s not found" % wantfile)
        self.maxDiff = 4096
        self.assertListEqual(want,got)
class URL(ParametricBase):
    # Parametrized over the test/files/citation/url testfiles
    parser = ferenda.citationpatterns.url
class EULaw(ParametricBase):
    # Parametrized over EU-law citation testfiles (currently disabled below)
    parser = ferenda.citationpatterns.eulaw
# The URL tests are known to fail on python 3.3; mark them as expected
# failures there so the suite stays green.
if sys.version_info[0:2] == (3,3):
    file_parametrize(URL, "test/files/citation/url", ".txt", unittest.expectedFailure)
else:
    file_parametrize(URL, "test/files/citation/url", ".txt")
# file_parametrize(URL, "test/files/citation/eulaw", ".txt")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
from ferenda import Devel
class Main(unittest.TestCase):
    def test_parsestring(self):
        """Devel.parsestring is documented as unimplemented and must say so."""
        devel = Devel()
        self.assertRaises(NotImplementedError,
                          devel.parsestring, None, None, None)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals,print_function
import sys, os
if sys.version_info < (2, 7, 0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import codecs
import re
from ferenda.legalref import LegalRef
from ferenda.elements import serialize
from ferenda.testutil import file_parametrize
class TestLegalRef(unittest.TestCase):
    def _test_parser(self, testfile, parser):
        """Shared driver: each testfile contains input text and, after the
        first blank line, the expected XML serialization of the parse."""
        encoding = 'iso-8859-1'
        with codecs.open(testfile,encoding=encoding) as fp:
            testdata = fp.read()
        # Split input from expected output at the first blank line
        parts = re.split('\r?\n\r?\n',testdata,1)
        if len(parts) == 1:
            # No expected-output section in the file
            want = ''
        else:
            (testdata, want) = parts
            want = want.replace("\r\n", "\n").strip()
        # p.currentlynamedlaws = {} # needed?
        # "---" on its own line separates independently-parsed paragraphs
        test_paras = re.split('\r?\n---\r?\n',testdata)
        got_paras = []
        for para in test_paras:
            if para.startswith("RESET:"):
                # Testfile directive: forget law names remembered so far
                parser.currentlynamedlaws.clear()
            if para.startswith("NOBASE:"):
                # Testfile directive: parse without a base URI
                baseuri = None
            else:
                baseuri = 'http://rinfo.lagrummet.se/publ/sfs/9999:999'
            # print("Parsing %r" % para)
            nodes = parser.parse(para, baseuri)
            got_paras.append(serialize(nodes).strip())
        got = "\n---\n".join(got_paras).replace("\r\n","\n").strip()
        self.maxDiff = None
        self.assertEqual(want, got)
class Lagrum(TestLegalRef):
    def parametric_test(self, datafile):
        # Plain statute (lagrum) references
        return self._test_parser(datafile, LegalRef(LegalRef.LAGRUM))
class KortLagrum(TestLegalRef):
    def parametric_test(self, datafile):
        # Short-form statute references, on top of the plain kind
        return self._test_parser(datafile,
                                 LegalRef(LegalRef.LAGRUM, LegalRef.KORTLAGRUM))
class Forarbeten(TestLegalRef):
    def parametric_test(self, datafile):
        # Preparatory-works (förarbeten) references
        return self._test_parser(datafile, LegalRef(LegalRef.FORARBETEN))
class Rattsfall(TestLegalRef):
    def parametric_test(self, datafile):
        # Case-law (rättsfall) references
        return self._test_parser(datafile, LegalRef(LegalRef.RATTSFALL))
class EULaw(TestLegalRef):
    def parametric_test(self, datafile):
        # EU legislation references
        return self._test_parser(datafile, LegalRef(LegalRef.EGLAGSTIFTNING))
class EUCaselaw(TestLegalRef):
    def parametric_test(self, datafile):
        # EU case-law references
        return self._test_parser(datafile, LegalRef(LegalRef.EGRATTSFALL))
# Some tests are not simply working right now. Since having testdata
# and wanted result in the same file makes it tricky to mark tests as
# expectedFailure, we'll just list them here.
def make_closure(brokentests):
    """Return a predicate reporting whether a testfile name is in the
    given list of known-broken tests."""
    return lambda testname: testname in brokentests
# Generate one test method per datafile under each directory; the closure
# marks files whose parses are currently known-broken as expected failures.
file_parametrize(Lagrum,"test/files/legalref/SFS",".txt",
                 make_closure(['sfs-tricky-bokstavslista.txt',
                               'sfs-tricky-eller.txt',
                               'sfs-tricky-eller-paragrafer-stycke.txt',
                               'sfs-tricky-overgangsbestammelse.txt',
                               'sfs-tricky-uppdelat-lagnamn.txt',
                               'sfs-tricky-vvfs.txt']))
file_parametrize(KortLagrum, "test/files/legalref/Short",".txt")
file_parametrize(Forarbeten, "test/files/legalref/Regpubl",".txt")
file_parametrize(Rattsfall, "test/files/legalref/DV",".txt")
file_parametrize(EULaw, "test/files/legalref/EGLag",".txt")
file_parametrize(EUCaselaw, "test/files/legalref/ECJ",".txt",
                 make_closure(['civilservicetrib.txt',
                               'simple.txt']))
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import codecs
import re
from ferenda import FSMParser, TextReader
from ferenda import elements
from ferenda.fsmparser import Peekable
from ferenda.testutil import file_parametrize
class TestPeekable(unittest.TestCase):
    def test_peekable(self):
        """peek() must not advance the underlying iterator, next() must,
        and both must raise StopIteration once it is exhausted."""
        pk = Peekable(range(4))
        self.assertEqual(pk.peek(),0)
        self.assertEqual(pk.next(),0)
        self.assertEqual(pk.peek(),1)
        self.assertEqual(pk.next(),1)
        self.assertEqual(pk.next(),2)
        self.assertEqual(pk.next(),3)
        # The original wrapped these calls in assertEqual with a single
        # argument, which raises TypeError (missing second argument)
        # instead of exercising the StopIteration behavior.
        with self.assertRaises(StopIteration):
            pk.peek()
        with self.assertRaises(StopIteration):
            pk.next()
class Parse(unittest.TestCase):
def parametric_test(self,filename):
# some basic recognizers and constructors to parse a simple
# structured plaintext format.
#
# RECOGNIZERS
def is_header(parser):
suspect = parser.reader.peek()
return (len(suspect) > 100 and not suspect.endswith("."))
def is_section(parser):
(ordinal,title) = analyze_sectionstart(parser.reader.peek())
return section_segments_count(ordinal) == 1
def is_subsection(parser):
(ordinal,title) = analyze_sectionstart(parser.reader.peek())
return section_segments_count(ordinal) == 2
def is_subsubsection(parser):
(ordinal,title) = analyze_sectionstart(parser.reader.peek())
return section_segments_count(ordinal) == 3
def is_preformatted(parser):
return " " in parser.reader.peek()
def is_definition(parser):
return False
def is_description(parser):
return False
def is_li_decimal(parser):
listtype = analyze_listitem(parser.reader.peek())[0]
return listtype in ('decimal','decimal-leading-zero')
def is_li_alpha(parser):
listtype = analyze_listitem(parser.reader.peek())[0]
return listtype in ('lower-alpha','upper-alpha')
def is_li_roman(parser):
listtype = analyze_listitem(parser.reader.peek())[0]
return listtype in ('lower-roman','upper-roman')
def is_unordereditem(parser):
listtype = analyze_listitem(parser.reader.peek())[0]
return listtype in ('disc','circle','square','dash')
def is_state_a(parser):
return parser.reader.peek().startswith("State A:")
def is_state_b(parser):
return parser.reader.peek().startswith("State B:")
def is_state_c(parser):
return parser.reader.peek().startswith("State C:")
def is_paragraph(parser):
return True
# MAGIC
def sublist_or_parent(symbol,state_stack):
constructor = False
newstate = None
if symbol == is_li_alpha and "ol-alpha" not in state_stack: # maybe only check state_stack[-2]
constructor = make_ol_alpha
newstate = "ol-alpha"
elif symbol == is_li_roman and "ol-roman" not in state_stack:
constructor = make_ol_roman
newstate = "ol-roman"
elif symbol == is_li_decimal and "ol-decimal" not in state_stack:
constructor = make_ol_roman
newstate = "ol-roman"
else:
pass
return (constructor,newstate)
# CONSTRUCTORS
def make_body(parser):
parser._debug("Hello")
b = elements.Body()
return parser.make_children(b)
setattr(make_body,'newstate','body')
def make_section(parser):
(secnumber, title) = analyze_sectionstart(parser.reader.next())
s = elements.Section(ordinal=secnumber,title=title)
return parser.make_children(s)
setattr(make_section,'newstate','section')
def make_subsection(parser):
(secnumber, title) = analyze_sectionstart(parser.reader.next())
s = elements.Subsection(ordinal=secnumber,title=title)
return parser.make_children(s)
setattr(make_subsection,'newstate','subsection')
def make_subsubsection(parser):
(secnumber, title) = analyze_sectionstart(parser.reader.next())
s = elements.Subsubsection(ordinal=secnumber,title=title)
return parser.make_children(s)
setattr(make_subsubsection,'newstate','subsubsection')
def make_paragraph(parser):
return elements.Paragraph([parser.reader.next().strip()])
def make_preformatted(parser):
return elements.Preformatted([parser.reader.next()])
# def make_unorderedlist(parser):
# listtype = analyze_listitem(parser.reader.peek())[0]
# assert ordinal is None
# ul = elements.UnorderedList(type=listtype)
# ul.append(parser.make_child(IN_UNORDEREDLIST)) # 1st element of list
# return parser.make_children(ul)
# setattr(make_unorderedlist,'newstate','unorderedlist')
def make_ol_decimal(parser):
return make_orderedlist(parser,"decimal","ol-decimal")
setattr(make_ol_decimal,'newstate','ol-decimal')
def make_ol_alpha(parser):
return make_orderedlist(parser,"lower-alpha", "ol-alpha")
setattr(make_ol_alpha,'newstate','ol-alpha')
def make_ol_roman(parser):
return make_orderedlist(parser,"lower-roman", "ol-roman")
setattr(make_ol_roman,'newstate','ol-romal')
def make_listitem(parser):
chunk = parser.reader.next()
(listtype,ordinal,separator,rest) = analyze_listitem(chunk)
li = elements.ListItem(ordinal=ordinal)
li.append(rest)
return parser.make_children(li)
setattr(make_listitem,'newstate','listitem')
def make_state_a(parser):
return elements.Paragraph([parser.reader.next().strip()],id="state-a")
def make_state_b(parser):
return elements.Paragraph([parser.reader.next().strip()],id="state-b")
def make_state_c(parser):
return elements.Paragraph([parser.reader.next().strip()],id="state-c")
# HELPERS
def section_segments_count(s):
return ((s is not None) and
len(list(filter(None,s.split(".")))))
def make_orderedlist(parser,listtype,childstate):
listtype = analyze_listitem(parser.reader.peek())[0]
ol = elements.OrderedList(type=listtype)
ol.append(parser.make_child(make_listitem,"listitem"))
return parser.make_children(ol)
# matches
# "1 Blahonga"
# "1.2.3. This is a subsubsection"
re_sectionstart = re.compile("^(\d[\.\d]*) +(.*[^\.])$").match
def analyze_sectionstart(chunk):
m = re_sectionstart(chunk)
if m:
return (m.group(1).rstrip("."), m.group(2))
else:
return (None,chunk)
def analyze_listitem(chunk):
# returns: same as list-style-type in CSS2.1, sans 'georgian', 'armenian' and 'greek', plus 'dashed'
listtype = ordinal = separator = rest = None
# match "1. Foo…" or "14) bar…" but not "4 This is a heading"
m = re.match('^(\d+)([\.\)]) +',chunk)
if m:
if chunk.startswith("0"):
listtype="decimal-leading-zero"
else:
listtype="decimal"
(ordinal,separator) = m.groups()
rest = chunk[m.end():]
return (listtype,ordinal,separator,rest)
# match "IX. Foo… or "vii) bar…" but not "vi is a sucky
# editor" or "MMXIII is the current year"
m = re.match('^([IVXivx]+)([\.\)]) +', chunk)
if m:
if chunk[0].islower():
listtype = 'lower-roman'
else:
listtype = 'upper-roman'
(ordinal,separator) = m.groups()
rest = chunk[m.end():]
return (listtype,ordinal,separator,rest)
# match "a. Foo… or "z) bar…" but not "to. Next sentence…"
m = re.match('^([A-Za-z])([\.\)]) +', chunk)
if m:
if chunk[0].islower():
listtype = 'lower-alpha'
else:
listtype = 'upper-alpha'
(ordinal,separator) = m.groups()
rest = chunk[m.end():]
return (listtype,ordinal,separator,rest)
if chunk.startswith("* "):
return ("disc",None,None,chunk)
if chunk.startswith("- "):
return ("dash",None,None,chunk)
return (listtype,ordinal,separator,chunk) # None * 3
        # MAIN CODE
        # Wire up the parser under test: recognizers are tried in the order
        # given, and the transition table maps (state, recognizer) to
        # (constructor, new-state); False means "pop back to the parent state".
        p = FSMParser()
        p.set_recognizers(is_li_decimal,
                          is_li_roman,
                          is_li_alpha,
                          is_header,
                          is_section,
                          is_subsection,
                          is_subsubsection,
                          is_preformatted,
                          is_definition,
                          is_description,
                          is_state_a,
                          is_state_b,
                          is_state_c,
                          is_paragraph)
        p.set_transitions({("body", is_paragraph): (make_paragraph, None),
                           ("body", is_section): (make_section,"section"),
                           ("body", is_state_a): (make_state_a, "state-a"),
                           ("state-a", is_state_b): (make_state_b, "state-b"),
                           ("state-b", is_state_c): (make_state_c, "state-c"),
                           ("section", is_paragraph): (make_paragraph, None),
                           ("section", is_subsection): (make_subsection, "subsection"),
                           ("subsection", is_paragraph): (make_paragraph,None),
                           ("subsection", is_subsection): (False,None),
                           ("subsection", is_subsubsection): (make_subsubsection,"subsubsection"),
                           ("subsubsection", is_paragraph): (make_paragraph,None),
                           ("subsubsection", is_section): (False, None),
                           ("subsection", is_section): (False, None),
                           ("section", is_section): (False, None),
                           ("body", is_li_decimal): (make_ol_decimal, "ol-decimal"),
                           ("ol-decimal",is_li_decimal):(make_listitem,"listitem"),
                           ("ol-decimal",is_li_alpha):(make_ol_alpha,"ol-alpha"),
                           ("ol-alpha",is_li_alpha):(make_listitem,"listitem"),
                           ("ol-alpha",is_li_roman):(make_ol_roman,"ol-roman"),
                           ("ol-roman",is_li_roman):(make_listitem,"listitem"),
                           ("ol-roman",is_li_alpha):(False,None),
                           ("ol-alpha",is_li_decimal):(False,None),
                           ("listitem",is_li_alpha):sublist_or_parent,
                           ("listitem",is_li_roman):sublist_or_parent,
                           ("listitem",is_li_decimal):sublist_or_parent,
                           })
        # Each .txt fixture has an expected serialization in a .xml twin;
        # debug output is enabled up front when no expected file exists yet.
        resultfilename = filename.replace(".txt",".xml")
        if not os.path.exists(resultfilename):
            p.debug = True
        # p.debug = True
        tr=TextReader(filename,encoding="utf-8",linesep=TextReader.UNIX)
        p.initial_state = "body"
        p.initial_constructor = make_body
        b = p.parse(tr.getiterator(tr.readparagraph))
        self.maxDiff = 4096
        if os.path.exists(resultfilename):
            with codecs.open(resultfilename,encoding="utf-8") as fp:
                result = fp.read().strip()
            # print(elements.serialize(b))
            if result != elements.serialize(b).strip():
                # re-run the parse but with debugging on
                print("============DEBUG OUTPUT================")
                p.debug = True
                tr.seek(0)
                b = p.parse(tr.getiterator(tr.readparagraph))
                print("===============RESULT===================")
                print(elements.serialize(b))
                self.fail("========See output above=======")
            else:
                self.assertEqual(result, elements.serialize(b).strip())
        else:
            # No expected file: show the result and fail so the author can
            # create the fixture from the printed output.
            print("\nResult:\n"+elements.serialize(b))
            self.fail()
# Generate one parametric_test invocation per .txt fixture.
file_parametrize(Parse,"test/files/fsmparser",".txt")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2, 7, 0):
import unittest2 as unittest
else:
import unittest
# if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
from six import text_type as str
from ferenda.manager import setup_logger; setup_logger('CRITICAL')
import codecs
from ferenda.sources.legal.se import SFS
from ferenda.elements import serialize, LinkSubject
from ferenda import TextReader
class FakeParser(object):
    """Stand-in for the legal-reference (lagrum) parser: returns its input
    unchanged so parse tests don't depend on the real parser."""
    def parse(self, s, baseuri, rdftype):
        return [s]
class Parse(unittest.TestCase):
    """Parametric tests for SFS statute parsing: each .txt fixture must
    serialize to the contents of its .xml twin."""
    def parametric_test(self, filename):
        p = SFS()
        p.id = '(test)'
        # Fixtures are latin-1 encoded with DOS line endings.
        p.reader = TextReader(filename=filename, encoding='iso-8859-1',
                              linesep=TextReader.DOS)
        p.reader.autostrip = True
        # p.lagrum_parser = FakeParser()
        b = p.makeForfattning()
        elements = p._count_elements(b)
        # Documents with chapters but few numbered paragraphs get their
        # chapter fragments skipped when constructing ids.
        if 'K' in elements and elements['K'] > 1 and elements['P1'] < 2:
            # should be "skipfragments = ['A','K']", but this breaks test cases
            skipfragments = ['A', 'K']
        else:
            skipfragments = ['A']
        p._construct_ids(b, '', 'http://rinfo.lagrummet.se/publ/sfs/9999:999',
                         skipfragments)
        # URIs vary between runs/environments, so strip them before comparing.
        self._remove_uri_for_testcases(b)
        resultfilename = filename.replace(".txt", ".xml")
        self.maxDiff = 4096
        if os.path.exists(resultfilename):
            with codecs.open(resultfilename, encoding="utf-8") as fp:
                result = fp.read().strip()
            self.assertEqual(result, serialize(b).strip())
        else:
            # Missing fixture: fail with a diff against the empty string so
            # the expected output shows up in the test report.
            self.assertEqual("", serialize(b).strip())
    def _remove_uri_for_testcases(self, part):
        # Recursively delete .uri attributes from the element tree, except on
        # LinkSubject instances (str subclass whose uri is significant).
        if hasattr(part,'uri'):
            del part.uri
        for subpart in part:
            if not isinstance(subpart, str):
                self._remove_uri_for_testcases(subpart)
            elif hasattr(subpart, 'uri') and not isinstance(subpart, LinkSubject):
                del subpart.uri
from ferenda.testutil import file_parametrize
# tests that are broken
# Fixture files whose tests are known to fail; file_parametrize marks them
# as expected failures via the broken() predicate below.
brokentests = ['definition-no-definition.txt',
               'definition-paranthesis-lista.txt',
               'definition-paranthesis-multiple.txt',
               'definition-strecksatslista-andrastycke.txt',
               'extra-overgangsbestammelse-med-rubriker.txt',
               'regression-10kap-ellagen.txt',
               'tricky-felformatterad-tabell.txt',
               'tricky-lang-rubrik.txt',
               'tricky-lista-inte-rubrik.txt',
               'tricky-lista-not-rubriker-2.txt',
               'tricky-lopande-rubriknumrering.txt',
               'tricky-okand-aldre-lag.txt',
               'tricky-paragraf-inledande-tomrad.txt',
               'tricky-tabell-overgangsbest.txt',
               'tricky-tabell-sju-kolumner.txt']
def broken(testname):
    # Predicate handed to file_parametrize: is this fixture expected to fail?
    return testname in brokentests
file_parametrize(Parse,"test/files/sfs/parse",".txt", broken)
from ferenda.testutil import RepoTester, parametrize_repotester
class TestSFS(RepoTester):
    """Data-driven repository tests for SFS, using the fixtures under
    files/repo/sfs."""
    repoclass = SFS
    docroot = os.path.dirname(__file__)+"/files/repo/sfs"
# Generate the download/parse test methods from the docroot fixtures.
parametrize_repotester(TestSFS)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os,sys
if sys.version_info < (2, 7, 0):
import unittest2 as unittest
else:
import unittest
# if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
from ferenda.manager import setup_logger; setup_logger('CRITICAL')
import shutil
from ferenda import TextReader, util
from ferenda.testutil import RepoTester, file_parametrize
# SUT
from ferenda.sources.legal.se import MyndFskr
class Parse(RepoTester):
    """Tests for MyndFskr (agency regulation) metadata extraction."""
    repoclass = MyndFskr

    def setUp(self):
        # Copy the canned resource list to where the repo expects to find
        # its intermediate RDF data.
        super(Parse,self).setUp()
        resource_src = "%s/files/myndfskr/resources.xml"%os.path.dirname(__file__)
        resource_dest = self.repo.store.path('resourcelist','intermediate','.rdf')
        util.ensure_dir(resource_dest)
        shutil.copy2(resource_src, resource_dest)

    @unittest.skipIf('FERENDA_TEST_NET' not in os.environ,
                     'Not running net tests unless FERENDA_TEST_NET is set')
    def test_download_resource_lists(self):
        # Downloads the resource lists and verifies the file was written.
        # BUG FIX: graph_path was immediately overwritten with the relative
        # path "resources.xml" (a debug leftover), which made the datadir
        # path dead code and dropped the file into the current working
        # directory instead of the per-test datadir.
        graph_path = self.datadir+"/resources.xml"
        self.repo.download_resource_lists("http://service.lagrummet.se/var/common",
                                          graph_path)
        self.assertTrue(os.path.exists(graph_path))

    def parametric_test(self,filename):
        # Each .txt fixture must yield metadata equal to its .n3 twin.
        reader = TextReader(filename,encoding='utf-8')
        doc = self.repo.parse_from_textreader(reader,"[basefile]")
        wantfile = filename.replace(".txt", ".n3")
        self.assertEqualGraphs(wantfile, doc.meta)
# Generate one parametric_test invocation per .txt fixture.
file_parametrize(Parse, "test/files/myndfskr", ".txt")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import tempfile
import shutil
import os
from datetime import datetime
from ferenda import DocumentRepository, util
# SUT
from ferenda import DocumentEntry
class DocEntry(unittest.TestCase):
    """Tests for DocumentEntry: JSON round-tripping of entry metadata,
    content/link handling and MIME type guessing."""
    # Expected serialization of an entry where only orig_checked and
    # orig_url have been set (every other field at its default).
    basic_json = """{
  "content": {
    "hash": null,
    "markup": null,
    "src": null,
    "type": null
  },
  "id": null,
  "link": {
    "hash": null,
    "href": null,
    "length": null,
    "type": null
  },
  "orig_checked": "2013-03-27T20:46:37.925528",
  "orig_updated": null,
  "orig_url": "http://source.example.org/doc/123/a",
  "published": null,
  "summary": null,
  "title": null,
  "updated": null,
  "url": null
}"""
    # As basic_json, but after set_content(..., inline=True) and an
    # orig_updated timestamp have been recorded.
    modified_json = """{
  "content": {
    "hash": null,
    "markup": "<div>xhtml fragment</div>",
    "src": null,
    "type": "xhtml"
  },
  "id": null,
  "link": {
    "hash": null,
    "href": null,
    "length": null,
    "type": null
  },
  "orig_checked": "2013-03-27T20:46:37.925528",
  "orig_updated": "2013-03-27T20:59:42.325067",
  "orig_url": "http://source.example.org/doc/123/a",
  "published": null,
  "summary": null,
  "title": null,
  "updated": null,
  "url": null
}"""
    def setUp(self):
        # Fresh temp datadir per test; removed again in tearDown.
        self.datadir = tempfile.mkdtemp()
        self.repo = DocumentRepository(datadir=self.datadir)
    def tearDown(self):
        shutil.rmtree(self.datadir)
    def test_init(self):
        # A fresh entry (with or without a nonexistent path) has all fields
        # unset and empty content/link dicts.
        d = DocumentEntry()
        self.assertIsNone(d.id) # same for .updated, .published,
                                # .title, .summary, .url and .content
        self.assertEqual(d.content, {'src':None, 'type':None, 'markup': None, 'hash':None})
        self.assertEqual(d.link, {'href':None, 'type':None, 'length': None, 'hash':None})
        path = self.repo.store.documententry_path("123/b")
        d = DocumentEntry(path=path)
        self.assertIsNone(d.id) # same for .updated, .published,
                                # .title, .summary, .url and .content
        self.assertEqual(d.content, {'src':None, 'type':None, 'markup': None, 'hash':None})
        self.assertEqual(d.link, {'href':None, 'type':None, 'length': None, 'hash':None})
    def test_load(self):
        # Loading basic_json from disk must reconstruct typed field values.
        path = self.repo.store.documententry_path("123/a")
        util.ensure_dir(path)
        with open(path, "w") as fp:
            fp.write(self.basic_json)
        d = DocumentEntry(path=path)
        self.assertEqual(d.orig_checked, datetime(2013,3,27,20,46,37,925528))
        self.assertIsNone(d.orig_updated)
        self.assertEqual(d.orig_url,'http://source.example.org/doc/123/a')
    def test_save(self):
        # Serializing a minimally-populated entry must yield basic_json exactly.
        path = self.repo.store.documententry_path("123/a")
        d = DocumentEntry()
        d.orig_checked = datetime(2013,3,27,20,46,37,925528)
        d.orig_url = 'http://source.example.org/doc/123/a'
        d.save(path=path)
        self.maxDiff = None
        self.assertEqual(util.readfile(path), self.basic_json)
    def test_modify(self):
        # Load an existing entry, add inline content plus an orig_updated
        # stamp, save again -- the file must now equal modified_json.
        path = self.repo.store.documententry_path("123/a")
        util.ensure_dir(path)
        with open(path, "w") as fp:
            fp.write(self.basic_json)
        d = DocumentEntry(path=path)
        d.orig_updated = datetime(2013, 3, 27, 20, 59, 42, 325067)
        # do this in setUp?
        with open(self.datadir+"/xhtml","w") as f:
            f.write("<div>xhtml fragment</div>")
        d.set_content(self.datadir+"/xhtml", "http://example.org/test",
                      mimetype="xhtml", inline=True)
        d.save()
        self.assertEqual(util.readfile(path), self.modified_json)
    def test_set_content(self):
        # NOTE(review): tempfile.mktemp is race-prone and deprecated; mkstemp
        # or NamedTemporaryFile would be safer here.
        t = tempfile.mktemp()
        with open(t,"w") as f:
            f.write("<div>xhtml fragment</div>")
        d = DocumentEntry()
        d.set_content(t, "http://example.org/test", mimetype="xhtml", inline=True)
        # type must be either "text", "html", "xhtml" or a MIME media type (RFC 4287, 4.1.3.1)
        self.assertEqual(d.content['type'],"xhtml")
        self.assertEqual(d.content['markup'],"<div>xhtml fragment</div>")
        self.assertIsNone(d.content['src'])
        d = DocumentEntry()
        d.set_content(t, "http://example.org/test", mimetype="xhtml")
        self.assertEqual(d.content['type'],"xhtml")
        self.assertIsNone(d.content['markup'])
        self.assertEqual(d.content['src'], "http://example.org/test")
        self.assertEqual(d.content['hash'], "md5:ca8d87b5cf6edbbe88f51d45926c9a8d")
        os.unlink(t)
        t = tempfile.mktemp()
        with open(t+".pdf","w") as f:
            f.write("This is not a real PDF file")
        d = DocumentEntry()
        d.set_content(t+".pdf", "http://example.org/test")
        self.assertEqual(d.content['type'],"application/pdf")
        self.assertIsNone(d.content['markup'])
        self.assertEqual(d.content['src'], "http://example.org/test")
        self.assertEqual(d.content['hash'], "md5:0a461f0621ede53f1ea8471e34796b6f")
        d = DocumentEntry()
        with self.assertRaises(AssertionError):
            # presumably set_content rejects inline non-markup content -- an
            # AssertionError is expected here; confirm against implementation.
            d.set_content(t+".pdf", "http://example.org/test", inline=True)
        os.unlink(t+".pdf")
    def test_set_link(self):
        # set_link records href plus type/length/hash derived from the file.
        t = tempfile.mktemp()
        with open(t+".html","w") as f:
            f.write("<div>xhtml fragment</div>")
        d = DocumentEntry()
        d.set_link(t+".html", "http://example.org/test")
        self.assertEqual(d.link['href'],"http://example.org/test")
        self.assertEqual(d.link['type'], "text/html")
        self.assertEqual(d.link['length'],25)
        self.assertEqual(d.link['hash'],"md5:ca8d87b5cf6edbbe88f51d45926c9a8d")
    def test_guess_type(self):
        # Maps filename extensions to MIME types.
        d = DocumentEntry()
        self.assertEqual(d.guess_type("test.pdf"), "application/pdf")
        self.assertEqual(d.guess_type("test.rdf"), "application/rdf+xml")
        self.assertEqual(d.guess_type("test.html"), "text/html")
        # NOTE(review): "application/html+xml" looks like a typo for the
        # standard "application/xhtml+xml"; this assertion mirrors the
        # current implementation, so confirm before changing either side.
        self.assertEqual(d.guess_type("test.xhtml"),"application/html+xml")
        self.assertEqual(d.guess_type("test.bin"), "application/octet-stream")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
# if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import time
import subprocess
import os
import tempfile
import shutil
import logging
from rdflib import Graph
from rdflib.util import guess_format
from rdflib.compare import graph_diff, isomorphic
from ferenda import util
from ferenda.triplestore import TripleStore
from ferenda.testutil import FerendaTestCase
class TripleStoreTestCase(FerendaTestCase):
    """Store-agnostic test suite for TripleStore backends; concrete
    subclasses provide self.store in their setUp()."""
    # Set this to True if you want module-level text fixtures to
    # automatically start and stop the triple store's process for you.
    manage_server = False
    # Seven N-Triples statements about one document (non-ASCII characters
    # written as \\uXXXX escapes, as N-Triples requires).
    dataset = """<http://localhost/publ/dir/2012:35> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#Direktiv> .
<http://localhost/publ/dir/2012:35> <http://purl.org/dc/terms/identifier> "Dir. 2012:35" .
<http://localhost/publ/dir/2012:35> <http://purl.org/dc/terms/title> "Ett minskat och f\\u00F6renklat uppgiftsl\\u00E4mnande f\\u00F6r f\\u00F6retagen"@sv .
<http://localhost/publ/dir/2012:35> <http://purl.org/dc/terms/published> "2012-04-26"^^<http://www.w3.org/2001/XMLSchema#date> .
<http://localhost/publ/dir/2012:35> <http://www.w3.org/2002/07/owl#sameAs> <http://rinfo.lagrummet.se/publ/dir/2012:35> .
<http://localhost/publ/dir/2012:35> <http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#departement> <http://lagen.nu/org/2008/naringsdepartementet> .
<http://localhost/publ/dir/2012:35> <http://www.w3.org/ns/prov-o/wasGeneratedBy> "ferenda.sources.Direktiv.DirPolopoly" .
"""
    # Three more N-Triples statements, used for named-graph tests.
    dataset2 = """
<http://localhost/publ/dir/2012:36> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://rinfo.lagrummet.se/ns/2008/11/rinfo/publ#Direktiv> .
<http://localhost/publ/dir/2012:36> <http://purl.org/dc/terms/identifier> "Dir. 2012:36" .
<http://localhost/publ/dir/2012:36> <http://purl.org/dc/terms/title> "Barns s\\u00E4kerhet i f\\u00F6rskolan"@sv .
"""
    # Turtle fixture: 21 triples about three movies.
    movies = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix schema: <http://schema.org/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix a: <http://example.org/actors/> .
@prefix m: <http://example.org/movies/> .

m:tt0117665 rdf:type schema:Movie;
            schema:name "Sleepers"@en,
                        "Kardeş Gibiydiler"@tr;
            schema:actor a:nm0000102,
                         a:nm0000134,
                         a:nm0000093;
            schema:datePublished "1996-10-18"^^xsd:date;
            owl:sameAs <http://www.imdb.com/title/tt0117665/> .

m:tt0137523 rdf:type schema:Movie;
            schema:name "Fight Club"@en,
                        "Бойцовский клуб"@ru;
            schema:actor a:nm0000093,
                         a:nm0001570;
            owl:sameAs <http://www.imdb.com/title/tt0137523/> .

m:tt0099685 rdf:type schema:Movie;
            schema:name "Goodfellas"@en,
                        "Maffiabröder"@sv;
            schema:actor a:nm0000134,
                         a:nm0000501,
                         a:nm0000582;
            owl:sameAs <http://www.imdb.com/title/tt099685/> .
"""
    # Turtle fixture: 18 triples about six actors.
    # NOTE(review): the tt099685 sameAs URI above looks like it dropped a
    # zero compared with tt0099685 -- fixture data, confirm before fixing.
    actors = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix a: <http://example.org/actors/> .

a:nm0000102 rdf:type foaf:Person;
            foaf:name "Kevin Bacon";
            owl:sameAs <http://live.dbpedia.org/resource/Kevin_Bacon> .

a:nm0000134 rdf:type foaf:Person;
            foaf:name "Robert De Niro";
            owl:sameAs <http://live.dbpedia.org/resource/Robert_De_Niro> .

a:nm0000093 rdf:type foaf:Person;
            foaf:name "Brad Pitt";
            owl:sameAs <http://live.dbpedia.org/resource/Brad_Pitt> .

a:nm0001570 rdf:type foaf:Person;
            foaf:name "Edward Norton";
            owl:sameAs <http://live.dbpedia.org/resource/Edward_Norton> .

a:nm0000501 rdf:type foaf:Person;
            foaf:name "Ray Liotta";
            owl:sameAs <http://live.dbpedia.org/resource/Ray_Liotta> .

a:nm0000582 rdf:type foaf:Person;
            foaf:name "Joe Pesci";
            owl:sameAs <http://live.dbpedia.org/resource/Joe_Pesci> .
"""
    def test_add_serialized(self):
        # test adding to default graph
        self.assertEqual(0,self.store.triple_count())
        self.store.add_serialized(self.dataset,format="nt")
        self.assertEqual(7,self.store.triple_count())
    def test_add_serialized_named_graph(self):
        # Triples added under a context are counted separately from (and in
        # addition to) the default graph.
        self.test_add_serialized() # set up environment for this case
        self.store.context = "http://example.org/ctx1"
        self.store.add_serialized(self.dataset2,format="nt")
        self.assertEqual(3,self.store.triple_count())
        self.store.context = None
        self.assertEqual(10,self.store.triple_count())
    def test_add_contexts(self):
        # Two named graphs can be populated and cleared independently.
        self.store.context = "http://example.org/movies"
        self.store.add_serialized(self.movies,format="turtle")
        self.assertEqual(21,self.store.triple_count())
        self.store.context = "http://example.org/actors"
        self.store.add_serialized(self.actors,format="turtle")
        # print(self.store.get_serialized())
        self.assertEqual(18,self.store.triple_count())
        self.store.context = None
        self.assertEqual(39,self.store.triple_count())
        self.store.context = "http://example.org/movies"
        self.store.clear()
        # print(self.store.get_serialized())
        self.assertEqual(0,self.store.triple_count())
        self.store.context = None
        self.assertEqual(18,self.store.triple_count())
        self.store.context = "http://example.org/actors"
        self.store.clear()
        self.store.context = None
        self.assertEqual(0,self.store.triple_count())
    def test_add_serialized_file(self):
        # Same as test_add_serialized_named_graph, but loading from files.
        self.assertEqual(0,self.store.triple_count())
        tmp1 = tempfile.mktemp()
        with open(tmp1,"w") as fp:
            fp.write(self.dataset)
        tmp2 = tempfile.mktemp()
        with open(tmp2,"w") as fp:
            fp.write(self.dataset2)
        # default graph
        self.store.add_serialized_file(tmp1,format="nt")
        self.assertEqual(7,self.store.triple_count())
        # named graph
        self.store.context = "http://example.org/ctx1"
        self.store.add_serialized_file(tmp2,format="nt")
        self.assertEqual(3,self.store.triple_count())
        self.store.context = None
        self.assertEqual(10,self.store.triple_count())
        os.unlink(tmp1)
        os.unlink(tmp2)
    def test_roundtrip(self):
        # A language-tagged literal must survive an add/get round-trip intact.
        data = """<http://example.org/1> <http://purl.org/dc/terms/title> "language literal"@sv ."""
        self.store.add_serialized(data, format="nt")
        res = self.store.get_serialized(format="nt")
        self.assertEqual(res, data)
    def test_clear(self):
        data = """<http://example.org/1> <http://purl.org/dc/terms/title> "language literal"@sv .\n\n"""
        self.store.add_serialized(data, format="nt")
        res = self.store.clear()
        self.assertEqual(0,self.store.triple_count())
    def test_get_serialized(self):
        # Compare as graphs, since serialization order is not guaranteed.
        self.store.add_serialized(self.dataset,format="nt")
        res = self.store.get_serialized(format="nt")
        self.assertEqualGraphs(Graph().parse(data=self.dataset, format="nt"),
                               Graph().parse(data=res, format="nt"))
    def test_get_serialized_file(self):
        want = tempfile.mktemp(suffix=".nt")
        util.writefile(want, self.dataset)
        got = tempfile.mktemp(suffix=".nt")
        self.store.add_serialized(self.dataset,format="nt")
        self.store.get_serialized_file(got, format="nt")
        self.assertEqualGraphs(want,got)
    def test_select(self):
        # A SELECT query scoped to one named graph via GRAPH { ... }.
        self.store.context = "http://example.org/movies"
        self.store.add_serialized(self.movies,format="turtle")
        self.store.context = "http://example.org/actors"
        self.store.add_serialized(self.actors,format="turtle")
        sq = """PREFIX foaf: <http://xmlns.com/foaf/0.1/>
                PREFIX owl: <http://www.w3.org/2002/07/owl#>
                SELECT ?name
                WHERE { GRAPH <http://example.org/actors> { ?uri foaf:name ?name .
                                                            ?uri owl:sameAs <http://live.dbpedia.org/resource/Kevin_Bacon> } }"""
        self.store.context = None # note the graph identifier in the Sparql query
        p = self.store.select(sq,"python")
        self.assertEqual(p,[{'name':'Kevin Bacon'}])
        if self.store.storetype == self.store.SLEEPYCAT:
            self.store.graph.close()
    def test_construct(self):
        # CONSTRUCT must return a graph isomorphic to the expected one.
        self.store.add_serialized("""
@prefix ab: <http://learningsparql.com/ns/addressbook#> .
@prefix d: <http://learningsparql.com/ns/data#> .

d:i0432 ab:firstName "Richard" .
d:i0432 ab:lastName "Mutt" .
d:i0432 ab:homeTel "(229) 276-5135" .
d:i0432 ab:email "richard49@hotmail.com" .

d:i9771 ab:firstName "Cindy" .
d:i9771 ab:lastName "Marshall" .
d:i9771 ab:homeTel "(245) 646-5488" .
d:i9771 ab:email "cindym@gmail.com" .

d:i8301 ab:firstName "Craig" .
d:i8301 ab:lastName "Ellis" .
d:i8301 ab:email "craigellis@yahoo.com" .
d:i8301 ab:email "c.ellis@usairwaysgroup.com" .
""", format="turtle")
        sq = """
PREFIX ab: <http://learningsparql.com/ns/addressbook#>
PREFIX d: <http://learningsparql.com/ns/data#>

CONSTRUCT { ?person ?p ?o . }
WHERE {
    ?person ab:firstName "Craig" ; ab:lastName "Ellis" ;
    ?p ?o .
}
"""
        want = Graph()
        want.parse(data="""
@prefix d:<http://learningsparql.com/ns/data#> .
@prefix ab:<http://learningsparql.com/ns/addressbook#> .

d:i8301
   ab:email "c.ellis@usairwaysgroup.com",
            "craigellis@yahoo.com" ;
   ab:firstName "Craig" ;
   ab:lastName "Ellis" .
""", format="turtle")
        got = self.store.construct(sq)
        self.assertTrue(isomorphic(want,got))
        if self.store.storetype == self.store.SLEEPYCAT:
            self.store.graph.close()
@unittest.skipIf('SKIP_FUSEKI_TESTS' in os.environ,
                 "Skipping Fuseki tests")
class Fuseki(TripleStoreTestCase,unittest.TestCase):
    """Runs the shared triple-store suite against a Fuseki server expected
    at http://localhost:3030/ (dataset 'ds')."""
    @classmethod
    def setUpClass(cls):
        if cls.manage_server:
            # Note: In order for this to work, the script "fuseki"
            # must be in PATH, and FUSEKI_HOME must be set to the
            # directory of that script (which should also contain
            # fuseki-server.jar)
            # assume that the config.ttl from the fuseki distribution is
            # used, creating an updateable in-memory dataset at /ds
            subprocess.check_call("fuseki start > /dev/null", shell=True)
            # It seems to take a little while from the moment that `fuseki
            # start' returns to when the HTTP service actually is up and
            # running
            time.sleep(3)
    @classmethod
    def tearDownClass(cls):
        if cls.manage_server:
            subprocess.check_call("fuseki stop > /dev/null", shell=True)
        pass
    def setUp(self):
        # to filter out spurious warnings from requests/urllib3 under
        # py3. Does not work when running the entire test suite, for
        # some reason, but works fine when only testing with this module.
        # logging.captureWarnings(True)
        self.store = TripleStore("http://localhost:3030/", "ds", storetype=TripleStore.FUSEKI)
        self.store.clear()
    def tearDown(self):
        # logging.captureWarnings(False)
        pass
@unittest.skipIf('SKIP_SESAME_TESTS' in os.environ,
                 "Skipping Sesame tests")
class Sesame(TripleStoreTestCase,unittest.TestCase):
    """Runs the shared triple-store suite against a Sesame repository
    expected at http://localhost:8080/openrdf-sesame."""
    @classmethod
    def setUpClass(cls):
        # start up tomcat/sesame on port 8080
        if cls.manage_server:
            subprocess.check_call("catalina.sh start > /dev/null", shell=True)
            # It seems to take a little while from the moment that
            # `catalina.sh start' returns to when the HTTP service
            # actually is up and answering.
            time.sleep(1)
    @classmethod
    def tearDownClass(cls):
        if cls.manage_server:
            subprocess.check_call("catalina.sh stop > /dev/null", shell=True)
    def setUp(self):
        # to filter out spurious warnings from requests/urllib3 under py3
        # logging.captureWarnings(True)
        self.store = TripleStore("http://localhost:8080/openrdf-sesame", "ferenda", storetype=TripleStore.SESAME)
        self.store.clear()
    def tearDown(self):
        pass
        # logging.captureWarnings(False)
class SQLite(TripleStoreTestCase,unittest.TestCase):
    """Runs the shared triple-store suite against the SQLite-backed store
    (local file ferenda.sqlite, removed after each test)."""
    def setUp(self):
        self.store = TripleStore("ferenda.sqlite", "ferenda", storetype=TripleStore.SQLITE)
        self.store.clear()
    def tearDown(self):
        # Close and delete the on-disk database between tests.
        self.store.close()
        del self.store
        os.remove("ferenda.sqlite")
# BUG FIX: the skip reason said "Skipping Fuseki tests" (copy-paste from the
# Fuseki class) although this guard and class are about Sleepycat.
@unittest.skipIf('SKIP_SLEEPYCAT_TESTS' in os.environ,
                 "Skipping Sleepycat tests")
class Sleepycat(TripleStoreTestCase,unittest.TestCase):
    """Runs the shared triple-store suite against an RDFLib Sleepycat
    (Berkeley DB) store backed by the local directory ferenda.db."""
    def setUp(self):
        self.store = TripleStore("ferenda.db", "ferenda", storetype=TripleStore.SLEEPYCAT)
        self.store.clear()
    def tearDown(self):
        # Drop the store object and remove its on-disk database between tests.
        del self.store
        if os.path.exists("ferenda.db"):
            shutil.rmtree("ferenda.db")
| Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
# if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import json
from ferenda import URIFormatter
import ferenda.uriformats
from ferenda.testutil import file_parametrize
class FakeParseResult(dict):
    """Minimal stand-in for a pyparsing-style parse result: an ordinary dict
    that additionally remembers a name, exposed through getName()."""
    def __init__(self, *args, **kwargs):
        # Peel off the 'name' keyword before the dict itself is initialized,
        # so it never appears among the mapping's items.
        if 'name' in kwargs:
            self._name = kwargs.pop('name')
        super(FakeParseResult, self).__init__(*args, **kwargs)
    def getName(self):
        return self._name
class ParametricBase(unittest.TestCase):
    """Base class for URIFormatter tests: each .json fixture is formatted
    and compared against the corresponding .txt result file."""
    def get_formatter(self):
        # Subclasses override this to return (name, formatter-function).
        return ("Base",ferenda.uriformats.generic)
    def parametric_test(self,filename):
        with open(filename) as fp:
            testdata = fp.read()
        d = json.loads(testdata)
        # Wrap the parsed dict so it carries the formatter name, like a real
        # parse result would.
        d = FakeParseResult(d,name=self.get_formatter()[0])
        uf = URIFormatter(self.get_formatter())
        uri = uf.format(d)
        resultfile = os.path.splitext(filename)[0] + ".txt"
        if os.path.exists(resultfile):
            with open(resultfile) as fp:
                result = fp.read().strip()
        else:
            # Print the actual output to ease authoring the missing fixture.
            print("format() returns: %s" % uri)
            self.fail("%s not found" % resultfile)
        self.assertEqual(uri,result)
class URL(ParametricBase):
    # Runs the fixtures through the "url" formatter.
    def get_formatter(self):
        return ("url",ferenda.uriformats.url)
class EULaw(ParametricBase):
    # Runs the fixtures through the "eulaw" formatter (suite currently
    # disabled below).
    def get_formatter(self):
        return ("eulaw",ferenda.uriformats.eulaw)
# Generate one test method per .json fixture for the "url" formatter; the
# eulaw suite stays disabled for now.
file_parametrize(URL,"test/files/uriformat/url", ".json")
# file_parametrize(EULaw,"test/files/uriformat/eulaw", ".json")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import rdflib
#SUT
from ferenda import Document
class TestDocument(unittest.TestCase):
    def test_init(self):
        # A fresh Document starts with an empty RDF graph as .meta, an empty
        # .body list, and unset uri/lang/basefile.
        d = Document()
        self.assertIsInstance(d.meta, rdflib.Graph)
        self.assertEqual(d.body, [])
        self.assertIsNone(d.uri)
        self.assertIsNone(d.lang)
        self.assertIsNone(d.basefile)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
from ferenda.manager import setup_logger; setup_logger('CRITICAL')
from datetime import datetime,timedelta
from operator import attrgetter
import codecs
import collections
import shutil
import tempfile
import time
import calendar
import lxml.etree as etree
from lxml.etree import XSLT
from lxml.builder import ElementMaker
import rdflib
# import six
try:
# assume we're on py3.3 and fall back if not
from unittest.mock import Mock, MagicMock, patch, call
except ImportError:
from mock import Mock, patch, call
# from requests.exceptions import HTTPError
from bs4 import BeautifulSoup
import doctest
from ferenda import DocumentEntry, TocPageset, TocPage, \
TocCriteria, Describer, LayeredConfig, TripleStore, FulltextIndex
from ferenda.errors import *
# The main system under test (SUT)
from ferenda import DocumentRepository
from ferenda.testutil import RepoTester
# various utility functions which occasionally needs patching out
from ferenda import util
from ferenda.elements import serialize, Link
class Repo(RepoTester):
# TODO: Many parts of this class could be divided into subclasses
# (like Generate, Toc, News, Storage and Archive already has)
# class Repo(RepoTester)
    def test_context(self):
        # A default repository reports this canonical dataset context URI.
        repo = DocumentRepository()
        self.assertEqual(repo.context(), "http://example.org/ctx/base")
    def test_qualified_class_name(self):
        # qualified_class_name() yields the full dotted module + class path.
        repo = DocumentRepository()
        self.assertEqual(repo.qualified_class_name(),
                         "ferenda.documentrepository.DocumentRepository")
# class Download(RepoTester)
    def test_download(self):
        """Exercise the generic download() loop: link discovery from the
        index page, skipping of already-downloaded files, and the
        config.refresh flag."""
        # test index file contains four links that matches
        # d.document_url. Three of these contains link text that
        # matches d.basefile_template, and should thus be downloaded
        d = DocumentRepository(loglevel='CRITICAL',datadir=self.datadir)
        d.start_url = "http://localhost/fake/url"
        d.download_single = Mock()
        d.download_single.return_value = True
        d.log = Mock()
        # test1: run download, make sure download_single is hit the
        # right amount of times, make sure d.log.error is called once,
        # and ensure lastdownload is set
        mockresponse = Mock()
        with open("%s/files/base/downloaded/index.htm" % os.path.dirname(__file__)) as fp:
            mockresponse.text = fp.read()
        with patch('requests.get',return_value=mockresponse):
            self.assertTrue(d.download())
        self.assertEqual(d.download_single.call_count,3)
        d.download_single.assert_has_calls([call("123/a","http://example.org/docs/1.html"),
                                            call("123/b","http://example.org/docs/2.html"),
                                            call("124/a","http://example.org/docs/3.html")])
        self.assertAlmostEqualDatetime(d.config.lastdownload,
                                       datetime.now())
        d.download_single.reset_mock()
        # test1.1: Run download with a different index file, where the
        # link text provides no value and instead the links themselves
        # must match document_url_regex.
        mockresponse = Mock()
        with open("%s/files/base/downloaded/index2.htm" % os.path.dirname(__file__)) as fp:
            mockresponse.text = fp.read()
        with patch('requests.get',return_value=mockresponse):
            self.assertTrue(d.download())
        self.assertEqual(d.download_single.call_count,3)
        d.download_single.assert_has_calls([call("1","http://example.org/docs/1.html"),
                                            call("2","http://example.org/docs/2.html"),
                                            call("3","http://example.org/docs/3.html")])
        self.assertAlmostEqualDatetime(d.config.lastdownload,
                                       datetime.now())
        d.download_single.reset_mock()
        # test2: create 2 out of 3 files. make sure download_single is
        # hit only for the remaining file.
        util.ensure_dir(self.datadir+"/base/downloaded/123/a.html")
        open(self.datadir+"/base/downloaded/123/a.html","w").close()
        open(self.datadir+"/base/downloaded/123/b.html","w").close()
        with open("%s/files/base/downloaded/index.htm" % os.path.dirname(__file__)) as fp:
            mockresponse.text = fp.read()
        with patch('requests.get',return_value=mockresponse):
            self.assertTrue(d.download())
        d.download_single.assert_called_once_with("124/a","http://example.org/docs/3.html")
        d.download_single.reset_mock()
        # test3: set refresh = True, make sure download_single is hit thrice again.
        d.config.refresh = True
        with patch('requests.get',return_value=mockresponse):
            self.assertTrue(d.download())
        self.assertEqual(d.download_single.call_count,3)
        d.download_single.assert_has_calls([call("123/a","http://example.org/docs/1.html"),
                                            call("123/b","http://example.org/docs/2.html"),
                                            call("124/a","http://example.org/docs/3.html")])
        d.download_single.reset_mock()
        # test4: set refresh = False, create the 3rd file, make sure
        # download returns false as nothing changed
        util.ensure_dir(self.datadir+"/base/downloaded/124/a.html")
        open(self.datadir+"/base/downloaded/124/a.html","w").close()
        d.download_single.return_value = False
        d.config.refresh = False
        with patch('requests.get',return_value=mockresponse):
            self.assertFalse(d.download())
        self.assertFalse(d.download_single.error.called)
        d.download_single.reset_mock()
    def test_download_single(self):
        """download_single: fetch a new file, an updated file (archiving the
        previous version) and an unchanged file, verifying DocumentEntry
        timestamps and stored content at each step."""
        url_location = None # The local location of the URL.
        def my_get(url,**kwargs):
            # Fake requests.get: serves the content of whatever file the
            # enclosing url_location variable currently points to.
            res = Mock()
            with open(url_location,"rb") as fp:
                res.content = fp.read()
            # defaultdict so that lookups of absent headers yield None
            res.headers = collections.defaultdict(lambda:None)
            res.headers['X-These-Headers-Are'] = 'Faked'
            res.status_code = 200
            return res
        d = DocumentRepository(loglevel='CRITICAL', datadir=self.datadir)
        # test1: New file
        url_location = "test/files/base/downloaded/123/a-version1.htm"
        self.assertFalse(os.path.exists(self.datadir+"/base/downloaded/123/a.html"))
        # the url will be dynamically constructed using the
        # document_url template
        with patch('requests.get',side_effect = my_get) as mock_get:
            self.assertTrue(d.download_single("123/a"))
        self.assertEqual(mock_get.call_args[0][0],
                         "http://example.org/docs/123/a.html")
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/123/a.html"))
        self.assertTrue(os.path.exists(self.datadir+"/base/entries/123/a.json"))
        p = DocumentEntry(self.datadir+"/base/entries/123/a.json")
        self.assertIsInstance(p, DocumentEntry)
        # for a brand-new download created == updated == checked
        self.assertAlmostEqualDatetime(p.orig_created, datetime.now())
        self.assertEqual(p.orig_created, p.orig_updated)
        self.assertEqual(p.orig_created, p.orig_checked)
        self.assertEqual(p.orig_url, "http://example.org/docs/123/a.html")
        self.assertEqual(util.readfile(self.datadir+"/base/downloaded/123/a.html"),
                         util.readfile("test/files/base/downloaded/123/a-version1.htm"))
        # d.browser.retrieve.reset_mock()
        # test2: updated file
        # sleep so that the updated timestamp measurably differs from created
        time.sleep(0.1)
        url_location = "test/files/base/downloaded/123/a-version2.htm"
        with patch('requests.get',side_effect = my_get) as mock_get:
            # an explicit url overrides the document_url template
            self.assertTrue(d.download_single("123/a", "http://example.org/very/specific/url"))
        self.assertEqual(mock_get.call_args[0][0],
                         "http://example.org/very/specific/url")
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/123/a.html"))
        # make sure download_single tucked away the previous version
        self.assertTrue(os.path.exists(self.datadir+"/base/archive/downloaded/123/a/1.html"))
        self.assertTrue(os.path.exists(self.datadir+"/base/entries/123/a.json"))
        p = DocumentEntry(self.datadir+"/base/entries/123/a.json")
        self.assertAlmostEqualDatetime(p.orig_updated, datetime.now())
        self.assertNotEqual(p.orig_created, p.orig_updated)
        self.assertEqual(p.orig_updated, p.orig_checked)
        self.assertEqual(p.orig_url, "http://example.org/very/specific/url") # orig_url has been modified from test1
        self.assertEqual(util.readfile(self.datadir+"/base/downloaded/123/a.html"),
                         util.readfile("test/files/base/downloaded/123/a-version2.htm"))
        self.assertEqual(util.readfile(self.datadir+"/base/archive/downloaded/123/a/1.html"),
                         util.readfile("test/files/base/downloaded/123/a-version1.htm"))
        # test3: unchanged file
        time.sleep(0.1)
        url_location = "test/files/base/downloaded/123/a-version2.htm" # same as above, ie unchanged
        # d.browser.retrieve.return_value = util.readfile("test/files/base/downloaded/123/a-version2.htm")
        with patch('requests.get',side_effect = my_get) as mock_get:
            # unchanged content: download_single returns False
            self.assertFalse(d.download_single("123/a", "http://example.org/123/a.htm"))
        self.assertEqual(mock_get.call_args[0][0],
                         "http://example.org/123/a.htm")
        p = DocumentEntry(self.datadir+"/base/entries/123/a.json")
        # only the checked timestamp should have been bumped
        self.assertAlmostEqualDatetime(p.orig_checked, datetime.now())
        self.assertNotEqual(p.orig_created, p.orig_updated)
        self.assertNotEqual(p.orig_created, p.orig_checked)
        self.assertEqual(p.orig_url, "http://example.org/123/a.htm")
        self.assertEqual(util.readfile(self.datadir+"/base/downloaded/123/a.html"),
                         util.readfile("test/files/base/downloaded/123/a-version2.htm"))
    @patch('requests.get')
    def test_download_if_needed(self, mock_get):
        """download_if_needed: exercise HTTP conditional GET behaviour
        (If-Modified-Since and If-None-Match/ETag) plus the
        conditionalget=False fallback, across seven scenarios."""
        def my_get(url,headers):
            # observes the scoped variables "last_modified" (should
            # contain a formatted date string according to HTTP rules)
            # and "etag" (opaque string).
            resp = Mock()
            resp.status_code=200
            if "If-modified-since" in headers:
                # 400 signals "the test did not expect a conditional header"
                if not expect_if_modified_since:
                    resp.status_code = 400
                    return resp
                if (util.parse_rfc822_date(headers["If-modified-since"]) >
                        util.parse_rfc822_date(last_modified)):
                    resp.status_code=304
                    return resp
            if "If-none-match" in headers:
                if not expect_if_none_match:
                    resp.status_code=400
                    return resp
                if headers["If-none-match"] == etag:
                    resp.status_code=304
                    return resp
            # Then make sure the response contains appropriate headers
            headers = {}
            if last_modified:
                headers["last-modified"] = last_modified
            else:
                headers["last-modified"] = None
            if etag:
                headers["etag"] = etag
            else:
                headers["etag"] = None
            # And if needed, slurp content from a specified file
            content = None
            if url_location:
                with open(url_location,"rb") as fp:
                    content = fp.read()
            resp.content = content
            resp.headers = headers
            return resp
        # closure state read by my_get; mutated before each sub-test
        url_location = None
        last_modified = None
        etag = None
        expect_if_modified_since = False
        expect_if_none_match = False
        mock_get.side_effect = my_get
        d = DocumentRepository(loglevel='CRITICAL',datadir=self.datadir)
        # test1: file does not exist, we should not send a
        # if-modified-since, recieve a last-modified header and verify
        # file mtime
        last_modified = "Mon, 4 Aug 1997 02:14:00 EST"
        etag = None
        expect_if_modified_since = False
        expect_if_none_match = False
        url_location = "test/files/base/downloaded/123/a-version1.htm"
        self.assertFalse(os.path.exists(self.datadir+"/base/downloaded/example.html"))
        self.assertTrue(d.download_if_needed("http://example.org/document",
                                             "example"))
        self.assertTrue(mock_get.called)
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html"))
        self.assertFalse(os.path.exists(self.datadir+"/base/downloaded/example.html.etag"))
        # mtime must reflect the server's Last-modified value
        self.assertEqual(os.stat(self.datadir+"/base/downloaded/example.html").st_mtime,
                         calendar.timegm((1997,8,4,2,14,0,0,0,0)) + (60*60*5)) # EST = UTC-5
        mock_get.reset_mock()
        # test2: file exists, we use if-modified-since, we recieve a 304
        last_modified = "Mon, 4 Aug 1997 02:14:00 EST"
        etag = None
        url_location = "test/files/base/downloaded/123/a-version1.htm"
        expect_if_modified_since = True # since file now exists since test1
        expect_if_none_match = False # since no .etag file was created by test1
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html"))
        self.assertFalse(d.download_if_needed("http://example.org/document",
                                              "example"))
        self.assertTrue(mock_get.called)
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html"))
        self.assertFalse(os.path.exists(self.datadir+"/base/downloaded/example.html.etag"))
        self.assertEqual(os.stat(self.datadir+"/base/downloaded/example.html").st_mtime,
                         calendar.timegm((1997,8,4,2,14,0,0,0,0)) + (60*60*5)) # EST = UTC-5
        mock_get.reset_mock()
        # test3: file exists, we use if-modified-since, we recieve a
        # 200 with later last-modified. Also test the setting of an
        # etag from the server
        last_modified = "Tue, 5 Aug 1997 02:14:00 EST"
        etag = "this-is-my-etag-v1" # will be used in test4
        url_location = "test/files/base/downloaded/123/a-version2.htm"
        expect_if_modified_since = True # since file now exists since test1
        expect_if_none_match = False # since no .etag file was created by test1
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html"))
        self.assertTrue(d.download_if_needed("http://example.org/document",
                                             "example"))
        self.assertTrue(mock_get.called)
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html")) # since etag is set
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html.etag"))
        self.assertEqual(os.stat(self.datadir+"/base/downloaded/example.html").st_mtime,
                         calendar.timegm((1997,8,5,2,14,0,0,0,0)) + (60*60*5)) # EST = UTC-5
        self.assertEqual(etag, util.readfile(self.datadir+"/base/downloaded/example.html.etag"))
        mock_get.reset_mock()
        # test4: file and etag exists, we use if-none-match, we recieve a 304
        last_modified = None
        etag = "this-is-my-etag-v1"
        url_location = "test/files/base/downloaded/123/a-version2.htm"
        expect_if_modified_since = True
        expect_if_none_match = True
        self.assertFalse(d.download_if_needed("http://example.org/document",
                                              "example"))
        self.assertTrue(mock_get.called)
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html"))
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html.etag"))
        self.assertEqual(etag, util.readfile(self.datadir+"/base/downloaded/example.html.etag"))
        mock_get.reset_mock()
        # test5: file and etag exists, we use if-none-match, we recieve a 200 with a new etag
        last_modified = None
        etag = "this-is-my-etag-v2"
        url_location = "test/files/base/downloaded/123/a-version1.htm"
        expect_if_modified_since = False
        expect_if_none_match = True
        self.assertTrue(d.download_if_needed("http://example.org/document",
                                             "example"))
        self.assertTrue(mock_get.called)
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html"))
        self.assertTrue(os.path.exists(self.datadir+"/base/downloaded/example.html.etag"))
        self.assertEqual(etag, util.readfile(self.datadir+"/base/downloaded/example.html.etag"))
        # remove the etag so tests 6-7 run without conditional state
        os.unlink(self.datadir+"/base/downloaded/example.html.etag")
        mock_get.reset_mock()
        # test6: file exists, conditionalget is False, document hasn't changed
        d.config.conditionalget = False
        last_modified = None
        etag = None
        url_location = "test/files/base/downloaded/123/a-version1.htm"
        expect_if_modified_since = False
        expect_if_none_match = False
        self.assertFalse(d.download_if_needed("http://example.org/document",
                                              "example"))
        self.assertTrue(mock_get.called)
        self.assertFalse(os.path.exists(self.datadir+"/base/downloaded/example.html.etag"))
        self.assertEqual(util.readfile("test/files/base/downloaded/123/a-version1.htm"),
                         util.readfile(self.datadir+"/base/downloaded/example.html"))
        mock_get.reset_mock()
        # test7: file exists, conditionalget is False, document has changed
        d.config.conditionalget = False
        last_modified = None
        etag = None
        url_location = "test/files/base/downloaded/123/a-version2.htm"
        expect_if_modified_since = False
        expect_if_none_match = False
        self.assertTrue(d.download_if_needed("http://example.org/document",
                                             "example"))
        self.assertTrue(mock_get.called)
        self.assertEqual(util.readfile("test/files/base/downloaded/123/a-version2.htm"),
                         util.readfile(self.datadir+"/base/downloaded/example.html"))
        mock_get.reset_mock()
def test_remote_url(self):
d = DocumentRepository()
d.config = LayeredConfig(defaults=d.get_default_options(),inifile="ferenda.ini",cascade=True)
self.assertEqual(d.remote_url("123/a"), "http://example.org/docs/123/a.html")
self.assertEqual(d.remote_url("123:a"), "http://example.org/docs/123%3Aa.html")
self.assertEqual(d.remote_url("123 a"), "http://example.org/docs/123%20a.html")
# class Parse(RepoTester)
    def test_parse(self):
        """Default parse(): check distilled RDF and parsed XHTML output both
        for a minimal document (no title/lang) and a richer one (title,
        lang, multiple body elements)."""
        xhtmlns = "{http://www.w3.org/1999/xhtml}"
        xmlns = "{http://www.w3.org/XML/1998/namespace}"
        # test1: make sure that default parsing of a document w/o
        # title and lang tags work
        d = DocumentRepository(loglevel="CRITICAL", datadir=self.datadir)
        d.config = LayeredConfig(defaults=d.get_default_options(),inifile="ferenda.ini",cascade=True)
        path = d.store.downloaded_path("123/a")
        # print("test_parse: d.store.downloaded_path('123/a') is %s" % path)
        util.ensure_dir(path)
        shutil.copy2("test/files/base/downloaded/123/a-version1.htm",path)
        ret = d.parse("123/a")
        g = rdflib.Graph()
        uri = d.canonical_uri("123/a")
        desc = Describer(g,uri)
        g.parse(d.store.distilled_path("123/a"))
        # minimal document distills to exactly 3 triples, no title
        self.assertEqual(len(g),3)
        self.assertEqual(desc.getvalue(d.ns['dct'].identifier), "123/a")
        self.assertEqual(len(desc.getvalues(d.ns['dct'].title)),0)
        t = etree.parse(d.store.parsed_path("123/a"))
        util.indent_et(t.getroot())
        h = t.getroot()
        # "en" is the fallback language
        self.assertEqual("en", h.get(xmlns+"lang"))
        b = t.find(xhtmlns+"body")
        self.assertEqual("http://localhost:8000/res/base/123/a", b.get("about"))
        ps = t.findall(xhtmlns+"body/"+xhtmlns+"p")
        self.assertEqual(1,len(list(ps)))
        # clean up so test2 starts from a pristine state
        os.unlink(d.store.parsed_path("123/a"))
        os.unlink(d.store.distilled_path("123/a"))
        # test2: make sure that default parsing of a document with a
        # title, lang tag and multiple body elements work.
        d = DocumentRepository(loglevel="CRITICAL",datadir=self.datadir)
        path = d.store.downloaded_path("123/a")
        util.ensure_dir(path)
        shutil.copy2("test/files/base/downloaded/123/a-version2.htm",path)
        ret = d.parse("123/a")
        g = rdflib.Graph()
        uri = d.canonical_uri("123/a")
        desc = Describer(g,uri)
        g.parse(d.store.distilled_path("123/a"))
        # the extra triple is the dct:title
        self.assertEqual(len(g),4)
        self.assertEqual(desc.getvalue(d.ns['dct'].identifier), "123/a")
        self.assertEqual(desc.getvalue(d.ns['dct'].title), "A document")
        t = etree.parse(d.store.parsed_path("123/a"))
        util.indent_et(t.getroot())
        h = t.getroot()
        self.assertEqual("en-GB", h.get(xmlns+"lang"))
        b = t.find(xhtmlns+"body")
        self.assertEqual("http://localhost:8000/res/base/123/a", b.get("about"))
        ps = t.findall(xhtmlns+"body/"+xhtmlns+"p")
        self.assertEqual(2,len(list(ps)))
        os.unlink(d.store.parsed_path("123/a"))
        os.unlink(d.store.distilled_path("123/a"))
    def test_soup_from_basefile(self):
        """soup_from_basefile parses the downloaded file into BeautifulSoup,
        handling self-closing tags and non-ascii (utf-8) content."""
        d = DocumentRepository(datadir=self.datadir)
        util.ensure_dir(d.store.downloaded_path("testbasefile"))
        # test 1: Empty tags
        with open(d.store.downloaded_path("testbasefile"), "w") as fp:
            fp.write("<h1>Hello<br>World</h1>")
        soup = d.soup_from_basefile("testbasefile")
        # This fails on py33, since we can't use the lxml parser, and
        # beautifulsoup's html.parser does not know that <br> is a
        # self-closing tag. What are you gonna do?
        self.assertEqual(soup.h1.decode(), '<h1>Hello<br/>World</h1>')
        # test 2: Non-ascii characters
        with codecs.open(d.store.downloaded_path("testbasefile"), "w", encoding="utf-8") as fp:
            fp.write("<h1>R\xe4ksm\xf6rg\xe5s</h1>")
        soup = d.soup_from_basefile("testbasefile")
        self.assertEqual(soup.h1.decode(), '<h1>R\xe4ksm\xf6rg\xe5s</h1>')
        os.unlink(d.store.downloaded_path("testbasefile"))
    def test_parse_document_from_soup(self):
        """parse_document_from_soup: verify both the default content
        selector/filters and customized ones, by comparing the serialized
        element tree."""
        # see comment in documentrepository.soup_from_basefile re parser choice
        parser = "lxml" if sys.version_info < (3,3) else "html.parser"
        d = DocumentRepository()
        doc = d.make_document("testbasefile")
        # test 1: default selector/filters
        testdoc = """
<html>
<head>
<title>Test doc</title>
</head>
<body>
<div id="header">
<h1>Hello</h1>
</div>
<div id="main">
<div class="navbar">
<ul>
<li>Navigation</li>
</ul>
</div>
<script type="javascript">
// inline javascript code
</script>
<p>This is the main content</p>
</div>
</body>
</html>"""
        soup = BeautifulSoup(testdoc,parser)
        d.parse_document_from_soup(soup,doc)
        #print("Defaults")
        #print(serialize(doc.body))
        # default selectors keep everything except <script>
        self.assertEqual(serialize(doc.body),"""<Body>
<Div id="header">
<H1>
<str>Hello</str>
</H1>
</Div>
<Div id="main">
<Div class="navbar">
<UL>
<LI>
<str>Navigation</str>
</LI>
</UL>
</Div><P>
<str>This is the main content</str>
</P>
</Div>
</Body>
""")
        # test 2: adjusted selector/filters
        d.parse_content_selector = "div#main"
        d.parse_filter_selectors = ["script","div.navbar"]
        d.parse_document_from_soup(soup,doc)
        #print("Adjusted")
        #print(serialize(doc.body))
        # now only div#main survives, with navbar and script filtered out
        self.assertEqual(serialize(doc.body),"""<Div id="main">
<P>
<str>This is the main content</str>
</P>
</Div>
""")
# class RenderXHTML(RepoTester) # maybe
def _test_render_xhtml(self, body, want):
doc = self.repo.make_document('basefile')
doc.body = body
outfile = self.datadir + "/test.xhtml"
self.repo.render_xhtml(doc, outfile)
self.assertEqualXML(want, util.readfile(outfile, "rb"))
    def test_render_xhtml_simple(self):
        """Render a document built from ferenda's own element classes and
        check the resulting RDFa-annotated XHTML."""
        # Test 1: Simple document using our own element objects
        from ferenda import elements as el
        body = el.Body([el.Heading(['Toplevel heading'], level=1),
                        el.Paragraph(['Introductory preamble']),
                        el.Section([el.Paragraph(['Some text']),
                                    el.Subsection([el.Paragraph(['More text'])],
                                                  ordinal='1.1',
                                                  title="First subsection")],
                                   ordinal='1', title='First section'),
                        el.Section([el.Paragraph(['Even more text'])],
                                   ordinal='2', title='Second section')])
        # expected output: sections become divs carrying RDFa metadata
        want = """<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:bibo="http://purl.org/ontology/bibo/"
xmlns:dct="http://purl.org/dc/terms/">
<head about="http://localhost:8000/res/base/basefile"/>
<body about="http://localhost:8000/res/base/basefile">
<h1>Toplevel heading</h1>
<p>Introductory preamble</p>
<div content="First section"
about="http://localhost:8000/res/base/basefile#S1"
property="dct:title"
typeof="bibo:DocumentPart"
class="section">
<span content="1" about="http://localhost:8000/res/base/basefile#S1"
property="bibo:chapter"/>
<p>Some text</p>
<div content="First subsection"
about="http://localhost:8000/res/base/basefile#S1.1"
property="dct:title"
typeof="bibo:DocumentPart"
class="subsection">
<span content="1.1" about="http://localhost:8000/res/base/basefile#S1.1"
property="bibo:chapter"/>
<p>More text</p>
</div>
</div>
<div content="Second section"
about="http://localhost:8000/res/base/basefile#S2"
property="dct:title"
typeof="bibo:DocumentPart"
class="section">
<span content="2" about="http://localhost:8000/res/base/basefile#S2"
property="bibo:chapter"/>
<p>Even more text</p>
</div>
</body>
</html>"""
        self._test_render_xhtml(body, want)
    def test_render_xhtml_html(self):
        """Render a document built solely from elements.html classes; HTML5
        elements are downgraded to divs in XHTML 1.1 output."""
        # test 2: use element.html elements only, to make a similar
        # document (although without metadata about
        # sections/subsection and classses). Uses some HTML5 elements
        # that are converted to divs when rendering as XHTML 1.1
        from ferenda.elements import html
        body = html.Body([html.H1(['Toplevel heading']),
                          html.Summary(['Introductory preamble']),
                          html.Section([html.H2(['First section']),
                                        html.P(['Some text']),
                                        html.Section([
                                            html.H3(['First subsection']),
                                            html.P(['More text'])])]),
                          html.Section([html.H2(['Second section']),
                                        html.P(['Even more text'])])])
        # expected: <summary>/<section> become <div class="...">
        want = """<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:bibo="http://purl.org/ontology/bibo/"
xmlns:dct="http://purl.org/dc/terms/">
<head about="http://localhost:8000/res/base/basefile"/>
<body about="http://localhost:8000/res/base/basefile">
<h1>Toplevel heading</h1>
<div class="summary">Introductory preamble</div>
<div class="section">
<h2>First section</h2>
<p>Some text</p>
<div class="section">
<h3>First subsection</h3>
<p>More text</p>
</div>
</div>
<div class="section">
<h2>Second section</h2>
<p>Even more text</p>
</div>
</body>
</html>
"""
        self._test_render_xhtml(body, want)
    def test_render_xhtml_meta(self):
        """Render a document mixing ferenda elements and html elements with
        attached RDF graphs (meta + uri), checking that the metadata is
        serialized as RDFa spans."""
        from ferenda import elements as el
        from ferenda.elements import html
        # test 3: use a mix of our own elements and html elements,
        # with meta + uri attached to some nodes
        g1 = rdflib.Graph().parse(format='n3', data="""
@prefix bibo: <http://purl.org/ontology/bibo/> .
@prefix dct: <http://purl.org/dc/terms/> .

<http://localhost:8000/res/base/basefile#S1> a bibo:DocumentPart;
dct:title "First section";
bibo:chapter "1" .
""")
        g2 = rdflib.Graph().parse(format='n3', data="""
@prefix bibo: <http://purl.org/ontology/bibo/> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .

<http://localhost:8000/res/base/basefile#S2> a bibo:DocumentPart;
dct:title "Second section";
bibo:chapter "2";
dct:creator "Fred Bloggs"@en-GB;
dct:issued "2013-05-10"^^xsd:date;
owl:sameAs <http://example.org/s2> .
""")
        body = el.Body([el.Heading(['Toplevel heading'], level=1),
                        html.P(['Introductory preamble']),
                        html.Div([html.P(['Some text']),
                                  el.Subsection([el.Paragraph(['More text'])],
                                                ordinal='1.1',
                                                title="First subsection")],
                                 uri = 'http://localhost:8000/res/base/basefile#S1',
                                 meta = g1),
                        el.Section([el.Paragraph(['Even more text'])],
                                   uri = 'http://localhost:8000/res/base/basefile#S2',
                                   meta = g2)])
        # expected: each graph statement becomes an RDFa attribute or span
        want = """<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:bibo="http://purl.org/ontology/bibo/"
xmlns:owl="http://www.w3.org/2002/07/owl#"
xmlns:dct="http://purl.org/dc/terms/">
<head about="http://localhost:8000/res/base/basefile"/>
<body about="http://localhost:8000/res/base/basefile">
<h1>Toplevel heading</h1>
<p>Introductory preamble</p>
<div about="http://localhost:8000/res/base/basefile#S1"
content="First section"
property="dct:title"
typeof="bibo:DocumentPart">
<span content="1"
property="bibo:chapter"
xml:lang=""/>
<p>Some text</p>
<div about="http://localhost:8000/res/base/basefile#S1.1"
content="First subsection"
property="dct:title"
typeof="bibo:DocumentPart"
class="subsection">
<span about="http://localhost:8000/res/base/basefile#S1.1"
content="1.1"
property="bibo:chapter"/>
<p>More text</p>
</div>
</div>
<div about="http://localhost:8000/res/base/basefile#S2"
class="section"
content="Second section"
property="dct:title"
typeof="bibo:DocumentPart">
<span rel="owl:sameAs"
href="http://example.org/s2"/>
<span content="2"
property="bibo:chapter"
xml:lang=""/>
<span content="2013-05-10"
property="dct:issued"
datatype="xsd:date"/>
<span content="Fred Bloggs"
property="dct:creator"
xml:lang="en-GB"/>
<p>Even more text</p>
</div>
</body>
</html>"""
        self._test_render_xhtml(body, want)
    def test_render_xhtml_custom(self):
        """Render a document containing a CompoundElement subclass with a
        custom as_xhtml implementation."""
        # test 4: define a CompoundElement subclass and override
        # as_xhtml
        from ferenda import elements as el
        class Preamble(el.CompoundElement):
            tagname = "div"
            classname = "preamble"
            def as_xhtml(self, uri):
                # a fairly complicated custom serialization that
                # inserts a new child node where before there was only
                # text, and so that text has to be moved from the
                # parent.text to child.tail
                E = ElementMaker(namespace="http://www.w3.org/1999/xhtml")
                element = super(Preamble, self).as_xhtml(uri)
                note = E('span', {'class': 'preamble-note'},
                         self.note + ": ")
                note.tail = element.text
                element.text = None
                element.insert(0, note)
                return element
        body = el.Body([el.Heading(['Toplevel heading'], level=1),
                        Preamble(['Introductory preamble'],
                                 note='Read this first'),
                        el.Section([el.Paragraph(['Some text'])],
                                   ordinal='1', title='First section')])
        # expected: the custom span is injected before the preamble text
        want = """<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:bibo="http://purl.org/ontology/bibo/"
xmlns:dct="http://purl.org/dc/terms/">
<head about="http://localhost:8000/res/base/basefile"/>
<body about="http://localhost:8000/res/base/basefile">
<h1>Toplevel heading</h1>
<div class="preamble"><span class="preamble-note">Read this first: </span>Introductory preamble</div>
<div content="First section"
about="http://localhost:8000/res/base/basefile#S1"
property="dct:title"
typeof="bibo:DocumentPart"
class="section">
<span content="1" about="http://localhost:8000/res/base/basefile#S1"
property="bibo:chapter"/>
<p>Some text</p>
</div>
</body>
</html>
"""
        self._test_render_xhtml(body,want)
# FIXME: Move this test to a new test case file (testElements.py or even testElementsHtml.py)
# class Elements(RepoTester)
    def test_elements_from_soup(self):
        """elements_from_soup converts a BeautifulSoup tree into the
        corresponding elements.html object tree."""
        from ferenda.elements import html
        # see comment in documentrepository.soup_from_basefile
        parser = "lxml" if sys.version_info < (3,3) else "html.parser"
        soup = BeautifulSoup("""<body>
<h1>Sample</h1>
<div class="main">
<img src="xyz.png"/>
<p>Some <b>text</b></p>
<dl>
<dt>Term 1</dt>
<dd>Definition 1</dd>
</dl>
</div>
<div id="foot">
<hr/>
<a href="/">home</a> - <a href="/about">about</a>
</div>
</body>""",parser)
        body = html.elements_from_soup(soup.body)
        # print("Body: \n%s" % serialize(body))
        # hand-built expected tree mirroring the markup above
        result = html.Body([html.H1(["Sample"]),
                            html.Div([html.Img(src="xyz.png"),
                                      html.P(["Some ",
                                              html.B(["text"])]),
                                      html.DL([html.DT(["Term 1"]),
                                               html.DD(["Definition 1"])])
                                      ],**{"class":"main"}),
                            html.Div([html.HR(),
                                      html.A(["home"],href="/"),
                                      " - ",
                                      html.A(["about"],href="/about")
                                      ],id="foot")])
        self.maxDiff = 4096
        # compare via serialization to get a readable diff on failure
        self.assertEqual(serialize(body),serialize(result))
# Move to Generate?
    def test_transform_html(self):
        """transform_html applies an XSLT stylesheet to an infile, passing
        both string parameters and file (document()) parameters."""
        base = self.datadir+os.sep
        # stylesheet echoing a string param, a document() param and the
        # infile's title
        with open(base+"style.xslt","w") as fp:
            fp.write("""<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:param name="value"/>
<xsl:param name="file"/>
<xsl:variable name="content" select="document($file)/root/*"/>
<xsl:template match="/">
<output>
<paramvalue><xsl:value-of select="$value"/></paramvalue>
<paramfile><xsl:copy-of select="$content"/></paramfile>
<infile><xsl:value-of select="/doc/title"/></infile>
</output>
</xsl:template>
</xsl:stylesheet>
""")
        with open(base+"paramfile.xml","w") as fp:
            fp.write("""<root><node key='value'><subnode>textnode</subnode></node></root>""")
        with open(base+"infile.xml","w") as fp:
            fp.write("""<doc><title>Document title</title></doc>""")
        d = DocumentRepository()
        parampath = base+"paramfile.xml"
        # file params must use forward slashes even on windows
        d.transform_html(base+"style.xslt",
                         base+"infile.xml",
                         base+"outfile.xml",
                         {'value':XSLT.strparam('blahonga'),
                          'file' :XSLT.strparam(parampath.replace(os.sep,"/"))})
        self.assertEqualXML(util.readfile(base+"outfile.xml"),"""
<output>
<paramvalue>blahonga</paramvalue>
<paramfile><node key='value'><subnode>textnode</subnode></node></paramfile>
<infile>Document title</infile>
</output>""")
# class Relate(RepoTester)
    def test_relate_fulltext(self):
        """relate_fulltext indexes the main document and each sub-resource
        as separate FulltextIndex.update() calls."""
        d = DocumentRepository(datadir=self.datadir,
                               indexlocation=self.datadir+os.sep+"index") # FIXME: derive from datadir
        # prepare test document
        util.ensure_dir(d.store.parsed_path("123/a"))
        util.ensure_dir(d.store.distilled_path("123/a"))
        shutil.copy2("%s/files/base/parsed/123/a.xhtml" %
                     os.path.dirname(__file__),
                     d.store.parsed_path("123/a"))
        # regenerate the distilled RDF/XML from the turtle fixture
        g = rdflib.Graph()
        with codecs.open("%s/files/base/distilled/123/a.ttl" %
                         os.path.dirname(__file__),encoding="utf-8") as fp:
            g.parse(fp, format="turtle")
        with open(d.store.distilled_path("123/a"),"wb") as fp:
            g.serialize(fp,"pretty-xml")
        with patch.object(FulltextIndex,'update') as mock_method:
            d.relate_fulltext("123/a")
        # one update() call per resource: main doc + three sections
        calls = [call(title='Example', basefile='123/a',
                      uri='http://example.org/base/123/a', repo='base',
                      text='This is part of the main document, but not part of any sub-resource. This is the tail end of the main document ',
                      identifier='123(A)'),
                 call(title='Introduction', basefile='123/a',
                      uri='http://example.org/base/123/a#S1', repo='base',
                      text='This is part of document-part section 1 ',
                      identifier='123(A)\xb61'), # \xb6 = Pilcrow
                 call(title='Requirements Language', basefile='123/a',
                      uri='http://example.org/base/123/a#S1.1', repo='base',
                      text='This is the text in subsection 1.1 ',
                      identifier='123(A)\xb61.1'),
                 call(title='Definitions and Abbreviations', basefile='123/a',
                      uri='http://example.org/base/123/a#S2', repo='base',
                      text='This is the second main document part ',
                      identifier='123(A)\xb62')]
        mock_method.assert_has_calls(calls)
test_rdf_xml = """<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:bibo="http://purl.org/ontology/bibo/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
>
<bibo:Document rdf:about="http://localhost:8000/res/base/root">
<dcterms:updates rdf:resource="http://localhost:8000/res/base/res-a"/>
<dcterms:references rdf:resource="http://localhost:8000/res/other/res-b"/>
<rdf:seeAlso rdf:resource="http://localhost:8000/somewhere/else"/>
</bibo:Document>
</rdf:RDF>"""
def test_relate_triples(self):
# dump known triples as rdf/xml (want) to self.repo.store.distilled_path
with self.repo.store.open_distilled('root', 'w') as fp:
fp.write(self.test_rdf_xml)
import ferenda.documentrepository
assert ferenda.documentrepository
# We mock out TripleStore to avoid creating an actual triplestore
with patch('ferenda.documentrepository.TripleStore') as mock:
self.repo.relate_triples("root")
self.assertTrue(mock.called) # ie a TripleStore class has been instantiated
# add_serialized is a new MagicMock object
add_serialized = self.repo._triplestore.add_serialized
self.assertTrue(add_serialized.called)
got = add_serialized.call_args[0][0]
format = add_serialized.call_args[1]['format']
self.assertEqual(self.test_rdf_xml,
got)
self.assertEqual("xml", format)
    def test_relate_dependencies(self):
        """relate_dependencies records, for every resource our document
        references, that our parsed file depends on it -- across repos."""
        # 1. create two docrepos A (self.repo?) and B
        class OtherRepo(DocumentRepository):
            alias = "other"
        # 2. create distilled for basefile 'root' in repo A that refers to
        # 2.1. one resource res-a in repo A, and
        # 2.2. another resource res-b in repo B
        with self.repo.store.open_distilled('root', 'w') as fp:
            fp.write(self.test_rdf_xml)
        # 3. relate_dependencies on repo A for basefile root
        otherrepo = OtherRepo(datadir=self.datadir)
        repos = [self.repo,otherrepo]
        self.repo.relate_dependencies("root", repos)
        # 4. Assert that
        # 4.1 self.repo.store.dependencies_path contains parsed_path('root')
        dependencyfile = self.repo.store.parsed_path('root') + "\n"
        self.assertEqual(util.readfile(self.repo.store.dependencies_path("res-a")),
                         dependencyfile)
        # 4.2 otherrepo.store.dependencies_path contains parsed_path('root')
        self.assertEqual(util.readfile(otherrepo.store.dependencies_path("res-b")),
                         dependencyfile)
        # 4.3 no other deps files exists in datadir
        self.assertEqual(2,
                         len(list(util.list_dirs(self.datadir, '.txt'))))
class Generate(RepoTester):
repo_a = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix : <http://example.org/repo/a/> .
:1 a :FooDoc;
dct:title "The title of Document A 1";
dct:identifier "A1" .
:1part a :DocumentPart;
dct:isPartOf :1;
dct:identifier "A1(part)" .
:2 a :FooDoc;
dct:title "The title of Document A 2";
dct:identifier "A2";
dct:references :1 .
:2part1 a :DocumentPart;
dct:isPartOf :2;
dct:identifier "A2(part1)";
dct:references :1 .
:2part2 a :DocumentPart;
dct:isPartOf :2;
dct:identifier "A2(part2)";
dct:references <http://example.org/repo/a/1part> .
:3 a :FooDoc;
dct:title "The title of Document A 3";
dct:identifier "A3" .
"""
repo_b = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix a: <http://example.org/repo/a/> .
@prefix : <http://example.org/repo/b/> .
:1 a :BarDoc;
dct:title "The title of Document B 1";
dct:identifier "B1";
dct:references a:1 .
:1part a a:DocumentPart;
dct:isPartOf :1;
dct:identifier "B1(part)";
dct:references a:1 .
:2 a :BarDoc;
dct:title "The title of Document B 2";
dct:identifier "B2" .
"""
# this is the graph we expect when querying for
# http://example.org/repo/a/1
annotations_a1 = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix : <http://example.org/repo/a/> .
@prefix b: <http://example.org/repo/b/> .
:1 a :FooDoc;
dct:title "The title of Document A 1";
dct:identifier "A1" ;
dct:isReferencedBy :2,
:2part1,
b:1,
b:1part .
:1part a :DocumentPart;
dct:isPartOf :1;
dct:identifier "A1(part)";
dct:isReferencedBy :2part2 .
:2 a :FooDoc;
dct:references :1;
dct:title "The title of Document A 2";
dct:identifier "A2" .
:2part1 a :DocumentPart;
dct:references :1;
dct:isPartOf :2;
dct:identifier "A2(part1)" .
:2part2 a :DocumentPart;
dct:references :1part;
dct:isPartOf :2;
dct:identifier "A2(part2)" .
b:1 a b:BarDoc;
dct:references :1;
dct:title "The title of Document B 1";
dct:identifier "B1" .
b:1part a :DocumentPart;
dct:isPartOf b:1;
dct:references :1;
dct:identifier "B1(part)" .
"""
annotations_b1 = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix a: <http://example.org/repo/a/> .
@prefix : <http://example.org/repo/b/> .
:1 a :BarDoc;
dct:isReferencedBy :1part;
dct:title "The title of Document B 1";
dct:identifier "B1";
dct:references a:1 .
:1part a a:DocumentPart;
dct:isPartOf :1;
dct:identifier "B1(part)";
dct:references a:1 .
"""
    class TestRepo(DocumentRepository):
        # Minimal repo whose canonical URIs live directly under
        # http://example.org/repo/a/, matching the turtle fixtures above.
        alias = "test"

        def canonical_uri(self,basefile):
            return "http://example.org/repo/a/%s" % basefile
    def setUp(self):
        # fresh temp datadir per test, seeded with the resources.xml fixture
        self.datadir = tempfile.mkdtemp()
        self.storetype = None
        resources = self.datadir+os.sep+"rsrc"+os.sep+"resources.xml"
        util.ensure_dir(resources)
        shutil.copy2("%s/files/base/rsrc/resources.xml"%os.path.dirname(__file__),
                     resources)
    def tearDown(self):
        # Clear the backing triple store (if one was used), then remove the
        # temp datadir.
        # NOTE(review): self.storetype is set to None in setUp and reset to
        # None unconditionally in _get_repo, so this branch appears never to
        # run -- confirm whether _get_repo was meant to record the actual
        # storetype here.
        if self.storetype:
            store = TripleStore(storetype=self.repo.config.storetype,
                                location=self.repo.config.storelocation,
                                repository=self.repo.config.storerepository)
            store.clear()
            # Sleepycat keeps the underlying db open; close it explicitly
            if self.repo.config.storetype == "SLEEPYCAT":
                store.graph.close()
        shutil.rmtree(self.datadir)
def _load_store(self, repo):
store = TripleStore(storetype=repo.config.storetype,
location=repo.config.storelocation,
repository=repo.config.storerepository)
store.add_serialized(self.repo_a, format="turtle")
store.add_serialized(self.repo_b, format="turtle")
if repo.config.storetype == "SLEEPYCAT":
store.graph.close()
# return store
def _test_construct_annotations(self, repo):
want = rdflib.Graph()
want.parse(data=self.annotations_a1,format="turtle")
got = repo.construct_annotations("http://example.org/repo/a/1")
self.assertEqualGraphs(want, got, exact=True)
def _get_repo(self, storetype=None):
    """Build a TestRepo configured for the given triple store backend.

    :param storetype: One of 'SQLITE', 'SLEEPYCAT', 'FUSEKI', 'SESAME',
        or None for a repo with no store configured at all.
    :returns: a configured TestRepo instance
    """
    params = {'storetype':storetype,
              'datadir':self.datadir,
              'storerepository':'ferenda'}
    # NOTE(review): this resets self.storetype to None and never records
    # the actual storetype, so tearDown's store-clearing branch never
    # fires for repos created here -- confirm whether that is intended.
    self.storetype = None
    if storetype == 'SQLITE':
        params['storelocation'] = self.datadir+"/ferenda.sqlite"
    elif storetype == 'SLEEPYCAT':
        params['storelocation'] = self.datadir+"/ferenda.db"
    elif storetype == 'FUSEKI':
        params['storelocation'] = 'http://localhost:3030/'
        params['storerepository'] = 'ds'
    elif storetype == 'SESAME':
        params['storelocation'] = 'http://localhost:8080/openrdf-sesame'
    elif storetype is None:  # fixed: identity comparison (was "== None")
        # No store at all: drop the store params entirely
        del params['storetype']
        del params['storerepository']
        params['storelocation'] = None
    else:
        self.fail("Storetype %s not valid" % storetype)
    return self.TestRepo(**params)
def test_construct_annotations_sqlite(self):
    # SQLITE backend: always available, so no skip decorator.
    self.repo = self._get_repo('SQLITE')
    self._load_store(self.repo)
    self._test_construct_annotations(self.repo)
@unittest.skipIf('SKIP_SLEEPYCAT_TESTS' in os.environ,
                 "Skipping Sleepycat tests")
def test_construct_annotations_sleepycat(self):
    # Same scenario as the SQLITE variant, against a Sleepycat (BDB) store.
    self.repo = self._get_repo('SLEEPYCAT')
    self._load_store(self.repo)
    self._test_construct_annotations(self.repo)
@unittest.skipIf('SKIP_FUSEKI_TESTS' in os.environ,
                 "Skipping Fuseki tests")
def test_construct_annotations_fuseki(self):
    # Requires a local Fuseki server on port 3030 (see _get_repo).
    self.repo = self._get_repo('FUSEKI')
    self._load_store(self.repo)
    self._test_construct_annotations(self.repo)
@unittest.skipIf('SKIP_SESAME_TESTS' in os.environ,
                 "Skipping Sesame tests")
def test_construct_annotations_sesame(self):
    # Requires a local Sesame server on port 8080 (see _get_repo).
    self.repo = self._get_repo('SESAME')
    self._load_store(self.repo)
    self._test_construct_annotations(self.repo)
def test_graph_to_annotation_file(self):
    """Serialize the annotations_b1 graph via graph_to_annotation_file
    and compare against the expected Grit-style <graph> XML."""
    testgraph = rdflib.Graph()
    testgraph.parse(data=self.annotations_b1,format="turtle")
    # Bind prefixes so the serialization uses a:/b:/dct: element names
    testgraph.bind("a", rdflib.Namespace("http://example.org/repo/a/"))
    testgraph.bind("b", rdflib.Namespace("http://example.org/repo/b/"))
    testgraph.bind("dct", rdflib.Namespace("http://purl.org/dc/terms/"))
    # No store backend needed for pure graph serialization
    self.repo = self._get_repo()
    annotations = self.repo.graph_to_annotation_file(testgraph)
    self.maxDiff = None
    # Expected output: one <resource> per subject; rdf:type appears as
    # an <a> wrapper element, URI objects as ref= attributes, literals
    # as element text.
    want = """<graph xmlns:dct="http://purl.org/dc/terms/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:b="http://example.org/repo/b/"
xmlns:a="http://example.org/repo/a/">
<resource uri="http://example.org/repo/b/1">
<a><b:BarDoc/></a>
<dct:identifier>B1</dct:identifier>
<dct:isReferencedBy ref="http://example.org/repo/b/1part"/>
<dct:references ref="http://example.org/repo/a/1"/>
<dct:title>The title of Document B 1</dct:title>
</resource>
<resource uri="http://example.org/repo/b/1part">
<a><a:DocumentPart/></a>
<dct:identifier>B1(part)</dct:identifier>
<dct:isPartOf ref="http://example.org/repo/b/1"/>
<dct:references ref="http://example.org/repo/a/1"/>
</resource>
</graph>"""
    self.assertEqualXML(want,annotations)
def _test_generated(self):
    """Generate document a/1 and verify that the annotation <aside>
    elements in the result contain exactly the expected referring
    documents/parts (on both the document and its part)."""
    # Write a minimal parsed XHTML+RDFa document for basefile "1".
    # NOTE(review): the fixture uses property="dct:identfier" -- looks
    # like a typo for "dct:identifier"; confirm whether intentional.
    with self.repo.store.open_parsed("1", "w") as fp:
        fp.write("""<?xml version='1.0' encoding='utf-8'?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html xmlns:a="http://example.org/repo/a/" xmlns:b="http://example.org/repo/b/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:xsd="http://www.w3.org/2001/XMLSchema#" xmlns:dct="http://purl.org/dc/terms/" xmlns="http://www.w3.org/1999/xhtml">
<head about="http://example.org/repo/a/1">
<link href="http://example.org/repo/a/FooDoc" rel="rdf:type"/>
<meta content="A1" property="dct:identifier"/>
<title property="dct:title" xml:lang="">The title of Document A 1</title>
</head>
<body about="http://example.org/repo/a/1">
<div><p>Main document text</p></div>
<div content="A1(part)" about="http://example.org/repo/a/1part" property="dct:identfier" typeof="a:DocumentPart">
<p>Document part text</p>
</div>
</body>
</html>""")
    self.assertEqual("http://example.org/repo/a/1",
                     self.repo.canonical_uri("1"))
    self.repo.generate("1")
    # print("-----------------ANNOTATIONS--------------")
    # with self.repo.store.open_annotation("1") as fp:
    #     print(fp.read())
    # print("-----------------GENERATED RESULT--------------")
    # with self.repo.store.open_generated("1") as fp:
    #     print(fp.read())
    t = etree.parse(self.repo.store.generated_path("1"))
    # find top node .annotations,
    anode = t.find(".//aside[@class='annotations']")
    annotations = anode.findall("a")
    # confirm that exactly a:2, a:2#part1, b:1, b:1#part is there
    self.assertEqual(4, len(annotations))
    labels = set([a.text for a in annotations])
    self.assertEqual(set(['B1(part)',
                          'A2(part1)',
                          'B1',
                          'A2']),
                     labels)
    refs = set([a.get('href') for a in annotations])
    self.assertEqual(set(['http://example.org/repo/b/1',
                          'http://example.org/repo/a/2',
                          'http://example.org/repo/b/1part',
                          'http://example.org/repo/a/2part1']),
                     refs)
    # The document *part* gets its own aside with its single referrer
    anode = t.find(".//div[@about='http://example.org/repo/a/1part']/aside")
    annotations = anode.findall("a")
    self.assertEqual(1, len(annotations))
    self.assertEqual('http://example.org/repo/a/2part2',
                     annotations[0].get('href'))
    self.assertEqual('A2(part2)',
                     annotations[0].text)
@unittest.skipIf('SKIP_FUSEKI_TESTS' in os.environ,
                 "Skipping Fuseki tests")
def test_generate_fuseki(self):
    # NOTE: _load_store has its return commented out, so self.store
    # is assigned None here; the generate path reads the store itself.
    self.repo = self._get_repo('FUSEKI')
    self.store = self._load_store(self.repo)
    self._test_generated()
@unittest.skipIf('SKIP_SESAME_TESTS' in os.environ,
                 "Skipping Sesame tests")
def test_generate_sesame(self):
    # NOTE: self.store becomes None (see _load_store); harmless here.
    self.repo = self._get_repo('SESAME')
    self.store = self._load_store(self.repo)
    self._test_generated()
@unittest.skipIf('SKIP_SLEEPYCAT_TESTS' in os.environ,
                 "Skipping Sleepycat tests")
def test_generate_sleepycat(self):
    # NOTE: self.store becomes None (see _load_store); harmless here.
    self.repo = self._get_repo('SLEEPYCAT')
    self.store = self._load_store(self.repo)
    self._test_generated()
def test_generate_sqlite(self):
    # Default always-available backend; no skip decorator needed.
    self.repo = self._get_repo('SQLITE')
    self.store = self._load_store(self.repo)
    self._test_generated()
def test_flatten(self):
    """Verify that XSLT generation flattens nested URI-named divs."""
    self.repo = self._get_repo()
    # just make sure that the XSLT generation flattens out our
    # nested structure so that every URI-named section is enclosed
    # in a <div> just beneath the <article>
    test = """<?xml version='1.0' encoding='utf-8'?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>X</title>
</head>
<body about="http://example.org/a">
<div about="http://example.org/a#S1">
<p>First top-level section</p>
<div about="http://example.org/a#S1.1">
<p>First subsection</p>
<div>A non-about div</div>
<div about="http://example.org/a#S1.1.1">
<p>First subsubsection</p>
</div>
</div>
</div>
<div about="http://example.org/a#S2">
<p>Second top-level section</p>
<div about="http://example.org/a#S2.1">
<p>Second subsection</p>
</div>
</div>
</body>
</html>"""
    with self.repo.store.open_parsed("a", mode="w") as fp:
        fp.write(test)
    self.repo.generate("a")
    t = etree.parse(self.repo.store.generated_path("a"))
    # assert that there are exactly 5 sections, corresponding to
    # the 5 URI-named divs above (the non-about div is not counted)
    self.assertEqual(5, len(t.findall(".//article/div/section")))
class TOC(RepoTester):
    """Tests for the table-of-contents pipeline: toc_select,
    toc_criteria, toc_pagesets, toc_select_for_pages and the page
    generation steps, using two small bibliographic datasets loaded
    into separate named graphs of a SQLITE triple store."""

    # General datasets being reused in tests
    books = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix bibo: <http://purl.org/ontology/bibo/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.org/books/> .
# From http://en.wikipedia.org/wiki/List_of_best-selling_books
ex:A_Tale_of_Two_Cities a bibo:Book;
dct:title "A Tale of Two Cities";
dct:creator "Charles Dickens";
dct:issued "1859-04-30"^^xsd:date;
dct:publisher "Chapman & Hall" .
ex:The_Lord_of_the_Rings a bibo:Book;
dct:title "The Lord of the Rings";
dct:creator "J. R. R. Tolkien";
dct:issued "1954-07-29"^^xsd:date;
dct:publisher "George Allen & Unwin" .
ex:The_Little_Prince a bibo:Book;
dct:title "The Little Prince";
dct:creator "Antoine de Saint-Exup\xe9ry";
dct:issued "1943-01-01"^^xsd:date;
dct:publisher "Reynal & Hitchcock" .
ex:The_Hobbit a bibo:Book;
dct:title "The Hobbit";
dct:creator "J. R. R. Tolkien";
dct:issued "1937-09-21"^^xsd:date;
dct:publisher "George Allen & Unwin" .
ex:Dream_of_the_Red_Chamber a bibo:Book;
dct:title "Dream of the Red Chamber";
dct:creator "Cao Xueqin";
dct:issued "1791-01-01"^^xsd:date;
dct:publisher "Cheng Weiyuan & Gao E" .
ex:And_Then_There_Were_None a bibo:Book;
dct:title "And Then There Were None";
dct:creator "Agatha Christie";
dct:issued "1939-11-06"^^xsd:date;
dct:publisher "Collins Crime Club" .
"""
    articles = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix bibo: <http://purl.org/ontology/bibo/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.org/articles/> .
# http://www.the-scientist.com/?articles.view/articleNo/9678/title/The-4-Most-Cited-Papers--Magic-In-These-Methods/
ex:pm14907713 a bibo:AcademicArticle;
dct:title "Protein measurement with the Folin phenol reagent";
dct:creator "Oliver H. Lowry",
"Nira J. Rosenbrough",
"A. Lewis Farr",
"R.J. Randall";
dct:issued "1951-11-01"^^xsd:date;
dct:publisher "Journal of Biological Chemistry" .
ex:pm5432063 a bibo:AcademicArticle;
dct:title "Cleavage of structural proteins during the assembly of the head of bacteriophage T4";
dct:creator "Ulrich Karl Laemmli";
dct:issued "1970-08-15"^^xsd:date;
dct:publisher "Nature" .
ex:pm5806584 a bibo:AcademicArticle;
dct:title "Reliability of molecular weight determinations by dodecyl sulfate-polyacrylamide gel electrophoresis";
dct:creator "K. Weber",
"M. Osborn";
dct:issued "1969-08-25"^^xsd:date;
dct:publisher "Journal of Biological Chemistry" .
ex:pm942051 a bibo:AcademicArticle;
dct:title "A rapid and sensitive method for the quantitation of microgram quantities of protein utilizing the principle of protein dye-binding";
dct:creator "Marion M. Bradford";
dct:issued "1976-05-07"^^xsd:date;
dct:publisher "Analytical Biochemistry" .
"""
    # Rows expected from toc_select() for the books graph
    results1 = [{'uri':'http://example.org/books/A_Tale_of_Two_Cities',
                 'title': 'A Tale of Two Cities',
                 'issued': '1859-04-30'},
                {'uri':'http://example.org/books/The_Lord_of_the_Rings',
                 'title': 'The Lord of the Rings',
                 'issued': '1954-07-29'},
                {'uri':'http://example.org/books/The_Little_Prince',
                 'title': 'The Little Prince',
                 'issued': '1943-01-01'},
                {'uri':'http://example.org/books/The_Hobbit',
                 'title': 'The Hobbit',
                 'issued': '1937-09-21'},
                {'uri':'http://example.org/books/Dream_of_the_Red_Chamber',
                 'title': 'Dream of the Red Chamber',
                 'issued': '1791-01-01'},
                {'uri':'http://example.org/books/And_Then_There_Were_None',
                 'title': 'And Then There Were None',
                 'issued': '1939-11-06'}]
    # Rows expected from toc_select() for the articles graph
    results2 = [{'uri':'http://example.org/articles/pm14907713',
                 'title': 'Protein measurement with the Folin phenol reagent',
                 'issued': '1951-11-01'},
                {'uri':'http://example.org/articles/pm5432063',
                 'title': 'Cleavage of structural proteins during the assembly of the head of bacteriophage T4',
                 'issued': '1970-08-15'},
                {'uri':'http://example.org/articles/pm5806584',
                 'title': 'Reliability of molecular weight determinations by dodecyl sulfate-polyacrylamide gel electrophoresis',
                 'issued': '1969-08-25'},
                {'uri':'http://example.org/articles/pm942051',
                 'title': 'A rapid and sensitive method for the quantitation of microgram quantities of protein utilizing the principle of protein dye-binding',
                 'issued': '1976-05-07'}]
    # Expected pagesets for the books data: one per criterion below
    pagesets = [TocPageset('Sorted by title',[
        TocPage('a','Documents starting with "a"','title/a'),
        TocPage('d','Documents starting with "d"','title/d'),
        TocPage('h','Documents starting with "h"','title/h'),
        TocPage('l','Documents starting with "l"','title/l')
        ]),
                TocPageset('Sorted by publication year',[
        TocPage('1791','Documents published in 1791','issued/1791'),
        TocPage('1859','Documents published in 1859','issued/1859'),
        TocPage('1937','Documents published in 1937','issued/1937'),
        TocPage('1939','Documents published in 1939','issued/1939'),
        TocPage('1943','Documents published in 1943','issued/1943'),
        TocPage('1954','Documents published in 1954','issued/1954')
        ])]
    # Expected page -> document list mapping (each document is a one-Link
    # list; title pages are sorted ignoring a leading "The ")
    documentlists = {
        'issued/1791': [[Link("Dream of the Red Chamber",uri='http://example.org/books/Dream_of_the_Red_Chamber')]],
        'issued/1859': [[Link("A Tale of Two Cities",uri='http://example.org/books/A_Tale_of_Two_Cities')]],
        'issued/1937': [[Link("The Hobbit",uri='http://example.org/books/The_Hobbit')]],
        'issued/1939': [[Link("And Then There Were None",uri='http://example.org/books/And_Then_There_Were_None')]],
        'issued/1943': [[Link("The Little Prince",uri='http://example.org/books/The_Little_Prince')]],
        'issued/1954': [[Link("The Lord of the Rings",uri='http://example.org/books/The_Lord_of_the_Rings')]],
        'title/a': [[Link("And Then There Were None",uri='http://example.org/books/And_Then_There_Were_None')],
                    [Link("A Tale of Two Cities",uri='http://example.org/books/A_Tale_of_Two_Cities')]],
        'title/d': [[Link("Dream of the Red Chamber",uri='http://example.org/books/Dream_of_the_Red_Chamber')]],
        'title/h': [[Link("The Hobbit",uri='http://example.org/books/The_Hobbit')]],
        'title/l': [[Link("The Little Prince",uri='http://example.org/books/The_Little_Prince')],
                    [Link("The Lord of the Rings",uri='http://example.org/books/The_Lord_of_the_Rings')]]
        }
    # The title selector picks the first significant letter, skipping a
    # leading "The " (hence x['title'][4]); the key sorts by the title
    # lowercased with "The " and all whitespace removed.
    criteria = [TocCriteria(binding='title',
                            label='Sorted by title',
                            pagetitle='Documents starting with "%s"',
                            selector = lambda x: x['title'][4].lower() if x['title'].lower().startswith("the ") else x['title'][0].lower(),
                            key = lambda x: "".join((x['title'][4:] if x['title'].lower().startswith("the ") else x['title']).lower().split())),
                TocCriteria(binding='issued',
                            label='Sorted by publication year',
                            pagetitle='Documents published in %s',
                            selector=lambda x: x['issued'][:4],
                            key=lambda x: x['issued'][:4])]

    def setUp(self):
        """Set up a SQLITE triple store with books and articles in two
        separate named graphs (contexts)."""
        super(TOC, self).setUp()
        resources = self.datadir+os.sep+"rsrc"+os.sep+"resources.xml"
        util.ensure_dir(resources)
        shutil.copy2("%s/files/base/rsrc/resources.xml"%os.path.dirname(__file__),
                     resources)
        # (set up a triple store) and fill it with appropriate data
        d = DocumentRepository()
        defaults = d.get_default_options()
        # FIXME: We really need to subclass at least the toc_select
        # test to handle the four different possible storetypes. For
        # now we go with the default type (SQLITE, guaranteed to
        # always work) but the non-rdflib backends use different code
        # paths.
        self.store = TripleStore(storetype=defaults['storetype'],
                                 location=self.datadir+os.sep+"test.sqlite",
                                 repository=defaults['storerepository'])
        self.store.clear()
        self.store.context = "http://example.org/ctx/base"
        self.store.add_serialized(self.books,format="turtle")
        self.store.context = "http://example.org/ctx/other"
        self.store.add_serialized(self.articles,format="turtle")

    def tearDown(self):
        # clear triplestore (reset context first so clear() hits all graphs)
        self.store.context = None
        self.store.clear()
        del self.store
        super(TOC, self).tearDown()

    def test_toc_select(self):
        """toc_select must honor the named-graph (context) argument."""
        d = DocumentRepository(datadir=self.datadir,
                               loglevel='CRITICAL',
                               storelocation=self.datadir+os.sep+"test.sqlite")
        # make sure only one named graph, not entire store, gets searched
        got = d.toc_select("http://example.org/ctx/base")
        self.assertEqual(len(got),6)
        want = self.results1
        for row in want:
            self.assertIn(row, got)
        got = d.toc_select("http://example.org/ctx/other")
        self.assertEqual(len(got),4)
        want2 = self.results2
        for row in want2:
            self.assertIn(row, got)
        # No context: the whole store (6 books + 4 articles) is searched
        got = d.toc_select()
        self.assertEqual(len(got),10)
        want3 = want+want2
        for row in want3:
            self.assertIn(row, got)

    # toc_query is tested by test_toc_select
    def test_toc_criteria(self):
        """toc_criteria should derive criteria equivalent to self.criteria
        from the dct:title and dct:issued predicates."""
        d = DocumentRepository(datadir=self.datadir,
                               loglevel='CRITICAL')
        dct = d.ns['dct']
        want = self.criteria
        got = d.toc_criteria([dct.title, dct.issued])
        self.assertEqual(len(want), len(got))
        self.assertEqual(want[0].binding, got[0].binding)
        self.assertEqual(want[0].label, got[0].label)
        self.assertEqual(want[0].pagetitle, got[0].pagetitle)
        # Functions can't be compared directly; compare their output
        testdict = {'title': 'The data'}
        self.assertEqual(want[0].selector(testdict), got[0].selector(testdict))
        self.assertEqual('d', got[0].selector(testdict))
        self.assertEqual(want[1].binding, got[1].binding)
        self.assertEqual(want[1].label, got[1].label)
        self.assertEqual(want[1].pagetitle, got[1].pagetitle)
        testdict = {'issued': '2009-01-01'}
        self.assertEqual(want[1].selector(testdict), got[1].selector(testdict))

    # toc_selector is tested by test_toc_criteria
    def test_toc_pagesets(self):
        """toc_pagesets on the books rows should yield self.pagesets."""
        d = DocumentRepository(datadir=self.datadir,
                               loglevel='CRITICAL')
        data = self.results1
        got = d.toc_pagesets(data, self.criteria)
        want = self.pagesets
        self.assertEqual(len(got), 2)
        self.assertEqual(got[0].label, want[0].label)
        self.assertEqual(got[0].pages[0], want[0].pages[0])
        self.assertEqual(got[0], want[0])
        self.assertEqual(got[1], want[1])

    def test_select_for_pages(self):
        """toc_select_for_pages should distribute rows onto pages exactly
        as in self.documentlists."""
        d = DocumentRepository(datadir=self.datadir,
                               loglevel='CRITICAL')
        got = d.toc_select_for_pages(self.results1, self.pagesets, self.criteria)
        want = self.documentlists
        self.maxDiff = None
        self.assertEqual(got, want)

    def test_generate_page(self):
        """Generate the title/a page and verify both the intermediate
        XHTML and the final transformed HTML."""
        d = DocumentRepository(datadir=self.datadir,
                               loglevel='CRITICAL')
        path = d.toc_generate_page('title/a', self.documentlists['title/a'], self.pagesets)
        # 1. first, test intermediate XHTML file
        intermediate = path.replace(".html",".xhtml")
        self.assertTrue(os.path.exists(intermediate))
        #with open(intermediate) as fp:
        #    print(fp.read().decode('utf-8'))
        #print("=" * 60)
        t = etree.parse(intermediate)
        xhtmlns = "{http://www.w3.org/1999/xhtml}"
        # 1.1 Correct page title?
        self.assertEqual(t.findtext(".//"+xhtmlns+"title"),
                         'Documents starting with "a"')
        # 1.2 Correct navigation?
        # @id='nav' -> @role='navigation' ?
        navlinks = t.findall(".//"+xhtmlns+"ul[@role='navigation']//"+xhtmlns+"a")
        self.assertEqual(len(navlinks), 9) # 10 pages in total, but current page isn't linked
        self.assertEqual(navlinks[0].text, 'd')
        self.assertEqual(navlinks[0].get("href"), 'd.html')
        self.assertEqual(navlinks[3].get("href"), '../issued/1791.html')
        # 1.3 Correct document list?
        # @id='documentlist' => @role='main'
        docs = t.findall(".//"+xhtmlns+"ul[@role='main']/"+xhtmlns+"li/"+xhtmlns+"a")
        self.assertEqual(len(docs),2)
        # "And..." should go before "A Tale..."
        self.assertEqual(docs[0].text, 'And Then There Were None')
        self.assertEqual(docs[0].attrib['href'], 'http://example.org/books/And_Then_There_Were_None')
        # 2. secondly, test resulting HTML file
        self.assertTrue(os.path.exists(path))
        t = etree.parse(path)
        #with open(path) as fp:
        #    print(fp.read().decode('utf-8'))
        # Various other tests on a.html
        # 2.1 CSS links, relativized correctly?
        css = t.findall("head/link[@rel='stylesheet']")
        self.assertEqual(len(css),4) # normalize, main, ferenda, and fonts.googleapis.com
        if sys.version_info < (3, 2, 0): # renamed method in 3.2
            self.assertRegexpMatches(css[0].get('href'), '^../../../rsrc/css')
        else:
            self.assertRegex(css[0].get('href'), '^../../../rsrc/css')
        # 2.2 JS links, relativized correctly?
        js = t.findall("head/script")
        self.assertEqual(len(js),3) # jquery, modernizr and ferenda
        if sys.version_info < (3, 2, 0): # renamed method in 3.2
            self.assertRegexpMatches(js[0].get('src'), '^../../../rsrc/js')
        else:
            self.assertRegex(js[0].get('src'), '^../../../rsrc/js')
        # 2.3 <nav id="toc"> correct (c.f 1.2)
        navlinks = t.findall(".//nav[@id='toc']//li/a")
        self.assertEqual(len(navlinks),9)
        self.assertEqual(navlinks[0].get("href"), 'd.html')
        self.assertEqual(navlinks[3].get("href"), '../issued/1791.html')
        # 2.4 div[@class='main-container']/article (c.f 1.3)
        docs = t.findall(".//ul[@role='main']/li/a")
        self.assertEqual(len(docs),2)
        # "And..." should go before "A Tale..."
        self.assertEqual(docs[0].text, 'And Then There Were None')
        self.assertEqual(docs[0].attrib['href'], 'http://example.org/books/And_Then_There_Were_None')
        # 2.5 <h1 class="title"> correct?
        header = t.find(".//header/h1")
        self.assertEqual(header.text, 'testsite')
        # 2.6 div[@class='main-container']/h1 correct?
        header = t.find(".//div[@class='main-container']//h1")
        self.assertEqual(header.text, 'Documents starting with "a"')

    def test_generate_pages(self):
        """All 10 pages (4 title + 6 issued) should be generated."""
        d = DocumentRepository(datadir=self.datadir,
                               loglevel='CRITICAL')
        paths = d.toc_generate_pages(self.documentlists,self.pagesets)
        self.assertEqual(len(paths), 10)
        #print("=============%s====================" % paths[0])
        #with open(paths[0]) as fp:
        #    print(fp.read())
        for path in paths:
            self.assertTrue(os.path.exists(path))

    def test_generate_first_page(self):
        """index.html should mirror the first page (issued/1791)."""
        d = DocumentRepository(datadir=self.datadir,
                               loglevel='CRITICAL')
        path = d.toc_generate_first_page(self.documentlists,self.pagesets)
        self.assertEqual(path, self.p("base/toc/index.html"))
        self.assertTrue(os.path.exists(path))
        tree = etree.parse(path)
        # check content of path, particularly that css/js refs
        # and pageset links are correct. Also, that the selected
        # indexpage is indeed the first (eg. issued/1791)
        self.assertEqual("title/a.html",
                         tree.find(".//nav[@id='toc']").findall(".//a")[0].get("href"))
        self.assertEqual("../../rsrc/css/normalize.css",
                         tree.find(".//link").get("href"))
        self.assertEqual("Documents published in 1791",
                         tree.find(".//article/h1").text)
class News(RepoTester):
    """Tests for the news/feed machinery: news_criteria, news_entries and
    news_write_atom (paged Atom archive feeds per RFC 5005)."""

    def setUp(self):
        """Create 25 documents (basefiles 0-24) with DocumentEntry files,
        distilled RDF, parsed XHTML and generated HTML, each timestamped
        one hour apart starting 2013-01-01 12:00."""
        super(News, self).setUp()
        # create a bunch of DocumentEntry objects and save them
        basetime = datetime(2013,1,1,12,0)
        for basefile in range(25):
            v = {'id':self.repo.canonical_uri(basefile),
                 'title':"Doc #%s" % basefile}
            de = DocumentEntry()
            # Each lifecycle timestamp is offset a fixed number of
            # minutes within the document's hour slot.
            de.orig_created = basetime + timedelta(hours=basefile)
            de.orig_updated = basetime + timedelta(hours=basefile,minutes=10)
            de.orig_checked = basetime + timedelta(hours=basefile,minutes=20)
            de.published = basetime + timedelta(hours=basefile,minutes=30)
            de.updated = basetime + timedelta(hours=basefile,minutes=40)
            de.orig_url = "http://source.example.org/doc/%s" % basefile
            de.save(self.repo.store.documententry_path(str(basefile)))
            # Minimal distilled RDF graph with just a dct:title
            g = rdflib.Graph()
            desc = Describer(g,self.repo.canonical_uri(basefile))
            dct = self.repo.ns['dct']
            desc.value(dct.title,v['title'])
            #if basefile % 10 == 0:
            #    desc.value(dct.abstract,"This is a longer summary of document %s" % basefile)
            util.ensure_dir(self.repo.store.distilled_path(str(basefile)))
            with open(self.repo.store.distilled_path(str(basefile)), "wb") as fp:
                g.serialize(fp, format="pretty-xml")
            # Minimal parsed XHTML+RDFa document
            util.ensure_dir(self.repo.store.parsed_path(str(basefile)))
            with open(self.repo.store.parsed_path(str(basefile)), "w") as fp:
                fp.write("""<?xml version='1.0' encoding='utf-8'?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head about="%(id)s">
<title>%(title)s</title>
</head>
<body about="%(id)s">
<h1>%(title)s</h1>
</body>
</html>""" % v)
            # Minimal generated HTML5 document
            util.ensure_dir(self.repo.store.generated_path(str(basefile)))
            with open(self.repo.store.generated_path(str(basefile)), "w") as fp:
                fp.write("""<!DOCTYPE html>
<html>
<head>
<title>%(title)s</title>
</head>
<body>
<h1>%(title)s</h1>
</body>
</html>""" % v)

    def test_criteria(self):
        """Default news_criteria: a single catch-all 'main' feed keyed on
        entry.updated."""
        criteria = self.repo.news_criteria()
        self.assertEqual(len(criteria),1)
        self.assertEqual(criteria[0].basefile, "main")
        self.assertEqual(criteria[0].feedtitle, "New and updated documents")
        fakeentry = Mock()
        fakeentry.updated = datetime(2013,3,12,11,52)
        self.assertEqual(criteria[0].key(fakeentry), datetime(2013,3,12,11,52))
        self.assertTrue(criteria[0].selector(fakeentry))

    def test_entries(self):
        """news_entries should yield all 25 entries (in any order)."""
        unsorted_entries = self.repo.news_entries() # not guaranteed particular order
        # sort so that most recently updated first
        entries = sorted(list(unsorted_entries),
                         key=attrgetter('updated'), reverse=True)
        self.assertEqual(len(entries),25)
        self.assertEqual(entries[0].title, "Doc #24")
        self.assertEqual(entries[-1].title, "Doc #0")

    def test_write_atom(self):
        """news_write_atom with archivesize=6 should produce one main feed
        plus three archive feeds, chained per RFC 5005 prev/next-archive."""
        self.maxDiff = None
        unsorted_entries = self.repo.news_entries() # not guaranteed
        # particular order sort so that most recently updated first
        # (simplified ver of what news() does)
        entries = sorted(list(unsorted_entries),
                         key=attrgetter('updated'), reverse=True)
        paths = self.repo.news_write_atom(entries, 'New and updated documents', 'main',
                                          archivesize=6)
        d = self.datadir
        want = [self.p('%s/base/feed/main.atom'%d,False),
                self.p('%s/base/feed/main-archive-1.atom'%d,False),
                self.p('%s/base/feed/main-archive-2.atom'%d,False),
                self.p('%s/base/feed/main-archive-3.atom'%d, False)]
        self.assertEqual(paths, want)
        tree = etree.parse('%s/base/feed/main.atom'%d)
        NS = "{http://www.w3.org/2005/Atom}"
        # main-archive-1 0-5
        # main-archive-2 6-11
        # main-archive-3 12-17
        # main 18-24
        # assert that prev-archive points to main-archive-3.atom
        prev_archive = tree.find(NS+"link[@rel='prev-archive']")
        self.assertEqual(prev_archive.get("href"), "main-archive-3.atom")
        # assert that title is 'New and updated documents'
        self.assertEqual(tree.find(NS+"title").text, "New and updated documents")
        # assert that entries 18-24 is in main feed
        entries = tree.findall(NS+"entry")
        self.assertEqual(len(entries),7)
        basedate = datetime(2013,1,1,12,0)
        # assert that first entry is doc #24, has correct <id>,
        # <updated>, <published>, <title>, <content src> <link href>
        self._check_entry(entries[0],
                          entryid="http://localhost:8000/res/base/24",
                          published=basedate + timedelta(hours=24,minutes=30),
                          updated=basedate + timedelta(hours=24,minutes=40),
                          title='Doc #24',
                          contentsrc='../parsed/24.xhtml',
                          linksrc='../distilled/24.rdf')
        # same for last entry (doc #18)
        self._check_entry(entries[-1],
                          entryid="http://localhost:8000/res/base/18",
                          published=basedate + timedelta(hours=18,minutes=30),
                          updated=basedate + timedelta(hours=18,minutes=40),
                          title='Doc #18',
                          contentsrc='../parsed/18.xhtml',
                          linksrc='../distilled/18.rdf')
        # open archive-3, assert 6 entries,
        # prev-archive=main-archive-2, next-archive=main.atom
        tree = etree.parse('%s/base/feed/main-archive-3.atom'%d)
        self.assertEqual(len(tree.findall(NS+"entry")),6)
        self.assertEqual(tree.find(NS+"link[@rel='prev-archive']").get("href"),
                         "main-archive-2.atom")
        self.assertEqual(tree.find(NS+"link[@rel='next-archive']").get("href"),
                         "main.atom")
        # open archive-2, assert 6 entries,
        # prev-archive=main-archive-1, next-archive=main-archive-3
        tree = etree.parse('%s/base/feed/main-archive-2.atom'%d)
        self.assertEqual(len(tree.findall(NS+"entry")),6)
        self.assertEqual(tree.find(NS+"link[@rel='prev-archive']").get("href"),
                         "main-archive-1.atom")
        self.assertEqual(tree.find(NS+"link[@rel='next-archive']").get("href"),
                         "main-archive-3.atom")
        # open archive-1, assert 6 entries, no
        # prev-archive, next-archive=main-archive-2
        tree = etree.parse('%s/base/feed/main-archive-1.atom'%d)
        self.assertEqual(len(tree.findall(NS+"entry")),6)
        self.assertIsNone(tree.find(NS+"link[@rel='prev-archive']"))
        self.assertEqual(tree.find(NS+"link[@rel='next-archive']").get("href"),
                         "main-archive-2.atom")

    def _check_entry(self, entry, entryid, title, published, updated, contentsrc, linksrc):
        """Assert one Atom <entry>'s id/title/timestamps/content/link fields."""
        NS = "{http://www.w3.org/2005/Atom}"
        self.assertEqual(entry.find(NS+"id").text,entryid)
        self.assertEqual(entry.find(NS+"title").text,title)
        self.assertEqual(entry.find(NS+"published").text,
                         util.rfc_3339_timestamp(published))
        self.assertEqual(entry.find(NS+"updated").text,
                         util.rfc_3339_timestamp(updated))
        content = entry.find(NS+"content")
        self.assertEqual(content.get("src"), contentsrc)
        # NOTE(review): 'application/html+xml' is an unusual MIME type
        # (cf. application/xhtml+xml) but matches what news_write_atom
        # currently emits -- confirm whether both sides should change.
        self.assertEqual(content.get("type"), 'application/html+xml')
        link = entry.find(NS+"link[@rel='alternate']")
        self.assertEqual(link.get("href"), linksrc)
        self.assertEqual(link.get("type"),'application/rdf+xml')
class Storage(RepoTester):
    """Tests for basefile enumeration under both storage policies."""

    def test_list_basefiles_file(self):
        # "file" policy: each basefile is stored as one downloaded file.
        docs = ["123/a", "123/b", "124/a", "124/b"]
        for doc in docs:
            util.writefile(self.p("base/downloaded/%s.html" % doc), "Nonempty")
        # Basefiles are reported in reverse order relative to creation.
        self.assertEqual(list(self.repo.list_basefiles_for("parse")),
                         list(reversed(docs)))

    def test_list_basefiles_dir(self):
        # "dir" policy: each basefile is a directory holding index.html.
        docs = ["123/a", "123/b", "124/a", "124/b"]
        self.repo.storage_policy = "dir"
        self.repo.store.storage_policy = "dir"
        for doc in docs:
            util.writefile(self.p("base/downloaded/%s/index.html" % doc),
                           "nonempty")
        self.assertEqual(list(self.repo.list_basefiles_for("parse")),
                         list(reversed(docs)))
class Archive(RepoTester):
    """Tests for document archiving (versioning of downloaded/parsed/
    distilled/generated files) and version listing."""

    # Path served by the fake requests.get in test_download_and_archive;
    # reassigned between download calls.
    url_location = None

    def test_archive(self):
        """Archiving should move all four file types to versioned paths
        and leave the current paths empty."""
        # create an existing thing
        util.writefile(self.repo.store.downloaded_path("123/a"),
                       "This is the original document, downloaded")
        util.writefile(self.repo.store.parsed_path("123/a"),
                       "This is the original document, parsed")
        util.writefile(self.repo.store.distilled_path("123/a"),
                       "This is the original document, distilled")
        util.writefile(self.repo.store.generated_path("123/a"),
                       "This is the original document, generated")
        # archive it
        version = self.repo.get_archive_version("123/a")
        self.repo.store.archive("123/a",version)
        self.assertEqual(version, "1") # what algorithm do the default use? len(self.archived_versions)?
        eq = self.assertEqual
        # make sure archived files ended up in the right places
        eq(util.readfile(self.repo.store.downloaded_path("123/a", version="1")),
           "This is the original document, downloaded")
        eq(util.readfile(self.repo.store.parsed_path("123/a", version="1")),
           "This is the original document, parsed")
        eq(util.readfile(self.repo.store.distilled_path("123/a", version="1")),
           "This is the original document, distilled")
        eq(util.readfile(self.repo.store.generated_path("123/a", version="1")),
           "This is the original document, generated")
        # and that no files exists in the current directories
        self.assertFalse(os.path.exists(self.repo.store.downloaded_path("123/a")))
        self.assertFalse(os.path.exists(self.repo.store.parsed_path("123/a")))
        self.assertFalse(os.path.exists(self.repo.store.distilled_path("123/a")))
        self.assertFalse(os.path.exists(self.repo.store.generated_path("123/a")))

    def test_download_and_archive(self):
        """Downloading a changed document should archive the previous
        version before overwriting the current file."""
        # print("test_download_and_archive: cwd", os.getcwd())
        def my_get(url,**kwargs):
            # Fake requests.get: serve whatever file self.url_location
            # currently points at.
            res = Mock()
            with open(self.url_location,"rb") as fp:
                res.content = fp.read()
            res.headers = collections.defaultdict(lambda:None)
            res.headers['X-These-Headers-Are'] = 'Faked'
            res.status_code = 200
            return res
        with patch('requests.get',side_effect = my_get) as mock_get:
            self.url_location = "test/files/base/downloaded/123/a-version1.htm"
            self.assertTrue(self.repo.download_single("123/a"))
            self.url_location = "test/files/base/downloaded/123/a-version2.htm"
            self.assertTrue(self.repo.download_single("123/a"))
        eq = self.assertEqual
        # Current file holds version 2, archive holds version 1
        eq(util.readfile(self.p("base/downloaded/123/a.html")),
           util.readfile("test/files/base/downloaded/123/a-version2.htm"))
        eq(util.readfile(self.p("base/archive/downloaded/123/a/1.html")),
           util.readfile("test/files/base/downloaded/123/a-version1.htm"))

    def test_list_versions_complex(self):
        """Version numbers are global per basefile; each file type only
        gets an archived copy for versions where it existed."""
        util.writefile(self.repo.store.downloaded_path("123/a"),
                       "This is the first version")
        util.writefile(self.repo.store.parsed_path("123/a"),
                       "This is the first version (parsed)")
        util.writefile(self.repo.store.generated_path("123/a"),
                       "This is the first version (generated)")
        version = self.repo.get_archive_version("123/a")
        self.repo.store.archive("123/a",version)
        self.assertEqual(version, "1")
        util.writefile(self.repo.store.downloaded_path("123/a"),
                       "This is the second version")
        util.writefile(self.repo.store.parsed_path("123/a"),
                       "This is the second version (parsed)")
        version = self.repo.get_archive_version("123/a")
        self.repo.store.archive("123/a",version)
        self.assertEqual(version, "2")
        util.writefile(self.repo.store.downloaded_path("123/a"),
                       "This is the third version")
        version = self.repo.get_archive_version("123/a")
        self.repo.store.archive("123/a",version)
        self.assertEqual(version, "3")
        util.writefile(self.repo.store.generated_path("123/a"),
                       "This is the fourth version (generated ONLY)")
        version = self.repo.get_archive_version("123/a")
        self.repo.store.archive("123/a",version)
        self.assertEqual(version, "4")
        self.assertEqual(sorted(os.listdir(self.p("base/archive/downloaded/123/a/"))),
                         ['1.html', '2.html', '3.html'])
        self.assertEqual(sorted(os.listdir(self.p("base/archive/parsed/123/a/"))),
                         ['1.xhtml', '2.xhtml'])
        # NOTE(review): this is the only self.p() call in the class with
        # a leading slash ("/base/...") -- other calls use "base/...";
        # confirm whether self.p normalizes this or it's an oversight.
        self.assertEqual(sorted(os.listdir(self.p("/base/archive/generated/123/a/"))),
                         ['1.html', '4.html'])
        self.assertEqual(list(self.repo.store.list_versions("123/a")),
                         ['1','2','3', '4'])
        # A different basefile ("123") gets its own version sequence
        util.writefile(self.repo.store.downloaded_path("123"),
                       "This is the first version")
        version = self.repo.get_archive_version("123")
        self.repo.store.archive("123", version)
        self.assertEqual(version, "1")
        self.assertEqual(list(self.repo.store.list_versions("123")),
                         ['1'])
        self.assertEqual(list(self.repo.store.list_versions("123/a")),
                         ['1','2','3', '4'])
class Patch(RepoTester):
    """Tests for DocumentRepository.patch_if_needed: applying (or
    failing to apply) a unified diff stored in the patch store to a
    downloaded document.
    """

    # NOTE(review): the exact whitespace of sourcedoc/targetdoc and the
    # patch hunk context lines below must agree with each other, or the
    # patch will not apply -- confirm against the fixture if edited.
    sourcedoc = """<body>
  <h1>Basic document</h1>
  <p>
    This is some unchanged text.
    1: And some more again
    2: And some more again
    3: And some more again
    4: And some more again
    (to make sure we use two separate hunks)
    This is text that will be changed.
  </p>
</body>
"""
    # what sourcedoc should look like after the patch has been applied
    targetdoc = """<body>
  <h1>Patched document</h1>
  <p>
    This is some unchanged text.
    1: And some more again
    2: And some more again
    3: And some more again
    4: And some more again
    (to make sure we use two separate hunks)
    This is text that has changed.
  </p>
</body>
"""

    def setUp(self):
        super(Patch, self).setUp()
        # patches are looked up through a DocumentStore rooted in patchdir
        self.repo.config.patchdir = self.datadir
        self.patchstore = self.repo.documentstore_class(self.repo.config.patchdir + os.sep + self.repo.alias)

    def test_successful_patch(self):
        """A two-hunk patch matching sourcedoc is applied; the text after
        the first hunk's "@@" marker is returned as the description."""
        # Note that this patch's "fromfile" and "tofile" fields
        # doesn't match any actual file (and that there really isn't
        # any file stored on disk)
        patchpath = self.patchstore.path("123/a", "patches", ".patch")
        util.ensure_dir(patchpath)
        with open(patchpath, "w") as fp:
            fp.write("""--- basic.txt 2013-06-13 09:16:37.000000000 +0200
+++ changed.txt 2013-06-13 09:16:39.000000000 +0200
@@ -1,5 +1,5 @@ Editorial edit
 <body>
-  <h1>Basic document</h1>
+  <h1>Patched document</h1>
   <p>
     This is some unchanged text.
@@ -7,6 +7,6 @@
     3: And some more again
     4: And some more again
     (to make sure we use two separate hunks)
-    This is text that will be changed.
+    This is text that has changed.
   </p>
 </body>
""")
        result, desc = self.repo.patch_if_needed("123/a", self.sourcedoc)
        self.assertEqual("Editorial edit", desc)
        self.assertEqual(self.targetdoc, result)

    def test_failed_patch(self):
        """A patch whose context does not match sourcedoc raises PatchError."""
        with self.patchstore.open("123/a", "patches", ".patch", "w") as fp:
            fp.write("""--- basic.txt 2013-06-13 09:16:37.000000000 +0200
+++ changed.txt 2013-06-13 09:16:39.000000000 +0200
@@ -1,5 +1,5 @@ This patch assumes that sourcedoc looks different
 <body>
-  <h1>Unpatched document</h1>
+  <h1>Patched document</h1>
   <p>
     This is some unchanged text.
     1: And some more again
@@ -7,6 +7,6 @@
     3: And some more again
     4: And some more again
     (to make sure we use two separate hunks)
-    This is text that will be changed.
+    This is text that has changed.
   </p>
 </body>
""")
        with self.assertRaises(PatchError):
            result, desc = self.repo.patch_if_needed("123/a", self.sourcedoc)

    def test_no_patch(self):
        """Without any patch on file, the document is returned unchanged
        and the description is None."""
        result, desc = self.repo.patch_if_needed("123/a", self.sourcedoc)
        self.assertEqual(None, desc)
        self.assertEqual(self.sourcedoc, result)
# Add doctests in the module
from ferenda import documentrepository
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` protocol hook: include the doctests
    embedded in the documentrepository module in this test run."""
    doctests = doctest.DocTestSuite(documentrepository)
    tests.addTests(doctests)
    return tests
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
from datetime import datetime
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import six
from ferenda import util
from ferenda.layeredconfig import LayeredConfig
class Main(unittest.TestCase):
    """Tests for LayeredConfig: typing and precedence of configuration
    values coming from code defaults, ini files and command-line
    arguments, including nested subsections."""

    def setUp(self):
        # Reference config file used by the inifile-based tests below;
        # individual tests may overwrite it with their own content.
        with open("ferenda.ini","w") as fp:
            fp.write("""
[__root__]
datadir = mydata
processes = 4
loglevel = INFO
forceparse = True
jsfiles = ['default.js','modernizr.js']
[mymodule]
loglevel = DEBUG
forceparse=False
jsfiles = ['pdfviewer.js','zepto.js']
lastrun = 2012-09-18 15:41:00
""")

    def tearDown(self):
        if os.path.exists("ferenda.ini"):
            os.unlink("ferenda.ini")

    def test_defaults(self):
        """Values from code defaults keep their native Python types."""
        cfg = LayeredConfig(defaults={'datadir':'mydata',
                                      'processes':4,
                                      'loglevel':'INFO',
                                      'forceparse':True,
                                      'jsfiles':['default.js','modernizr.js']
                                      })
        self.assertEqual(cfg.datadir,'mydata')
        self.assertIs(type(cfg.datadir),six.text_type)
        self.assertEqual(cfg.processes,4)
        self.assertIs(type(cfg.processes),int)
        self.assertEqual(cfg.loglevel,'INFO')
        self.assertIs(type(cfg.loglevel),six.text_type)
        self.assertEqual(cfg.forceparse,True)
        self.assertIs(type(cfg.forceparse),bool)
        self.assertEqual(cfg.jsfiles,['default.js','modernizr.js'])
        self.assertIs(type(cfg.jsfiles),list)

    def test_defaults_subsections(self):
        """Nested default dicts become subsections; without cascading,
        root keys are not visible in subsections and vice versa."""
        # this tests the following datatypes:
        # str, int, bool, list, datetime -- should cover most cases
        cfg = LayeredConfig(defaults={'datadir':'mydata',
                                      'processes':4,
                                      'loglevel':'INFO',
                                      'forceparse':True,
                                      'jsfiles':['default.js','modernizr.js'],
                                      'mymodule':{'loglevel':'DEBUG',
                                                  'forceparse':False,
                                                  'jsfiles':['pdfviewer.js','zepto.js'],
                                                  'lastrun':datetime(2012,9,18,15,41,0),
                                                  'arbitrary': {
                                                      'nesting': {
                                                          'depth':'works'
                                                      }
                                                  }
                                                  }
                                      })
        self.assertEqual(cfg.datadir,'mydata')
        with self.assertRaises(AttributeError):
            cfg.mymodule.datadir
        self.assertEqual(cfg.processes,4)
        with self.assertRaises(AttributeError):
            cfg.mymodule.processes
        self.assertEqual(cfg.loglevel,'INFO')
        self.assertEqual(cfg.mymodule.loglevel,'DEBUG')
        self.assertEqual(cfg.forceparse,True)
        self.assertEqual(cfg.mymodule.forceparse,False)
        self.assertEqual(cfg.jsfiles,['default.js','modernizr.js'])
        self.assertEqual(cfg.mymodule.jsfiles,['pdfviewer.js','zepto.js'])
        self.assertEqual(cfg.mymodule.arbitrary.nesting.depth, 'works')
        with self.assertRaises(AttributeError):
            cfg.lastrun
        self.assertEqual(cfg.mymodule.lastrun,datetime(2012,9,18,15,41,0))

    def test_inifile(self):
        """Values read only from an ini file are all plain strings."""
        with open("ferenda.ini","w") as fp:
            fp.write("""
[__root__]
datadir = mydata
processes = 4
forceparse = True
jsfiles = ['default.js','modernizr.js']
""")
        cfg = LayeredConfig(inifile="ferenda.ini")
        self.assertEqual(cfg.datadir,'mydata')
        self.assertIs(type(cfg.datadir),str)
        self.assertEqual(cfg.processes,'4')
        self.assertIs(type(cfg.processes),str)
        self.assertEqual(cfg.forceparse,'True')
        self.assertIs(type(cfg.forceparse),str)
        self.assertEqual(cfg.jsfiles,"['default.js','modernizr.js']")
        self.assertIs(type(cfg.jsfiles),str)

    def test_inifile_subsections(self):
        """Ini sections other than __root__ become subsections."""
        cfg = LayeredConfig(inifile="ferenda.ini")
        self.assertEqual(cfg.datadir,'mydata')
        with self.assertRaises(AttributeError):
            cfg.mymodule.datadir
        self.assertEqual(cfg.processes,'4')
        with self.assertRaises(AttributeError):
            cfg.mymodule.processes
        self.assertEqual(cfg.loglevel,'INFO')
        self.assertEqual(cfg.mymodule.loglevel,'DEBUG')
        self.assertEqual(cfg.forceparse,'True')
        self.assertEqual(cfg.mymodule.forceparse,'False')
        self.assertEqual(cfg.jsfiles,"['default.js','modernizr.js']")
        self.assertEqual(cfg.mymodule.jsfiles,"['pdfviewer.js','zepto.js']")
        with self.assertRaises(AttributeError):
            cfg.lastrun
        self.assertEqual(cfg.mymodule.lastrun,"2012-09-18 15:41:00")

    def test_commandline(self):
        """Command-line values are strings, except repeated options
        (-> list) and valueless flags (-> bool True)."""
        cmdline = ['--datadir=mydata',
                   '--processes=4',
                   '--loglevel=INFO',
                   '--forceparse=True', # results in string, not bool - compare to --implicitboolean below
                   '--jsfiles=default.js',
                   '--jsfiles=modernizr.js',
                   '--implicitboolean']
        cfg = LayeredConfig(commandline=cmdline)
        self.assertEqual(cfg.datadir,'mydata')
        self.assertIs(type(cfg.datadir),str)
        self.assertEqual(cfg.processes,'4')
        self.assertIs(type(cfg.processes),str)
        self.assertEqual(cfg.forceparse,'True')
        self.assertIs(type(cfg.forceparse),str)
        self.assertEqual(cfg.jsfiles,['default.js','modernizr.js'])
        self.assertIs(type(cfg.jsfiles),list)
        self.assertTrue(cfg.implicitboolean)
        self.assertIs(type(cfg.implicitboolean),bool)

    def test_commandline_subsections(self):
        """Dashes in option names create (arbitrarily deep) subsections."""
        cmdline = ['--datadir=mydata',
                   '--processes=4',
                   '--loglevel=INFO',
                   '--forceparse=True',
                   '--jsfiles=default.js',
                   '--jsfiles=modernizr.js',
                   '--mymodule-loglevel=DEBUG',
                   '--mymodule-forceparse=False',
                   '--mymodule-jsfiles=pdfviewer.js',
                   '--mymodule-jsfiles=zepto.js',
                   '--mymodule-lastrun=2012-09-18 15:41:00',
                   '--mymodule-arbitrary-nesting-depth=works']
        cfg = LayeredConfig(commandline=cmdline)
        self.assertEqual(cfg.datadir,'mydata')
        with self.assertRaises(AttributeError):
            cfg.mymodule.datadir
        self.assertEqual(cfg.processes,'4')
        with self.assertRaises(AttributeError):
            cfg.mymodule.processes
        self.assertEqual(cfg.loglevel,'INFO')
        self.assertEqual(cfg.mymodule.loglevel,'DEBUG')
        self.assertEqual(cfg.forceparse,'True')
        self.assertEqual(cfg.mymodule.forceparse,'False')
        self.assertEqual(cfg.jsfiles,['default.js','modernizr.js'])
        self.assertEqual(cfg.mymodule.jsfiles,['pdfviewer.js','zepto.js'])
        self.assertEqual(cfg.mymodule.arbitrary.nesting.depth, 'works')
        with self.assertRaises(AttributeError):
            cfg.lastrun
        self.assertEqual(cfg.mymodule.lastrun,"2012-09-18 15:41:00")

    def test_typed_inifile(self):
        """Type hints supplied through defaults coerce ini-file strings
        into int/bool/list/datetime values."""
        types = {'datadir':str,
                 'processes':int,
                 'forceparse':bool,
                 'jsfiles':list,
                 'mymodule':{'forceparse':bool,
                             'lastrun':datetime}}
        cfg = LayeredConfig(defaults=types,inifile="ferenda.ini")
        # cfg = LayeredConfig(inifile="ferenda.ini")
        self.assertEqual(cfg.datadir,'mydata')
        self.assertIs(type(cfg.datadir),str)
        self.assertEqual(cfg.processes,4)
        self.assertIs(type(cfg.processes),int)
        self.assertEqual(cfg.forceparse,True)
        self.assertIs(type(cfg.forceparse),bool)
        self.assertEqual(cfg.jsfiles,['default.js','modernizr.js'])
        self.assertIs(type(cfg.jsfiles),list)
        self.assertEqual(cfg.mymodule.forceparse,False)
        self.assertIs(type(cfg.mymodule.forceparse),bool)
        self.assertEqual(cfg.mymodule.lastrun,datetime(2012,9,18,15,41,0))
        self.assertIs(type(cfg.mymodule.lastrun),datetime)

    def test_typed_commandline(self):
        """Type hints also coerce command-line option strings."""
        types = {'datadir':str,
                 'processes':int,
                 'forceparse':bool,
                 'jsfiles':list,
                 'mymodule':{'forceparse':bool,
                             'lastrun':datetime}
                 }
        cmdline = ['--datadir=mydata',
                   '--processes=4',
                   '--forceparse=True',
                   '--jsfiles=default.js',
                   '--jsfiles=modernizr.js',
                   '--mymodule-forceparse=False',
                   '--mymodule-lastrun=2012-09-18 15:41:00']
        cfg = LayeredConfig(defaults=types,commandline=cmdline)
        self.assertEqual(cfg.datadir,'mydata')
        self.assertIs(type(cfg.datadir),str)
        self.assertEqual(cfg.processes,4)
        self.assertIs(type(cfg.processes),int)
        self.assertEqual(cfg.forceparse,True)
        self.assertIs(type(cfg.forceparse),bool)
        self.assertEqual(cfg.jsfiles,['default.js','modernizr.js'])
        self.assertIs(type(cfg.jsfiles),list)
        self.assertEqual(cfg.mymodule.forceparse,False)
        self.assertIs(type(cfg.mymodule.forceparse),bool)
        self.assertEqual(cfg.mymodule.lastrun,datetime(2012,9,18,15,41,0))
        self.assertIs(type(cfg.mymodule.lastrun),datetime)

    def test_layered(self):
        """Precedence: commandline beats inifile beats code defaults."""
        defaults = {'loglevel':'ERROR'}
        cmdline = ['--loglevel=DEBUG']
        cfg = LayeredConfig(defaults=defaults)
        self.assertEqual(cfg.loglevel, 'ERROR')
        cfg = LayeredConfig(defaults=defaults,inifile="ferenda.ini")
        self.assertEqual(cfg.loglevel, 'INFO')
        cfg = LayeredConfig(defaults=defaults,inifile="ferenda.ini",commandline=cmdline)
        self.assertEqual(cfg.loglevel, 'DEBUG')

    def test_layered_subsections(self):
        """With cascade=True, subsections fall back to root values (and
        root values are visible through subsections)."""
        defaults = {'force':False,
                    'datadir':'thisdata',
                    'loglevel':'INFO'}
        cmdline=['--mymodule-datadir=thatdata','--mymodule-force'] #
        cfg = LayeredConfig(defaults=defaults,commandline=cmdline,cascade=True)
        self.assertEqual(cfg.mymodule.force, True)
        self.assertEqual(cfg.mymodule.datadir, 'thatdata')
        self.assertEqual(cfg.mymodule.loglevel, 'INFO')
        defaults = {'mymodule':defaults}
        cmdline=['--datadir=thatdata','--force'] #
        cfg = LayeredConfig(defaults=defaults,commandline=cmdline,cascade=True)
        self.assertEqual(cfg.mymodule.force, True)
        self.assertEqual(cfg.mymodule.datadir, 'thatdata')
        self.assertEqual(cfg.mymodule.loglevel, 'INFO')

    def test_modified(self):
        """Values set at runtime are readable back from the config."""
        defaults = {'lastdownload':None}
        cfg = LayeredConfig(defaults=defaults)
        now = datetime.now()
        cfg.lastdownload = now
        self.assertEqual(cfg.lastdownload,now)

    def test_modified_subsections(self):
        # smoke test: setting a subsection value must not raise even
        # when defaults, inifile, commandline and cascade are combined
        defaults = {'force':False,
                    'datadir':'thisdata',
                    'loglevel':'INFO'}
        cmdline=['--mymodule-datadir=thatdata','--mymodule-force'] #
        cfg = LayeredConfig(defaults=defaults,inifile="ferenda.ini",commandline=cmdline,cascade=True)
        cfg.mymodule.loglevel = 'ERROR'

    def test_write_configfile(self):
        """write() persists runtime modifications back to the ini file.

        NOTE(review): `want` must match ConfigParser's output format
        exactly (spacing and blank lines) -- confirm against the
        installed configparser if this assertion starts failing.
        """
        cfg = LayeredConfig(inifile="ferenda.ini")
        cfg.mymodule.lastrun = datetime(2013,9,18,15,41,0)
        cfg.write()
        want = """[__root__]
datadir = mydata
processes = 4
loglevel = INFO
forceparse = True
jsfiles = ['default.js','modernizr.js']

[mymodule]
loglevel = DEBUG
forceparse = False
jsfiles = ['pdfviewer.js','zepto.js']
lastrun = 2013-09-18 15:41:00

"""
        got = util.readfile("ferenda.ini")
        #if not isinstance(got, six.text_type):
        #    got = got.decode("utf-8")
        self.assertEqual(want,got)

    def test_write_noconfigfile(self):
        # write() without a backing inifile must be a silent no-op
        cfg = LayeredConfig(defaults={'lastrun': datetime(2012,9,18,15,41,0)})
        cfg.lastrun = datetime(2013,9,18,15,41,0)
        cfg.write()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os,sys
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
# if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import codecs
from ferenda.textreader import TextReader
PREFIX = os.path.dirname(__file__)+"/files/textreader"
class Basic(unittest.TestCase):
    """Basic TextReader operations on a plain ASCII file (the Python
    LICENSE text), using UNIX line separators."""

    def setUp(self):
        self.f = TextReader(PREFIX + "/LICENSE.txt",linesep=TextReader.UNIX)

    def testReadline(self):
        """readline() returns successive lines without the separator."""
        self.assertEqual(self.f.readline(),
                         'A. HISTORY OF THE SOFTWARE')
        self.assertEqual(self.f.readline(),
                         '==========================')
        self.f.seek(0)

    def testIterateFile(self):
        """Iterating consumes the reader; bof()/eof() track position."""
        self.assertEqual(self.f.bof(), True)
        self.assertEqual(self.f.eof(), False)
        for line in self.f:
            pass
        self.assertEqual(self.f.bof(), False)
        self.assertEqual(self.f.eof(), True)
        self.f.seek(0)

    def testReadparagraph(self):
        """readparagraph() returns blank-line-separated chunks."""
        l = self.f.readparagraph()
        self.assertEqual(l, 'A. HISTORY OF THE SOFTWARE'+self.f.linesep+'==========================')
        l = self.f.readparagraph()
        self.assertEqual(l, 'Python was created in the early 1990s by Guido van Rossum at Stichting'+self.f.linesep+
                         'Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands'+self.f.linesep+
                         'as a successor of a language called ABC. Guido remains Python\'s'+self.f.linesep+
                         'principal author, although it includes many contributions from others.')
        self.f.cuepast("to make these releases possible.") # next paragraph is separated by three newlines
        t = self.f.readparagraph()[:23]
        self.assertEqual(t,"B. TERMS AND CONDITIONS")
        self.f.seek(0)

    def testReadChunk(self):
        """readchunk() reads up to (and consumes) a given delimiter."""
        l = self.f.readchunk('(')
        l = self.f.readchunk(')')
        self.assertEqual(l,'CWI, see http://www.cwi.nl')
        self.f.seek(0)

    def testPeekLine(self):
        """peekline(n) looks ahead without moving the read position."""
        l = self.f.peekline()
        self.assertEqual(l, 'A. HISTORY OF THE SOFTWARE')
        l = self.f.peekline(4)
        self.assertEqual(l, 'Python was created in the early 1990s by Guido van Rossum at Stichting')
        self.f.seek(0)

    def testPeekParagraph(self):
        """peekparagraph(n) looks ahead one paragraph at a time."""
        l = self.f.peekparagraph()
        self.assertEqual(l, 'A. HISTORY OF THE SOFTWARE'+self.f.linesep+'==========================')
        l = self.f.peekparagraph(2)
        self.assertEqual(l, 'Python was created in the early 1990s by Guido van Rossum at Stichting'+self.f.linesep+
                         'Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands'+self.f.linesep+
                         'as a successor of a language called ABC. Guido remains Python\'s'+self.f.linesep+
                         'principal author, although it includes many contributions from others.')
        self.f.seek(0)

    def testPrevLine(self):
        """prevline(n) reads backwards from the current position."""
        self.f.readparagraph()
        self.f.readparagraph()
        self.assertEqual(self.f.prevline(3), # first two newlines, then the actual previous line (does this make sense?)
                         'principal author, although it includes many contributions from others.')
        self.assertEqual(self.f.prevline(6),
                         'Python was created in the early 1990s by Guido van Rossum at Stichting')
        self.f.seek(0)

    def testCue(self):
        """cue() positions the reader just before the search string."""
        self.f.cue("Guido")
        self.assertEqual(self.f.readline(),
                         'Guido van Rossum at Stichting')
        self.f.seek(0)

    def testCuePast(self):
        """cuepast() positions the reader just after the search string."""
        self.f.cuepast("Guido")
        self.assertEqual(self.f.readline(),
                         ' van Rossum at Stichting')
        self.f.seek(0)

    def testReadTo(self):
        """readto() returns text up to, but not including, the match."""
        self.assertEqual(self.f.readto("SOFTWARE"),
                         'A. HISTORY OF THE ')
# run all basic tests again, but this time initialised from a unicode buffer
class Ustring(Basic):
    """Re-run every test in Basic against a TextReader initialised from
    an in-memory unicode string instead of a file name."""

    def setUp(self):
        with codecs.open(PREFIX + "/LICENSE.txt", encoding='ascii') as fp:
            content = fp.read()
        self.f = TextReader(ustring=content, linesep=TextReader.UNIX)
class Codecs(unittest.TestCase):
    """TextReader decoding of fixture files in various encodings.

    NOTE(review): this was previously declared as ``class Codecs:``
    (no TestCase base), which means unittest never collected or ran
    these tests. If leaving out the base class was a deliberate way of
    disabling them (e.g. missing fixture files), revert this change.
    """

    def testUTF(self):
        """Decode a UTF-8 fixture and read non-ASCII characters."""
        f = TextReader(PREFIX + "/test/test_doctest4.txt", "utf-8")
        f.cue("u'f")
        self.assertEqual(f.read(5),
                         "u'f\u00f6\u00f6")
        f.cue("u'b")
        self.assertEqual(f.read(5),
                         "u'b\u0105r")

    def testISO(self):
        """Decode a Latin-1 (iso-8859-1) fixture."""
        f = TextReader(PREFIX + "/test/test_shlex.py", "iso-8859-1")
        f.cue(';|-|)|')
        f.readline()
        self.assertEqual(f.read(5),
                         "\u00e1\u00e9\u00ed\u00f3\u00fa")

    def testKOI8(self):
        """Decode a Cyrillic (koi8-r) fixture."""
        f = TextReader(PREFIX + "/test/test_pep263.py", "koi8-r")
        f.cue('u"')
        self.assertEqual(f.read(7),
                         'u"\u041f\u0438\u0442\u043e\u043d')
class Processing(unittest.TestCase):
    """Tests for TextReader's optional text post-processing filters
    (autostrip, autodewrap, and not-yet-specified ones)."""

    def setUp(self):
        self.f = TextReader(PREFIX + "/LICENSE.txt",linesep=TextReader.UNIX)

    def testStrip(self):
        """autostrip removes leading/trailing whitespace from each line.

        NOTE(review): the expected strings must match the LICENSE.txt
        fixture's exact (possibly multi-space) column spacing -- confirm
        against the fixture file.
        """
        self.f.autostrip = True
        self.assertEqual(self.f.peekline(28),
                         'Release Derived Year Owner GPL-')
        self.f.autostrip = False
        self.assertEqual(self.f.peekline(28),
                         ' Release Derived Year Owner GPL-')
        self.f.seek(0)

    def testDewrap(self):
        """autodewrap joins a paragraph's wrapped lines into one line."""
        self.f.autodewrap = True
        self.assertEqual(self.f.readparagraph(),
                         'A. HISTORY OF THE SOFTWARE ==========================')
        self.f.seek(0)
        self.f.autodewrap = False
        self.assertEqual(self.f.readparagraph(),
                         'A. HISTORY OF THE SOFTWARE'+self.f.linesep+'==========================')
        self.f.seek(0)

    def testDehyphenate(self):
        # placeholder: behaviour not yet specified
        pass

    def testExpandtabs(self):
        # placeholder: behaviour not yet specified
        pass

    def testReadTable(self):
        # Should this even be in the Processing test suite?
        pass
class Customiterator(unittest.TestCase):
    """Test TextReader.getiterator with a custom chunk-reading callable."""

    def setUp(self):
        self.f = TextReader(PREFIX + "/LICENSE.txt", linesep=TextReader.UNIX)

    def testIterateParagraph(self):
        # iterate paragraph-wise: chunks delimited by two line separators
        delimiter = self.f.linesep * 2
        paragraphs = list(self.f.getiterator(self.f.readchunk, delimiter))
        self.assertEqual(len(paragraphs), 44)
class Subreaders(unittest.TestCase):
    """Tests for getreader(), which yields a sub-TextReader confined to
    one delimited region (here: one page) of the underlying file."""

    def setUp(self):
        self.f = TextReader(PREFIX + "/test_base64.py",linesep=TextReader.UNIX)

    def testPage1(self):
        """A subreader for page 1 cannot read or cue into page 2."""
        p = self.f.getreader(self.f.readpage)
        # print "p.maxpos: %s" % p.maxpos
        self.assertEqual(p.readline(),
                         'import unittest')
        self.assertRaises(IOError, p.peekline, 32) # we shouldn't be able to read ahead to page 2
        self.assertRaises(IOError, p.cue, 'LegacyBase64TestCase') # not by this method either
        self.f.seek(0)

    def testPage2(self):
        """A subreader for page 2 cannot read backwards into page 1."""
        self.f.readpage()
        p = self.f.getreader(self.f.readpage)
        p.readline()
        self.assertEqual(p.readline(),
                         'class LegacyBase64TestCase(unittest.TestCase):')
        self.assertRaises(IOError,p.prevline, 4) # we shouldn't be able to read backwards to page 1
        self.f.seek(0)
class Edgecases(unittest.TestCase):
    """Boundary conditions: peeking, reading and searching past the
    beginning or end of the underlying file."""

    def setUp(self):
        self.f = TextReader(PREFIX + "/LICENSE.txt",linesep=TextReader.UNIX)

    def testPeekPastEOF(self):
        # peeking far past the last line must raise, not return ''
        self.assertRaises(IOError,
                          self.f.peekline, 4711)

    def testPrevPastBOF(self):
        # same for reading backwards past the first line
        self.assertRaises(IOError,
                          self.f.prevline, 4711)

    def testReadPastEOF(self):
        """read() past EOF returns what remains, then empty results."""
        self.assertEqual(len(self.f.read(1)), 1)
        self.f.read(sys.maxsize) # read past end of file - no license text is THAT big
        self.assertNotEqual(self.f.currpos, sys.maxsize+1)
        self.assertEqual(len(self.f.read(1)), 0) # no more to read
        self.assertEqual(len(self.f.readline()), 0)
        self.f.seek(0)

    def testReadlineUntilEOF(self):
        """Iteration stops at EOF; a further readline() returns ''."""
        for line in self.f:
            prev = line
            pass
        self.assertEqual(prev,
                         'OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.')
        self.assertEqual(self.f.readline(), '')

    def testSearchInVain(self):
        # cueing to a string that never occurs must raise IOError
        self.assertRaises(IOError,
                          self.f.cue, 'I am a little teapot')
        self.f.seek(0)
class Fileops(unittest.TestCase):
    """Tests for how TextReader interacts with the underlying file."""

    def testClose(self):
        """TextReader must slurp the file eagerly and not keep an open
        handle to it afterwards."""
        # use a context manager instead of manual open/write/close so
        # the handle is released even if write() fails
        with open("textreader.tmp", "w") as f:
            f.write("foo")
        r = TextReader("textreader.tmp")
        # make sure TextReader isn't keeping the file open: renaming and
        # deleting the file would fail on platforms with mandatory file
        # locking (e.g. Windows) if the handle were still held
        os.rename("textreader.tmp", "textreader2.tmp")
        os.unlink("textreader2.tmp")
        self.assertEqual("foo", r.readline())
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
from pyparsing import Word,nums
from ferenda.citationparser import CitationParser
from ferenda.uriformatter import URIFormatter
from ferenda.elements import Body, Heading, Paragraph, Footnote, LinkSubject, serialize
import ferenda.uriformats
import ferenda.citationpatterns
class Main(unittest.TestCase):

    def test_parse_recursive(self):
        """parse_recursive() should replace recognized citations (plain
        URLs plus the custom "Doc N/YYYY" references) with LinkSubject
        elements at any nesting depth of the element tree."""
        # pyparsing grammar for references like "Doc 43/2012"
        doc_citation = ("Doc" + Word(nums).setResultsName("ordinal")
                        + "/" +
                        Word(nums,exact=4).setResultsName("year")).setResultsName("DocRef")

        def doc_uri_formatter(parts):
            # map the grammar's named results (year, ordinal) to a URI
            return "http://example.org/docs/%(year)s/%(ordinal)s/" % parts

        doc = Body([Heading(["About Doc 43/2012 and it's interpretation"]),
                    Paragraph(["According to Doc 43/2012",
                               Footnote(["Available at http://example.org/xyz"]),
                               " the bizbaz should be frobnicated"])
                    ])
        # expected output: every citation wrapped in a LinkSubject
        result = Body([Heading(["About ",
                                LinkSubject("Doc 43/2012", predicate="dct:references",
                                            uri="http://example.org/docs/2012/43/"),
                                " and it's interpretation"]),
                       Paragraph(["According to ",
                                  LinkSubject("Doc 43/2012", predicate="dct:references",
                                              uri="http://example.org/docs/2012/43/"),
                                  Footnote(["Available at ",
                                            LinkSubject("http://example.org/xyz",
                                                        predicate="dct:references",
                                                        uri="http://example.org/xyz")
                                            ]),
                                  " the bizbaz should be frobnicated"])
                       ])
        cp = CitationParser(ferenda.citationpatterns.url, doc_citation)
        cp.set_formatter(URIFormatter(("url", ferenda.uriformats.url),
                                      ("DocRef", doc_uri_formatter)))
        doc = cp.parse_recursive(doc)
        self.maxDiff = 4096
        # compare serialized forms for a readable diff on failure
        self.assertEqual(serialize(doc),serialize(result))
import doctest
from ferenda import citationparser
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` protocol hook: include the doctests
    embedded in the citationparser module in this test run."""
    doctests = doctest.DocTestSuite(citationparser)
    tests.addTests(doctests)
    return tests
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import tempfile
import shutil
import logging
import hashlib
import pkg_resources
# NOTE: by inserting cwd (which *should* be the top-level source code
# dir, with 'ferenda' and 'test' as subdirs) into sys.path as early as
# possible, we make it possible for pkg_resources to find resources in
# the 'ferenda' package. We also have to call a resource method
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
pkg_resources.resource_listdir('ferenda','res')
#print("OK MANAGER IS LOADED: %s" % sys.path)
#print("LISTDIR IS %s" % pkg_resources.resource_listdir('ferenda','res'))
from ferenda.manager import setup_logger; setup_logger('CRITICAL')
if sys.version_info < (2, 7, 0):
import unittest2 as unittest
else:
import unittest
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from six.moves import configparser, reload_module
try:
# assume we're on py3.3 and fall back if not
from unittest.mock import Mock, MagicMock, patch, call
except ImportError:
from mock import Mock, MagicMock, patch, call
from lxml import etree as ET
from ferenda import manager, decorators, util
from ferenda import DocumentRepository, LayeredConfig, DocumentStore
class staticmockclass(DocumentRepository):
    """Example class for testing"""
    # NOTE: the class docstring above and mymethod's docstring are
    # asserted verbatim by API.test_list_enabled_classes /
    # API.test_list_class_usage -- do not reword them.
    alias = "staticmock"
    # set by test fixtures to a temp dir containing test.css/test.js
    resourcebase = None

    @decorators.action
    def mymethod(self, arg):
        """Frobnicate the bizbaz"""
        if arg == "myarg":
            return "ok!"

    # the action methods below return sentinel strings so tests can
    # verify that the right method was dispatched with the right args
    def download(self):
        return "%s download ok" % self.alias

    def parse(self, basefile):
        return "%s parse %s" % (self.alias, basefile)

    def relate(self, basefile):
        return "%s relate %s" % (self.alias, basefile)

    def generate(self, basefile):
        return "%s generate %s" % (self.alias, basefile)

    def toc(self):
        return "%s toc ok" % (self.alias)

    def news(self):
        return "%s news ok" % (self.alias)

    def internalmethod(self, arg):
        # deliberately not decorated with @action: must not be
        # invokable through the manager interface
        pass

    @classmethod
    def setup(cls, action, config): pass

    @classmethod
    def teardown(cls, action, config): pass

    def get_default_options(self):
        return {'datadir': 'data',
                'loglevel': 'DEBUG',
                'cssfiles': [self.resourcebase + '/test.css'],
                'jsfiles': [self.resourcebase + '/test.js']}

    def list_basefiles_for(cls,action):
        # NOTE(review): first parameter is named "cls" but there is no
        # @classmethod decorator, so this is an ordinary instance
        # method -- confirm intent before renaming.
        return ["arg1","myarg","arg2"]
class staticmockclass2(staticmockclass):
    """Another class for testing"""
    # (docstring above is asserted verbatim by API.test_list_enabled_classes)
    alias = "staticmock2"

    def mymethod(self, arg):
        """Frobnicate the bizbaz (alternate implementation)"""
        return "yeah!" if arg == "myarg" else None
class API(unittest.TestCase):
    """Test cases for API level methods of the manager modules (functions
    like enable and makeresources, including unit tests of internal
    helpers.
    """

    def setUp(self):
        self.maxDiff = None
        self.tempdir = tempfile.mkdtemp()
        staticmockclass.resourcebase = self.tempdir
        # FIXME: this creates (and tearDown deletes) a file in
        # cwd. Should be placed in self.tempdir, but tests need to be
        # adjusted to find it there.
        # NB: The section keys are different from the specified
        # classes alias properties. This is intended.
        util.writefile("ferenda.ini", """[__root__]
datadir = %s
loglevel = CRITICAL
[test]
class=testManager.staticmockclass
[test2]
class=testManager.staticmockclass2
"""%self.tempdir)
        util.writefile(self.tempdir+"/test.js", "// test.js code goes here")
        util.writefile(self.tempdir+"/test.css", "/* test.css code goes here */")

    def tearDown(self):
        if os.path.exists("ferenda.ini"):
            os.remove("ferenda.ini")
        shutil.rmtree(self.tempdir)

    def test_filter_argv(self):
        """_filter_argv splits argv into (alias, action, args), dropping
        --option style arguments."""
        self.assertEqual(manager._filter_argv(["ecj", "parse", "62008J0034", "62008J0035"]),
                         ("ecj", "parse", ["62008J0034", "62008J0035"]))
        self.assertEqual(manager._filter_argv(["ecj", "parse", "62008J0034", "--force=True", "--frobnicate"]),
                         ("ecj", "parse", ["62008J0034"]))
        self.assertEqual(manager._filter_argv(["ecj", "--frobnicate"]),
                         ("ecj", None, []))

    def test_enable_class(self):
        """enable() registers a class in ferenda.ini; bad names fail."""
        # 1. test that a normal enabling goes well
        manager.enable("testManager.staticmockclass")
        # os.system("cat ferenda.ini")
        cfg = configparser.ConfigParser()
        cfg.read(["ferenda.ini"])
        self.assertEqual(cfg.get("staticmock","class"), "testManager.staticmockclass")
        # 2. test that an attempt to enable a nonexistent class fails"
        with self.assertRaises(ImportError):
            manager.enable("testManager.Nonexistent")
        # 3. test that an attempt to enable an alias fails
        with self.assertRaises(ValueError):
            manager.enable("staticmock")

    def test_run_class(self):
        """_run_class dispatches an action to the right repo instance."""
        enabled_classes = {'test':'testManager.staticmockclass'}
        argv = ["test", "mymethod","myarg"]
        self.assertEqual(manager._run_class(enabled_classes,argv),"ok!")

    def test_list_enabled_classes(self):
        # maps alias -> class docstring, in registration order
        self.assertEqual(manager._list_enabled_classes(),
                         OrderedDict((("test","Example class for testing"),
                                      ("test2","Another class for testing"))))

    def test_list_class_usage(self):
        # only @action-decorated methods should be listed
        self.assertEqual(manager._list_class_usage(staticmockclass),
                         {'mymethod':'Frobnicate the bizbaz'})

    def test_makeresources(self):
        """makeresources() collects/combines css+js and writes resources.xml."""
        # Test1: No combining, resources specified by docrepos
        test = staticmockclass()
        # print("test.get_default_options %r" % test.get_default_options())
        test2 = staticmockclass2()
        s = os.sep
        want = {'css':[s.join(['rsrc', 'css','test.css'])],
                'js':[s.join(['rsrc', 'js','test.js'])],
                'xml':[s.join(['rsrc', 'resources.xml'])]
                }
        got = manager.makeresources([test,test2],self.tempdir+os.sep+'rsrc')
        self.assertEqual(want, got)
        tree = ET.parse(self.tempdir+os.sep+got['xml'][0])
        stylesheets=tree.find("stylesheets").getchildren()
        self.assertEqual(len(stylesheets),1)
        self.assertEqual(stylesheets[0].attrib['href'],'rsrc/css/test.css')
        javascripts=tree.find("javascripts").getchildren()
        self.assertEqual(len(javascripts),1)
        self.assertEqual(javascripts[0].attrib['src'],'rsrc/js/test.js')
        self.assertEqual(tree.find("sitename").text,"MySite")
        self.assertEqual(tree.find("sitedescription").text,"Just another Ferenda site")
        self.assertTrue(os.path.exists(self.tempdir+'/rsrc/css/test.css'))
        self.assertTrue(os.path.exists(self.tempdir+'/rsrc/js/test.js'))
        # Test2: combining, resources specified by global config
        # (maybe we should use smaller CSS+JS files? Test takes 2+ seconds...)
        want = {'css':[s.join(['rsrc', 'css','combined.css'])],
                'js':[s.join(['rsrc', 'js','combined.js'])],
                'xml':[s.join(['rsrc', 'resources.xml'])]
                }
        got = manager.makeresources([test,test2],self.tempdir+os.sep+'rsrc',
                                    combine=True,
                                    cssfiles=['res/css/normalize.css',
                                              'res/css/main.css'],
                                    jsfiles=['res/js/jquery-1.9.0.js',
                                             'res/js/modernizr-2.6.2-respond-1.1.0.min.js'],
                                    sitename="Blahonga",
                                    sitedescription="A non-default value")
        self.assertEqual(want,got)
        tree = ET.parse(self.tempdir+'/'+got['xml'][0])
        stylesheets=tree.find("stylesheets").getchildren()
        self.assertEqual(len(stylesheets),1)
        self.assertEqual(stylesheets[0].attrib['href'],'rsrc/css/combined.css')
        javascripts=tree.find("javascripts").getchildren()
        self.assertEqual(len(javascripts),1)
        self.assertEqual(javascripts[0].attrib['src'],'rsrc/js/combined.js')
        self.assertEqual(tree.find("sitename").text,"Blahonga")
        self.assertEqual(tree.find("sitedescription").text,"A non-default value")
        self.assertTrue(os.path.exists(self.tempdir+'/rsrc/css/combined.css'))
        self.assertTrue(os.path.exists(self.tempdir+'/rsrc/js/combined.js'))
        # check that the combining/minifying indeed saved us some space
        # physical path for these: relative to the location of ferenda/manager.py.
        self.assertLess(os.path.getsize(self.tempdir+'/rsrc/css/combined.css'),
                        sum([os.path.getsize(x) for x in ("ferenda/res/css/normalize.css",
                                                          "ferenda/res/css/main.css")]))
        self.assertLess(os.path.getsize(self.tempdir+'/rsrc/js/combined.js'),
                        sum([os.path.getsize(x) for x in ("ferenda/res/js/jquery-1.9.0.js",
                                                          "ferenda/res/js/modernizr-2.6.2-respond-1.1.0.min.js")]))
        # Test3: No combining, make sure that a non-customized
        # DocumentRepository works
        repo = DocumentRepository()
        got = manager.makeresources([repo],self.tempdir+os.sep+'rsrc')
        s = os.sep
        want = {'css':[s.join(['rsrc', 'css','normalize.css']),
                       s.join(['rsrc', 'css','main.css']),
                       s.join(['rsrc', 'css','ferenda.css'])],
                'js':[s.join(['rsrc', 'js','jquery-1.9.0.js']),
                      s.join(['rsrc', 'js','modernizr-2.6.2-respond-1.1.0.min.js']),
                      s.join(['rsrc', 'js','ferenda.js'])],
                'xml':[s.join(['rsrc', 'resources.xml'])]
                }
        self.assertEqual(want,got)

    def test_frontpage(self):
        """frontpage() renders one section div per enabled docrepo."""
        test = staticmockclass()
        test2 = staticmockclass2()
        outfile = self.tempdir+'/index.html'
        res = manager.frontpage([test,test2],
                                path=outfile)
        self.assertTrue(res)
        tree = ET.parse(outfile)
        # FIXME: check that tree contains 2 divs, that they have id
        # staticmock and staticmock2, that the p text is "Handles
        # foaf:Document documents. Contains 3 published documents."
        divs = tree.findall(".//div[@class='section-wrapper']")
        self.assertEqual(2, len(list(divs)))
        self.assertEqual("staticmock", divs[0].get("id"))
        self.assertEqual("staticmock2", divs[1].get("id"))
        self.assertIn("Handles foaf:Document", divs[0].find("p").text)
        self.assertIn("Contains 3 published documents", divs[0].find("p").text)
class Run(unittest.TestCase):
"""Tests manager interface using only the run() entry point used by ferenda-build.py"""
    def setUp(self):
        """Create a temp project dir with a blank ferenda.ini and an
        example module defining two docrepo classes, then chdir there."""
        self.addTypeEqualityFunc(OrderedDict, self.assertDictEqual)
        self.tempdir = tempfile.mkdtemp()
        # self.modulename = hashlib.md5(self.tempdir.encode('ascii')).hexdigest()
        self.modulename = "example"
        self.orig_cwd = os.getcwd()
        # 1. create new blank ini file (FIXME: can't we make sure that
        #    _find_config_file is called with create=True when using
        #    run() ?)
        os.chdir(self.tempdir)
        util.writefile("ferenda.ini", """[__root__]
loglevel=WARNING
datadir = %s
""" % self.tempdir)
        # 2. dump 2 example docrepo classes to example.py
        # FIXME: should we add self.tempdir to sys.path also (and remove it in teardown)?
        util.writefile(self.modulename+".py", """# Test code
from ferenda import DocumentRepository, DocumentStore, decorators

class Testrepo(DocumentRepository):
    alias = "test"

    def get_default_options(self):
        return {'datadir': 'data',
                'cssfiles': ['test.css'],
                'jsfiles': ['test.js'],
                'magic': 'less'}

    # for inspecting the attributes of a docrepo instance
    @decorators.action
    def inspect(self, attr, subattr=None):
        a = getattr(self,attr)
        if subattr:
            return getattr(a, subattr)
        else:
            return a

    # general testing of arguments and return values (or lack thereof)
    @decorators.action
    def mymethod(self, arg):
        if arg == "myarg":
            return "ok!"

    def download(self):
        return "%s download ok (magic=%s)" % (self.alias, self.config.magic)

    def parse(self, basefile):
        return "%s parse %s" % (self.alias, basefile)

    def relate(self, basefile):
        return "%s relate %s" % (self.alias, basefile)

    def generate(self, basefile):
        return "%s generate %s" % (self.alias, basefile)

    def toc(self):
        return "%s toc ok" % (self.alias)

    def news(self):
        return "%s news ok" % (self.alias)

    def list_basefiles_for(cls,action):
        return ["arg1","myarg","arg2"]

    @classmethod
    def setup(cls, action, config): pass

    @classmethod
    def teardown(cls, action, config): pass

class CustomStore(DocumentStore):
    def custommethod(self):
        return "CustomStore OK"

class Testrepo2(Testrepo):
    alias = "test2"
    storage_policy = "dir"
    downloaded_suffix = ".txt"
    documentstore_class = CustomStore

    @decorators.action
    def mymethod(self, arg):
        if arg == "myarg":
            return "yeah!"

    @decorators.action
    def callstore(self):
        return self.store.custommethod()
""")
        util.writefile(self.tempdir+"/test.js", "// test.js code goes here")
        util.writefile(self.tempdir+"/test.css", "/* test.css code goes here */")
        # make the example module importable from the temp dir
        sys.path.append(self.tempdir)
def tearDown(self):
os.chdir(self.orig_cwd)
shutil.rmtree(self.tempdir)
sys.path.remove(self.tempdir)
# functionality used by most test methods
def _enable_repos(self):
# 3. run('example.Testrepo', 'enable')
with patch.object(logging.Logger, 'info') as mocklog:
self.assertEqual("test",
manager.run([self.modulename+".Testrepo", "enable"]))
# 4. verify that "alias foo enabled" is logged
log_arg = mocklog.call_args[0][0]
self.assertEqual("Enabled class %s.Testrepo (alias 'test')" % self.modulename,
log_arg)
# 5. verify that ferenda.ini has changed
cfg = configparser.ConfigParser()
cfg.read(["ferenda.ini"])
self.assertEqual(cfg.get("test","class"), self.modulename+".Testrepo")
# (same, with 'example.Testrepo2')
self.assertEqual("test2",
manager.run([self.modulename+".Testrepo2", "enable"]))
cfg = configparser.ConfigParser()
cfg.read(["ferenda.ini"])
self.assertEqual(cfg.get("test2","class"), self.modulename+".Testrepo2")
with patch.object(logging.Logger, 'error') as mocklog:
# 6. run('example.Nonexistent', 'enable') -- the ImportError must
# be caught and an error printed.
manager.run([self.modulename+".Nonexistent", "enable"])
# 7. verify that a suitable error messsage is logged
self.assertEqual("No class named '%s.Nonexistent'" % self.modulename,
mocklog.call_args[0][0])
def test_run_enable(self):
self._enable_repos()
def test_run_single(self):
self._enable_repos()
argv = ["test","mymethod","myarg"]
self.assertEqual(manager.run(argv),
"ok!")
def test_run_all(self):
self._enable_repos()
argv = ["all","mymethod","myarg"]
self.assertEqual(manager.run(argv),
["ok!", "yeah!"])
def test_run_all_all(self):
self._enable_repos()
argv = ["all", "mymethod", "--all"]
self.assertEqual(manager.run(argv),
[[None,"ok!",None],
[None,"yeah!",None]])
def test_run_all_allmethods(self):
self._enable_repos()
argv = ["all", "all", "--magic=more"]
s = os.sep
want = OrderedDict(
[('download', OrderedDict([('test','test download ok (magic=more)'),
('test2', 'test2 download ok (magic=more)')])),
('parse', OrderedDict([('test', ['test parse arg1',
'test parse myarg',
'test parse arg2']),
('test2', ['test2 parse arg1',
'test2 parse myarg',
'test2 parse arg2'])])),
('relate', OrderedDict([('test', ['test relate arg1',
'test relate myarg',
'test relate arg2']),
('test2', ['test2 relate arg1',
'test2 relate myarg',
'test2 relate arg2'])])),
('makeresources', {'css':[s.join(['rsrc', 'css','test.css'])],
'js':[s.join(['rsrc', 'js','test.js'])],
'xml':[s.join(['rsrc', 'resources.xml'])]}),
('generate', OrderedDict([('test', ['test generate arg1',
'test generate myarg',
'test generate arg2']),
('test2', ['test2 generate arg1',
'test2 generate myarg',
'test2 generate arg2'])])),
('toc', OrderedDict([('test','test toc ok'),
('test2', 'test2 toc ok')])),
('news', OrderedDict([('test','test news ok'),
('test2', 'test2 news ok')])),
('frontpage', True)])
got = manager.run(argv)
self.maxDiff = None
self.assertEqual(want,got)
def test_run_makeresources(self):
# 1. setup test_run_enable
# 2. run('all', 'makeresources')
# 3. verify that all css/jss files specified by default and in Testrepo gets copied
# (remove rsrc)
# 4. run('all', 'makeresources', '--combine')
# 5. verify that single css and js file is created
self._enable_repos()
s = os.sep
want = {'css':[s.join(['rsrc', 'css','test.css'])],
'js':[s.join(['rsrc', 'js','test.js'])],
'xml':[s.join(['rsrc', 'resources.xml'])]
}
got = manager.run(['all', 'makeresources'])
self.assertEqual(want,got)
def test_delayed_config(self):
# Make sure configuration values gets stored properly in the instance created by run()
self._enable_repos()
self.assertEqual(".html", manager.run(['test','inspect','downloaded_suffix']))
self.assertEqual("file", manager.run(['test','inspect','storage_policy']))
self.assertEqual(self.tempdir, manager.run(['test','inspect','config', 'datadir']))
self.assertEqual(".html", manager.run(['test','inspect','store', 'downloaded_suffix']))
self.assertEqual("file", manager.run(['test','inspect','store', 'storage_policy']))
self.assertEqual(self.tempdir+os.sep+"test", manager.run(['test','inspect','store', 'datadir']))
self.assertEqual(".txt", manager.run(['test2','inspect','downloaded_suffix']))
self.assertEqual("dir", manager.run(['test2','inspect','storage_policy']))
self.assertEqual(self.tempdir, manager.run(['test2','inspect','config', 'datadir']))
self.assertEqual(".txt", manager.run(['test2','inspect','downloaded_suffix']))
self.assertEqual("dir", manager.run(['test2','inspect','storage_policy']))
self.assertEqual(self.tempdir+os.sep+"test2", manager.run(['test2','inspect','store', 'datadir']))
def test_custom_docstore(self):
self._enable_repos()
got = manager.run(['test2', 'callstore'])
self.assertEqual("CustomStore OK", got)
import doctest
from ferenda import manager
def load_tests(loader, tests, ignore):
    """unittest discovery hook: also run the doctests in ferenda.manager."""
    suite = doctest.DocTestSuite(manager)
    tests.addTests(suite)
    return tests
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
from ferenda.legaluri import construct,parse
from ferenda.testutil import file_parametrize
class Construct(unittest.TestCase):
    def parametric_test(self, filename):
        """Check that construct() builds the expected URI.

        *filename* is a .py fixture holding a single dict literal of URI
        components; the expected URI is in the sibling .txt file.
        """
        import ast
        with open(filename) as fp:
            testdata = fp.read()
        with open(filename.replace(".py", ".txt")) as fp:
            testanswer = fp.read().strip()
        # The fixture files contain plain dict literals, so parse them
        # with ast.literal_eval instead of eval() (resolves the old
        # FIXME; literal_eval cannot execute arbitrary code).
        testdata = testdata.strip().replace("\r\n", " ")
        d = ast.literal_eval(testdata)
        uri = construct(d)
        self.assertEqual(uri, testanswer)
class Parse(unittest.TestCase):
    def parametric_test(self, filename):
        """Check that parse() decomposes the URI in *filename* (.txt)
        into the dict stored in the sibling .py fixture file.
        """
        import ast
        with open(filename) as fp:
            uri = fp.read().strip()
        with open(filename.replace(".txt", ".py")) as fp:
            parts_repr = " ".join(fp.read().split())
        # Fixture files hold plain dict literals; parse them safely
        # with ast.literal_eval instead of eval().
        parts = ast.literal_eval(parts_repr)
        self.assertEqual(parse(uri), parts)
# Generate one test method per fixture under test/files/legaluri:
# .py files drive Construct, .txt files drive Parse.
file_parametrize(Construct,"test/files/legaluri",".py")
file_parametrize(Parse,"test/files/legaluri",".txt")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
try:
# assume we're on py3.3 and fall back if not
from unittest.mock import Mock, MagicMock, patch
except ImportError:
from mock import Mock, MagicMock, patch
from ferenda import DocumentRepository, Document
from ferenda.errors import DocumentRemovedError, ParseError
# SUT
from ferenda.decorators import timed, parseifneeded, render, handleerror, makedocument
class Decorators(unittest.TestCase):
    """Unit tests for the ferenda.decorators wrappers, using mocks for
    the docrepo and document objects."""
    def test_timed(self):
        # Test that a wrapped method...
        @timed
        def testfunc(repo,doc):
            pass
        # ...passed a particular docrepo and doc
        mockrepo = Mock()
        mockdoc = Mock()
        mockdoc.basefile = "1234"
        # ...has it's instances logger called...
        testfunc(mockrepo,mockdoc)
        call_args = mockrepo.log.info.call_args
        # ...with the correct method and arguments
        self.assertEqual(len(call_args[0]), 3)
        self.assertEqual(call_args[0][0], '%s: OK (%.3f sec)')
        self.assertEqual(call_args[0][1], "1234")
    def test_parseifneeded(self):
        @parseifneeded
        def testfunc(repo,doc):
            repo.called = True
        mockdoc = Mock()
        mockrepo = Mock()
        mockrepo.called = False
        mockrepo.config.force = False
        # test 1: Outfile is newer - the parseifneeded decorator
        # should make sure the actual testfunc code is never reached
        with patch('ferenda.util.outfile_is_newer', return_value=True):
            testfunc(mockrepo,mockdoc)
        self.assertFalse(mockrepo.called)
        mockrepo.called = False
        # test 2: Outfile is older
        with patch('ferenda.util.outfile_is_newer', return_value=False):
            testfunc(mockrepo,mockdoc)
        self.assertTrue(mockrepo.called)
        mockrepo.called = False
        # test 3: Outfile is newer, but the global force option was set
        mockrepo.config.force = True
        with patch('ferenda.util.outfile_is_newer', return_value=True):
            testfunc(mockrepo,mockdoc)
        self.assertTrue(mockrepo.called)
        mockrepo.config.force = None
        mockrepo.called = False
        # test 4: Outfile is newer, but the module parseforce option was set
        mockrepo.config.parseforce = True
        with patch('ferenda.util.outfile_is_newer', return_value=True):
            testfunc(mockrepo,mockdoc)
        self.assertTrue(mockrepo.called)
        mockrepo.called = False
    @patch('ferenda.documentrepository.Graph')
    def test_render(self,mock_graph):
        @render
        def testfunc(repo,doc):
            pass
        mockdoc = Mock()
        mockrepo = Mock()
        # render reads the parsed file back; give it a real one on disk
        mockrepo.store.parsed_path.return_value = "parsed_path.xhtml"
        with open("parsed_path.xhtml", "w") as fp:
            fp.write("""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:dct="http://purl.org/dc/terms/">
<head about="http://example.org/doc">
<title property="dct:title">Document title</title>
</head>
<body>
<h1>Hello!</h1>
</body>
</html>""")
        mockrepo.store.distilled_path.return_value = "distilled_path.xhtml"
        mockrepo.get_globals.return_value = {'symbol table':'fake'}
        mockdoc.meta = MagicMock()
        mockdoc.body = []
        mockdoc.meta.__iter__.return_value = []
        with patch('ferenda.util.ensure_dir', return_value=True):
            testfunc(mockrepo, mockdoc)
        # 1 ensure that DocumentRepository.render_xhtml is called with
        # four arguments
        mockrepo.render_xhtml.assert_called_with(mockdoc, "parsed_path.xhtml")
        # 2 ensure that DocumentRepository.create_external_resources
        # is called with 1 argument
        mockrepo.create_external_resources.assert_called_with(mockdoc)
        # 3 ensure that a Graph object is created, its parse and
        # serialize methods called
        # FIXME: Why doesn't the patching work?!
        # self.assertTrue(mock_graph().parse.called)
        # self.assertTrue(mock_graph().serialize.called)
        # (4. ensure that a warning gets printed if doc.meta and
        # distilled_graph do not agree)
        mock_graph().__iter__.return_value = ['a','b']
        mockdoc.meta.__iter__.return_value = ['a','b','c']
        mockdoc.meta.serialize.return_value = b"<c>"
        with patch('ferenda.util.ensure_dir', return_value=True):
            testfunc(mockrepo, mockdoc)
        self.assertTrue(mockrepo.log.warning.called)
        os.remove("parsed_path.xhtml")
        os.remove("distilled_path.xhtml")
    def test_handleerror(self):
        @handleerror
        def testfunc(repo,doc):
            if doc.exception:
                raise doc.exception
            else:
                return True
        mockrepo = Mock()
        mockdoc = Mock()
        # 1. should not raise an exception (but should call log.info
        # and util.robust_remove, and return false)
        with patch('ferenda.util.robust_remove') as robust_remove:
            mockdoc.exception = DocumentRemovedError
            self.assertFalse(testfunc(mockrepo, mockdoc))
            self.assertTrue(mockrepo.log.info.called)
            self.assertTrue(robust_remove.called)
        # 2. should raise the same exception
        mockdoc.exception = KeyboardInterrupt
        with self.assertRaises(KeyboardInterrupt):
            testfunc(mockrepo, mockdoc)
        # 3.1 Should raise the same exeption
        mockdoc.exception = ParseError
        mockrepo.config.fatalexceptions = True
        with self.assertRaises(ParseError):
            testfunc(mockrepo, mockdoc)
        mockrepo.config.fatalexceptions = None
        # 3.2 Should not raise an exception (but should call log.error and return false)
        mockdoc.exception = ParseError
        self.assertFalse(testfunc(mockrepo, mockdoc))
        self.assertTrue(mockrepo.log.error.called)
        # 4.1 Should raise the same exception
        mockdoc.exception = Exception
        mockrepo.config.fatalexceptions = True
        with self.assertRaises(Exception):
            testfunc(mockrepo, mockdoc)
        mockrepo.config.fatalexceptions = None
        # 4.2 Should not raise an exception
        mockdoc.exception = Exception
        self.assertFalse(testfunc(mockrepo, mockdoc))
        self.assertTrue(mockrepo.log.error.called)
        # 5. No exceptions - everything should go fine
        mockdoc.exception = None
        self.assertTrue(testfunc(mockrepo, mockdoc))
    def test_makedocument(self):
        @makedocument
        def testfunc(repo,doc):
            return doc
        doc = testfunc(DocumentRepository(),"base/file")
        self.assertIsInstance(doc,Document)
        self.assertEqual(doc.basefile, "base/file")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
# if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import datetime
try:
# assume we're on py3.3 and fall back if not
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
from ferenda.sources.general import Keyword, Skeleton, Wiki
from ferenda.sources.tech import RFC, W3Standards, PEP
from ferenda.sources.legal.eu import EurlexCaselaw, EurlexTreaties
from ferenda.sources.legal.se import ARN, Direktiv, Ds, DV, JK, JO, Kommitte, MyndFskr, Propositioner, Regeringen, Riksdagen, SFS, SKVFS, SOU, SwedishLegalSource
class TestSwedishLegalSource(unittest.TestCase):
    """Tests for the shared SwedishLegalSource base repository."""
    def test_parse_swedish_date(self):
        """A Swedish-language date string parses into a datetime.date."""
        source = SwedishLegalSource()
        expected = datetime.date(2010, 2, 3)
        self.assertEqual(source.parse_swedish_date("3 februari 2010"), expected)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
# if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
from ferenda.manager import setup_logger; setup_logger('CRITICAL')
from ferenda.testutil import RepoTester, FerendaTestCase, testparser, file_parametrize, parametrize_repotester
from ferenda.sources.tech import RFC
# import ferenda.sources.tech.rfc
class Parse(unittest.TestCase, FerendaTestCase):
    """Parametrized tests for the RFC text parser."""
    def parametric_test(self, filename):
        """Run the RFC parser against the fixture file *filename*."""
        testparser(self, RFC.get_parser(), filename)
# One test method per .txt fixture under test/files/rfc.
file_parametrize(Parse,"test/files/rfc",".txt")
class TestRFC(RepoTester):
    # Repository-level tests (download/parse fixtures) driven by RepoTester.
    repoclass = RFC
    docroot = os.path.dirname(__file__)+"/files/repo/rfc"
# Generate test methods from the fixture files found under docroot.
parametrize_repotester(TestRFC)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
import tempfile
import shutil
import time
#SUT
from ferenda import DocumentStore
from ferenda import util
from ferenda.errors import *
class Store(unittest.TestCase):
    """Unit tests for DocumentStore: path construction (incl. versions
    and attachments) and basefile/version/attachment listing."""
    def setUp(self):
        self.datadir = tempfile.mkdtemp()
        self.store = DocumentStore(self.datadir)
    def tearDown(self):
        shutil.rmtree(self.datadir)
    def p(self,path):
        # Expand a store-relative path to an absolute, OS-native path.
        path = self.datadir+"/"+path
        return path.replace('/', '\\') if os.sep == '\\' else path
    def test_open(self):
        wanted_filename = self.store.path("basefile", "maindir", ".suffix")
        # open() writes to a temp file first; realname is the final target
        with self.store.open("basefile", "maindir", ".suffix", "w") as fp:
            self.assertNotEqual(fp.name, wanted_filename)
            self.assertEqual(fp.realname, wanted_filename)
            fp.write("This is the data")
        self.assertEqual(util.readfile(wanted_filename),
                         "This is the data")
        mtime = os.stat(wanted_filename).st_mtime
        # make sure writing identical content does not actually write
        # a new file
        time.sleep(.1) # just to get a different mtime
        with self.store.open("basefile", "maindir", ".suffix", "w") as fp:
            fp.write("This is the data")
        self.assertEqual(os.stat(wanted_filename).st_mtime,
                         mtime)
    def test_path(self):
        # ':' in a basefile is percent-encoded in the resulting path
        self.assertEqual(self.store.path("123","foo", ".bar"),
                         self.p("foo/123.bar"))
        self.assertEqual(self.store.path("123/a","foo", ".bar"),
                         self.p("foo/123/a.bar"))
        self.assertEqual(self.store.path("123:a","foo", ".bar"),
                         self.p("foo/123/%3Aa.bar"))
    def test_path_version(self):
        eq = self.assertEqual
        eq(self.store.path("123","foo", ".bar", version="42"),
           self.p("archive/foo/123/42.bar"))
        eq(self.store.path("123/a","foo", ".bar", version="42"),
           self.p("archive/foo/123/a/42.bar"))
        eq(self.store.path("123:a","foo", ".bar", version="42"),
           self.p("archive/foo/123/%3Aa/42.bar"))
        eq(self.store.path("123:a","foo", ".bar", version="42:1"),
           self.p("archive/foo/123/%3Aa/42/%3A1.bar"))
        # with storage_policy "dir", each version gets its own directory
        self.store.storage_policy = "dir"
        eq(self.store.path("123","foo", ".bar", version="42"),
           self.p("archive/foo/123/42/index.bar"))
        eq(self.store.path("123/a","foo", ".bar", version="42"),
           self.p("archive/foo/123/a/42/index.bar"))
        eq(self.store.path("123:a","foo", ".bar", version="42"),
           self.p("archive/foo/123/%3Aa/42/index.bar"))
        eq(self.store.path("123:a","foo", ".bar", version="42:1"),
           self.p("archive/foo/123/%3Aa/42/%3A1/index.bar"))
    def test_path_attachment(self):
        eq = self.assertEqual
        repo = self.store # to shorten lines < 80 chars
        repo.storage_policy = "dir" # attachments require this
        eq(repo.path("123","foo", None, attachment="external.foo"),
           self.p("foo/123/external.foo"))
        eq(repo.path("123/a","foo", None, attachment="external.foo"),
           self.p("foo/123/a/external.foo"))
        eq(repo.path("123:a","foo", None, attachment="external.foo"),
           self.p("foo/123/%3Aa/external.foo"))
        # ':' and '/' are not allowed in attachment names
        with self.assertRaises(AttachmentNameError):
            repo.path("123:a","foo", None,
                      attachment="invalid:attachment")
        with self.assertRaises(AttachmentNameError):
            repo.path("123:a","foo", None,
                      attachment="invalid/attachment"),
        # attachments are unsupported under the "file" storage policy
        repo.storage_policy = "file"
        with self.assertRaises(AttachmentPolicyError):
            repo.path("123:a","foo", None,
                      attachment="external.foo"),
    def test_path_version_attachment(self):
        eq = self.assertEqual
        self.store.storage_policy = "dir"
        eq(self.store.path("123","foo", None,
                           version="42", attachment="external.foo"),
           self.p("archive/foo/123/42/external.foo"))
        eq(self.store.path("123/a","foo", None,
                           version="42", attachment="external.foo"),
           self.p("archive/foo/123/a/42/external.foo"))
        eq(self.store.path("123:a","foo", None,
                           version="42", attachment="external.foo"),
           self.p("archive/foo/123/%3Aa/42/external.foo"))
    def test_specific_path_methods(self):
        # downloaded_path/parsed_path/generated_path are thin wrappers
        # around path() with preset maindir/suffix
        self.assertEqual(self.store.downloaded_path('123/a'),
                         self.p("downloaded/123/a.html"))
        self.assertEqual(self.store.downloaded_path('123/a', version="1"),
                         self.p("archive/downloaded/123/a/1.html"))
        self.assertEqual(self.store.parsed_path('123/a', version="1"),
                         self.p("archive/parsed/123/a/1.xhtml"))
        self.assertEqual(self.store.generated_path('123/a', version="1"),
                         self.p("archive/generated/123/a/1.html"))
        self.store.storage_policy = "dir"
        self.assertEqual(self.store.downloaded_path('123/a'),
                         self.p("downloaded/123/a/index.html"))
        self.assertEqual(self.store.downloaded_path('123/a', version="1"),
                         self.p("archive/downloaded/123/a/1/index.html"))
        self.assertEqual(self.store.parsed_path('123/a', version="1"),
                         self.p("archive/parsed/123/a/1/index.xhtml"))
        self.assertEqual(self.store.generated_path('123/a', version="1"),
                         self.p("archive/generated/123/a/1/index.html"))
    def test_basefile_to_pathfrag(self):
        self.assertEqual(self.store.basefile_to_pathfrag("123-a"), "123-a")
        self.assertEqual(self.store.basefile_to_pathfrag("123/a"), "123/a")
        self.assertEqual(self.store.basefile_to_pathfrag("123:a"), "123"+os.sep+"%3Aa")
    def test_pathfrag_to_basefile(self):
        # inverse of basefile_to_pathfrag
        self.assertEqual(self.store.pathfrag_to_basefile("123-a"), "123-a")
        self.assertEqual(self.store.pathfrag_to_basefile("123/a"), "123/a")
        self.assertEqual(self.store.pathfrag_to_basefile("123/%3Aa"), "123:a")
    def test_list_basefiles_file(self):
        files = ["downloaded/123/a.html",
                 "downloaded/123/b.html",
                 "downloaded/124/a.html",
                 "downloaded/124/b.html"]
        # NB: expected listing order is reversed
        basefiles = ["124/b", "124/a", "123/b", "123/a"]
        for f in files:
            util.writefile(self.p(f),"Nonempty")
        self.assertEqual(list(self.store.list_basefiles_for("parse")),
                         basefiles)
    def test_list_basefiles_dir(self):
        files = ["downloaded/123/a/index.html",
                 "downloaded/123/b/index.html",
                 "downloaded/124/a/index.html",
                 "downloaded/124/b/index.html"]
        basefiles = ["124/b", "124/a", "123/b", "123/a"]
        self.store.storage_policy = "dir"
        for f in files:
            util.writefile(self.p(f),"nonempty")
        self.assertEqual(list(self.store.list_basefiles_for("parse")),
                         basefiles)
    def test_list_versions_file(self):
        files = ["archive/downloaded/123/a/1.html",
                 "archive/downloaded/123/a/2.html",
                 "archive/downloaded/123/a/2bis.html",
                 "archive/downloaded/123/a/10.html"]
        versions = ["1","2", "2bis", "10"]
        for f in files:
            util.writefile(self.p(f),"nonempty")
        # list_versions(action, basefile)
        self.assertEqual(list(self.store.list_versions("123/a","downloaded")),
                         versions)
    def test_list_versions_dir(self):
        files = ["archive/downloaded/123/a/1/index.html",
                 "archive/downloaded/123/a/2/index.html",
                 "archive/downloaded/123/a/2bis/index.html",
                 "archive/downloaded/123/a/10/index.html"]
        basefiles = ['123/a']
        versions = ["1","2", "2bis", "10"]
        for f in files:
            util.writefile(self.p(f),"nonempty")
        self.store.storage_policy = "dir"
        self.assertEqual(list(self.store.list_versions("123/a", "downloaded")),
                         versions)
    def test_list_attachments(self):
        files = ["downloaded/123/a/index.html",
                 "downloaded/123/a/attachment.html",
                 "downloaded/123/a/appendix.pdf",
                 "downloaded/123/a/other.txt"]
        basefiles = ['123/a']
        # index.html is the main file, not an attachment
        attachments = ['appendix.pdf', 'attachment.html', 'other.txt']
        for f in files:
            util.writefile(self.p(f),"nonempty")
        # list_attachments(action, basefile, version=None)
        self.assertEqual(list(self.store.list_attachments("123/a", "downloaded")),
                         attachments)
    def test_list_attachments_version(self):
        files = ["archive/downloaded/123/a/1/index.html",
                 "archive/downloaded/123/a/1/attachment.txt",
                 "archive/downloaded/123/a/2/index.html",
                 "archive/downloaded/123/a/2/attachment.txt",
                 "archive/downloaded/123/a/2/other.txt"]
        basefiles = ['123/a']
        versions = ['1','2']
        attachments_1 = ['attachment.txt']
        attachments_2 = ['attachment.txt', 'other.txt']
        for f in files:
            util.writefile(self.p(f),"nonempty")
        self.assertEqual(list(self.store.list_attachments("123/a","downloaded",
                                                          "1")),
                         attachments_1)
        self.assertEqual(list(self.store.list_attachments("123/a","downloaded",
                                                          "2")),
                         attachments_2)
import doctest
from ferenda import documentstore
def load_tests(loader, tests, ignore):
    """unittest discovery hook: also run the doctests in ferenda.documentstore."""
    suite = doctest.DocTestSuite(documentstore)
    tests.addTests(suite)
    return tests
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
if sys.version_info < (2,7,0):
import unittest2 as unittest
else:
import unittest
if os.getcwd() not in sys.path: sys.path.insert(0,os.getcwd())
from tempfile import mkdtemp
import shutil
from datetime import datetime
import whoosh.index
import whoosh.fields
from ferenda import FulltextIndex, DocumentRepository
from ferenda.fulltextindex import Identifier, Datetime, Text, Label, Keywords, Boolean, URI, Less, More, Between
# Five sample entries for the default index schema: document 1, two of
# its sections -- section 1 appears twice, the later entry (same URI)
# overwriting the earlier one -- and a second document.
basic_dataset = [
    {'uri':'http://example.org/doc/1',
     'repo':'base',
     'basefile':'1',
     'title':'First example',
     'identifier':'Doc #1',
     'text':'This is the main text of the document (independent sections excluded)'},
    {'uri':'http://example.org/doc/1#s1',
     'repo':'base',
     'basefile':'1',
     'title':'First sec',
     'identifier':'Doc #1 (section 1)',
     'text':'This is an independent section'},
    {'uri':'http://example.org/doc/1#s2',
     'repo':'base',
     'basefile':'1',
     'title':'Second sec',
     'identifier':'Doc #1 (section 2)',
     'text':'This is another independent section'},
    {'uri':'http://example.org/doc/1#s1',
     'repo':'base',
     'basefile':'1',
     'title':'First section',
     'identifier':'Doc #1 (section 1)',
     'text':'This is an (updated version of a) independent section'},
    {'uri':'http://example.org/doc/2',
     'repo':'base',
     'basefile':'2',
     'title':'Second document',
     'identifier':'Doc #2',
     'text':'This is the second document (not the first)'}
    ]
class BasicIndex(unittest.TestCase):
    """Tests for creating a FulltextIndex and inserting documents."""
    def setUp(self):
        self.location = mkdtemp()
        self.index = FulltextIndex(self.location)
    def tearDown(self):
        shutil.rmtree(self.location)
    def test_create(self):
        # assert that some files have been created at the specified location
        self.assertNotEqual(os.listdir(self.location),[])
        # assert that it's really a whoosh index
        self.assertTrue(whoosh.index.exists_in(self.location))
        # assert that we have no documents
        self.assertEqual(self.index.doccount(),0)
        # assert that the schema, using our types, looks OK
        wanted = {'uri':Identifier(),
                  'repo':Label(),
                  'basefile':Label(),
                  'title':Text(boost=4),
                  'identifier':Label(boost=16),
                  'text':Text()}
        got = self.index.schema()
        self.assertEqual(wanted,got)
        # assert that the schema with underlying whoosh types is, in
        # fact, correct
        got = self.index._index.schema
        want = whoosh.fields.Schema(uri=whoosh.fields.ID(unique=True),
                                    repo=whoosh.fields.ID(stored=True),
                                    basefile=whoosh.fields.ID(stored=True),
                                    title=whoosh.fields.TEXT(field_boost=4,stored=True),
                                    identifier=whoosh.fields.ID(field_boost=16,stored=True),
                                    text=whoosh.fields.TEXT(stored=True))
        self.assertEqual(sorted(want.names()), sorted(got.names()))
        for fld in got.names():
            self.assertEqual((fld,want[fld]),(fld,got[fld]))
    def test_insert(self):
        self.index.update(**basic_dataset[0])
        self.index.update(**basic_dataset[1])
        self.index.commit()
        self.assertEqual(self.index.doccount(),2)
        self.index.update(**basic_dataset[2])
        self.index.update(**basic_dataset[3]) # updated version of basic_dataset[1]
        self.index.commit()
        # entry 3 replaced entry 1 (same uri), so only one new doc
        self.assertEqual(self.index.doccount(),3)
class BasicQuery(unittest.TestCase):
    """Query a freshly loaded index: hit counts, boosted-field ranking
    and wildcard terms."""
    def setUp(self):
        self.location = mkdtemp()
        self.index = FulltextIndex(self.location)
    def tearDown(self):
        shutil.rmtree(self.location)
    def load(self, data):
        """Insert every doc dict in *data*, committing after each one."""
        for doc in data:
            self.index.update(**doc)
            # Note: commit needed here to make sure underlying
            # writer.update_document actually deletes previous ver of
            # a doc
            self.index.commit()
    def test_basic(self):
        self.load(basic_dataset)
        # 5 entries, but one overwrote an earlier entry with the same URI
        self.assertEqual(self.index.doccount(),4)
        res = self.index.query("main")
        self.assertEqual(len(res),1)
        self.assertEqual(res[0]['identifier'], 'Doc #1')
        res = self.index.query("document")
        self.assertEqual(len(res),2)
        # Doc #2 contains the term 'document' in title (which is a
        # boosted field), not just in text.
        self.assertEqual(res[0]['identifier'], 'Doc #2')
        # wildcard query matches 'section'/'sections'
        res = self.index.query("section*")
        self.assertEqual(len(res),3)
        self.assertEqual(res[0]['identifier'], 'Doc #1 (section 1)')
# ----------------------------------------------------------------
# Non-working test classes - TBD!
class DocRepo1(DocumentRepository):
    # Repo declaring extra date/label/text/keyword index fields.
    alias = "repo1"
    def get_indexed_properties(self):
        return {'issued':Datetime(),
                'publisher':Label(),
                'abstract': Text(boost=2),
                'category':Keywords()}
class DocRepo2(DocumentRepository):
    # Repo declaring boolean/URI fields plus the same 'category' field
    # as DocRepo1 (to exercise schema merging).
    alias = "repo2"
    def get_indexed_properties(self):
        return {'secret':Boolean(),
                'references': URI(),
                'category': Keywords()}
# Two documents per repo, exercising the custom fields declared by
# DocRepo1 (issued/publisher/category) and DocRepo2 (secret/references).
custom_dataset = [
    {'repo':'repo1',
     'basefile':'1',
     'uri':'http://example.org/repo1/1',
     'title':'Title of first document in first repo',
     'identifier':'R1 D1',
     'issued':datetime(2013,2,14,14,6),
     'publisher': 'Examples & son',
     'category': ['green', 'standards'],
     'text': 'Long text here'},
    {'repo':'repo1',
     'basefile':'2',
     'uri':'http://example.org/repo1/2',
     'title':'Title of second document in first repo',
     'identifier':'R1 D2',
     'issued':datetime(2013,3,4,14,16),
     'publisher': 'Examples & son',
     'category': ['suggestions'],
     'text': 'Even longer text here'},
    {'repo':'repo2',
     'basefile':'1',
     'uri':'http://example.org/repo2/1',
     'title':'Title of first document in second repo',
     'identifier':'R2 D1',
     'secret': False,
     'references':'http://example.org/repo2/2',
     'category':['green', 'yellow']},
    {'repo':'repo2',
     'basefile':'2',
     'uri':'http://example.org/repo2/2',
     'title':'Title of second document in second repo',
     'identifier':'R2 D2',
     'secret': True,
     'references': None,
     'category':['yellow', 'red']}
    ]
#class CustomizedIndex(unittest.TestCase):
class CustomizedIndex(object):
    """Disabled test (does not inherit unittest.TestCase) for indexes
    whose schema is extended by the repos' get_indexed_properties()."""
    # NOTE: fixed to take self, reference self.index, and drop the
    # duplicate 'category' key from the expected schema dict.
    def test_setup(self):
        self.location = mkdtemp()
        self.index = FulltextIndex(self.location, [DocRepo1(), DocRepo2()])
        # introspecting the schema (particularly if it's derived
        # directly from our definitions, not reverse-engineerded from
        # a Whoosh index on-disk) is useful for eg creating dynamic
        # search forms
        self.assertEqual(self.index.schema(),
                         {'uri':Identifier(),
                          'repo':Label(),
                          'basefile':Label(),
                          'title':Text(boost=4),
                          'identifier':Label(boost=16),
                          'text':Text(),
                          'issued':Datetime(),
                          'publisher':Label(),
                          'abstract': Text(boost=2),
                          'category': Keywords(),
                          'secret': Boolean(),
                          'references': URI()})
        shutil.rmtree(self.location)
# class CustomQuery(unittest.TestCase):
class CustomQuery(object):
    """Disabled tests (does not inherit unittest.TestCase) for querying
    on repo-customized fields: booleans, keywords, repo-limited free
    text and date intervals."""
    def setUp(self):
        self.location = mkdtemp()
        self.index = FulltextIndex(self.location, [DocRepo1(), DocRepo2()])
        self.load(custom_dataset)
    def tearDown(self):
        shutil.rmtree(self.location)
    def load(self, data):
        for doc in data:
            self.index.update(**doc)
    def test_boolean(self):
        res = self.index.query(secret=True)
        self.assertEqual(len(res),1)
        self.assertEqual(res[0]['identifier'], 'R2 D2')
        res = self.index.query(secret=False)
        self.assertEqual(len(res),1)
        self.assertEqual(res[0]['identifier'], 'R2 D1')
    def test_keywords(self):
        res = self.index.query(category='green')
        self.assertEqual(len(res),2)
        identifiers = set([x['identifier'] for x in res])
        self.assertEqual(identifiers, set(['R1 D1','R2 D1']))
    def test_repo_limited_freetext(self):
        res = self.index.query('first', repo='repo1')
        self.assertEqual(len(res),2)
        self.assertEqual(res[0]['identifier'], 'R1 D1') # contains the term 'first' twice
        self.assertEqual(res[1]['identifier'], 'R1 D2') # -""- once
    def test_repo_dateinterval(self):
        res = self.index.query(issued=Less(datetime(2013,3,1)))
        self.assertEqual(len(res),1)
        self.assertEqual(res[0]['identifier'], 'R1 D1')
        res = self.index.query(issued=More(datetime(2013,3,1)))
        self.assertEqual(res[0]['identifier'], 'R1 D2')
        res = self.index.query(issued=Between(datetime(2013,2,1),datetime(2013,4,1)))
        self.assertEqual(len(res),2)
        identifiers = set([x['identifier'] for x in res])
        self.assertEqual(identifiers, set(['R1 D1','R1 D2']))
| Python |
{'artal': '2004', 'lopnummer': '43', 'publikation': 'ad', 'type': 7}
| Python |
{'dnr': '1075-02-40', 'myndighet': 'jk', 'type': 8}
| Python |
{'artal': '2004', 'publikation': 'nja', 'sidnummer': '43', 'type': 7}
| Python |
{'chapter': '23',
'item': '4',
'law': '1960:729',
'piece': '1',
'section': '4',
'type': 1}
| Python |
{'law': '1998:204', 'type': 1}
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a WSGI handler for Apache
Requires apache+mod_wsgi.
In httpd.conf put something like:
LoadModule wsgi_module modules/mod_wsgi.so
WSGIScriptAlias / /path/to/wsgihandler.py
"""
# change these parameters as required
LOGGING = False
SOFTCRON = False
import sys
import os
# Make the web2py directory the working directory and the only local
# entry on sys.path, so gluon resolves from this installation.
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
# mod_wsgi disallows writing to stdout; redirect prints to the error log
sys.stdout=sys.stderr
import gluon.main
if LOGGING:
    # wrap the base app with request logging to httpserver.log
    application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                        logfilename='httpserver.log',
                                        profilerfilename=None)
else:
    application = gluon.main.wsgibase
if SOFTCRON:
    # run cron tasks piggy-backed on incoming requests
    from gluon.settings import global_settings
    global_settings.web2py_crontype = 'soft'
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a handler for lighttpd+fastcgi
This file has to be in the PYTHONPATH
Put something like this in the lighttpd.conf file:
server.port = 8000
server.bind = '127.0.0.1'
server.event-handler = 'freebsd-kqueue'
server.modules = ('mod_rewrite', 'mod_fastcgi')
server.error-handler-404 = '/test.fcgi'
server.document-root = '/somewhere/web2py'
server.errorlog = '/tmp/error.log'
fastcgi.server = ('.fcgi' =>
('localhost' =>
('min-procs' => 1,
'socket' => '/tmp/fcgi.sock'
)
)
)
"""
# change these parameters as required
LOGGING = False
SOFTCRON = False
import sys
import os
# Make the web2py directory the working directory and the only local
# entry on sys.path, so gluon resolves from this installation.
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
import gluon.contrib.gateways.fcgi as fcgi
if LOGGING:
    # wrap the base app with request logging to httpserver.log
    application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                        logfilename='httpserver.log',
                                        profilerfilename=None)
else:
    application = gluon.main.wsgibase
if SOFTCRON:
    # run cron tasks piggy-backed on incoming requests
    from gluon.settings import global_settings
    global_settings.web2py_crontype = 'soft'
# serve the application over the FastCGI unix socket (blocks forever)
fcgi.WSGIServer(application, bindAddress='/tmp/fcgi.sock').run()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
scgihandler.py - handler for SCGI protocol
Modified by Michele Comitini <michele.comitini@glisco.it>
from fcgihandler.py to support SCGI
fcgihandler has the following copyright:
" This file is part of the web2py Web Framework
  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"
This is a handler for lighttpd+scgi
This file has to be in the PYTHONPATH
Put something like this in the lighttpd.conf file:
server.document-root="/var/www/web2py/"
# for >= linux-2.6
server.event-handler = "linux-sysepoll"
url.rewrite-once = (
    "^(/.+?/static/.+)$" => "/applications$1",
    "(^|/.*)$" => "/handler_web2py.scgi$1",
)
scgi.server = ( "/handler_web2py.scgi" =>
                ("handler_web2py" =>
                 ( "host" => "127.0.0.1",
                   "port" => "4000",
                   "check-local" => "disable", # don't forget to set "disable"!
                 )
                )
              )
"""
LOGGING = False   # wrap the app with per-request logging/profiling
SOFTCRON = False  # enable web2py's request-triggered "soft" cron
import sys
import os
# Run from the web2py directory and make it the first sys.path entry.
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
# uncomment one of the two imports below depending on the SCGIWSGI server installed
#import paste.util.scgiserver as scgi
from wsgitools.scgi.forkpool import SCGIServer
from wsgitools.filters import WSGIFilterMiddleware, GzipWSGIFilter
# Compress responses with gzip before they go back through SCGI.
wsgiapp = WSGIFilterMiddleware(gluon.main.wsgibase, GzipWSGIFilter)
if LOGGING:
    application = gluon.main.appfactory(wsgiapp=wsgiapp,
                                        logfilename='httpserver.log',
                                        profilerfilename=None)
else:
    application = wsgiapp
if SOFTCRON:
    from gluon.settings import global_settings
    global_settings.web2py_crontype = 'soft'
# uncomment one of the two rows below depending on the SCGIWSGI server installed
#scgi.serve_application(application, '', 4000).run()
SCGIServer(application, port=4000).enable_sighandler().run()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# when web2py is run as a windows service (web2py.exe -W)
# it does not load the command line options but it
# expects to find configuration settings in a file called
#
#    web2py/options.py
#
# this file is an example for options.py
import socket
import os
ip = '0.0.0.0'  # listen on all interfaces
port = 80
# One listener per tuple: (ip, port[, ssl_private_key, ssl_certificate])
interfaces = [('0.0.0.0', 80), ('0.0.0.0', 443, 'ssl_private_key.pem', 'ssl_certificate.pem')]
password = '<recycle>'  # ## <recycle> means use the previous password
pid_filename = 'httpserver.pid'
log_filename = 'httpserver.log'
profiler_filename = None
#ssl_certificate = 'ssl_certificate.pem'  # ## path to certificate file
#ssl_private_key = 'ssl_private_key.pem'  # ## path to private key file
#numthreads = 50  # ## deprecated; remove
minthreads = None  # None -> server default
maxthreads = None  # None -> server default
server_name = socket.gethostname()
request_queue_size = 5   # backlog of pending connections
timeout = 30             # per-request socket timeout (seconds)
shutdown_timeout = 5     # grace period when stopping (seconds)
folder = os.getcwd()
extcron = None
nocron = None
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This file is based, although a rewrite, on MIT-licensed code from the Bottle web framework.
"""
import os, sys, optparse, urllib
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
from gluon.fileutils import read_file, write_file
class Servers:
    """Registry of WSGI server adapters.

    Each static method starts the given WSGI ``app`` on ``address`` (an
    ``(ip, port)`` tuple) using a different server backend.  Backends are
    imported lazily, so only the server actually selected needs to be
    installed.  ``run()`` dispatches here by method name via getattr, and
    ``main()`` lists the public method names as the available choices.
    """
    @staticmethod
    def cgi(app, address=None, **options):
        """Run as a plain CGI script; the address is ignored."""
        from wsgiref.handlers import CGIHandler
        CGIHandler().run(app)  # Just ignore host and port here

    @staticmethod
    def flup(app, address, **options):
        """Run under flup's FastCGI server."""
        import flup.server.fcgi
        flup.server.fcgi.WSGIServer(app, bindAddress=address).run()

    @staticmethod
    def wsgiref(app, address, **options):  # pragma: no cover
        """Run under the stdlib wsgiref server, with request logging silenced."""
        from wsgiref.simple_server import make_server, WSGIRequestHandler

        class QuietHandler(WSGIRequestHandler):
            def log_request(*args, **kw):
                pass
        options['handler_class'] = QuietHandler
        srv = make_server(address[0], address[1], app, **options)
        srv.serve_forever()

    @staticmethod
    def cherrypy(app, address, **options):
        """Run under CherryPy's WSGI server."""
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer(address, app)
        server.start()

    @staticmethod
    def rocket(app, address, **options):
        """Run under web2py's bundled Rocket server (the default)."""
        from gluon.rocket import CherryPyWSGIServer
        server = CherryPyWSGIServer(address, app)
        server.start()

    @staticmethod
    def rocket_with_repoze_profiler(app, address, **options):
        """Run under Rocket with repoze.profile wrapped around the app."""
        from gluon.rocket import CherryPyWSGIServer
        from repoze.profile.profiler import AccumulatingProfileMiddleware
        from gluon.settings import global_settings
        global_settings.web2py_crontype = 'none'
        wrapped = AccumulatingProfileMiddleware(
            app,
            log_filename='wsgi.prof',
            discard_first_request=True,
            flush_at_shutdown=True,
            path='/__profile__'
        )
        server = CherryPyWSGIServer(address, wrapped)
        server.start()

    @staticmethod
    def paste(app, address, **options):
        """Run under Paste's httpserver."""
        from paste import httpserver
        from paste.translogger import TransLogger
        httpserver.serve(app, host=address[0], port=address[1], **options)

    @staticmethod
    def fapws(app, address, **options):
        """Run under fapws3 (libev based)."""
        import fapws._evwsgi as evwsgi
        from fapws import base
        evwsgi.start(address[0], str(address[1]))
        evwsgi.set_base_module(base)

        # BUGFIX: the original wrapper was itself named ``app`` and called
        # ``app`` inside its body, so it recursed into itself forever.
        # Bind the real application under a distinct name instead.
        def wsgi_app(environ, start_response, _app=app):
            environ['wsgi.multiprocess'] = False
            return _app(environ, start_response)
        evwsgi.wsgi_cb(('', wsgi_app))
        evwsgi.run()

    @staticmethod
    def gevent(app, address, **options):
        """Run under gevent's pywsgi, optionally with a bounded worker pool."""
        from gevent import monkey
        monkey.patch_all()
        from gevent import pywsgi
        from gevent.pool import Pool
        # BUGFIX: ``options`` is a plain dict here, so it must be indexed;
        # the original ``options.workers`` raised AttributeError.
        spawn = Pool(int(options['workers'])) if 'workers' in options else 'default'
        pywsgi.WSGIServer(address, app, spawn=spawn).serve_forever()

    @staticmethod
    def bjoern(app, address, **options):
        """Run under bjoern."""
        import bjoern
        bjoern.run(app, *address)

    @staticmethod
    def tornado(app, address, **options):
        """Run under Tornado's WSGI container."""
        import tornado.wsgi
        import tornado.httpserver
        import tornado.ioloop
        container = tornado.wsgi.WSGIContainer(app)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(address=address[0], port=address[1])
        tornado.ioloop.IOLoop.instance().start()

    @staticmethod
    def twisted(app, address, **options):
        """Run under Twisted Web with a thread pool for WSGI calls."""
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app))
        reactor.listenTCP(address[1], factory, interface=address[0])
        reactor.run()

    @staticmethod
    def diesel(app, address, **options):
        """Run under diesel (binds the port only; the ip is not used)."""
        from diesel.protocols.wsgi import WSGIApplication
        app = WSGIApplication(app, port=address[1])
        app.run()

    @staticmethod
    def gunicorn(app, address, **options):
        """Run under gunicorn with 4 worker processes."""
        import gunicorn.arbiter
        gunicorn.arbiter.Arbiter(address, 4, app).run()

    # Backward-compatible alias: the server name was historically
    # misspelled, so keep accepting ``-s gnuicorn`` from old scripts.
    gnuicorn = gunicorn

    @staticmethod
    def eventlet(app, address, **options):
        """Run under eventlet's WSGI server."""
        from eventlet import wsgi, listen
        wsgi.server(listen(address), app)

    @staticmethod
    def mongrel2(app, address, **options):
        """Run behind Mongrel2 over ZeroMQ (fixed local endpoints)."""
        import uuid
        sys.path.append(os.path.abspath(os.path.dirname(__file__)))
        from mongrel2 import handler
        conn = handler.Connection(str(uuid.uuid4()),
                                  "tcp://127.0.0.1:9997",
                                  "tcp://127.0.0.1:9996")
        mongrel2_handler(app, conn, debug=False)
def run(servername, ip, port, softcron=True, logging=False, profiler=None):
    """Start the web2py WSGI app on the server adapter named *servername*.

    *servername* must match one of the ``Servers`` static methods; the
    dispatch is done with getattr.  Blocks until the server exits.
    """
    if logging:
        # Wrap the app so every request is logged (and optionally profiled).
        application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                            logfilename='httpserver.log',
                                            profilerfilename=profiler)
    else:
        application = gluon.main.wsgibase
    if softcron:
        from gluon.settings import global_settings
        global_settings.web2py_crontype = 'soft'
    getattr(Servers, servername)(application, (ip, int(port)))
def mongrel2_handler(application, conn, debug=False):
    """
    Based on :
    https://github.com/berry/Mongrel2-WSGI-Handler/blob/master/wsgi-handler.py
    WSGI handler based on the Python wsgiref SimpleHandler.
    A WSGI application should return a iterable op StringTypes.
    Any encoding must be handled by the WSGI application itself.

    Loops forever: receives Mongrel2 requests over *conn*, converts each
    into a WSGI environ, runs *application*, and replies over *conn*.
    """
    from wsgiref.handlers import SimpleHandler
    try:
        import cStringIO as StringIO
    except:
        import StringIO

    # TODO - this wsgi handler executes the application and renders a page
    # in memory completely before returning it as a response to the client.
    # Thus, it does not "stream" the result back to the client. It should be
    # possible though. The SimpleHandler accepts file-like stream objects. So,
    # it should be just a matter of connecting 0MQ requests/response streams to
    # the SimpleHandler requests and response streams. However, the Python API
    # for Mongrel2 doesn't seem to support file-like stream objects for requests
    # and responses. Unless I have missed something.

    while True:
        if debug: print "WAITING FOR REQUEST"

        # receive a request
        req = conn.recv()
        if debug: print "REQUEST BODY: %r\n" % req.body

        if req.is_disconnect():
            if debug: print "DISCONNECT"
            continue #effectively ignore the disconnect from the client

        # Set a couple of environment attributes a.k.a. header attributes
        # that are a must according to PEP 333
        environ = req.headers
        environ['SERVER_PROTOCOL'] = 'HTTP/1.1' # SimpleHandler expects a server_protocol, lets assume it is HTTP 1.1
        environ['REQUEST_METHOD'] = environ['METHOD']
        if ':' in environ['Host']:
            environ['SERVER_NAME'] = environ['Host'].split(':')[0]
            environ['SERVER_PORT'] = environ['Host'].split(':')[1]
        else:
            environ['SERVER_NAME'] = environ['Host']
            environ['SERVER_PORT'] = ''
        environ['SCRIPT_NAME'] = '' # empty for now
        environ['PATH_INFO'] = urllib.unquote(environ['PATH'])
        if '?' in environ['URI']:
            environ['QUERY_STRING'] = environ['URI'].split('?')[1]
        else:
            environ['QUERY_STRING'] = ''
        if environ.has_key('Content-Length'):
            environ['CONTENT_LENGTH'] = environ['Content-Length'] # necessary for POST to work with Django
        environ['wsgi.input'] = req.body

        if debug: print "ENVIRON: %r\n" % environ

        # SimpleHandler needs file-like stream objects for
        # requests, errors and responses
        reqIO = StringIO.StringIO(req.body)
        errIO = StringIO.StringIO()
        respIO = StringIO.StringIO()

        # execute the application
        handler = SimpleHandler(reqIO, respIO, errIO, environ, multithread = False, multiprocess = False)
        handler.run(application)

        # Get the response and filter out the response (=data) itself,
        # the response headers,
        # the response status code and the response status description
        response = respIO.getvalue()
        response = response.split("\r\n")
        data = response[-1]
        headers = dict([r.split(": ") for r in response[1:-2]])
        # status line looks like "HTTP/1.1 200 OK": code at [9:12],
        # human-readable status text from [13:]
        code = response[0][9:12]
        status = response[0][13:]

        # strip BOM's from response data
        # Especially the WSGI handler from Django seems to generate them (2 actually, huh?)
        # a BOM isn't really necessary and cause HTML parsing errors in Chrome and Safari
        # See also: http://www.xs4all.nl/~mechiel/projects/bomstrip/
        # Although I still find this a ugly hack, it does work.
        data = data.replace('\xef\xbb\xbf', '')

        # Get the generated errors
        errors = errIO.getvalue()

        # return the response
        if debug: print "RESPONSE: %r\n" % response
        if errors:
            if debug: print "ERRORS: %r" % errors
            data = "%s\r\n\r\n%s" % (data, errors)
        conn.reply_http(req, data, code = code, status = status, headers = headers)
def main():
    """Parse command line options and start the chosen WSGI server."""
    usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P"
    try:
        version = read_file('VERSION')
    except IOError:
        version = ''
    parser = optparse.OptionParser(usage, None, optparse.Option, version)
    parser.add_option('-l',
                      '--logging',
                      action='store_true',
                      default=False,
                      dest='logging',
                      help='log into httpserver.log')
    # NOTE(review): default is False but the value is used as a profiler
    # *filename* downstream -- confirm False is treated as "no profiling".
    parser.add_option('-P',
                      '--profiler',
                      default=False,
                      dest='profiler',
                      help='profiler filename')
    # Every public attribute of Servers is a selectable backend.
    servers = ', '.join(x for x in dir(Servers) if not x[0] == '_')
    parser.add_option('-s',
                      '--server',
                      default='rocket',
                      dest='server',
                      help='server name (%s)' % servers)
    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help='ip address')
    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      help='port number')
    # NOTE(review): '-w' is parsed but never forwarded to the server by
    # run(); only backends reading **options could use it.
    parser.add_option('-w',
                      '--workers',
                      default='',
                      dest='workers',
                      help='number of workers number')
    (options, args) = parser.parse_args()
    print 'starting %s on %s:%s...' % (options.server, options.ip, options.port)
    run(options.server, options.ip, options.port,
        logging=options.logging, profiler=options.profiler)


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
Install py2exe: http://sourceforge.net/projects/py2exe/files/
Copy script to the web2py directory
c:\bin\python26\python build_windows_exe.py py2exe
Adapted from http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/view/head:/static/scripts/tools/standalone_exe.py
"""
from distutils.core import setup
import py2exe
from gluon.import_all import base_modules, contributed_modules
from gluon.fileutils import readlines_file
from glob import glob
import fnmatch
import os
import shutil
import sys
import re
import zipfile
#read web2py version from VERSION file
web2py_version_line = readlines_file('VERSION')[0]
#use regular expression to get just the version number
v_re = re.compile('[0-9]+\.[0-9]+\.[0-9]+')
web2py_version = v_re.search(web2py_version_line).group(0)

#pull in preferences from config file (setup_exe.conf, [Setup] section)
import ConfigParser
Config = ConfigParser.ConfigParser()
Config.read('setup_exe.conf')
remove_msft_dlls = Config.getboolean("Setup", "remove_microsoft_dlls")
copy_apps = Config.getboolean("Setup", "copy_apps")
copy_site_packages = Config.getboolean("Setup", "copy_site_packages")
copy_scripts = Config.getboolean("Setup", "copy_scripts")
make_zip = Config.getboolean("Setup", "make_zip")
zip_filename = Config.get("Setup", "zip_filename")
remove_build_files = Config.getboolean("Setup", "remove_build_files")

# Python base version, e.g. '2.6'
python_version = sys.version[:3]

# List of modules deprecated in python2.6 that are in the above set
py26_deprecated = ['mhlib', 'multifile', 'mimify', 'sets', 'MimeWriter']

if python_version == '2.6':
    base_modules += ['json', 'multiprocessing']
    # drop the deprecated ones so py2exe does not choke on them
    base_modules = list(set(base_modules).difference(set(py26_deprecated)))

#I don't know if this is even necessary
if python_version == '2.6':
    # Python26 compatibility: http://www.py2exe.org/index.cgi/Tutorial#Step52
    try:
        shutil.copytree('C:\Bin\Microsoft.VC90.CRT', 'dist/')
    except:
        print "You MUST copy Microsoft.VC90.CRT folder into the dist directory"
# Build both a console exe (web2py.exe) and a windowed one
# (web2py_no_console.exe) with py2exe.
setup(
    console=['web2py.py'],
    windows=[{'script': 'web2py.py',
              'dest_base': 'web2py_no_console'  # MUST NOT be just 'web2py' otherwise it overrides the standard web2py.exe
              }],
    name="web2py",
    version=web2py_version,
    description="web2py web framework",
    author="Massimo DiPierro",
    license="LGPL v3",
    data_files=[
        'ABOUT',
        'LICENSE',
        'VERSION',
        'splashlogo.gif',
        'logging.example.conf',
        'options_std.py',
        'app.example.yaml',
        'queue.example.yaml'
    ],
    options={'py2exe': {
        'packages': contributed_modules,
        'includes': base_modules,
    }},
)
print "web2py binary successfully built"
def copy_folders(source, destination):
    """Copy the *source* tree to dist/*destination*, replacing any
    previous copy so stale files never survive a rebuild."""
    target = os.path.join('dist', destination)
    if os.path.exists(target):
        shutil.rmtree(target)
    shutil.copytree(os.path.join(source), target)
#should we remove Windows OS dlls user is unlikely to be able to distribute
if remove_msft_dlls:
    print "Deleted Microsoft files not licensed for open source distribution"
    print "You are still responsible for making sure you have the rights to distribute any other included files!"
    #delete the API-MS-Win-Core DLLs
    for f in glob('dist/API-MS-Win-*.dll'):
        os.unlink(f)
    #then delete some other files belonging to Microsoft
    other_ms_files = ['KERNELBASE.dll', 'MPR.dll', 'MSWSOCK.dll', 'POWRPROF.dll']
    for f in other_ms_files:
        try:
            os.unlink(os.path.join('dist', f))
        except:
            # a failed delete means we would ship an unlicensed DLL: abort
            print "unable to delete dist/" + f
            sys.exit(1)

#Should we include applications?
if copy_apps:
    copy_folders('applications', 'applications')
    print "Your application(s) have been added"
else:
    #only copy web2py's default applications
    copy_folders('applications/admin', 'applications/admin')
    copy_folders('applications/welcome', 'applications/welcome')
    copy_folders('applications/examples', 'applications/examples')
    print "Only web2py's admin, examples & welcome applications have been added"

#should we copy project's site-packages into dist/site-packages
if copy_site_packages:
    #copy site-packages
    copy_folders('site-packages', 'site-packages')
else:
    #no worries, web2py will create the (empty) folder first run
    print "Skipping site-packages"
    pass

#should we copy project's scripts into dist/scripts
if copy_scripts:
    #copy scripts
    copy_folders('scripts', 'scripts')
else:
    #no worries, web2py will create the (empty) folder first run
    print "Skipping scripts"
    pass
#borrowed from http://bytes.com/topic/python/answers/851018-how-zip-directory-python-using-zipfile
def recursive_zip(zipf, directory, folder=""):
    """Add every file under *directory* to the open ZipFile *zipf*,
    recursing into subdirectories; *folder* is the archive-name prefix
    accumulated so far."""
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        arc_name = folder + os.sep + entry
        if os.path.isfile(full_path):
            zipf.write(full_path, arc_name)
        elif os.path.isdir(full_path):
            recursive_zip(zipf, full_path, arc_name)
#should we create a zip file of the build?
if make_zip:
    #to keep consistent with how official web2py windows zip file is setup,
    #create a web2py folder & copy dist's files into it
    shutil.copytree('dist', 'zip_temp/web2py')
    #create zip file
    #use filename specified via the config file
    zipf = zipfile.ZipFile(zip_filename + ".zip", "w", compression=zipfile.ZIP_DEFLATED)
    path = 'zip_temp' #just temp so the web2py directory is included in our zip file
    recursive_zip(zipf, path) #leave the first folder as None, as path is root.
    zipf.close()
    shutil.rmtree('zip_temp')
    print "Your Windows binary version of web2py can be found in " + zip_filename + ".zip"
    print "You may extract the archive anywhere and then run web2py/web2py.exe"

#should py2exe build files be removed?
if remove_build_files:
    shutil.rmtree('build')
    shutil.rmtree('deposit')
    shutil.rmtree('dist')
    print "py2exe build files removed"

#final info
if not make_zip and not remove_build_files:
    print "Your Windows binary & associated files can also be found in /dist"

print "Finished!"
print "Enjoy web2py " + web2py_version_line
| Python |
#!/usr/bin/env python
import os
import sys
"""
Author: Christopher Steel on behalf of Voice of Access
Copyright: Copyrighted (c) by Massimo Di Pierro (2007-2011)
web2py_clone becomes part of the web2py distribution available
on Pypi via 'pip install web2py'
web2py_clone is one of multiple commands that become available after running
'pip install web2py' in a virtual environment. It requires
mercurial to be installed in the virtual environment.
web2py_clone creates a local clone from the Web2py google code
project in the directory "./web2py," a directory called web2py
one directory up from the location of this script.
./bin/web2py_clone
./web2py
"""
def main():
    """Clone the web2py repository from Google Code into ../web2py,
    resolved relative to this script's own directory."""
    initial_dir = current = os.getcwd()
    script_dir = os.path.dirname(os.path.realpath(__file__))
    try:
        print ("cwd now: %s" % current)
    except:
        print ("command failed %s" % current)
    try:
        # Work from the script's directory so the relative target is stable.
        os.chdir(script_dir)
        current = os.getcwd()
        print ("cwd now: %s" % current)
        source = "https://code.google.com/p/web2py/"
        target = os.path.join('..', 'web2py')
        print ("attempting to clone %s" % source)
        print ("to %s" % target)
        if not os.path.isdir(target):
            os.system("hg clone %s %s" % (source, target))
        else:
            # Refuse to clobber an existing checkout.
            print ("found directory called web2py at %s" % target)
            print ("is web2py already installed?")
            print ("aborting clone attempt")
        # Restore the caller's working directory.
        os.chdir(initial_dir)
        current = initial_dir
    except:
        print ("web2py-clone failed in second try statement %s" % current)


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

# Default to the current directory; an optional first command-line
# argument overrides it when it names an existing path.
path = os.getcwd()
try:
    if sys.argv[1] and os.path.exists(sys.argv[1]):
        path = sys.argv[1]
except:
    pass  # no argument given -- keep the cwd
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]

# import gluon.import_all ##### This should be uncommented for py2exe.py
import gluon.widget


def main():
    # Start Web2py and Web2py cron service!
    gluon.widget.start(cron=True)


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py.
"""
import sys
import cStringIO
import time
import thread
import re
import os
import socket
import signal
import math
import logging
import newcron
import main
from fileutils import w2p_pack, read_file, write_file
from shell import run, test
from settings import global_settings
# GUI / Windows-only dependencies are optional: headless installs
# simply fall back to the console interface.
try:
    import Tkinter, tkMessageBox
    import contrib.taskbar_widget
    from winservice import web2py_windows_service_handler
except:
    pass

# Compatibility shim for very old Pythons lacking BaseException.
try:
    BaseException
except NameError:
    BaseException = Exception

ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-2011'
ProgramVersion = read_file('VERSION').strip()

ProgramInfo = '''%s
                 %s
                 %s''' % (ProgramName, ProgramAuthor, ProgramVersion)

# Warn (but do not abort) on unsupported interpreter versions.
if not sys.version[:3] in ['2.4', '2.5', '2.6', '2.7']:
    msg = 'Warning: web2py requires Python 2.4, 2.5 (recommended), 2.6 or 2.7 but you are running:\n%s'
    msg = msg % sys.version
    sys.stderr.write(msg)

logger = logging.getLogger("web2py")
class IO(object):
    """Stand-in for sys.stdout: echoes every write to the real stdout
    and either forwards it to an optional ``callback`` or stores it in
    an internal buffer."""

    def __init__(self):
        """Create an empty capture buffer."""
        self.buffer = cStringIO.StringIO()

    def write(self, data):
        """Mirror *data* to the real stdout, then dispatch it."""
        sys.__stdout__.write(data)
        if not hasattr(self, 'callback'):
            self.buffer.write(data)
        else:
            self.callback(data)
def try_start_browser(url):
    """Open *url* in the default browser, warning instead of raising
    when no browser can be launched."""
    try:
        import webbrowser
        webbrowser.open(url)
    except:
        print('warning: unable to detect your browser')
def start_browser(ip, port):
    """Announce the server URL on stdout and open it in the browser."""
    target = 'http://%s:%s' % (ip, port)
    print('please visit:')
    print('\t' + target)
    print('starting browser...')
    try_start_browser(target)
def presentation(root):
    """ Draw the splash screen """
    root.withdraw()  # hide the (still empty) main window while splashing
    dx = root.winfo_screenwidth()
    dy = root.winfo_screenheight()
    dialog = Tkinter.Toplevel(root, bg='white')
    # 500x300 window centered on the screen
    dialog.geometry('%ix%i+%i+%i' % (500, 300, dx / 2 - 200, dy / 2 - 150))
    dialog.overrideredirect(1)  # borderless, undecorated window
    dialog.focus_force()
    canvas = Tkinter.Canvas(dialog,
                            background='white',
                            width=500,
                            height=300)
    canvas.pack()
    root.update()
    logo = 'splashlogo.gif'
    if os.path.exists(logo):
        img = Tkinter.PhotoImage(file=logo)
        pnl = Tkinter.Label(canvas, image=img, background='white', bd=0)
        pnl.pack(side='top', fill='both', expand='yes')
        # Prevent garbage collection of img
        pnl.image = img

    def add_label(text='Change Me', font_size=12, foreground='#195866', height=1):
        # Helper to build uniformly styled splash labels.
        return Tkinter.Label(
            master=canvas,
            width=250,
            height=height,
            text=text,
            font=('Helvetica', font_size),
            anchor=Tkinter.CENTER,
            foreground=foreground,
            background='white'
        )

    add_label('Welcome to...').pack(side='top')
    add_label(ProgramName, 18, '#FF5C1F', 2).pack()
    add_label(ProgramAuthor).pack()
    add_label(ProgramVersion).pack()
    root.update()
    time.sleep(5)  # show the splash for five seconds
    dialog.destroy()
    return
class web2pyDialog(object):
    """ Main window dialog

    Tkinter GUI to configure and control the web2py HTTP server:
    ip/port/password entries, start/stop buttons, a traffic graph,
    and (on Windows) an optional taskbar icon.
    """

    def __init__(self, root, options):
        """ web2pyDialog constructor

        *root* is the Tk root window; *options* carries the parsed
        command-line options (ip, port, folder, password, taskbar...).
        """
        root.title('web2py server')
        self.root = Tkinter.Toplevel(root)
        self.options = options
        self.menu = Tkinter.Menu(self.root)
        servermenu = Tkinter.Menu(self.menu, tearoff=0)
        httplog = os.path.join(self.options.folder, 'httpserver.log')

        # Building the Menu
        item = lambda: try_start_browser(httplog)
        servermenu.add_command(label='View httpserver.log',
                               command=item)
        servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
                               command=self.quit)
        self.menu.add_cascade(label='Server', menu=servermenu)
        self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
        self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
        helpmenu = Tkinter.Menu(self.menu, tearoff=0)

        # Home Page
        item = lambda: try_start_browser('http://www.web2py.com')
        helpmenu.add_command(label='Home Page',
                             command=item)

        # About
        item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
        helpmenu.add_command(label='About',
                             command=item)
        self.menu.add_cascade(label='Info', menu=helpmenu)
        self.root.config(menu=self.menu)

        if options.taskbar:
            # closing the window only hides it; the taskbar icon stays
            self.root.protocol('WM_DELETE_WINDOW',
                               lambda: self.quit(True))
        else:
            self.root.protocol('WM_DELETE_WINDOW', self.quit)

        sticky = Tkinter.NW

        # IP entry
        Tkinter.Label(self.root,
                      text='Server IP:',
                      justify=Tkinter.LEFT).grid(row=0,
                                                 column=0,
                                                 sticky=sticky)
        self.ip = Tkinter.Entry(self.root)
        self.ip.insert(Tkinter.END, self.options.ip)
        self.ip.grid(row=0, column=1, sticky=sticky)

        # Port entry
        Tkinter.Label(self.root,
                      text='Server Port:',
                      justify=Tkinter.LEFT).grid(row=1,
                                                 column=0,
                                                 sticky=sticky)
        self.port_number = Tkinter.Entry(self.root)
        self.port_number.insert(Tkinter.END, self.options.port)
        self.port_number.grid(row=1, column=1, sticky=sticky)

        # Password entry (admin interface password)
        Tkinter.Label(self.root,
                      text='Choose Password:',
                      justify=Tkinter.LEFT).grid(row=2,
                                                 column=0,
                                                 sticky=sticky)
        self.password = Tkinter.Entry(self.root, show='*')
        self.password.bind('<Return>', lambda e: self.start())
        self.password.focus_force()
        self.password.grid(row=2, column=1, sticky=sticky)

        # Prepare the canvas (scrolling request-rate graph)
        self.canvas = Tkinter.Canvas(self.root,
                                     width=300,
                                     height=100,
                                     bg='black')
        self.canvas.grid(row=3, column=0, columnspan=2)
        self.canvas.after(1000, self.update_canvas)

        # Prepare the frame
        frame = Tkinter.Frame(self.root)
        frame.grid(row=4, column=0, columnspan=2)

        # Start button
        self.button_start = Tkinter.Button(frame,
                                           text='start server',
                                           command=self.start)
        self.button_start.grid(row=0, column=0)

        # Stop button (disabled until the server is running)
        self.button_stop = Tkinter.Button(frame,
                                          text='stop server',
                                          command=self.stop)
        self.button_stop.grid(row=0, column=1)
        self.button_stop.configure(state='disabled')

        if options.taskbar:
            self.tb = contrib.taskbar_widget.TaskBarIcon()
            self.checkTaskBar()
            if options.password != '<ask>':
                # password supplied on the command line: start minimized
                self.password.insert(0, options.password)
                self.start()
                self.root.withdraw()
        else:
            self.tb = None

    def checkTaskBar(self):
        """ Check taskbar status

        Polls the taskbar icon's pending event queue once per second
        and maps each event to the matching dialog action.
        """
        if self.tb.status:
            if self.tb.status[0] == self.tb.EnumStatus.QUIT:
                self.quit()
            elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
                if self.root.state() == 'withdrawn':
                    self.root.deiconify()
                else:
                    self.root.withdraw()
            elif self.tb.status[0] == self.tb.EnumStatus.STOP:
                self.stop()
            elif self.tb.status[0] == self.tb.EnumStatus.START:
                self.start()
            elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
                self.stop()
                self.start()
            del self.tb.status[0]
        self.root.after(1000, self.checkTaskBar)

    def update(self, text):
        """ Update app text """
        try:
            self.text.configure(state='normal')
            self.text.insert('end', text)
            self.text.configure(state='disabled')
        except:
            pass  # ## this should only happen in case app is destroyed

    def connect_pages(self):
        """ Connect pages

        Adds one "Pages" menu entry per installed application
        (any applications/<name>/ directory with an __init__.py).
        """
        for arq in os.listdir('applications/'):
            if os.path.exists('applications/%s/__init__.py' % arq):
                url = self.url + '/' + arq
                # bind url via default arg so each lambda keeps its own
                start_browser = lambda u = url: try_start_browser(u)
                self.pagesmenu.add_command(label=url,
                                           command=start_browser)

    def quit(self, justHide=False):
        """ Finish the program execution """
        if justHide:
            self.root.withdraw()
        else:
            try:
                self.server.stop()
            except:
                pass  # server may never have been started
            try:
                self.tb.Destroy()
            except:
                pass  # no taskbar icon in use
            self.root.destroy()
            sys.exit(0)

    def error(self, message):
        """ Show error message """
        tkMessageBox.showerror('web2py start server', message)

    def start(self):
        """ Start web2py server """
        password = self.password.get()

        if not password:
            self.error('no password, no web admin interface')

        ip = self.ip.get()
        # dotted-quad sanity check only; does not validate octet ranges
        regexp = '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
        if ip and not re.compile(regexp).match(ip):
            return self.error('invalid host ip address')

        try:
            port = int(self.port_number.get())
        except:
            return self.error('invalid port number')

        self.url = 'http://%s:%s' % (ip, port)
        self.connect_pages()
        self.button_start.configure(state='disabled')

        try:
            options = self.options
            req_queue_size = options.request_queue_size
            self.server = main.HttpServer(
                ip,
                port,
                password,
                pid_filename=options.pid_filename,
                log_filename=options.log_filename,
                profiler_filename=options.profiler_filename,
                ssl_certificate=options.ssl_certificate,
                ssl_private_key=options.ssl_private_key,
                min_threads=options.minthreads,
                max_threads=options.maxthreads,
                server_name=options.server_name,
                request_queue_size=req_queue_size,
                timeout=options.timeout,
                shutdown_timeout=options.shutdown_timeout,
                path=options.folder,
                interfaces=options.interfaces)

            # serve in a background thread so the GUI stays responsive
            thread.start_new_thread(self.server.start, ())
        except Exception, e:
            self.button_start.configure(state='normal')
            return self.error(str(e))

        self.button_stop.configure(state='normal')

        if not options.taskbar:
            thread.start_new_thread(start_browser, (ip, port))

        self.password.configure(state='readonly')
        self.ip.configure(state='readonly')
        self.port_number.configure(state='readonly')

        if self.tb:
            self.tb.SetServerRunning()

    def stop(self):
        """ Stop web2py server """
        self.button_start.configure(state='normal')
        self.button_stop.configure(state='disabled')
        self.password.configure(state='normal')
        self.ip.configure(state='normal')
        self.port_number.configure(state='normal')
        self.server.stop()

        if self.tb:
            self.tb.SetServerStopped()

    def update_canvas(self):
        """ Update canvas

        Once per second, reads the newly appended portion of
        httpserver.log and scrolls the request-rate graph.  The first
        call (no self.t0/p0/q0 yet) lands in the except branch, which
        (re)initializes the graph state.
        """
        try:
            t1 = os.path.getsize('httpserver.log')
        except:
            # log file not there yet: try again in a second
            self.canvas.after(1000, self.update_canvas)
            return

        try:
            fp = open('httpserver.log', 'r')
            fp.seek(self.t0)
            data = fp.read(t1 - self.t0)
            fp.close()
            # fewer log lines -> taller (quieter) graph value
            value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
            self.p0 = value

            for i in xrange(len(self.p0) - 1):
                c = self.canvas.coords(self.q0[i])
                self.canvas.coords(self.q0[i],
                                   (c[0],
                                    self.p0[i],
                                    c[2],
                                    self.p0[i + 1]))
            self.t0 = t1
        except BaseException:
            # NOTE(review): the time.time() value is immediately
            # overwritten by t1 on the next line -- looks like dead
            # code inherited from upstream; confirm before removing.
            self.t0 = time.time()
            self.t0 = t1
            self.p0 = [100] * 300
            self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
                       fill='green') for i in xrange(len(self.p0) - 1)]

        self.canvas.after(1000, self.update_canvas)
def console():
    """Define the behavior of the console web2py execution.

    Builds the optparse parser for every supported flag, splits off
    everything after -A/--args (passed verbatim to a -S script), parses
    the command line, then post-processes the result (quiet/debug
    logging, --interfaces parsing, legacy --numthreads mapping and
    first-run bootstrap of 'applications' and welcome.w2p).

    Returns the (options, args) pair produced by optparse.
    """
    import optparse
    import textwrap
    usage = "python web2py.py"
    description = """\
web2py Web Framework startup script.
ATTENTION: unless a password is specified (-a 'passwd') web2py will
attempt to run a GUI. In this case command line options are ignored."""
    description = textwrap.dedent(description)
    parser = optparse.OptionParser(usage, None, optparse.Option, ProgramVersion)
    parser.description = description
    # -- network and authentication -----------------------------------
    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help='ip address of the server (127.0.0.1)')
    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      type='int',
                      help='port of server (8000)')
    msg = 'password to be used for administration'
    msg += ' (use -a "<recycle>" to reuse the last password))'
    parser.add_option('-a',
                      '--password',
                      default='<ask>',
                      dest='password',
                      help=msg)
    parser.add_option('-c',
                      '--ssl_certificate',
                      default='',
                      dest='ssl_certificate',
                      help='file that contains ssl certificate')
    parser.add_option('-k',
                      '--ssl_private_key',
                      default='',
                      dest='ssl_private_key',
                      help='file that contains ssl private key')
    parser.add_option('--ca-cert',
                      action='store',
                      dest='ssl_ca_certificate',
                      default=None,
                      help='Use this file containing the CA certificate to validate X509 certificates from clients')
    # -- server process tuning ----------------------------------------
    parser.add_option('-d',
                      '--pid_filename',
                      default='httpserver.pid',
                      dest='pid_filename',
                      help='file to store the pid of the server')
    parser.add_option('-l',
                      '--log_filename',
                      default='httpserver.log',
                      dest='log_filename',
                      help='file to log connections')
    parser.add_option('-n',
                      '--numthreads',
                      default=None,
                      type='int',
                      dest='numthreads',
                      help='number of threads (deprecated)')
    parser.add_option('--minthreads',
                      default=None,
                      type='int',
                      dest='minthreads',
                      help='minimum number of server threads')
    parser.add_option('--maxthreads',
                      default=None,
                      type='int',
                      dest='maxthreads',
                      help='maximum number of server threads')
    parser.add_option('-s',
                      '--server_name',
                      default=socket.gethostname(),
                      dest='server_name',
                      help='server name for the web server')
    msg = 'max number of queued requests when server unavailable'
    parser.add_option('-q',
                      '--request_queue_size',
                      default='5',
                      type='int',
                      dest='request_queue_size',
                      help=msg)
    parser.add_option('-o',
                      '--timeout',
                      default='10',
                      type='int',
                      dest='timeout',
                      help='timeout for individual request (10 seconds)')
    parser.add_option('-z',
                      '--shutdown_timeout',
                      default='5',
                      type='int',
                      dest='shutdown_timeout',
                      help='timeout on shutdown of server (5 seconds)')
    parser.add_option('--socket-timeout',
                      default=5,
                      type='int',
                      dest='socket_timeout',
                      help='timeout for socket (5 second)')
    parser.add_option('-f',
                      '--folder',
                      default=os.getcwd(),
                      dest='folder',
                      help='folder from which to run web2py')
    # -- verbosity ------------------------------------------------------
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      dest='verbose',
                      default=False,
                      help='increase --test verbosity')
    parser.add_option('-Q',
                      '--quiet',
                      action='store_true',
                      dest='quiet',
                      default=False,
                      help='disable all output')
    msg = 'set debug output level (0-100, 0 means all, 100 means none;'
    msg += ' default is 30)'
    parser.add_option('-D',
                      '--debug',
                      dest='debuglevel',
                      default=30,
                      type='int',
                      help=msg)
    # -- shell / script execution ---------------------------------------
    msg = 'run web2py in interactive shell or IPython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += ' APPNAME like a/c/f (c,f optional)'
    parser.add_option('-S',
                      '--shell',
                      dest='shell',
                      metavar='APPNAME',
                      help=msg)
    msg = 'run web2py in interactive shell or bpython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += '\n Use combined with --shell'
    parser.add_option('-B',
                      '--bpython',
                      action='store_true',
                      default=False,
                      dest='bpython',
                      help=msg)
    msg = 'only use plain python shell; should be used with --shell option'
    parser.add_option('-P',
                      '--plain',
                      action='store_true',
                      default=False,
                      dest='plain',
                      help=msg)
    msg = 'auto import model files; default is False; should be used'
    msg += ' with --shell option'
    parser.add_option('-M',
                      '--import_models',
                      action='store_true',
                      default=False,
                      dest='import_models',
                      help=msg)
    msg = 'run PYTHON_FILE in web2py environment;'
    msg += ' should be used with --shell option'
    parser.add_option('-R',
                      '--run',
                      dest='run',
                      metavar='PYTHON_FILE',
                      default='',
                      help=msg)
    # -- scheduler / tests / services ------------------------------------
    msg = 'run scheduled tasks for the specified apps'
    msg += '-K app1,app2,app3'
    msg += 'requires a scheduler defined in the models'
    parser.add_option('-K',
                      '--scheduler',
                      dest='scheduler',
                      default=None,
                      help=msg)
    msg = 'run doctests in web2py environment; ' +\
        'TEST_PATH like a/c/f (c,f optional)'
    parser.add_option('-T',
                      '--test',
                      dest='test',
                      metavar='TEST_PATH',
                      default=None,
                      help=msg)
    parser.add_option('-W',
                      '--winservice',
                      dest='winservice',
                      default='',
                      help='-W install|start|stop as Windows service')
    # -- cron -------------------------------------------------------------
    msg = 'trigger a cron run manually; usually invoked from a system crontab'
    parser.add_option('-C',
                      '--cron',
                      action='store_true',
                      dest='extcron',
                      default=False,
                      help=msg)
    msg = 'triggers the use of softcron'
    parser.add_option('--softcron',
                      action='store_true',
                      dest='softcron',
                      default=False,
                      help=msg)
    parser.add_option('-N',
                      '--no-cron',
                      action='store_true',
                      dest='nocron',
                      default=False,
                      help='do not start cron automatically')
    parser.add_option('-J',
                      '--cronjob',
                      action='store_true',
                      dest='cronjob',
                      default=False,
                      help='identify cron-initiated command')
    # -- misc ---------------------------------------------------------------
    parser.add_option('-L',
                      '--config',
                      dest='config',
                      default='',
                      help='config file')
    parser.add_option('-F',
                      '--profiler',
                      dest='profiler_filename',
                      default=None,
                      help='profiler filename')
    parser.add_option('-t',
                      '--taskbar',
                      action='store_true',
                      dest='taskbar',
                      default=False,
                      help='use web2py gui and run in taskbar (system tray)')
    parser.add_option('',
                      '--nogui',
                      action='store_true',
                      default=False,
                      dest='nogui',
                      help='text-only, no GUI')
    parser.add_option('-A',
                      '--args',
                      action='store',
                      dest='args',
                      default=None,
                      help='should be followed by a list of arguments to be passed to script, to be used with -S, -A must be the last option')
    parser.add_option('--no-banner',
                      action='store_true',
                      default=False,
                      dest='nobanner',
                      help='Do not print header banner')
    msg = 'listen on multiple addresses: "ip:port:cert:key:ca_cert;ip2:port2:cert2:key2:ca_cert2;..." (:cert:key optional; no spaces)'
    parser.add_option('--interfaces',
                      action='store',
                      dest='interfaces',
                      default=None,
                      help=msg)
    # everything after -A/--args is not for optparse: it is passed
    # verbatim to the script run via -S/-R
    if '-A' in sys.argv: k = sys.argv.index('-A')
    elif '--args' in sys.argv: k = sys.argv.index('--args')
    else: k=len(sys.argv)
    sys.argv, other_args = sys.argv[:k], sys.argv[k+1:]
    (options, args) = parser.parse_args()
    options.args = [options.run] + other_args
    global_settings.cmd_options = options
    global_settings.cmd_args = args
    if options.quiet:
        # swallow stdout and effectively silence the logger
        capture = cStringIO.StringIO()
        sys.stdout = capture
        logger.setLevel(logging.CRITICAL + 1)
    else:
        logger.setLevel(options.debuglevel)
    # -L config is imported as a module, so strip a trailing '.py'
    if options.config[-3:] == '.py':
        options.config = options.config[:-3]
    if options.cronjob:
        global_settings.cronjob = True  # tell the world
        options.nocron = True   # don't start cron jobs
        options.plain = True    # cronjobs use a plain shell
    options.folder = os.path.abspath(options.folder)
    # accept --interfaces in the form
    # "ip:port:cert:key;ip2:port2;ip3:port3:cert3:key3"
    # (no spaces; optional cert:key indicate SSL)
    if isinstance(options.interfaces, str):
        options.interfaces = [
            interface.split(':') for interface in options.interfaces.split(';')]
        for interface in options.interfaces:
            interface[1] = int(interface[1])  # numeric port
        options.interfaces = [
            tuple(interface) for interface in options.interfaces]
    if options.numthreads is not None and options.minthreads is None:
        options.minthreads = options.numthreads  # legacy
    if not options.cronjob:
        # If we have the applications package or if we should upgrade
        if not os.path.exists('applications/__init__.py'):
            write_file('applications/__init__.py', '')
        if not os.path.exists('welcome.w2p') or os.path.exists('NEWINSTALL'):
            try:
                w2p_pack('welcome.w2p','applications/welcome')
                os.unlink('NEWINSTALL')
            except:
                msg = "New installation: unable to create welcome.w2p file"
                sys.stderr.write(msg)
    return (options, args)
def start_schedulers(options):
    """Fork one scheduler worker process per application listed in -K.

    Each worker runs the app's models and then enters the scheduler
    loop.  Blocks until all workers exit; on KeyboardInterrupt every
    worker is terminated and joined.  Requires multiprocessing
    (python 2.6+).
    """
    apps = [app.strip() for app in options.scheduler.split(',')]
    try:
        from multiprocessing import Process
    except:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    processes = []
    # executed inside the app environment set up by run()
    code = "from gluon import current; current._scheduler.loop()"
    for app in apps:
        print 'starting scheduler for "%s"...' % app
        # args: (appname, import_models, startfile, bpython, plain, code)
        # per run()'s signature -- TODO confirm against gluon.shell.run
        args = (app,True,True,None,False,code)
        logging.getLogger().setLevel(logging.DEBUG)
        p = Process(target=run, args=args)
        processes.append(p)
        print "Currently running %s scheduler processes" % (len(processes))
        p.start()
        print "Processes started"
    for p in processes:
        try:
            p.join()
        except KeyboardInterrupt:
            p.terminate()
            p.join()
def start(cron=True):
    """Start the web2py server (main entry point of this script).

    Parses the command line, then dispatches in order: -T doctests,
    -K schedulers, -S shell, -C external cron, -W windows service;
    otherwise starts cron as configured, shows the Tk GUI when no
    password was supplied (and Tk is available), and finally runs the
    HTTP server until KeyboardInterrupt.
    """
    # ## get command line arguments
    (options, args) = console()
    if not options.nobanner:
        print ProgramName
        print ProgramAuthor
        print ProgramVersion
    from dal import drivers
    if not options.nobanner:
        print 'Database drivers available: %s' % ', '.join(drivers)
    # ## if -L load options from options.config file
    if options.config:
        try:
            options2 = __import__(options.config, {}, {}, '')
        except Exception:
            try:
                # Jython doesn't like the extra stuff
                options2 = __import__(options.config)
            except Exception:
                print 'Cannot import config file [%s]' % options.config
                sys.exit(1)
        # copy every attribute the config module shares with options
        for key in dir(options2):
            if hasattr(options,key):
                setattr(options,key,getattr(options2,key))
    # ## if -T run doctests (no cron)
    if hasattr(options,'test') and options.test:
        test(options.test, verbose=options.verbose)
        return
    # ## if -K
    if options.scheduler:
        try:
            start_schedulers(options)
        except KeyboardInterrupt:
            pass
        return
    # ## if -S start interactive shell (also no cron)
    if options.shell:
        if not options.args is None:
            sys.argv[:] = options.args
        run(options.shell, plain=options.plain, bpython=options.bpython,
            import_models=options.import_models, startfile=options.run)
        return
    # ## if -C start cron run (extcron) and exit
    # ## if -N or not cron disable cron in this *process*
    # ## if --softcron use softcron
    # ## use hardcron in all other cases
    if options.extcron:
        print 'Starting extcron...'
        global_settings.web2py_crontype = 'external'
        extcron = newcron.extcron(options.folder)
        extcron.start()
        extcron.join()
        return
    elif cron and not options.nocron and options.softcron:
        print 'Using softcron (but this is not very efficient)'
        global_settings.web2py_crontype = 'soft'
    elif cron and not options.nocron:
        print 'Starting hardcron...'
        global_settings.web2py_crontype = 'hard'
        newcron.hardcron(options.folder).start()
    # ## if -W install/start/stop web2py as service
    if options.winservice:
        if os.name == 'nt':
            web2py_windows_service_handler(['', options.winservice],
                                           options.config)
        else:
            print 'Error: Windows services not supported on this platform'
            sys.exit(1)
        return
    # ## if no password provided and havetk start Tk interface
    # ## or start interface if we want to put in taskbar (system tray)
    try:
        options.taskbar
    except:
        options.taskbar = False
    if options.taskbar and os.name != 'nt':
        print 'Error: taskbar not supported on this platform'
        sys.exit(1)
    root = None
    if not options.nogui:
        try:
            import Tkinter
            havetk = True
        except ImportError:
            logger.warn('GUI not available because Tk library is not installed')
            havetk = False
        if options.password == '<ask>' and havetk or options.taskbar and havetk:
            try:
                root = Tkinter.Tk()
            except:
                pass
    if root:
        # GUI mode: the dialog owns the server lifecycle from here on
        root.focus_force()
        if not options.quiet:
            presentation(root)
        master = web2pyDialog(root, options)
        signal.signal(signal.SIGTERM, lambda a, b: master.quit())
        try:
            root.mainloop()
        except:
            master.quit()
        sys.exit()
    # ## if no tk and no password, ask for a password
    if not root and options.password == '<ask>':
        options.password = raw_input('choose a password:')
    if not options.password and not options.nobanner:
        print 'no password, no admin interface'
    # ## start server
    (ip, port) = (options.ip, int(options.port))
    if not options.nobanner:
        print 'please visit:'
        print '\thttp://%s:%s' % (ip, port)
        print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid()
    server = main.HttpServer(ip=ip,
                             port=port,
                             password=options.password,
                             pid_filename=options.pid_filename,
                             log_filename=options.log_filename,
                             profiler_filename=options.profiler_filename,
                             ssl_certificate=options.ssl_certificate,
                             ssl_private_key=options.ssl_private_key,
                             ssl_ca_certificate=options.ssl_ca_certificate,
                             min_threads=options.minthreads,
                             max_threads=options.maxthreads,
                             server_name=options.server_name,
                             request_queue_size=options.request_queue_size,
                             timeout=options.timeout,
                             socket_timeout=options.socket_timeout,
                             shutdown_timeout=options.shutdown_timeout,
                             path=options.folder,
                             interfaces=options.interfaces)
    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()
    logging.shutdown()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking.
# Requires python 1.5.2 or better.
"""
Cross-platform (posix/nt) API for flock-style file locking.
Synopsis:
import portalocker
file = open(\"somefile\", \"r+\")
portalocker.lock(file, portalocker.LOCK_EX)
file.seek(12)
file.write(\"foo\")
file.close()
If you know what you're doing, you may choose to
portalocker.unlock(file)
before closing the file, but why?
Methods:
lock( file, flags )
unlock( file )
Constants:
LOCK_EX
LOCK_SH
LOCK_NB
I learned the win32 technique for locking files from sample code
provided by John Nielsen <nielsenjf@my-deja.com> in the documentation
that accompanies the win32 modules.
Author: Jonathan Feinberg <jdf@pobox.com>
Version: $Id: portalocker.py,v 1.3 2001/05/29 18:47:55 Administrator Exp $
"""
import logging
import platform
logger = logging.getLogger("web2py")

# Pick a locking backend by probing imports:
#   'gae'     -> Google App Engine: no real file locking possible
#   'posix'   -> fcntl.flock available
#   'windows' -> pywin32 LockFileEx available
# Falls through to no-op lock()/unlock() when none applies.
os_locking = None
try:
    import google.appengine
    os_locking = 'gae'
except:
    try:
        import fcntl
        os_locking = 'posix'
    except:
        try:
            import win32con
            import win32file
            import pywintypes
            os_locking = 'windows'
        except:
            pass

if os_locking == 'windows':
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_SH = 0  # the default
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
    # is there any reason not to reuse the following structure?
    __overlapped = pywintypes.OVERLAPPED()

    def lock(file, flags):
        # lock the first 0x7fff0000 bytes of the file
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.LockFileEx(hfile, flags, 0, 0x7fff0000, __overlapped)

    def unlock(file):
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.UnlockFileEx(hfile, 0, 0x7fff0000, __overlapped)
elif os_locking == 'posix':
    LOCK_EX = fcntl.LOCK_EX
    LOCK_SH = fcntl.LOCK_SH
    LOCK_NB = fcntl.LOCK_NB

    def lock(file, flags):
        fcntl.flock(file.fileno(), flags)

    def unlock(file):
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
    if platform.system() == 'Windows':
        logger.error('no file locking, you must install the win32 extensions from: http://sourceforge.net/projects/pywin32/files/')
    elif os_locking != 'gae':
        logger.debug('no file locking, this will cause problems')

    LOCK_EX = None
    LOCK_SH = None
    LOCK_NB = None

    # no-op fallbacks so callers keep working, just without locking
    def lock(file, flags):
        pass

    def unlock(file):
        pass
class LockedFile(object):
    """File wrapper that holds an advisory lock for its whole lifetime.

    Read modes ('r' in mode) take a shared lock; write/append modes
    take an exclusive lock.  'w' modes are opened as 'a' first and only
    truncated after the exclusive lock is held, so existing data is
    never destroyed while another process may still be writing.  The
    lock is released by close(), which __del__ also invokes.
    """

    def __init__(self, filename, mode='rb'):
        self.filename = filename
        self.mode = mode
        self.file = None
        if 'r' in mode:
            self.file = open(filename, mode)
            lock(self.file, LOCK_SH)
        elif 'w' in mode or 'a' in mode:
            # open as append so the truncate happens under the lock
            self.file = open(filename, mode.replace('w', 'a'))
            lock(self.file, LOCK_EX)
            if 'a' not in mode:
                self.file.seek(0)
                self.file.truncate()
        else:
            # parenthesized raise: valid on both python 2 and python 3
            # (the original "raise X, msg" form is python-2-only syntax)
            raise RuntimeError("invalid LockedFile(...,mode)")

    def read(self, size=None):
        """Read *size* bytes, or the whole file when size is None."""
        return self.file.read() if size is None else self.file.read(size)

    def readline(self):
        return self.file.readline()

    def readlines(self):
        return self.file.readlines()

    def write(self, data):
        # flush so other (locked-out) readers see the data on unlock
        self.file.write(data)
        self.file.flush()

    def close(self):
        """Release the lock and close the file; safe to call twice."""
        if self.file is not None:
            unlock(self.file)
            self.file.close()
            self.file = None

    def __del__(self):
        self.close()
if __name__=='__main__':
    # smoke test: exclusive-locked write, then shared-locked read back
    f = LockedFile('test.txt',mode='wb')
    f.write('test ok')
    f.close()
    f = LockedFile('test.txt',mode='rb')
    print f.read()
    f.close()
| Python |
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
from storage import Storage

# process-wide settings object shared by all web2py components
global_settings = Storage()
settings = global_settings  # legacy compatibility
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
__all__ = ['HTTP', 'redirect']
# HTTP status code -> reason phrase (per RFC 2616), used by HTTP.to()
# to build the WSGI status line and by HTTP.message
defined_status = {
    200: 'OK',
    201: 'CREATED',
    202: 'ACCEPTED',
    203: 'NON-AUTHORITATIVE INFORMATION',
    204: 'NO CONTENT',
    205: 'RESET CONTENT',
    206: 'PARTIAL CONTENT',
    301: 'MOVED PERMANENTLY',
    302: 'FOUND',
    303: 'SEE OTHER',
    304: 'NOT MODIFIED',
    305: 'USE PROXY',
    307: 'TEMPORARY REDIRECT',
    400: 'BAD REQUEST',
    401: 'UNAUTHORIZED',
    403: 'FORBIDDEN',
    404: 'NOT FOUND',
    405: 'METHOD NOT ALLOWED',
    406: 'NOT ACCEPTABLE',
    407: 'PROXY AUTHENTICATION REQUIRED',
    408: 'REQUEST TIMEOUT',
    409: 'CONFLICT',
    410: 'GONE',
    411: 'LENGTH REQUIRED',
    412: 'PRECONDITION FAILED',
    413: 'REQUEST ENTITY TOO LARGE',
    414: 'REQUEST-URI TOO LONG',
    415: 'UNSUPPORTED MEDIA TYPE',
    416: 'REQUESTED RANGE NOT SATISFIABLE',
    417: 'EXPECTATION FAILED',
    500: 'INTERNAL SERVER ERROR',
    501: 'NOT IMPLEMENTED',
    502: 'BAD GATEWAY',
    503: 'SERVICE UNAVAILABLE',
    504: 'GATEWAY TIMEOUT',
    505: 'HTTP VERSION NOT SUPPORTED',
}
# If web2py is executed with python2.4 we need
# to use Exception instead of BaseException
# (BaseException only exists since python 2.5)
try:
    BaseException
except NameError:
    BaseException = Exception
class HTTP(BaseException):
    """Exception carrying an arbitrary HTTP response back to the client.

    Raising HTTP(status, body, **headers) anywhere in a request aborts
    normal processing and emits the given status line, headers and body.
    """

    def __init__(self, status, body='', **headers):
        self.status = status
        self.body = body
        self.headers = headers

    def to(self, responder):
        """Send this response through the WSGI *responder* callable and
        return the body as an iterable of strings."""
        reason = defined_status.get(self.status)
        if reason is not None:
            status = '%d %s' % (self.status, reason)
        else:
            status = str(self.status) + ' '
        self.headers.setdefault('Content-Type', 'text/html; charset=UTF-8')
        body = self.body
        if status[:1] == '4':
            if not body:
                body = status
            if isinstance(body, str):
                if len(body) < 512 and self.headers['Content-Type'].startswith('text/html'):
                    body += '<!-- %s //-->' % ('x' * 512)  ### trick IE
                self.headers['Content-Length'] = len(body)
        headers = []
        for k, v in self.headers.items():
            values = v if isinstance(v, list) else [v]
            for item in values:
                headers.append((k, str(item)))
        responder(status, headers)
        if hasattr(body, '__iter__') and not isinstance(self.body, str):
            return body
        return [str(body)]

    @property
    def message(self):
        """Compose "status defined_status [web2py_error]".

        Elements that are not defined are omitted.
        """
        template = '%(status)d'
        if self.status in defined_status:
            template = '%(status)d %(defined_status)s'
        if 'web2py_error' in self.headers:
            template += ' [%(web2py_error)s]'
        values = dict(status=self.status,
                      defined_status=defined_status.get(self.status),
                      web2py_error=self.headers.get('web2py_error'))
        return template % values

    def __str__(self):
        "stringify me"
        return self.message
def redirect(location, how=303):
    """Interrupt the request by raising an HTTP redirect to *location*.

    Does nothing when *location* is empty.  CR/LF characters in the
    target are percent-encoded to prevent HTTP header injection.
    """
    if not location:
        return
    sanitized = location.replace('\r', '%0D').replace('\n', '%0A')
    raise HTTP(how,
               'You are being redirected <a href="%s">here</a>' % sanitized,
               Location=sanitized)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import logging
import os
import pdb
import Queue
import sys
logger = logging.getLogger("web2py")
class Pipe(Queue.Queue):
    """A Queue disguised as a file object, used as pdb's stdin/stdout.

    write()/flush() feed data in; read()/readline() pull it out,
    blocking until something arrives.  flush() posts a None sentinel
    marking a complete message and waits until the reader consumed it.
    """

    def __init__(self, name, mode='r', *args, **kwargs):
        self.__label = name
        Queue.Queue.__init__(self, *args, **kwargs)

    def write(self, data):
        logger.debug("debug %s writting %s" % (self.__label, data))
        self.put(data)

    def flush(self):
        # a None item marks the checkpoint (complete message)
        logger.debug("debug %s flushing..." % self.__label)
        self.put(None)
        # block until every queued item has been task_done()'d
        self.join()
        logger.debug("debug %s flush done" % self.__label)

    def read(self, count=None, timeout=None):
        logger.debug("debug %s reading..." % (self.__label, ))
        data = self.get(block=True, timeout=timeout)
        # acknowledge so a pending flush() can return
        self.task_done()
        logger.debug("debug %s read %s" % (self.__label, data))
        return data

    def readline(self):
        logger.debug("debug %s readline..." % (self.__label, ))
        return self.read()
pipe_in = Pipe('in')
pipe_out = Pipe('out')
# a pdb driven through the two queues instead of a real terminal
debugger = pdb.Pdb(completekey=None, stdin=pipe_in, stdout=pipe_out,)

def set_trace():
    "breakpoint shortcut (like pdb)"
    logger.info("DEBUG: set_trace!")
    # break in the caller's frame, not here
    debugger.set_trace(sys._getframe().f_back)

def stop_trace():
    "stop waiting for the debugger (called atexit)"
    # this should prevent communicate() from waiting forever for a
    # command result after the main thread has finished
    logger.info("DEBUG: stop_trace!")
    pipe_out.write("debug finished!")
    pipe_out.write(None)
    #pipe_out.flush()
def communicate(command=None):
    """Send *command* to the debugger (when given) and collect its reply.

    Reads chunks from pipe_out until the None sentinel and returns them
    joined into a single string.
    """
    if command is not None:
        logger.info("DEBUG: sending command %s" % command)
        pipe_in.write(command)
        #pipe_in.flush()
    result = []
    # pipe_out.read returns None as end-of-message sentinel
    for chunk in iter(pipe_out.read, None):
        result.append(chunk)
    logger.info("DEBUG: result %s" % repr(result))
    return ''.join(result)
# New debugger implementation using qdb and a web UI
import gluon.contrib.qdb as qdb
from threading import RLock

# interact_lock guards the interaction state (filename/lineno/context);
# run_lock serializes event pulls from the backend pipe
interact_lock = RLock()
run_lock = RLock()
def check_interaction(fn):
    "Decorator to clean and prevent interaction when not available"
    def check_fn(self, *args, **kwargs):
        # RLock as context manager == acquire()/try/finally release()
        with interact_lock:
            if self.filename:
                self.clear_interaction()
                return fn(self, *args, **kwargs)
    return check_fn
class WebDebugger(qdb.Frontend):
    """Qdb web2py interface.

    Frontend half of the qdb debugger: receives interaction events from
    the debugged process over a QueuePipe and exposes them (filename,
    lineno, context, exception_info) to the web UI.  interact_lock
    protects that state; run_lock serializes pipe polling.
    """

    def __init__(self, pipe, completekey='tab', stdin=None, stdout=None):
        qdb.Frontend.__init__(self, pipe)
        self.clear_interaction()

    def clear_interaction(self):
        # forget the current breakpoint position and captured context
        self.filename = None
        self.lineno = None
        self.exception_info = None
        self.context = None

    # redefine Frontend methods:
    def run(self):
        # drain all pending events coming from the debugged process
        run_lock.acquire()
        try:
            while self.pipe.poll():
                qdb.Frontend.run(self)
        finally:
            run_lock.release()

    def interaction(self, filename, lineno, line, **context):
        # store current status
        interact_lock.acquire()
        try:
            self.filename = filename
            self.lineno = lineno
            self.context = context
        finally:
            interact_lock.release()

    def exception(self, title, extype, exvalue, trace, request):
        # remember the last exception so the UI can render it
        self.exception_info = {'title': title,
                               'extype': extype, 'exvalue': exvalue,
                               'trace': trace, 'request': request}

    # stepping commands: each clears the stored interaction first
    # (via @check_interaction) and forwards to the backend
    @check_interaction
    def do_continue(self):
        qdb.Frontend.do_continue(self)

    @check_interaction
    def do_step(self):
        qdb.Frontend.do_step(self)

    @check_interaction
    def do_return(self):
        qdb.Frontend.do_return(self)

    @check_interaction
    def do_next(self):
        qdb.Frontend.do_next(self)

    @check_interaction
    def do_quit(self):
        qdb.Frontend.do_quit(self)

    def do_exec(self, statement):
        interact_lock.acquire()
        try:
            # check to see if we're inside interaction
            if self.filename:
                # avoid spurious interaction notifications:
                self.set_burst(2)
                # execute the statement in the remote debugger:
                return qdb.Frontend.do_exec(self, statement)
        finally:
            interact_lock.release()
# create the connection between threads:
parent_queue, child_queue = Queue.Queue(), Queue.Queue()
front_conn = qdb.QueuePipe("parent", parent_queue, child_queue)
child_conn = qdb.QueuePipe("child", child_queue, parent_queue)

web_debugger = WebDebugger(front_conn)                                    # frontend
qdb_debugger = qdb.Qdb(pipe=child_conn, redirect_stdio=False, skip=None)  # backend
dbg = qdb_debugger

# enable getting context (stack, globals/locals) at interaction
qdb_debugger.set_params(dict(call_stack=True, environment=True))

import gluon.main
gluon.main.global_settings.debugging = True
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This file specifically includes utilities for security.
"""
import hashlib
import hmac
import uuid
import random
import time
import os
import logging
logger = logging.getLogger("web2py")
def md5_hash(text):
    """Return the hexadecimal MD5 digest of *text*."""
    digest = hashlib.md5(text)
    return digest.hexdigest()
def simple_hash(text, digest_alg = 'md5'):
    """
    Generates hash with the given text using the specified
    digest hashing algorithm.

    digest_alg may be the name of a hashlib algorithm (e.g. 'md5',
    'sha256') or a hash constructor (e.g. hashlib.sha1).
    Raises RuntimeError when digest_alg is falsy.
    """
    if not digest_alg:
        # parenthesized raise: the original "raise X, msg" form is
        # python-2-only syntax; this works on both python 2 and 3
        raise RuntimeError("simple_hash with digest_alg=None")
    elif not isinstance(digest_alg,str):
        # a hash factory was passed in directly
        h = digest_alg(text)
    else:
        h = hashlib.new(digest_alg)
        h.update(text)
    return h.hexdigest()
def get_digest(value):
    """
    Returns a hashlib digest algorithm from a string.

    Non-string values are assumed to already be digest constructors
    and are returned unchanged.
    """
    if not isinstance(value, str):
        return value
    table = {
        'md5': hashlib.md5,
        'sha1': hashlib.sha1,
        'sha224': hashlib.sha224,
        'sha256': hashlib.sha256,
        'sha384': hashlib.sha384,
        'sha512': hashlib.sha512,
    }
    try:
        return table[value.lower()]
    except KeyError:
        raise ValueError("Invalid digest algorithm")
def hmac_hash(value, key, digest_alg='md5', salt=None):
    """HMAC *value* with *key*; a key of the form "alg:secret" selects
    the digest algorithm.  An optional *salt* is mixed in afterwards."""
    if ':' in key:
        digest_alg, key = key.split(':')
    mac = hmac.new(key, value, get_digest(digest_alg))
    if salt:
        mac.update(str(salt))
    return mac.hexdigest()
### compute constant ctokens
def initialize_urandom():
    """
    This function and the web2py_uuid follow from the following discussion:
    http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09

    At startup web2py compute a unique ID that identifies the machine by adding
    uuid.getnode() + int(time.time() * 1e3)

    This is a 48-bit number. It converts the number into 16 8-bit tokens.
    It uses this value to initialize the entropy source ('/dev/urandom') and to seed random.

    If os.random() is not supported, it falls back to using random and issues a warning.

    Returns the list of 16 integer tokens (0-255) later XORed into
    every web2py_uuid().
    """
    node_id = uuid.getnode()
    microseconds = int(time.time() * 1e6)
    # 16 bytes derived from the 48-bit machine id + time
    ctokens = [((node_id + microseconds) >> ((i%6)*8)) % 256 for i in range(16)]
    random.seed(node_id + microseconds)
    try:
        os.urandom(1)
        try:
            # try to add process-specific entropy by writing into the pool
            frandom = open('/dev/urandom','wb')
            try:
                frandom.write(''.join(chr(t) for t in ctokens))
            finally:
                frandom.close()
        except IOError:
            # works anyway (writing to /dev/urandom is best-effort)
            pass
    except NotImplementedError:
        logger.warning(
            """Cryptographically secure session management is not possible on your system because
your system does not provide a cryptographically secure entropy source.
This is not specific to web2py; consider deploying on a different operating system.""")
    return ctokens

# module-level constant tokens, computed once at import time
ctokens = initialize_urandom()
def web2py_uuid():
    """Return a version-4 UUID string.

    Works like uuid.uuid4 except that it tries to mix in os.urandom()
    output when available and XORs the result with the constant tokens
    uniquely associated with this machine (see initialize_urandom).
    See: http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09
    """
    # 16 pseudo-random byte values ('randoms' avoids shadowing the
    # builtin name 'bytes' used by the original implementation)
    randoms = [random.randrange(256) for _ in range(16)]
    try:
        # fold in /dev/urandom output if the platform provides it
        urandoms = [ord(c) for c in os.urandom(16)]
        randoms = [a ^ b for a, b in zip(randoms, urandoms)]
    except NotImplementedError:
        pass
    ## xor bytes with constant ctokens
    mixed = ''.join(chr(value ^ ctokens[i]) for i, value in enumerate(randoms))
    return str(uuid.UUID(bytes=mixed, version=4))
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Provides:
- List; like list but returns None instead of IndexOutOfBounds
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""
import cPickle
import portalocker
__all__ = ['List', 'Storage', 'Settings', 'Messages',
'StorageList', 'load_storage', 'save_storage']
class List(list):
    """
    A list whose call operator performs bounds-checked access:
    mylist(i) returns mylist[i] when 0 <= i < len(mylist), otherwise
    it returns the supplied default (None) instead of raising
    IndexError.
    """
    def __call__(self, i, default=None):
        in_range = 0 <= i < len(self)
        return self[i] if in_range else default
class Storage(dict):
    """
    A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.

    Missing keys read as None (both attribute and item access) instead
    of raising; deleting a missing attribute raises AttributeError.

    >>> o = Storage(a=1)
    >>> print o.a
    1

    >>> o['a']
    1

    >>> o.a = 2
    >>> print o['a']
    2

    >>> del o.a
    >>> print o.a
    None

    """
    def __getattr__(self, key):
        # missing attributes read as None rather than AttributeError
        return dict.get(self, key, None)

    def __setattr__(self, key, value):
        # assigning None removes the key (if present)
        if value is None:
            if key in self:
                del self[key]
        else:
            self[key] = value

    def __delattr__(self, key):
        if key in self:
            del self[key]
        else:
            # parenthesized raise: the original "raise X, msg" form is
            # python-2-only syntax; this works on both python 2 and 3
            raise AttributeError("missing key=%s" % key)

    def __getitem__(self, key):
        # missing items also read as None
        return dict.get(self, key, None)

    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'

    def __getstate__(self):
        # pickle as a plain dict
        return dict(self)

    def __setstate__(self, value):
        for (k, v) in value.items():
            self[k] = v

    def getlist(self, key):
        """Return a Storage value as a list.

        If the value is a list it will be returned as-is.
        If object is None, an empty list will be returned.
        Otherwise, [value] will be returned.

        Example output for a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlist('x')
        ['abc']
        >>> request.vars.getlist('y')
        ['abc', 'def']
        >>> request.vars.getlist('z')
        []

        """
        value = self.get(key, None)
        if isinstance(value, (list, tuple)):
            return value
        elif value is None:
            return []
        return [value]

    def getfirst(self, key):
        """Return the first or only value when given a request.vars-style key.

        If the value is a list, its first item will be returned;
        otherwise, the value will be returned as-is.

        Example output for a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getfirst('x')
        'abc'
        >>> request.vars.getfirst('y')
        'abc'
        >>> request.vars.getfirst('z')

        """
        value = self.getlist(key)
        if len(value):
            return value[0]
        return None

    def getlast(self, key):
        """Returns the last or only single value when given a request.vars-style key.

        If the value is a list, the last item will be returned;
        otherwise, the value will be returned as-is.

        Simulated output with a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlast('x')
        'abc'
        >>> request.vars.getlast('y')
        'def'
        >>> request.vars.getlast('z')

        """
        value = self.getlist(key)
        if len(value):
            return value[-1]
        return None
# builtin value types that are safe to pickle ('long' is python-2-only)
PICKABLE = (str,int,long,float,bool,list,dict,tuple,set)

def PickleableStorage(data):
    # keep only the entries whose values are plain pickleable builtins
    return Storage(dict((k,v) for (k,v) in data.items() if isinstance(v,PICKABLE)))
class StorageList(Storage):
    """
    like Storage but missing elements default to [] instead of None;
    the empty list is stored back, so repeated access returns the
    same (mutable) list object.
    """
    def __getattr__(self, key):
        if key not in self:
            self[key] = []
        return self[key]
def load_storage(filename):
    """Unpickle *filename* (opened under a shared lock) into a Storage."""
    handle = None
    try:
        handle = portalocker.LockedFile(filename, 'rb')
        payload = cPickle.load(handle)
    finally:
        if handle:
            handle.close()
    return Storage(payload)
def save_storage(storage, filename):
    """Pickle *storage* (as a plain dict) to *filename* under an exclusive lock."""
    handle = None
    try:
        handle = portalocker.LockedFile(filename, 'wb')
        cPickle.dump(dict(storage), handle)
    finally:
        if handle:
            handle.close()
class Settings(Storage):
    """Storage that can freeze its key set and/or its values.

    When self.lock_keys is true, assigning to a key not already present
    raises SyntaxError; when self.lock_values is true, any assignment
    (other than to the lock flags themselves) raises SyntaxError.
    """
    def __setattr__(self, key, value):
        # parenthesized raises: the original "raise X, msg" form is
        # python-2-only syntax; this works on both python 2 and 3
        if key != 'lock_keys' and self.get('lock_keys', None)\
                and not key in self:
            raise SyntaxError('setting key \'%s\' does not exist' % key)
        if key != 'lock_values' and self.get('lock_values', None):
            raise SyntaxError('setting value cannot be changed: %s' % key)
        self[key] = value
class Messages(Storage):
    """Settings-like Storage whose string values are translated via T.

    Key/value locking mirrors Settings; attribute reads pass plain
    strings through the stored translator self['T'].
    """
    def __init__(self, T):
        self['T'] = T

    def __setattr__(self, key, value):
        # parenthesized raises: the original "raise X, msg" form is
        # python-2-only syntax; this works on both python 2 and 3
        if key != 'lock_keys' and self.get('lock_keys', None)\
                and not key in self:
            raise SyntaxError('setting key \'%s\' does not exist' % key)
        if key != 'lock_values' and self.get('lock_values', None):
            raise SyntaxError('setting value cannot be changed: %s' % key)
        self[key] = value

    def __getattr__(self, key):
        value = self[key]
        if isinstance(value, str):
            # translate plain strings through the stored translator
            return str(self['T'](value))
        return value
if __name__ == '__main__':
    # run the doctests embedded in the docstrings above
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import re

# Pattern to find tables defined in a model file, e.g.
# db.define_table('person', ...) -> group 'name' == 'person'.
# All patterns below use raw strings: the originals relied on invalid
# escape sequences like '\s' passing through unprocessed, which raises
# warnings/errors on modern Pythons while matching the same text.
regex_tables = re.compile(
    r"""^[\w]+\.define_table\(\s*['"](?P<name>[\w_]+)['"]""",
    flags=re.M)

# pattern to find exposed functions in a controller: a top-level
# zero-argument def whose name is public or single-underscore private
regex_expose = re.compile(
    r'^def\s+(?P<name>(?:[a-zA-Z0-9]\w*)|(?:_[a-zA-Z0-9]\w*))\(\)\s*:',
    flags=re.M)

# pattern to find {{include 'file'}} directives in views
regex_include = re.compile(
    r"""(?P<all>\{\{\s*include\s+['"](?P<name>[^'"]*)['"]\s*\}\})""")

# pattern to find {{extend 'file'}} directives in views
regex_extend = re.compile(
    r"""^\s*(?P<all>\{\{\s*extend\s+['"](?P<name>[^'"]+)['"]\s*\}\})""",
    re.MULTILINE)
| Python |
#!/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Thanks to
* Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support
* Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support
* Denes
* Chris Clark
* clach05
* Denes Lengyel
* and many others who have contributed to current and previous versions
This file contains the DAL support for many relational databases,
including:
- SQLite
- MySQL
- Postgres
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)
Example of usage:
>>> # from dal import DAL, Field
### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('mysql://a:b@localhost/x', 'sqlite://storage.sqlite'), folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name, groupby=person.name, limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported field types:
id string text boolean integer double decimal password upload blob time date datetime
Supported DAL URI strings:
'sqlite://test.db'
'sqlite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2://DSN=dsn;UID=user;PWD=pass'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'google:datastore' # for google app engine datastore
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
For more info:
help(DAL)
help(Field)
"""
###################################################################################
# this file only exposes DAL and Field
###################################################################################
__all__ = ['DAL', 'Field']

# default upper bound for CHAR column widths (adapters may override)
MAXCHARLENGTH = 2**15 # not quite but reasonable default max char length

# default lengths per field type, used when Field(length=...) is omitted
DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}

# number of timing entries retained (presumably a cap on collected query
# timings -- used elsewhere in the DAL; verify against consumer code)
TIMINGSSIZE = 100
import re
import sys
import locale
import os
import types
import cPickle
import datetime
import threading
import time
import cStringIO
import csv
import cgi
import copy
import socket
import logging
import copy_reg
import base64
import shutil
import marshal
import decimal
import struct
import urllib
import hashlib
import uuid
import glob
import traceback
# every "callable" object type; used to detect callable defaults/computed values
CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)
###################################################################################
# following checks allows running of dal without web2py as a standalone module
###################################################################################
try:
    from utils import web2py_uuid
except ImportError:
    # standalone fallback: plain uuid4 strings instead of web2py's helper
    import uuid
    def web2py_uuid(): return str(uuid.uuid4())

# optional web2py components -- each have_* flag records availability
try:
    import portalocker
    have_portalocker = True
except ImportError:
    have_portalocker = False
try:
    import serializers
    have_serializers = True
except ImportError:
    have_serializers = False
try:
    import validators
    have_validators = True
except ImportError:
    have_validators = False
logger = logging.getLogger("web2py.dal")

# unique sentinel for "argument not supplied" (identity-compared; a lambda
# guarantees a distinct object)
DEFAULT = lambda: 0

sql_locker = threading.RLock()   # guards the shared connection pools
thread = threading.local()       # per-thread adapter bookkeeping

# internal representation of tables with field
# <table>.<field>; tables and fields may only be [a-zA-Z0-9_]
# (all patterns converted to raw strings: the originals relied on invalid
# escape sequences such as '\w' passing through unprocessed)
regex_type = re.compile(r'^([\w\_\:]+)')
regex_dbname = re.compile(r'^(\w+)(\:\w+)*')
regex_table_field = re.compile(r'^([\w_]+)\.([\w_]+)$')
# matches uploaded-file names: <table>.<field>.<uuidkey>.<name>.<ext>
regex_content = re.compile(r'(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$')
regex_cleanup_fn = re.compile(r'[\'"\s;]+')
# splits list:* serializations on single pipes (|) but not doubled ones (||)
string_unpack = re.compile(r'(?<!\|)\|(?!\|)')
regex_python_keywords = re.compile(r'^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
regex_select_as_parser = re.compile(r"\s+AS\s+(\S+)")
# list of drivers will be built on the fly
# and lists only what is available
drivers = []
try:
from new import classobj
from google.appengine.ext import db as gae
from google.appengine.api import namespace_manager, rdbms
from google.appengine.api.datastore_types import Key ### needed for belongs on ID
from google.appengine.ext.db.polymodel import PolyModel
drivers.append('google')
except ImportError:
pass
if not 'google' in drivers:
try:
from pysqlite2 import dbapi2 as sqlite3
drivers.append('pysqlite2')
except ImportError:
try:
from sqlite3 import dbapi2 as sqlite3
drivers.append('SQLite3')
except ImportError:
logger.debug('no sqlite3 or pysqlite2.dbapi2 driver')
try:
import contrib.pymysql as pymysql
drivers.append('pymysql')
except ImportError:
logger.debug('no pymysql driver')
try:
import psycopg2
from psycopg2.extensions import adapt as psycopg2_adapt
drivers.append('psycopg2')
except ImportError:
logger.debug('no psycopg2 driver')
try:
# first try contrib driver, then from site-packages (if installed)
try:
import contrib.pg8000.dbapi as pg8000
except ImportError:
import pg8000.dbapi as pg8000
drivers.append('pg8000')
except ImportError:
logger.debug('no pg8000 driver')
try:
import cx_Oracle
drivers.append('Oracle')
except ImportError:
logger.debug('no cx_Oracle driver')
try:
import pyodbc
drivers.append('MSSQL/DB2/Teradata')
except ImportError:
logger.debug('no MSSQL/DB2/Teradata driver')
try:
import kinterbasdb
drivers.append('Interbase')
except ImportError:
logger.debug('no kinterbasdb driver')
try:
import firebirdsql
drivers.append('Firebird')
except ImportError:
logger.debug('no Firebird driver')
try:
import informixdb
drivers.append('Informix')
logger.warning('Informix support is experimental')
except ImportError:
logger.debug('no informixdb driver')
try:
import sapdb
drivers.append('SAPDB')
logger.warning('SAPDB support is experimental')
except ImportError:
logger.debug('no sapdb driver')
try:
import cubriddb
drivers.append('Cubrid')
logger.warning('Cubrid support is experimental')
except ImportError:
logger.debug('no cubriddb driver')
try:
from com.ziclix.python.sql import zxJDBC
import java.sql
# Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
from org.sqlite import JDBC # required by java.sql; ensure we have it
drivers.append('zxJDBC')
logger.warning('zxJDBC support is experimental')
is_jdbc = True
except ImportError:
logger.debug('no zxJDBC driver')
is_jdbc = False
try:
import ingresdbi
drivers.append('Ingres')
except ImportError:
logger.debug('no Ingres driver')
# NOTE could try JDBC.......
try:
import couchdb
drivers.append('CouchDB')
except ImportError:
logger.debug('no couchdb driver')
try:
import pymongo
drivers.append('mongoDB')
except:
logger.debug('no mongoDB driver')
try:
import imaplib
drivers.append('IMAP')
except:
logger.debug('could not import imaplib')
# Each rule is (match_pattern, substitution_pattern, replacement): the first
# rule whose match_pattern is found in the word is applied; the final
# catch-all rule simply appends 's'.  Patterns use raw strings (the
# originals contained invalid escape sequences such as 'l\1aves').
PLURALIZE_RULES = [
    (re.compile(r'child$'), re.compile(r'child$'), 'children'),
    (re.compile(r'oot$'), re.compile(r'oot$'), 'eet'),
    (re.compile(r'ooth$'), re.compile(r'ooth$'), 'eeth'),
    (re.compile(r'l[eo]af$'), re.compile(r'l([eo])af$'), r'l\1aves'),
    (re.compile(r'sis$'), re.compile(r'sis$'), 'ses'),
    (re.compile(r'man$'), re.compile(r'man$'), 'men'),
    (re.compile(r'ife$'), re.compile(r'ife$'), 'ives'),
    (re.compile(r'eau$'), re.compile(r'eau$'), 'eaux'),
    (re.compile(r'lf$'), re.compile(r'lf$'), 'lves'),
    (re.compile(r'[sxz]$'), re.compile(r'$'), 'es'),
    (re.compile(r'[^aeioudgkprt]h$'), re.compile(r'$'), 'es'),
    (re.compile(r'(qu|[^aeiou])y$'), re.compile(r'y$'), 'ies'),
    (re.compile(r'$'), re.compile(r'$'), 's'),
]

def pluralize(singular, rules=PLURALIZE_RULES):
    """Return the English plural of *singular* using the first matching rule
    in *rules* (the last rule matches everything, so a result is guaranteed)."""
    for re_search, re_sub, replace in rules:
        if re_search.search(singular):
            return re_sub.sub(replace, singular)
def OR(a, b):
    "Reducer helper: fold two queries together with the | (OR) operator."
    combined = a | b
    return combined

def AND(a, b):
    "Reducer helper: fold two queries together with the & (AND) operator."
    combined = a & b
    return combined
if 'google' in drivers:

    is_jdbc = False

    class GAEDecimalProperty(gae.Property):
        """
        GAE decimal implementation: stores decimal.Decimal values in the
        datastore as strings, quantized to a fixed number of decimal places.
        """
        data_type = decimal.Decimal

        def __init__(self, precision, scale, **kwargs):
            # BUGFIX: the original called
            #   super(GAEDecimalProperty, self).__init__(self, **kwargs)
            # passing `self` as an extra positional argument, which lands in
            # gae.Property.__init__'s verbose_name slot; only **kwargs
            # should be forwarded.
            super(GAEDecimalProperty, self).__init__(**kwargs)
            # quantization template, e.g. scale=2 -> Decimal('1.00');
            # `precision` is accepted for symmetry with SQL decimal(p,s)
            # but is not used by the datastore representation
            self.round = decimal.Decimal('1.' + '0' * scale)

        def get_value_for_datastore(self, model_instance):
            # persist as a string; None/'' are treated as missing
            value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance)
            if value is None or value == '':
                return None
            else:
                return str(value)

        def make_value_from_datastore(self, value):
            # parse the stored string back into a quantized Decimal
            if value is None or value == '':
                return None
            else:
                return decimal.Decimal(value).quantize(self.round)

        def validate(self, value):
            value = super(GAEDecimalProperty, self).validate(value)
            if value is None or isinstance(value, decimal.Decimal):
                return value
            elif isinstance(value, basestring):
                return decimal.Decimal(value)
            raise gae.BadValueError("Property %s must be a Decimal or string." % self.name)
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################
class ConnectionPool(object):
    """Per-URI DB-API connection pooling, shared by all adapters."""

    pools = {}                      # uri -> list of idle pooled connections
    check_active_connection = True  # probe pooled connections with SELECT 1 before reuse

    @staticmethod
    def set_folder(folder):
        # remember the working folder in thread-local storage
        thread.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread
    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        if not hasattr(thread, 'instances'):
            return
        while thread.instances:
            instance = thread.instances.pop()
            if action:
                # e.g. 'commit' or 'rollback' on every open adapter
                getattr(instance, action)()
            # ## if you want pools, recycle this connection
            really = True
            if instance.pool_size:
                sql_locker.acquire()
                pool = ConnectionPool.pools[instance.uri]
                if len(pool) < instance.pool_size:
                    # park the connection for later reuse instead of closing
                    pool.append(instance.connection)
                    really = False
                sql_locker.release()
            if really:
                getattr(instance, 'close')()
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        if hasattr(thread,'folder'):
            self.folder = thread.folder
        else:
            self.folder = thread.folder = ''

        # Creating the folder if it does not exist
        # (dead code: deliberately disabled by the `if False` guard)
        if False and self.folder and not os.path.exists(self.folder):
            os.mkdir(self.folder)

    def pool_connection(self, f, cursor=True):
        """
        this function defines: self.connection and self.cursor (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if not self.pool_size:
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            while True:
                sql_locker.acquire()
                if not uri in ConnectionPool.pools:
                    ConnectionPool.pools[uri] = []
                if ConnectionPool.pools[uri]:
                    self.connection = ConnectionPool.pools[uri].pop()
                    sql_locker.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            # cheap liveness probe; dead pooled connections
                            # are silently discarded and the loop retries
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass
                else:
                    # pool empty: create a fresh connection
                    sql_locker.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        if not hasattr(thread,'instances'):
            thread.instances = []
        thread.instances.append(self)
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################
class BaseAdapter(ConnectionPool):
    """Generic SQL adapter; dialect adapters override these attributes/methods."""

    driver = None                             # DB-API driver module (set by subclasses)
    maxcharlength = MAXCHARLENGTH             # widest supported CHAR column
    commit_on_alter_table = False             # True for engines that need commit per DDL
    support_distributed_transaction = False
    uploads_in_blob = False
    can_select_for_update = True

    # mapping of DAL field types to SQL column-type templates
    # (%(length)s, %(foreign_key)s etc. are filled in at create time)
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }
def adapt(self, obj):
return "'%s'" % obj.replace("'", "''")
    def integrity_error(self):
        # exception class raised by the driver on constraint violations
        return self.driver.IntegrityError

    def operational_error(self):
        # exception class raised by the driver on operational failures
        return self.driver.OperationalError

    def file_exists(self, filename):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        return os.path.exists(filename)
def file_open(self, filename, mode='rb', lock=True):
"""
to be used ONLY for files that on GAE may not be on filesystem
"""
if have_portalocker and lock:
fileobj = portalocker.LockedFile(filename,mode)
else:
fileobj = open(filename,mode)
return fileobj
    def file_close(self, fileobj):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        # tolerates None/falsy handles (e.g. when no logfile was opened)
        if fileobj:
            fileobj.close()

    def file_delete(self, filename):
        # remove a file from the (possibly virtual) filesystem
        os.unlink(filename)
    def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """
        Base adapter constructor: records connection parameters and installs
        a do-nothing Dummy connection/cursor (real adapters replace them on
        connect).  NOTE(review): driver_args/adapter_args use mutable {}
        defaults; they are unused here, but subclasses must not mutate them
        in place.
        """
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        class Dummy(object):
            # stand-in connection: fixed lastrowid, any method call -> []
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()
def sequence_name(self,tablename):
return '%s_sequence' % tablename
def trigger_name(self,tablename):
return '%s_sequence' % tablename
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """
        Build (and, unless migrate is False, execute) the CREATE TABLE
        statement for *table*, maintaining the per-table migration metadata
        file (<urihash>_<table>.table) and the sql.log file.  If the
        metadata file already exists and differs, delegates to
        migrate_table().  Returns the generated SQL string.
        """
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        sql_fields = {}      # metadata used later to detect schema changes
        sql_fields_aux = {}  # same, but with DEFAULT clauses baked in (CREATE only)
        TFK = {}             # table-level (multi-column) foreign keys
        tablename = table._tablename
        sortable = 0
        for field in table:
            sortable += 1
            k = field.name
            if isinstance(field.type,SQLCustomType):
                ftype = field.type.native or field.type.type
            elif field.type.startswith('reference'):
                referenced = field.type[10:].strip()
                constraint_name = self.constraint_name(tablename, field.name)
                if hasattr(table,'_primarykey'):
                    rtablename,rfieldname = referenced.split('.')
                    rtable = table._db[rtablename]
                    rfield = rtable[rfieldname]
                    # must be PK reference or unique
                    if rfieldname in rtable._primarykey or rfield.unique:
                        ftype = self.types[rfield.type[:9]] % dict(length=rfield.length)
                        # multicolumn primary key reference?
                        if not rfield.unique and len(rtable._primarykey)>1 :
                            # then it has to be a table level FK
                            if rtablename not in TFK:
                                TFK[rtablename] = {}
                            TFK[rtablename][rfieldname] = field.name
                        else:
                            ftype = ftype + \
                                self.types['reference FK'] %dict(\
                                constraint_name=constraint_name,
                                table_name=tablename,
                                field_name=field.name,
                                foreign_key='%s (%s)'%(rtablename, rfieldname),
                                on_delete_action=field.ondelete)
                else:
                    # make a guess here for circular references
                    id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id'
                    ftype = self.types[field.type[:9]]\
                        % dict(table_name=tablename,
                               field_name=field.name,
                               constraint_name=constraint_name,
                               foreign_key=referenced + ('(%s)' % id_fieldname),
                               on_delete_action=field.ondelete)
            elif field.type.startswith('list:reference'):
                ftype = self.types[field.type[:14]]
            elif field.type.startswith('decimal'):
                # parse 'decimal(p,s)'
                precision, scale = map(int,field.type[8:-1].split(','))
                ftype = self.types[field.type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field.type.startswith('geo'):
                srid = self.srid
                geotype, parms = field.type[:-1].split('(')
                if not geotype in self.types:
                    raise SyntaxError, 'Field: unknown field type: %s for %s' % \
                        (field.type, field.name)
                ftype = self.types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    # PostGIS columns are added via AddGeometryColumn after CREATE
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % self.types[geotype]
                    ftype = ftype % dict(schema=schema, tablename=tablename,
                                         fieldname=field.name, srid=srid, dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field.type in self.types:
                raise SyntaxError, 'Field: unknown field type: %s for %s' % \
                    (field.type, field.name)
            else:
                ftype = self.types[field.type]\
                    % dict(length=field.length)
            if not field.type.startswith('id') and not field.type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
            # add to list of fields
            sql_fields[field.name] = dict(sortable=sortable,
                                          type=str(field.type),
                                          sql=ftype)
            if isinstance(field.default,(str,int,float)):
                # Caveat: sql_fields and sql_fields_aux differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger a migration simply
                # because a default value changes.
                not_null = self.NOT_NULL(field.default, field.type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field.name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and field.type.startswith('geom')):
                fields.append('%s %s' %(field.name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n '.join(fields)
        # append table-level foreign keys collected above
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = table._db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n ' + \
                self.types['reference TFK'] %\
                dict(table_name=tablename,
                     field_name=', '.join(fkeys),
                     foreign_table=rtablename,
                     foreign_key=', '.join(pkeys),
                     on_delete_action=field.ondelete)

        if hasattr(table,'_primarykey'):
            query = '''CREATE TABLE %s(\n %s,\n %s) %s''' % \
                (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = '''CREATE TABLE %s(\n %s\n)%s''' % \
                (tablename, fields, other)

        # resolve the folder holding the .table migration files
        if self.uri.startswith('sqlite:///'):
            path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = os.path.join(dbpath, migrate)
        else:
            table._dbt = os.path.join(dbpath, '%s_%s.table' \
                % (table._db._uri_hash, tablename))
        if table._dbt:
            table._loggername = os.path.join(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            # table metadata missing: actually create the table
            if table._dbt:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now, after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            # metadata exists: load it and migrate if the schema changed
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = cPickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError, 'File %s appears corrupted' % table._dbt
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        """
        Diff the current field metadata (*sql_fields*) against the metadata
        saved on the previous run (*sql_fields_old*) and issue ALTER TABLE
        statements for added/removed/changed columns, rewriting the .table
        metadata file as each step commits.
        """
        tablename = table._tablename
        def fix(item):
            # normalize a metadata entry: lowercase the field name and wrap
            # legacy plain-SQL values into the current dict format
            k,v=item
            if not isinstance(v,dict):
                v=dict(type='unkown',sql=v)
            return k.lower(),v
        ### make sure all field names are lower case to avoid conflicts
        sql_fields = dict(map(fix,sql_fields.items()))
        sql_fields_old = dict(map(fix,sql_fields_old.items()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.items()))

        # union of current and previous field names
        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        # mssql chains extra ADDs as separate ALTER TABLE statements
        if self.dbengine == 'mssql':
            new_add = '; ALTER TABLE %s ADD ' % tablename
        else:
            new_add = ', ADD '

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                # new column
                sql_fields_current[key] = sql_fields[key]
                if self.dbengine in ('postgres',) and \
                   sql_fields[key]['type'].startswith('geometry'):
                    # 'sql' == ftype in sql
                    query = [ sql_fields[key]['sql'] ]
                else:
                    query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine == 'sqlite':
                # sqlite cannot alter column types: only update metadata
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                # dropped column
                del sql_fields_current[key]
                ftype = sql_fields_old[key]['type']
                if self.dbengine in ('postgres',) and ftype.startswith('geometry'):
                    geotype, parms = ftype[:-1].split('(')
                    schema = parms.split(',')[0]
                    query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" % \
                        dict(schema=schema, table=tablename, field=key,) ]
                elif not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                  and not isinstance(table[key].type, SQLCustomType) \
                  and not (table[key].type.startswith('reference') and \
                      sql_fields[key]['sql'].startswith('INT,') and \
                      sql_fields_old[key]['sql'].startswith('INT NOT NULL,')):
                # column type changed: copy data through a __tmp column
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                # logical type changed but SQL is identical: metadata only
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                table._db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # Caveat: mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if table._db._adapter.commit_on_alter_table:
                            table._db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            cPickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                            logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                # no SQL needed, but the .table metadata must be refreshed
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)

        if metadata_change and \
           not (query and self.dbengine in ('mysql','oracle','firebird')):
            table._db.commit()
            tfile = self.file_open(table._dbt, 'w')
            cPickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
    # --- SQL fragment builders: each returns a snippet of SQL text ---

    def LOWER(self, first):
        return 'LOWER(%s)' % self.expand(first)

    def UPPER(self, first):
        return 'UPPER(%s)' % self.expand(first)

    def EXTRACT(self, first, what):
        # extract a date/time component, e.g. EXTRACT(year FROM col)
        return "EXTRACT(%s FROM %s)" % (what, self.expand(first))

    def AGGREGATE(self, first, what):
        # generic aggregate call, e.g. COUNT(col), MAX(col)
        return "%s(%s)" % (what, self.expand(first))

    def JOIN(self):
        return 'JOIN'

    def LEFT_JOIN(self):
        return 'LEFT JOIN'

    def RANDOM(self):
        # dialect-specific random function (overridden by adapters)
        return 'Random()'

    def NOT_NULL(self, default, field_type):
        return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)

    def COALESCE(self, first, second):
        # COALESCE(first, *second) -- second is a sequence of fallbacks
        expressions = [self.expand(first)]+[self.expand(e) for e in second]
        return 'COALESCE(%s)' % ','.join(expressions)

    def COALESCE_ZERO(self, first):
        return 'COALESCE(%s,0)' % self.expand(first)

    def RAW(self, first):
        # pass-through: first is already SQL text
        return first

    def ALLOW_NULL(self):
        # suffix for nullable columns ('' means nullable by default)
        return ''

    def SUBSTRING(self, field, parameters):
        # parameters = (start, length)
        return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self, key):
        return 'PRIMARY KEY(%s)' % key
def _drop(self, table, mode):
return ['DROP TABLE %s;' % table]
    def drop(self, table, mode=''):
        """Execute DROP TABLE, unregister the table from the DAL, and
        delete its migration metadata file."""
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                logfile.write(query + '\n')
            self.execute(query)
        table._db.commit()
        # unregister from the DAL instance
        del table._db[table._tablename]
        del table._db.tables[table._db.tables.index(table._tablename)]
        table._db._update_referenced_by(table._tablename)
        if table._dbt:
            self.file_delete(table._dbt)
            logfile.write('success!\n')
def _insert(self, table, fields):
keys = ','.join(f.name for f,v in fields)
values = ','.join(self.expand(v,f.type) for f,v in fields)
return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
    def insert(self, table, fields):
        """
        Execute an INSERT; returns None on an integrity error, a dict of
        primary-key values for keyed tables, or a Reference to the new id.
        """
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception, e:
            # NOTE(review): calls self.integrity_error_class() while this
            # class defines integrity_error(); presumably the *_class
            # variant is defined elsewhere in the adapter hierarchy --
            # verify before relying on this path.
            if isinstance(e,self.integrity_error_class()):
                return None
            raise e
        if hasattr(table,'_primarykey'):
            # keyed table: return the primary-key values that were inserted
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            return id
        # wrap the integer id in a lazy Reference to the new record
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
def bulk_insert(self, table, items):
return [self.insert(table,item) for item in items]
    # --- logical and pattern-matching operators (SQL WHERE fragments) ---

    def NOT(self, first):
        return '(NOT %s)' % self.expand(first)

    def AND(self, first, second):
        return '(%s AND %s)' % (self.expand(first), self.expand(second))

    def OR(self, first, second):
        return '(%s OR %s)' % (self.expand(first), self.expand(second))

    def BELONGS(self, first, second):
        if isinstance(second, str):
            # second is a nested-select string; strip its trailing ';'
            return '(%s IN (%s))' % (self.expand(first), second[:-1])
        elif second==[] or second==():
            # empty candidate set: always-false condition
            return '(1=0)'
        items = ','.join(self.expand(item, first.type) for item in second)
        return '(%s IN (%s))' % (self.expand(first), items)

    def REGEXP(self, first, second):
        "regular expression operator"
        raise NotImplementedError

    def LIKE(self, first, second):
        "case sensitive like operator"
        raise NotImplementedError

    def ILIKE(self, first, second):
        "case in-sensitive like operator"
        # base adapter falls back to plain LIKE
        return '(%s LIKE %s)' % (self.expand(first), self.expand(second, 'string'))

    def STARTSWITH(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first), self.expand(second+'%', 'string'))

    def ENDSWITH(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first), self.expand('%'+second, 'string'))

    def CONTAINS(self, first, second):
        # list:* items are stored |-delimited, hence the '%|...|%' pattern;
        # NOTE(review): if first.type is neither string/text nor list:*,
        # `key` is never bound and this raises UnboundLocalError -- verify
        # callers restrict CONTAINS to those field types.
        if first.type in ('string', 'text'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        return '(%s LIKE %s)' % (self.expand(first),self.expand(key,'string'))
    # --- comparison and arithmetic operators ---

    def EQ(self, first, second=None):
        # comparing with None maps to IS NULL
        if second is None:
            return '(%s IS NULL)' % self.expand(first)
        return '(%s = %s)' % (self.expand(first), self.expand(second, first.type))

    def NE(self, first, second=None):
        if second is None:
            return '(%s IS NOT NULL)' % self.expand(first)
        return '(%s <> %s)' % (self.expand(first), self.expand(second, first.type))

    def LT(self,first,second=None):
        # ordering comparisons against None are meaningless in SQL
        if second is None:
            raise RuntimeError, "Cannot compare %s < None" % first
        return '(%s < %s)' % (self.expand(first),self.expand(second,first.type))

    def LE(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s <= None" % first
        return '(%s <= %s)' % (self.expand(first),self.expand(second,first.type))

    def GT(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s > None" % first
        return '(%s > %s)' % (self.expand(first),self.expand(second,first.type))

    def GE(self,first,second=None):
        if second is None:
            raise RuntimeError, "Cannot compare %s >= None" % first
        return '(%s >= %s)' % (self.expand(first),self.expand(second,first.type))

    def ADD(self, first, second):
        return '(%s + %s)' % (self.expand(first), self.expand(second, first.type))

    def SUB(self, first, second):
        return '(%s - %s)' % (self.expand(first), self.expand(second, first.type))

    def MUL(self, first, second):
        return '(%s * %s)' % (self.expand(first), self.expand(second, first.type))

    def DIV(self, first, second):
        return '(%s / %s)' % (self.expand(first), self.expand(second, first.type))

    def MOD(self, first, second):
        # %% escapes the literal % in the format string
        return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type))

    def AS(self, first, second):
        # column/expression aliasing: <expr> AS <name>
        return '%s AS %s' % (self.expand(first), second)

    def ON(self, first, second):
        # join condition: <table> ON <query>
        return '%s ON %s' % (self.expand(first), self.expand(second))

    def INVERT(self, first):
        # descending-order marker for orderby
        return '%s DESC' % self.expand(first)

    def COMMA(self, first, second):
        return '%s, %s' % (self.expand(first), self.expand(second))
    def expand(self, expression, field_type=None):
        """
        Render a Field/Expression/Query/literal into SQL text, dispatching
        on the expression's operator (which is one of the methods above).
        """
        if isinstance(expression, Field):
            return str(expression)
        elif isinstance(expression, (Expression, Query)):
            # binary, unary, callable or raw-string operator, in that order
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            elif not isinstance(expression.op, str):
                return expression.op()
            else:
                return '(%s)' % expression.op
        elif field_type:
            # plain literal with a known field type: convert via represent()
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) for item in expression)
        else:
            return str(expression)
    def alias(self, table, alias):
        """
        Given a table object, makes a new table object
        with alias name.
        """
        other = copy.copy(table)
        other['_ot'] = other._tablename   # remember the original table name
        other['ALL'] = SQLALL(other)
        other['_tablename'] = alias
        for fieldname in other.fields:
            # re-bind a copy of each field to the aliased table
            other[fieldname] = copy.copy(other[fieldname])
            other[fieldname]._tablename = alias
            other[fieldname].tablename = alias
            other[fieldname].table = other
        # register the alias on the DAL so db[alias] resolves
        table._db[alias] = other
        return other
def _truncate(self, table, mode=''):
tablename = table._tablename
return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
    def truncate(self, table, mode= ' '):
        """Empty *table*, logging the executed statements to the table's
        migration logfile (or a no-op sink when migrations are disabled),
        and commit.

        NOTE(review): the default mode is ' ' (a single space), which
        injects a stray space into the generated SQL; harmless, but it
        looks unintentional -- confirm before changing.
        """
        # Prepare functions "write_to_logfile" and "close_logfile"
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        else:
            # migrations disabled: swallow the log output
            class Logfile(object):
                def write(self, value):
                    pass
                def close(self):
                    pass
            logfile = Logfile()
        try:
            queries = table._db._adapter._truncate(table, mode)
            for query in queries:
                logfile.write(query + '\n')
                self.execute(query)
            table._db.commit()
            logfile.write('success!\n')
        finally:
            # always release the logfile handle, even if execute() raised
            logfile.close()
def _update(self, tablename, query, fields):
if query:
if use_common_filters(query):
query = self.common_filter(query, [tablename])
sql_w = ' WHERE ' + self.expand(query)
else:
sql_w = ''
sql_v = ','.join(['%s=%s' % (field.name, self.expand(value, field.type)) \
for (field, value) in fields])
return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
def update(self, tablename, query, fields):
sql = self._update(tablename, query, fields)
self.execute(sql)
try:
return self.cursor.rowcount
except:
return None
def _delete(self, tablename, query):
if query:
if use_common_filters(query):
query = self.common_filter(query, [tablename])
sql_w = ' WHERE ' + self.expand(query)
else:
sql_w = ''
return 'DELETE FROM %s%s;' % (tablename, sql_w)
    def delete(self, tablename, query):
        """Execute a DELETE and return the number of deleted rows (or
        None when the driver has no rowcount).

        SQLite does not enforce ON DELETE CASCADE here, so for SQLite the
        ids about to be deleted are captured first and any referencing
        rows with ondelete=='CASCADE' are deleted manually afterwards.
        """
        sql = self._delete(tablename, query)
        ### special code to handle CASCADE in SQLite
        db = self.db
        table = db[tablename]
        if self.dbengine=='sqlite' and table._referenced_by:
            # snapshot the ids before they disappear
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            # NOTE(review): bare except -- presumably for drivers lacking
            # .rowcount; also hides unrelated errors
            counter = None
        ### special code to handle CASCADE in SQLite
        if self.dbengine=='sqlite' and counter:
            # `tablename` is deliberately rebound here while walking referees
            for tablename,fieldname in table._referenced_by:
                f = db[tablename][fieldname]
                if f.type=='reference '+table._tablename and f.ondelete=='CASCADE':
                    db(db[tablename][fieldname].belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite
        return counter
def get_table(self, query):
tablenames = self.tables(query)
if len(tablenames)==1:
return tablenames[0]
elif len(tablenames)<1:
raise RuntimeError, "No table selected"
else:
raise RuntimeError, "Too many tables selected"
def expand_all(self, fields, tablenames):
new_fields = []
for item in fields:
if isinstance(item,SQLALL):
new_fields += item.table
else:
new_fields.append(item)
# ## if no fields specified take them all from the requested tables
if not new_fields:
for table in tablenames:
for field in self.db[table]:
new_fields.append(field)
return new_fields
    def _select(self, query, fields, attributes):
        """Build the SQL SELECT statement for *query*/*fields*.

        Supported attributes: orderby, groupby, limitby, required, cache,
        left, distinct, having, join, for_update.  Side effect: caches the
        selected column names in self._colnames for the parse() step.
        """
        for key in set(attributes.keys())-set(('orderby', 'groupby', 'limitby',
                                               'required', 'cache', 'left',
                                               'distinct', 'having', 'join',
                                               'for_update')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        tablenames = self.tables(query)
        # resolve "table.field" strings into Field objects and collect
        # every table touched by the selected fields
        for field in fields:
            if isinstance(field, basestring) and regex_table_field.match(field):
                tn,fn = field.split('.')
                field = self.db[tn][fn]
            for tablename in self.tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)
        if use_common_filters(query):
            query = self.common_filter(query,tablenames)
        if len(tablenames) < 1:
            raise SyntaxError, 'Set: no tables selected'
        sql_f = ', '.join(map(self.expand, fields))
        self._colnames = [c.strip() for c in sql_f.split(', ')]
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_o = ''
        sql_s = ''
        left = attributes.get('left', False)
        inner_join = attributes.get('join', False)
        distinct = attributes.get('distinct', False)
        groupby = attributes.get('groupby', False)
        orderby = attributes.get('orderby', False)
        having = attributes.get('having', False)
        limitby = attributes.get('limitby', False)
        for_update = attributes.get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError, 'invalid select attribute: for_update'
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            # distinct may be a field/expression: DISTINCT ON (expr)
            sql_s += 'DISTINCT ON (%s)' % distinct
        # split join specs into plain tables (joint) and ON-expressions
        # (joinon); tables already named in an ON clause must not also
        # appear in the FROM list (issue 490)
        if inner_join:
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            ijoint = [t._tablename for t in inner_join if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge={} #issue 490
            [itables_to_merge.update(dict.fromkeys(self.tables(t))) for t in ijoinon] # issue 490
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont if t in itables_to_merge] #issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys() # issue 490
            iexcluded = [t for t in tablenames if not t in iimportant_tablenames]
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames if not t in important_tablenames ]
        def alias(t):
            # renders "tablename" or "original AS alias" for aliased tables
            return str(self.db[t])
        # assemble the FROM clause for the four join combinations
        if inner_join and not left:
            sql_t = ', '.join([alias(t) for t in iexcluded + itables_to_merge.keys()]) # issue 490
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
        elif not inner_join and left:
            sql_t = ', '.join([alias(t) for t in excluded + tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames + \
                                      iimportant_tablenames + \
                                      tablenames) # issue 490
            tables_in_joinon = set(joinont + ijoinont) # issue 490
            tables_not_in_joinon = all_tables_in_query.difference(tables_in_joinon) # issue 490
            sql_t = ','.join([alias(t) for t in tables_not_in_joinon]) # issue 490
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        else:
            sql_t = ', '.join(alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
        if having:
            sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            # limitby without orderby gives nondeterministic pages; force a
            # stable ordering on the primary key(s)
            if not orderby and tablenames:
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in ((hasattr(self.db[t], '_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])])
            # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.

        Builds the SQL via _select(), optionally serving/storing the raw
        rows through the (cache_model, time_expire) pair in
        attributes['cache'], then parses the rows into a Rows object
        (or hands them to a custom attributes['processor']).
        """
        def response(sql):
            self.execute(sql)
            return self.cursor.fetchall()
        sql = self._select(query, fields, attributes)
        if attributes.get('cache', None):
            (cache_model, time_expire) = attributes['cache']
            del attributes['cache']
            # cache key is uri+sql, hashed when too long for the cache
            key = self.uri + '/' + sql
            key = (len(key)<=200) and key or hashlib.md5(key).hexdigest()
            rows = cache_model(key, lambda: response(sql), time_expire)
        else:
            rows = response(sql)
        if isinstance(rows,tuple):
            rows = list(rows)
        # adapters without native LIMIT slice the rows in Python here
        limitby = attributes.get('limitby', None) or (0,)
        rows = self.rowslice(rows,limitby[0],None)
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,self._colnames)
def _count(self, query, distinct=None):
tablenames = self.tables(query)
if query:
if use_common_filters(query):
query = self.common_filter(query, tablenames)
sql_w = ' WHERE ' + self.expand(query)
else:
sql_w = ''
sql_t = ','.join(tablenames)
if distinct:
if isinstance(distinct,(list, tuple)):
distinct = xorify(distinct)
sql_d = self.expand(distinct)
return 'SELECT count(DISTINCT %s) FROM %s%s;' % (sql_d, sql_t, sql_w)
return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
def count(self, query, distinct=None):
self.execute(self._count(query, distinct))
return self.cursor.fetchone()[0]
def tables(self, query):
tables = set()
if isinstance(query, Field):
tables.add(query.tablename)
elif isinstance(query, (Expression, Query)):
if not query.first is None:
tables = tables.union(self.tables(query.first))
if not query.second is None:
tables = tables.union(self.tables(query.second))
return list(tables)
    # Transaction control: thin pass-throughs to the DB-API connection.
    def commit(self):
        return self.connection.commit()
    def rollback(self):
        return self.connection.rollback()
    def close(self):
        return self.connection.close()
    # Two-phase-commit hooks; this base implementation ignores *key* --
    # adapters with real 2PC support (e.g. MySQL XA) override these.
    def distributed_transaction_begin(self, key):
        return
    def prepare(self, key):
        self.connection.prepare()
    def commit_prepared(self, key):
        self.connection.commit()
    def rollback_prepared(self, key):
        self.connection.rollback()
    def concat_add(self, table):
        # SQL fragment joining multiple ADD COLUMN clauses in one ALTER
        return ', ADD '
    def constraint_name(self, table, fieldname):
        return '%s_%s__constraint' % (table,fieldname)
    def create_sequence_and_triggers(self, query, table, **args):
        # backends with autoincrement need no sequence/trigger; just run
        # the CREATE TABLE
        self.execute(query)
    def log_execute(self, *a, **b):
        # record the SQL and its wall-clock timing, keeping only the last
        # TIMINGSSIZE entries
        self.db._lastsql = a[0]
        t0 = time.time()
        ret = self.cursor.execute(*a, **b)
        self.db._timings.append((a[0],time.time()-t0))
        del self.db._timings[:-TIMINGSSIZE]
        return ret
    def execute(self, *a, **b):
        return self.log_execute(*a, **b)
    def represent(self, obj, fieldtype):
        """Convert a Python value *obj* into its SQL-literal string for a
        column of *fieldtype* (e.g. quoting strings, encoding blobs,
        formatting dates).  Callables are invoked first; list: types are
        bar-encoded; the final string is passed through self.adapt() for
        backend quoting/escaping.
        """
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            return str(obj)
        if fieldtype.startswith('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if fieldtype.startswith('list:string'):
                obj = [str(item) for item in obj]
            else:
                obj = [int(item) for item in obj]
        if isinstance(obj, (list, tuple)):
            obj = bar_encode(obj)
        if obj is None:
            return 'NULL'
        # empty string means NULL except for text-like columns
        # (string/text/password/upload)
        if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']:
            return 'NULL'
        # give adapter subclasses a chance to handle special cases first
        r = self.represent_exceptions(obj, fieldtype)
        if not r is None:
            return r
        if fieldtype == 'boolean':
            if obj and not str(obj)[:1].upper() in ['F', '0']:
                return "'T'"
            else:
                return "'F'"
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(int(obj))
        if fieldtype.startswith('decimal'):
            return str(obj)
        elif fieldtype.startswith('reference'): # reference
            if fieldtype.find('.')>0:
                # keyed reference ("reference table.field"): keep as-is
                return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(int(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        # NOTE: `unicode` below is Python-2-only
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            if isinstance(obj, datetime.time):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        if not isinstance(obj,str):
            obj = str(obj)
        try:
            # verify the bytes are valid in the db codec; if not, assume
            # latin1 and transcode
            obj.decode(self.db_codec)
        except:
            obj = obj.decode('latin1').encode(self.db_codec)
        return self.adapt(obj)
    # Hook for subclasses to special-case value representation; returning
    # None means "no special case, use the default path".
    def represent_exceptions(self, obj, fieldtype):
        return None
    # Hook: return the id of the last inserted row; base class cannot know.
    def lastrowid(self, table):
        return None
    # Hook: the driver exception class signalling integrity errors.
    def integrity_error_class(self):
        return type(None)
    def rowslice(self, rows, minimum=0, maximum=None):
        """ By default this function does nothing; overload when db does not do slicing. """
        return rows
    def parse_value(self, value, field_type, blob_decode=True):
        """Convert a raw driver value into the Python value for
        *field_type*, dispatching through self.parsemap.  With
        blob_decode=False, blob columns are returned undecoded.
        """
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        # NOTE: `unicode` below is Python-2-only; normalize to utf-8 bytes
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        elif isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'): # ???
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            # strip any length/precision suffix, e.g. 'decimal(10,2)' -> 'decimal'
            key = regex_type.match(field_type).group(0)
            return self.parsemap[key](value,field_type)
    def parse_reference(self, value, field_type):
        """Wrap a plain id into a lazy Reference bound to the referenced
        table.  Keyed references ('reference table.field') are returned
        unchanged."""
        referee = field_type[10:].strip()
        if not '.' in referee:
            value = Reference(value)
            # _record is filled lazily on first attribute access
            value._table, value._record = self.db[referee], None
        return value
def parse_boolean(self, value, field_type):
return value == True or str(value)[:1].lower() == 't'
def parse_date(self, value, field_type):
if not isinstance(value, (datetime.date,datetime.datetime)):
(y, m, d) = map(int, str(value)[:10].strip().split('-'))
value = datetime.date(y, m, d)
return value
def parse_time(self, value, field_type):
if not isinstance(value, datetime.time):
time_items = map(int,str(value)[:8].strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
value = datetime.time(h, mi, s)
return value
def parse_datetime(self, value, field_type):
if not isinstance(value, datetime.datetime):
(y, m, d) = map(int,str(value)[:10].strip().split('-'))
time_items = map(int,str(value)[11:19].strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
value = datetime.datetime(y, m, d, h, mi, s)
return value
def parse_blob(self, value, field_type):
return base64.b64decode(str(value))
def parse_decimal(self, value, field_type):
decimals = int(field_type[8:-1].split(',')[-1])
if self.dbengine == 'sqlite':
value = ('%.' + str(decimals) + 'f') % value
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(str(value))
return value
    # list:* columns are stored bar-encoded (e.g. '|1|2|3|') on SQL
    # backends; the Google datastore stores native lists, so decoding is
    # skipped there.
    def parse_list_integers(self, value, field_type):
        if not self.dbengine=='google:datastore':
            value = bar_decode_integer(value)
        return value
    def parse_list_references(self, value, field_type):
        if not self.dbengine=='google:datastore':
            value = bar_decode_integer(value)
        # field_type[5:] strips the 'list:' prefix -> 'reference <table>'
        return [self.parse_reference(r, field_type[5:]) for r in value]
    def parse_list_strings(self, value, field_type):
        if not self.dbengine=='google:datastore':
            value = bar_decode_string(value)
        return value
    # Scalar coercions for the remaining primitive column types.
    def parse_id(self, value, field_type):
        return int(value)
    def parse_integer(self, value, field_type):
        return int(value)
    def parse_double(self, value, field_type):
        return float(value)
    def build_parsemap(self):
        # Map base field-type names (as extracted by regex_type in
        # parse_value) to their parser methods; rebuilt on every parse().
        self.parsemap = {
            'id':self.parse_id,
            'integer':self.parse_integer,
            'double':self.parse_double,
            'reference':self.parse_reference,
            'boolean':self.parse_boolean,
            'date':self.parse_date,
            'time':self.parse_time,
            'datetime':self.parse_datetime,
            'blob':self.parse_blob,
            'decimal':self.parse_decimal,
            'list:integer':self.parse_list_integers,
            'list:reference':self.parse_list_references,
            'list:string':self.parse_list_strings,
            }
    def parse(self, rows, fields, colnames, blob_decode=True):
        """Convert raw cursor rows into a Rows object.

        Columns named 'table.field' become nested Row attributes
        (row.table.field); anything else (expressions, aliases) lands in
        row._extra.  Rows that carry an 'id' field get update_record /
        delete_record helpers and lazy Sets for referencing tables.
        Virtual and lazy fields are evaluated at the end.
        """
        self.build_parsemap()
        db = self.db
        virtualtables = []
        new_rows = []
        for (i,row) in enumerate(rows):
            new_row = Row()
            for j,colname in enumerate(colnames):
                value = row[j]
                if not regex_table_field.match(colnames[j]):
                    # computed column / expression: store under _extra,
                    # and also expose "... AS name" aliases as attributes
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colnames[j]] = \
                        self.parse_value(value, fields[j].type,blob_decode)
                    new_column_name = \
                        regex_select_as_parser.search(colnames[j])
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row,column_name[0],value)
                else:
                    (tablename, fieldname) = colname.split('.')
                    table = db[tablename]
                    field = table[fieldname]
                    if not tablename in new_row:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    else:
                        colset = new_row[tablename]
                    colset[fieldname] = value = \
                        self.parse_value(value,field.type,blob_decode)
                    if field.type == 'id':
                        id = value
                        # default arguments bind the *current* colset/table/id
                        # so each row's closures are independent
                        colset.update_record = lambda _ = (colset, table, id), **a: update_record(_, a)
                        colset.delete_record = lambda t = table, i = id: t._db(t._id==i).delete()
                        for (referee_table, referee_name) in table._referenced_by:
                            s = db[referee_table][referee_name]
                            referee_link = db._referee_name and \
                                db._referee_name % dict(table=referee_table,field=referee_name)
                            if referee_link and not referee_link in colset:
                                # lazy Set of rows in referee_table pointing here
                                colset[referee_link] = Set(db, s == id)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
        for tablename in virtualtables:
            ### new style virtual fields
            table = db[tablename]
            fields_virtual = [(f,v) for (f,v) in table.items() if isinstance(v,FieldVirtual)]
            fields_lazy = [(f,v) for (f,v) in table.items() if isinstance(v,FieldLazy)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f,v in fields_virtual:
                        box[f] = v.f(row)
                    for f,v in fields_lazy:
                        box[f] = (v.handler or VirtualCommand)(v.f,row)
            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except KeyError:
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
def common_filter(self, query, tablenames):
tenant_fieldname = self.db._request_tenant
for tablename in tablenames:
table = self.db[tablename]
# deal with user provided filters
if table._common_filter != None:
query = query & table._common_filter(query)
# deal with multi_tenant filters
if tenant_fieldname in table:
default = table[tenant_fieldname].default
if not default is None:
newquery = table[tenant_fieldname] == default
if query is None:
query = newquery
else:
query = query & newquery
return query
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################
class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite through the sqlite3 driver.

    Registers two Python UDFs on every connection: web2py_extract
    (SQLite has no EXTRACT()) and REGEXP.
    """
    driver = globals().get('sqlite3', None)
    can_select_for_update = None # support ourselves with BEGIN TRANSACTION
    def EXTRACT(self,field,what):
        # delegate to the web2py_extract UDF registered in __init__
        return "web2py_extract('%s',%s)" % (what, self.expand(field))
    @staticmethod
    def web2py_extract(lookup, s):
        """Extract a date/time component from an ISO-formatted string by
        fixed character positions; returns None on any malformed input."""
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            (i, j) = table[lookup]
            return int(s[i:j])
        except (KeyError, ValueError, TypeError):
            # unknown component, non-numeric slice, or non-string input;
            # narrowed from a bare except so real errors propagate
            return None
    @staticmethod
    def web2py_regexp(expression, item):
        return re.compile(expression).search(item) is not None
    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            # raise E(msg) works on both Python 2 and 3, unlike `raise E, msg`
            raise RuntimeError("Unable to import driver")
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'), dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connect(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.pool_connection(connect)
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)
    def _truncate(self, table, mode=''):
        # SQLite has no TRUNCATE; delete all rows and reset the
        # autoincrement counter
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
    def lastrowid(self, table):
        return self.cursor.lastrowid
    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))
    def _select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        sql = super(SQLiteAdapter, self)._select(query, fields, attributes)
        if attributes.get('for_update', False):
            sql = 'BEGIN IMMEDIATE TRANSACTION; ' + sql
        return sql
class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite adapter for Jython, connecting through zxJDBC and the
    SQLite JDBC driver."""
    driver = globals().get('zxJDBC', None)
    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            # raise E(msg) works on both Python 2 and 3, unlike `raise E, msg`
            raise RuntimeError("Unable to import driver")
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'), dbpath)
        def connect(dbpath=dbpath,driver_args=driver_args):
            return self.driver.connect(java.sql.DriverManager.getConnection('jdbc:sqlite:'+dbpath), **driver_args)
        self.pool_connection(connect)
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)
    def execute(self, a):
        # zxJDBC's execute takes a single statement argument
        return self.log_execute(a)
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL through the pymysql driver, with XA two-phase
    commit support."""
    driver = globals().get('pymysql',None)
    maxcharlength = 255
    commit_on_alter_table = True
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        }
    def RANDOM(self):
        return 'RAND()'
    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,'SET FOREIGN_KEY_CHECKS=1;']
    def distributed_transaction_begin(self,key):
        self.execute('XA START;')
    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")
    def commit_prepared(self,key): # fixed parameter typo: was 'ley'
        self.execute("XA COMMIT;")
    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")
    def concat_add(self,table):
        # MySQL cannot chain ADD clauses; start a new ALTER TABLE
        return '; ALTER TABLE %s ADD ' % table
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            # raise E(msg) works on both Python 2 and 3, unlike `raise E, msg`
            raise RuntimeError("Unable to import driver")
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        # raw string avoids invalid-escape warnings for \: etc. on Python 3
        m = re.compile(r'^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(dict(db=db,
                                user=credential_decoder(user),
                                passwd=credential_decoder(password),
                                host=host,
                                port=port,
                                charset=charset))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
class PostgreSQLAdapter(BaseAdapter):
    """Adapter for PostgreSQL through psycopg2 or pg8000, including the
    PostGIS geometry/geography functions."""
    driver = None
    drivers = {'psycopg2': globals().get('psycopg2', None),
               'pg8000': globals().get('pg8000', None), }
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        }
    def adapt(self,obj):
        # psycopg2's own quoting/escaping
        return psycopg2_adapt(obj).getquoted()
    def sequence_name(self,table):
        return '%s_id_Seq' % table
    def RANDOM(self):
        return 'RANDOM()'
    def ADD(self, first, second):
        # '+' on text-like columns means concatenation (||) in PostgreSQL
        t = first.type
        if t in ('text','string','password','upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))
    def distributed_transaction_begin(self,key):
        return
    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)
    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)
    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)
    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #              % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}, srid=4326):
        if not self.drivers.get('psycopg2') and not self.drivers.get('pg8000'):
            # raise E(msg) works on both Python 2 and 3, unlike `raise E, msg`
            raise RuntimeError("Unable to import any drivers (psycopg2 or pg8000)")
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.srid = srid
        self.find_or_make_work_folder()
        library, uri = uri.split('://')[:2]
        # raw string avoids invalid-escape warnings for \: etc. on Python 3
        m = re.compile(r'^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        # choose driver according to uri
        if library == "postgres":
            if 'psycopg2' in self.drivers:
                self.driver = self.drivers['psycopg2']
            elif 'pg8000' in self.drivers:
                self.driver = self.drivers['pg8000']
            else:
                raise RuntimeError("No pgsql driver")
        elif library == "postgres:psycopg2":
            self.driver = self.drivers.get('psycopg2')
        elif library == "postgres:pg8000":
            self.driver = self.drivers.get('pg8000')
        if not self.driver:
            raise RuntimeError("%s is not available" % library)
        self.__version__ = "%s %s" % (self.driver.__name__, self.driver.__version__)
        def connect(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
    def lastrowid(self,table):
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])
    def LIKE(self,first,second):
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand(second,'string'))
    def ILIKE(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second,'string'))
    def REGEXP(self,first,second):
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second,'string'))
    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second+'%','string'))
    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%'+second,'string'))
    def CONTAINS(self,first,second):
        # NOTE(review): for field types other than string/text/list:* the
        # variable `key` is never bound and this raises NameError
        if first.type in ('string','text'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string'))
    # GIS functions
    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
            self.expand(first), second['precision'], second['options'])
    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' %(self.expand(first))
    # def ST_CONTAINED(self, first, second):
    #     """
    #     non-standard function based on ST_Contains with parameters reversed
    #     http://postgis.org/docs/ST_Contains.html
    #     """
    #     return 'ST_Contains(%s,%s)' % (self.expand(second, first.type), self.expand(first))
    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))
    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))
    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))
    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))
    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))
    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))
    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))
    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))
    def represent(self, obj, fieldtype):
        """Render geometry/geography values as ST_GeomFromText /
        ST_GeogFromText literals; defer everything else to BaseAdapter."""
        if fieldtype.startswith('geo'):
            srid = 4326 # postGIS default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if fieldtype.startswith('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif fieldtype.startswith('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#             else:
#                 raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter for Jython, connecting through the JDBC
    postgresql driver."""
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            # raise E(msg) works on both Python 2 and 3, unlike `raise E, msg`
            raise RuntimeError("Unable to import driver")
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        # raw string avoids invalid-escape warnings for \: etc. on Python 3
        m = re.compile(r'^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connect(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
class OracleAdapter(BaseAdapter):
    """DAL adapter for Oracle via the cx_Oracle driver.

    Oracle peculiarities handled here:
    - no autoincrement columns: each table gets a sequence plus a
      BEFORE INSERT trigger (create_sequence_and_triggers);
    - no LIMIT/OFFSET: select_limitby wraps the query in nested
      ROWNUM subqueries;
    - CLOB values cannot be inlined: represent_exceptions emits a
      :CLOB('...') placeholder that execute() converts to bind variables.
    """
    driver = globals().get('cx_Oracle',None)
    commit_on_alter_table = False
    # DAL field type -> Oracle column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }
    def sequence_name(self,tablename):
        # Sequence emulating the autoincrement id for this table.
        return '%s_sequence' % tablename
    def trigger_name(self,tablename):
        # Trigger that pulls the next sequence value on INSERT.
        return '%s_trigger' % tablename
    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'
    def RANDOM(self):
        return 'dbms_random.value'
    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
    def _drop(self,table,mode):
        # Dropping a table must also drop its id sequence.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Emulate LIMIT/OFFSET with nested ROWNUM subqueries: the inner
        # query caps at lmax, the outer filters w_row > lmin.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def constraint_name(self, tablename, fieldname):
        # Oracle identifiers are limited to 30 characters; shorten when the
        # generic name would exceed that.
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name
    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific literal forms; returning None means 'no special
        case, use the generic representation'."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # placeholder consumed by execute(), which turns it into a
            # numbered bind variable
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # everything after 'oracle://' is passed verbatim to cx_Oracle
        uri = uri.split('://')[1]
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connect(uri=uri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.pool_connection(connect)
        # fix the session date formats so represent_exceptions round-trips
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
    # matches the next :CLOB('...') placeholder that is not inside a
    # quoted string; group 'clob' is the placeholder itself
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")
    def execute(self, command, args=None):
        """Rewrite each :CLOB('...') placeholder into a numbered bind
        variable (:1, :2, ...) and collect the unescaped CLOB payloads
        into args before executing."""
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # strip the CLOB('...') wrapper and undo the '' escaping
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        # Oracle rejects a trailing semicolon on plain SQL statements
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command, args)
    def create_sequence_and_triggers(self, query, table, **args):
        # Create the table, its id sequence, and the BEFORE INSERT trigger
        # that fills NEW.id from the sequence.
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name)
        self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name))
    def lastrowid(self,table):
        # id of the last insert = current value of the table's sequence
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])
class MSSQLAdapter(BaseAdapter):
    """DAL adapter for Microsoft SQL Server via pyodbc.

    Accepts either a DSN-style URI (no '@' present) or a full
    user:password@host:port/db?arg=value URI; pagination is emulated
    with SELECT TOP plus Python-side slicing (rowslice).  Includes
    MSSQL spatial method syntax for the GIS operations.
    """
    driver = globals().get('pyodbc',None)
    # DAL field type -> T-SQL column DDL fragment
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        }
    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))
    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'
    def RANDOM(self):
        return 'NEWID()'
    def ALLOW_NULL(self):
        return ' NULL'
    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only TOP lmax is emitted; the lmin offset is discarded here and
        # applied in Python via rowslice().
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def represent_exceptions(self, obj, fieldtype):
        # BIT column: anything truthy that doesn't start with F -> 1
        if fieldtype == 'boolean':
            if obj and not str(obj)[0].upper() == 'F':
                return '1'
            else:
                return '0'
        return None
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}, fake_connect=False, srid=4326):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        # default SRID used by the GIS represent() below
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        uri = uri.split('://')[1]
        if '@' not in uri:
            # no credentials: treat the remainder as an ODBC DSN
            try:
                m = re.compile('^(?P<dsn>.+)$').match(uri)
                if not m:
                    raise SyntaxError, \
                        'Parsing uri string(%s) has no result' % self.uri
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError, 'DSN required'
            except SyntaxError, e:
                logger.error('NdGpatch error')
                raise e
            # was cnxn = 'DSN=%s' % dsn
            cnxn = dsn
        else:
            # full URI: user:password@host:port/db?arg1=value1&arg2=value2
            m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri)
            if not m:
                raise SyntaxError, \
                    "Invalid URI string in DAL: %s" % uri
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError, 'User required'
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError, 'Host name required'
            db = m.group('db')
            if not db:
                raise SyntaxError, 'Database name required'
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
            for argmatch in argpattern.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        # fake_connect skips the actual connection (used for testing/DDL-only)
        if not fake_connect:
            self.pool_connection(connect)
    def lastrowid(self,table):
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return int(self.cursor.fetchone()[0])
    def integrity_error_class(self):
        return pyodbc.IntegrityError
    def rowslice(self,rows,minimum=0,maximum=None):
        # Python-side offset/limit: complements the TOP-only select_limitby.
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
    # GIS functions
    # No STAsGeoJSON in MSSQL
    def ST_ASTEXT(self, first):
        return '%s.STAsText()' %(self.expand(first))
    def ST_CONTAINS(self, first, second):
        return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))
    def ST_DISTANCE(self, first, second):
        return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))
    def ST_EQUALS(self, first, second):
        return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))
    def ST_INTERSECTS(self, first, second):
        return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))
    def ST_OVERLAPS(self, first, second):
        return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))
    # no STSimplify in MSSQL
    def ST_TOUCHES(self, first, second):
        return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))
    def ST_WITHIN(self, first, second):
        return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
    def represent(self, obj, fieldtype):
        """Literal representation for MSSQL spatial fields; non-geo types
        are delegated to BaseAdapter.represent.

        NOTE(review): a bare 'geometry' or 'geography' fieldtype (exactly
        as declared in self.types, with no '(...)' parms) makes
        split('(') return a single element, so the tuple unpack below
        raises ValueError -- confirm the fieldtype always carries
        parenthesised parms when this is reached.
        """
        if fieldtype.startswith('geometry'):
            srid = 0 # MS SQL default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            if parms:
                srid = parms
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        elif fieldtype == 'geography':
            srid = 4326 # MS SQL default srid for geography
            geotype, parms = fieldtype[:-1].split('(')
            if parms:
                srid = parms
            return "geography::STGeomFromText('%s',%s)" %(obj, srid)
#        else:
#            raise SyntaxError, 'Invalid field type %s' %fieldtype
            # NOTE(review): the return below is unreachable -- it follows an
            # unconditional return in the same elif branch.
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        return BaseAdapter.represent(self, obj, fieldtype)
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter variant using national (Unicode) character types
    (NVARCHAR/NTEXT) and N'...' string literals."""
    # same mapping as MSSQLAdapter but with N* character types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        }
    def represent(self, obj, fieldtype):
        # Prefix quoted string/text literals with N so SQL Server treats
        # them as national (Unicode) strings.
        value = BaseAdapter.represent(self, obj, fieldtype)
        if (fieldtype == 'string' or fieldtype == 'text') and value[:1]=="'":
            value = 'N'+value
        return value
    def execute(self,a):
        # Python 2: decode the byte-string SQL to unicode before executing.
        return self.log_execute(a.decode('utf8'))
class FireBirdAdapter(BaseAdapter):
    """DAL adapter for Firebird (kinterbasdb or firebirdsql driver,
    selectable via adapter_args['driver_name']).

    Autoincrement ids are emulated with a generator plus a BEFORE INSERT
    trigger (create_sequence_and_triggers); pagination uses
    SELECT FIRST n SKIP m.
    """
    driver = globals().get('pyodbc',None)
    commit_on_alter_table = False
    support_distributed_transaction = True
    # DAL field type -> Firebird column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }
    def sequence_name(self,tablename):
        # Firebird generator backing the table's id column.
        return 'genid_%s' % tablename
    def trigger_name(self,tablename):
        return 'trg_id_%s' % tablename
    def RANDOM(self):
        return 'RAND()'
    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
    def _drop(self,table,mode):
        # Dropping a table must also drop its id generator.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird pagination: SELECT FIRST count SKIP offset ...
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def _truncate(self,table,mode = ''):
        # TRUNCATE emulation: delete all rows and reset the id generator.
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        # Driver selection (Python 2 dict.has_key): kinterbasdb unless
        # firebirdsql is explicitly requested.
        if adapter_args.has_key('driver_name'):
            if adapter_args['driver_name'] == 'kinterbasdb':
                self.driver = kinterbasdb
            elif adapter_args['driver_name'] == 'firebirdsql':
                self.driver = firebirdsql
        else:
            self.driver = kinterbasdb
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # user:password@host:port/db?set_encoding=charset
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL: %s" % uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        charset = m.group('charset') or 'UTF8'
        driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db),
                                user = credential_decoder(user),
                                password = credential_decoder(password),
                                charset = charset))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
    def create_sequence_and_triggers(self, query, table, **args):
        # Create the table, its generator, and a BEFORE INSERT trigger that
        # fills new.id from the generator when it is null.
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
    def lastrowid(self,table):
        # gen_id(seq, 0) reads the generator's current value without bumping it
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return int(self.cursor.fetchone()[0])
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird adapter for the embedded engine: the URI carries a
    filesystem path instead of host:port/db; everything else is
    inherited from FireBirdAdapter."""
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        # Driver selection (Python 2 dict.has_key), as in FireBirdAdapter.
        if adapter_args.has_key('driver_name'):
            if adapter_args['driver_name'] == 'kinterbasdb':
                self.driver = kinterbasdb
            elif adapter_args['driver_name'] == 'firebirdsql':
                self.driver = firebirdsql
        else:
            self.driver = kinterbasdb
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # user:password@/path/to/db?set_encoding=charset
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError, 'Path required'
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        # empty host selects the embedded engine
        host = ''
        driver_args.update(dict(host=host,
                                database=pathdb,
                                user=credential_decoder(user),
                                password=credential_decoder(password),
                                charset=charset))
        #def connect(driver_args=driver_args):
        #    return kinterbasdb.connect(**driver_args)
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
class InformixAdapter(BaseAdapter):
    """DAL adapter for IBM Informix via the informixdb driver.

    Uses SERIAL columns for autoincrement ids (lastrowid reads
    cursor.sqlerrd[1]) and SKIP/FIRST pagination gated on the server
    version.
    """
    driver = globals().get('informixdb',None)
    # DAL field type -> Informix column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }
    def RANDOM(self):
        return 'Random()'
    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # SKIP/FIRST pagination, gated on the connected server's major version
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def represent_exceptions(self, obj, fieldtype):
        # Informix-specific date/datetime literals; None -> generic handling.
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # user:password@host:port/db -> informixdb dsn 'db@host'
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db,host)
        driver_args.update(dict(user=user,password=password,autocommit=True))
        def connect(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.pool_connection(connect)
    def execute(self,command):
        # Informix rejects trailing semicolons
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)
    def lastrowid(self,table):
        # informixdb exposes the last SERIAL value in cursor.sqlerrd[1]
        return self.cursor.sqlerrd[1]
    def integrity_error_class(self):
        return informixdb.IntegrityError
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2 via pyodbc; the URI after the scheme is
    passed verbatim to the driver as the connection string.

    Pagination uses FETCH FIRST n ROWS ONLY (no offset; rowslice trims
    in Python) and ids come from IDENTITY_VAL_LOCAL().
    """
    driver = globals().get('pyodbc',None)
    # DAL field type -> DB2 column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }
    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'
    def RANDOM(self):
        return 'RAND()'
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # only the upper bound is pushed to SQL; the offset is applied in
        # Python via rowslice()
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def represent_exceptions(self, obj, fieldtype):
        # DB2-specific literal forms; None -> generic handling.
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            # DB2 timestamp literal format: yyyy-mm-dd-hh.mm.ss
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # everything after 'db2://' is the raw ODBC connection string
        cnxn = uri.split('://', 1)[1]
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.pool_connection(connect)
    def execute(self,command):
        # DB2 rejects trailing semicolons
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)
    def lastrowid(self,table):
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return int(self.cursor.fetchone()[0])
    def rowslice(self,rows,minimum=0,maximum=None):
        # Python-side offset/limit, complementing select_limitby above.
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
class TeradataAdapter(BaseAdapter):
    """DAL adapter for Teradata via pyodbc.

    Overrides create_table (largely a copy of the base implementation)
    to use Teradata's modified constraint syntax, and select_limitby to
    emit TOP only (like MSSQL, Teradata cannot express an offset range).
    """
    driver = globals().get('pyodbc',None)
    # DAL field type -> Teradata column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER GENERATED ALWAYS AS IDENTITY', # Teradata Specific
        # Modified Constraint syntax for Teradata.
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }
    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Build and (optionally) run the CREATE TABLE DDL for `table`,
        handling references, table-level foreign keys (TFK), defaults,
        and the .table migration bookkeeping file.
        """
        fields = []
        # maps field name -> metadata used to detect migrations
        sql_fields = {}
        # like sql_fields but with defaults baked in, used for CREATE only
        sql_fields_aux = {}
        # table-level foreign keys: rtablename -> {rfieldname: fieldname}
        TFK = {}
        tablename = table._tablename
        sortable = 0
        for field in table:
            sortable += 1
            k = field.name
            if isinstance(field.type,SQLCustomType):
                ftype = field.type.native or field.type.type
            elif field.type.startswith('reference'):
                referenced = field.type[10:].strip()
                constraint_name = self.constraint_name(tablename, field.name)
                if hasattr(table,'_primarykey'):
                    rtablename,rfieldname = referenced.split('.')
                    rtable = table._db[rtablename]
                    rfield = rtable[rfieldname]
                    # must be PK reference or unique
                    if rfieldname in rtable._primarykey or rfield.unique:
                        ftype = self.types[rfield.type[:9]] % dict(length=rfield.length)
                        # multicolumn primary key reference?
                        if not rfield.unique and len(rtable._primarykey)>1 :
                            # then it has to be a table level FK
                            if rtablename not in TFK:
                                TFK[rtablename] = {}
                            TFK[rtablename][rfieldname] = field.name
                        else:
                            ftype = ftype + \
                                self.types['reference FK'] %dict(\
                                constraint_name=constraint_name,
                                table_name=tablename,
                                field_name=field.name,
                                foreign_key='%s (%s)'%(rtablename, rfieldname),
                                on_delete_action=field.ondelete)
                else:
                    # make a guess here for circular references
                    id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id'
                    ftype = self.types[field.type[:9]]\
                        % dict(table_name=tablename,
                               field_name=field.name,
                               constraint_name=constraint_name,
                               foreign_key=referenced + ('(%s)' % id_fieldname),
                               on_delete_action=field.ondelete)
            elif field.type.startswith('list:reference'):
                ftype = self.types[field.type[:14]]
            elif field.type.startswith('decimal'):
                precision, scale = map(int,field.type[8:-1].split(','))
                ftype = self.types[field.type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif not field.type in self.types:
                raise SyntaxError, 'Field: unknown field type: %s for %s' % \
                    (field.type, field.name)
            else:
                ftype = self.types[field.type]\
                    % dict(length=field.length)
            if not field.type.startswith('id') and not field.type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
            # add to list of fields
            sql_fields[field.name] = dict(sortable=sortable,
                                          type=str(field.type),
                                          sql=ftype)
            if isinstance(field.default,(str,int,float)):
                # Caveat: sql_fields and sql_fields_aux differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger a migration simply
                # because a default value changes.
                not_null = self.NOT_NULL(field.default, field.type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field.name] = dict(sql=ftype)
            fields.append('%s %s' % (field.name, ftype))
        other = ';'
        fields = ',\n '.join(fields)
        # append table-level foreign key clauses collected above
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = table._db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            # NOTE(review): field.ondelete here reads the *last* field from
            # the loop above (stale loop variable) -- confirm intended.
            fields = fields + ',\n ' + \
                self.types['reference TFK'] %\
                dict(table_name=tablename,
                     field_name=', '.join(fkeys),
                     foreign_table=rtablename,
                     foreign_key=', '.join(pkeys),
                     on_delete_action=field.ondelete)
        if hasattr(table,'_primarykey'):
            query = '''CREATE TABLE %s(\n %s,\n %s) %s''' % \
                (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = '''CREATE TABLE %s(\n %s\n)%s''' % \
                (tablename, fields, other)
        if self.uri.startswith('sqlite:///'):
            # sqlite branch kept from the base implementation: the migration
            # metadata lives next to the database file
            path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder
        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = os.path.join(dbpath, migrate)
        else:
            table._dbt = os.path.join(dbpath, '%s_%s.table' \
                % (table._db._uri_hash, tablename))
        if table._dbt:
            table._loggername = os.path.join(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            # first creation: run the DDL and record the field layout
            if table._dbt:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            # table metadata exists: diff against the stored layout and
            # migrate when it changed
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = cPickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError, 'File %s appears corrupted' % table._dbt
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # everything after the scheme is the raw ODBC connection string
        cnxn = uri.split('://', 1)[1]
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.pool_connection(connect)
    # Similar to MSSQL, Teradata can't specify a range (for Pageby)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
# Placeholder sequence name embedded in IngresAdapter's 'id' DDL and
# replaced with a per-table sequence in create_sequence_and_triggers();
# kept invalid so it can never collide with a real database object.
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                       # (ANSI-SQL wants this form of name
                                       # to be a delimited identifier)
class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres via the ingresdbi driver (local database
    only: the URI is reduced to a plain database name).

    Autoincrement ids use a per-table sequence whose placeholder name
    (INGRES_SEQNAME) is substituted in create_sequence_and_triggers.
    """
    driver = globals().get('ingresdbi',None)
    # DAL field type -> Ingres column DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }
    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'
    def RANDOM(self):
        return 'RANDOM()'
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # SELECT FIRST n ... OFFSET m (OFFSET needs Ingres 9.2+)
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "ingres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # NOTE(review): other adapters read self.uri here; self._uri is not
        # assigned in this __init__ -- confirm a base class sets it, else
        # this raises AttributeError at connect time.
        connstr = self._uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        database_name=connstr # Assume only (local) dbname is passed in
        vnode = '(local)'
        servertype = 'ingres'
        trace = (0, None) # No tracing
        driver_args.update(dict(database=database_name,
                                vnode=vnode,
                                servertype=servertype,
                                trace=trace))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            # NOTE(review): table.primarykey here vs table._primarykey in the
            # hasattr test above -- confirm both attributes exist.
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            # replace the invalid placeholder with a real per-table sequence
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))
    def lastrowid(self,table):
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return int(self.cursor.fetchone()[0]) # don't really need int type cast here...
    def integrity_error_class(self):
        return ingresdbi.IntegrityError
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter variant that maps the string/text DAL types onto
    the Unicode column types (NVARCHAR/NCLOB) instead of VARCHAR/CLOB;
    all behaviour is otherwise inherited from IngresAdapter."""
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        }
class SAPDBAdapter(BaseAdapter):
    """Adapter for SAP DB / MaxDB via the sapdb driver.

    'id' columns are fed from a per-table sequence created in
    create_sequence_and_triggers(); LIMIT/OFFSET is emulated with a
    ROWNO subquery since SAP DB has no native OFFSET.
    """
    driver = globals().get('sapdb',None)
    support_distributed_transaction = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        }

    def sequence_name(self,table):
        # Sequence backing the table's id column default.
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Emulate LIMIT/OFFSET: wrap the query, number rows with ROWNO,
        # cap at lmax in the inner WHERE and skip lmin in the outer one.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            # NOTE(review): sql_f/sql_t/sql_w/sql_o are interpolated at two
            # different nesting levels here -- looks fragile; confirm the
            # generated SQL against a live SAP DB before changing it.
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
                         % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        # Parses sapdb://user[:password]@host[:port]/dbname[?sslmode=...]
        if not self.driver:
            raise RuntimeError, "Unable to import driver"
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        def connect(user=user, password=password, database=db,
                    host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.pool_connection(connect)

    def lastrowid(self,table):
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return int(self.cursor.fetchone()[0])
class CubridAdapter(MySQLAdapter):
    """Adapter for CUBRID; reuses the MySQL SQL dialect from MySQLAdapter.

    URI form: cubrid://user[:password]@host[:port]/db[?set_encoding=...]
    """
    driver = globals().get('cubriddb', None)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        if not self.driver:
            raise RuntimeError("Unable to import driver")
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')
        # NOTE(review): charset is parsed but never passed to the driver --
        # confirm whether it should go into driver_args.
        charset = m.group('charset') or 'utf8'
        # BUGFIX: user/password were previously run through
        # credential_decoder a *second* time here, double-decoding the
        # credentials with any non-identity decoder; they are already
        # decoded above.
        def connect(host=host,port=port,db=db,
                    user=user,passwd=password,driver_args=driver_args):
            return self.driver.connect(host,port,db,user,passwd,**driver_args)
        self.pool_connection(connect)
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
######## GAE MySQL ##########
class DatabaseStoredFile:
    """File-like object that persists web2py's .table migration metadata
    in a `web2py_filesystem` database table instead of on disk; used by
    GoogleSQLAdapter below, where no writable filesystem is available.
    Currently restricted to MySQL engines."""
    # Class-level flag: the backing table is created at most once per process.
    web2py_filesystem = False
    def escape(self,obj):
        return self.db._adapter.escape(obj)
    def __init__(self,db,filename,mode):
        # mode: 'r', 'w', 'rw' or 'a'; missing files are only an error
        # for the read modes.
        if db._adapter.dbengine != 'mysql':
            raise RuntimeError, "only MySQL can store metadata .table files in database for now"
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;")
            DatabaseStoredFile.web2py_filesystem = True
        # p is the read cursor into the in-memory copy of the content.
        self.p=0
        self.data = ''
        if mode in ('r','rw','a'):
            # NOTE(security): filename is interpolated into SQL unescaped --
            # acceptable only because .table paths are generated internally;
            # never pass untrusted names here.
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif os.path.exists(filename):
                # Fall back to (and import) an existing on-disk copy.
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError, "File %s does not exist" % filename
    def read(self, bytes):
        # Return up to `bytes` characters from the current position.
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data
    def readline(self):
        # Return the next line including its newline, or the remainder.
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data
    def write(self,data):
        # Writes are buffered in memory; close() persists them.
        self.data += data
    def close(self):
        # Replace any existing row for this path with the buffered content.
        self.db.executesql("DELETE FROM web2py_filesystem WHERE path='%s'" \
                               % self.filename)
        query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
            % (self.filename, self.data.replace("'","''"))
        self.db.executesql(query)
        self.db.commit()
    @staticmethod
    def exists(db, filename):
        # True if the file exists on disk OR in the database table.
        if os.path.exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        if db.executesql(query):
            return True
        return False
class UseDatabaseStoredFile:
    """Mixin that redirects an adapter's file_* hooks to
    DatabaseStoredFile, so migration metadata lives in the database
    rather than on the local filesystem."""
    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db,filename)
    def file_open(self, filename, mode='rb', lock=True):
        # `lock` is accepted for interface compatibility but unused here.
        return DatabaseStoredFile(self.db,filename,mode)
    def file_close(self, fileobj):
        fileobj.close()
    def file_delete(self,filename):
        # NOTE(security): filename interpolated unescaped -- internal paths only.
        query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(query)
        self.db.commit()
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """MySQL adapter for Google Cloud SQL (App Engine `rdbms` API).
    Migration metadata is stored in the database itself via the
    UseDatabaseStoredFile mixin, since GAE has no writable filesystem."""

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        # NOTE(review): relies on a module-level `thread` object carrying
        # the current request folder -- confirm it is always populated
        # before adapters are constructed on GAE.
        self.folder = folder or '$HOME/'+thread.folder.split('/applications/',1)[1]
        m = re.compile('^(?P<instance>.*)/(?P<db>.*)$').match(self.uri[len('google:sql://'):])
        if not m:
            # BUGFIX: was `self._uri`, an attribute that is never set
            # (only self.uri exists), so the error path itself raised
            # AttributeError instead of the intended SyntaxError.
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connect(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.pool_connection(connect)
        if createdb:
            # self.execute('DROP DATABASE %s' % db)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % db)
            self.execute('USE %s' % db)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
class NoSQLAdapter(BaseAdapter):
    """Shared base class for the non-relational adapters below (Google
    datastore, CouchDB, MongoDB): provides value marshalling
    (represent), no-op transaction hooks, and stubs that make every
    SQL-only operation fail loudly with SyntaxError."""
    can_select_for_update = False
    @staticmethod
    def to_unicode(obj):
        # Byte strings are assumed to be utf8; everything else is coerced.
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj
    def represent(self, obj, fieldtype):
        """Convert a Python value into the backend-level value for
        `fieldtype` (a DAL type string, an SQLCustomType, or -- on the
        datastore -- a gae.Property instance)."""
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError, "non supported on GAE"
        if self.dbengine == 'google:datastore':
            # Raw gae.Property field types pass values through untouched.
            if isinstance(fieldtype, gae.Property):
                return obj
        is_string = isinstance(fieldtype,str)
        is_list = is_string and fieldtype.startswith('list:')
        if is_list:
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        # Empty string means NULL except for the string-ish field types
        # ('st'ring, 'te'xt, 'pa'ssword, 'up'load).
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te','pa','up']):
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and fieldtype.startswith('reference'):
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # Anything truthy not starting with 'F'/'f' counts as True.
                if obj and not str(obj)[0].upper() == 'F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    # datetime is a subclass of date: truncate to a date.
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    # Seconds are optional in the 'HH:MM[:SS]' input.
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    # Parse 'YYYY-MM-DD[ HH:MM[:SS]]'; missing time parts
                    # default to zero.
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif is_string and fieldtype.startswith('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj
    def _insert(self,table,fields):
        # The _xxx methods only produce human-readable '_lastsql' strings.
        return 'insert %s in %s' % (fields, table)
    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)
    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))
    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))
    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))
    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass
    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass
    def close(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError, "Not supported"
    def AND(self,first,second): raise SyntaxError, "Not supported"
    def AS(self,first,second): raise SyntaxError, "Not supported"
    def ON(self,first,second): raise SyntaxError, "Not supported"
    def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported"
    def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported"
    def ADD(self,first,second): raise SyntaxError, "Not supported"
    def SUB(self,first,second): raise SyntaxError, "Not supported"
    def MUL(self,first,second): raise SyntaxError, "Not supported"
    def DIV(self,first,second): raise SyntaxError, "Not supported"
    def LOWER(self,first): raise SyntaxError, "Not supported"
    def UPPER(self,first): raise SyntaxError, "Not supported"
    def EXTRACT(self,first,what): raise SyntaxError, "Not supported"
    def AGGREGATE(self,first,what): raise SyntaxError, "Not supported"
    def LEFT_JOIN(self): raise SyntaxError, "Not supported"
    def RANDOM(self): raise SyntaxError, "Not supported"
    def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported"
    def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported"
    def ILIKE(self,first,second): raise SyntaxError, "Not supported"
    def drop(self,table,mode): raise SyntaxError, "Not supported"
    def alias(self,table,alias): raise SyntaxError, "Not supported"
    def migrate_table(self,*a,**b): raise SyntaxError, "Not supported"
    def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported"
    def prepare(self,key): raise SyntaxError, "Not supported"
    def commit_prepared(self,key): raise SyntaxError, "Not supported"
    def rollback_prepared(self,key): raise SyntaxError, "Not supported"
    def concat_add(self,table): raise SyntaxError, "Not supported"
    def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported"
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError, "Not supported"
    def execute(self,*a,**b): raise SyntaxError, "Not supported"
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError, "Not supported"
    def lastrowid(self,table): raise SyntaxError, "Not supported"
    def integrity_error_class(self): raise SyntaxError, "Not supported"
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError, "Not supported"
class GAEF(object):
    """One Google-datastore filter: field name, operator and value,
    plus a Python callable used to apply the same test in memory."""
    def __init__(self, name, op, value, apply):
        # 'id' queries address the datastore's __key__ pseudo-field.
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply
    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
class GoogleDatastoreAdapter(NoSQLAdapter):
    """Adapter for the Google App Engine datastore.

    DAL tables are mapped onto dynamically built gae.Model subclasses
    (create_table); queries are translated into lists of GAEF filter
    descriptors by EQ/NE/LT/... and applied in select_raw.
    """
    uploads_in_blob = True
    types = {}

    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        # Map DAL type names onto gae.Property classes/factories.
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda: gae.StringProperty(multiline=True)),
                'text': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda: gae.StringListProperty(default=None)),
                'list:integer': (lambda: gae.ListProperty(int,default=None)),
                'list:reference': (lambda: gae.ListProperty(int,default=None)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self.pool_size = 0
        # An optional datastore namespace may be given in the URI.
        match = re.compile('.*://(?P<namespace>.+)').match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))

    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        """Build (in memory) the gae.Model subclass backing `table`.
        `polymodel` may be None, True (derive from PolyModel) or a
        parent Table whose _tableobj becomes the base class."""
        myfields = {}
        for k in table.fields:
            if isinstance(polymodel,Table) and k in polymodel.fields():
                continue
            field = table[k]
            attr = {}
            if isinstance(field.type, SQLCustomType):
                ftype = self.types[field.type.native or field.type.type](**attr)
            elif isinstance(field.type, gae.Property):
                ftype = field.type
            elif field.type.startswith('id'):
                continue
            elif field.type.startswith('decimal'):
                precision, scale = field.type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field.type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field.type[10:].strip()
                ftype = self.types[field.type[:9]](referenced)
            elif field.type.startswith('list:reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field.type[15:].strip()
                ftype = self.types[field.type[:14]](**attr)
            elif field.type.startswith('list:'):
                ftype = self.types[field.type](**attr)
            elif not field.type in self.types\
                 or not self.types[field.type]:
                raise SyntaxError('Field: unknown field type: %s' % field.type)
            else:
                ftype = self.types[field.type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None

    def expand(self,expression,field_type=None):
        # Recursively translate a DAL expression tree; leaf Queries end
        # up as lists of GAEF via the operator methods below.
        if isinstance(expression,Field):
            if expression.type in ('text','blob'):
                raise SyntaxError('AppEngine does not index by: %s' % expression.type)
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)

    ### TODO from gql.py Expression
    def AND(self,first,second):
        # Concatenate filter lists; __key__ filters are moved first.
        a = self.expand(first)
        b = self.expand(second)
        if b[0].name=='__key__' and a[0].name!='__key__':
            return b+a
        return a+b

    def EQ(self,first,second=None):
        if isinstance(second, Key):
            return [GAEF(first.name,'=',second,lambda a,b:a==b)]
        return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]

    def NE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]

    def LT(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'<',second,lambda a,b:a<b)]

    def LE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]

    def GT(self,first,second=None):
        if first.type != 'id' or second==0 or second == '0':
            return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>',second,lambda a,b:a>b)]

    def GE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]

    def INVERT(self,first):
        return '-%s' % first.name

    def COMMA(self,first,second):
        return '%s, %s' % (self.expand(first),self.expand(second))

    def BELONGS(self,first,second=None):
        if not isinstance(second,(list, tuple)):
            raise SyntaxError("Not supported")
        if first.type != 'id':
            return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)]
        else:
            second = [Key.from_path(first._tablename, i) for i in second]
            return [GAEF(first.name,'in',second,lambda a,b:a in b)]

    def CONTAINS(self,first,second):
        if not first.type.startswith('list:'):
            raise SyntaxError("Not supported")
        return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:a in b)]

    def NOT(self,first):
        # Negation is only supported for simple comparison queries, by
        # swapping the operator for its complement.
        nops = { self.EQ: self.NE,
                 self.NE: self.EQ,
                 self.LT: self.GE,
                 self.GT: self.LE,
                 self.LE: self.GT,
                 self.GE: self.LT}
        if not isinstance(first,Query):
            raise SyntaxError("Not supported")
        nop = nops.get(first.op,None)
        if not nop:
            raise SyntaxError("Not supported %s" % first.op.__name__)
        first.op = nop
        return self.expand(first)

    def truncate(self,table,mode):
        self.db(table._id > 0).delete()

    def select_raw(self,query,fields=None,attributes=None):
        """Run the query and return (items, tablename, fieldnames);
        `items` is either a list of entities or a lazy GAE query."""
        fields = fields or []
        attributes = attributes or {}
        new_fields = []
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        fields = new_fields
        if query:
            tablename = self.get_table(query)
        elif fields:
            tablename = fields[0].tablename
            query = fields[0].table._id>0
        else:
            raise SyntaxError("Unable to determine a tablename")
        if query:
            if use_common_filters(query):
                query = self.common_filter(query,[tablename])
        tableobj = self.db[tablename]._tableobj
        items = tableobj.all()
        filters = self.expand(query)
        for flt in filters:  # `flt` rather than `filter` (builtin shadow)
            if flt.name=='__key__' and flt.op=='>' and flt.value==0:
                continue
            elif flt.name=='__key__' and flt.op=='=':
                if flt.value==0:
                    items = []
                elif isinstance(flt.value, Key):
                    item = tableobj.get(flt.value)
                    items = (item and [item]) or []
                else:
                    item = tableobj.get_by_id(flt.value)
                    items = (item and [item]) or []
            elif isinstance(items,list): # i.e. there is a single record!
                # BUGFIX: the comprehension previously read
                # getattr(item, ...) -- a stale variable from the branch
                # above -- so every element was tested against the same
                # record instead of itself.
                items = [i for i in items if flt.apply(getattr(i,flt.name),
                                                       flt.value)]
            else:
                if flt.name=='__key__': items.order('__key__')
                items = items.filter('%s %s' % (flt.name,flt.op),flt.value)
        if not isinstance(items,list):
            if attributes.get('left', None):
                raise SyntaxError('Set: no left join in appengine')
            if attributes.get('groupby', None):
                raise SyntaxError('Set: no groupby in appengine')
            orderby = attributes.get('orderby', False)
            if orderby:
                ### THIS REALLY NEEDS IMPROVEMENT !!!
                if isinstance(orderby, (list, tuple)):
                    orderby = xorify(orderby)
                if isinstance(orderby,Expression):
                    orderby = self.expand(orderby)
                orders = orderby.split(', ')
                for order in orders:
                    order={'-id':'-__key__','id':'__key__'}.get(order,order)
                    items = items.order(order)
            if attributes.get('limitby', None):
                (lmin, lmax) = attributes['limitby']
                (limit, offset) = (lmax - lmin, lmin)
                items = items.fetch(limit, offset=offset)
        fields = self.db[tablename].fields
        return (items, tablename, fields)

    def select(self,query,fields,attributes):
        """Return a Rows-compatible result; 'id' columns are rebuilt
        from each entity's key."""
        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        rows = [
            [t=='id' and (int(item.key().id()) if item.key().id() else
                          item.key().name()) or getattr(item, t) for t in fields]
            for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)

    def count(self,query,distinct=None):
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        (items, tablename, fields) = self.select_raw(query)
        # self.db['_lastsql'] = self._count(query)
        try:
            return len(items)
        except TypeError:
            return items.count(limit=None)

    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer support deleting more than 1000 records.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            counter = items.count(limit=None)
            leftitems = items.fetch(1000)
            while len(leftitems):
                gae.delete(leftitems)
                leftitems = items.fetch(1000)
        else:
            counter = len(items)
            gae.delete(items)
        return counter

    def update(self,tablename,query,update_fields):
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value,field.type))
            item.put()
            counter += 1
        logger.info(str(counter))
        return counter

    def insert(self,table,fields):
        # Returns a Reference wrapping the new entity's numeric key id.
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        rid = Reference(tmp.key().id())
        (rid._table, rid._record) = (table, None)
        return rid

    def bulk_insert(self,table,items):
        parsed_items = []
        for item in items:
            dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
            parsed_items.append(table._tableobj(**dfields))
        gae.put(parsed_items)
        return True
def uuid2int(uuidv):
    """Collapse a UUID string into its 128-bit integer value."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
def int2uuid(n):
    """Inverse of uuid2int: rebuild the canonical (hyphenated, lower
    case) UUID string from a 128-bit integer value."""
    return str(uuid.UUID(int=n))
class CouchDBAdapter(NoSQLAdapter):
    """Adapter for CouchDB via the couchdb package.

    Each DAL table becomes a CouchDB database; records are documents
    whose '_id' holds the stringified integer id. Queries are compiled
    into JavaScript map functions (see _select)."""
    uploads_in_blob = True
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    def expand(self,expression,field_type=None):
        # 'id' maps to the CouchDB document '_id'; all other expressions
        # fall through to the SQL-style expansion.
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)

    # Boolean/comparison operators are rendered as JavaScript, since the
    # query ends up inside a CouchDB map function.
    def AND(self,first,second):
        return '(%s && %s)' % (self.expand(first),self.expand(second))

    def OR(self,first,second):
        return '(%s || %s)' % (self.expand(first),self.expand(second))

    def EQ(self,first,second):
        if second is None:
            return '(%s == null)' % self.expand(first)
        return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))

    def NE(self,first,second):
        if second is None:
            return '(%s != null)' % self.expand(first)
        return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))

    def COMMA(self,first,second):
        return '%s + %s' % (self.expand(first),self.expand(second))

    def represent(self, obj, fieldtype):
        # Marshal via NoSQLAdapter, then render as a JavaScript literal.
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            return repr(str(int(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        return repr(not isinstance(value,unicode) and value or value.encode('utf8'))

    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.uri = uri
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self.pool_size = pool_size
        url='http://'+uri[10:]
        def connect(url=url,driver_args=driver_args):
            return couchdb.Server(url,**driver_args)
        self.pool_connection(connect,cursor=False)

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        if migrate:
            # Creating an already-existing database is best-effort.
            try:
                self.connection.create(table._tablename)
            except:
                pass

    def insert(self,table,fields):
        # Document ids are random 128-bit integers (web2py UUIDs).
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id

    def _select(self,query,fields,attributes):
        """Compile the query into a CouchDB JavaScript map function and
        return (map_function_source, column_names)."""
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        for key in set(attributes.keys())-set(('orderby','groupby','limitby',
                                               'required','cache','left',
                                               'distinct','having')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        def uid(fd):
            # DAL field name -> CouchDB document attribute name.
            return fd=='id' and '_id' or fd
        def get(row,fd):
            return fd=='id' and int(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);}" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames

    def select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)

    def delete(self,tablename,query):
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        if query.first.type=='id' and query.op==self.EQ:
            # Fast path: delete one document addressed by id.
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # General path: select matching ids, then delete one by one.
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)

    def update(self,tablename,query,fields):
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        if query.first.type=='id' and query.op==self.EQ:
            # Fast path: update one document addressed by id.
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # General path: select matching ids, then update one by one.
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)

    def count(self,query,distinct=None):
        if distinct:
            raise RuntimeError, "COUNT DISTINCT not supported"
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename]._id],{})
        return len(rows)
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]

    Returns `text` unchanged when valid; raises SyntaxError otherwise.
    """
    # re.search is sufficient here -- findall built a throwaway list of
    # every offending character just to test for a match.
    if re.compile('[^0-9a-zA-Z_]').search(text):
        raise SyntaxError(
            'only [0-9a-zA-Z_] allowed in table and field names, received %s'
            % text)
    return text
class MongoDBAdapter(NoSQLAdapter):
uploads_in_blob = True
types = {
'boolean': bool,
'string': str,
'text': str,
'password': str,
'blob': str,
'upload': str,
'integer': long,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': long,
'reference': long,
'list:string': list,
'list:integer': list,
'list:reference': list,
}
def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
m=None
try:
#Since version 2
import pymongo.uri_parser
m = pymongo.uri_parser.parse_uri(uri)
except ImportError:
try:
#before version 2 of pymongo
import pymongo.connection
m = pymongo.connection._parse_uri(uri)
except ImportError:
raise ImportError("Uriparser for mongodb is not available")
except:
raise SyntaxError("This type of uri is not supported by the mongodb uri parser")
self.db = db
self.uri = uri
self.dbengine = 'mongodb'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self.pool_size = pool_size
#this is the minimum amount of replicates that it should wait for on insert/update
self.minimumreplication = adapter_args.get('minimumreplication',0)
#by default alle insert and selects are performand asynchronous, but now the default is
#synchronous, except when overruled by either this default or function parameter
self.safe = adapter_args.get('safe',True)
if isinstance(m,tuple):
m = {"database" : m[1]}
if m.get('database')==None:
raise SyntaxError("Database is required!")
def connect(uri=self.uri,m=m):
try:
return pymongo.Connection(uri)[m.get('database')]
except pymongo.errors.ConnectionFailure, inst:
raise SyntaxError, "The connection to " + uri + " could not be made"
except Exception, inst:
if inst == "cannot specify database without a username and password":
raise SyntaxError("You are probebly running version 1.1 of pymongo which contains a bug which requires authentication. Update your pymongo.")
else:
raise SyntaxError("This is not an official Mongodb uri (http://www.mongodb.org/display/DOCS/Connections) Error : %s" % inst)
self.pool_connection(connect,cursor=False)
def represent(self, obj, fieldtype):
value = NoSQLAdapter.represent(self, obj, fieldtype)
if fieldtype =='date':
if value == None:
return value
t = datetime.time(0, 0, 0)#this piece of data can be stripped of based on the fieldtype
return datetime.datetime.combine(value, t) #mongodb doesn't has a date object and so it must datetime, string or integer
elif fieldtype == 'time':
if value == None:
return value
d = datetime.date(2000, 1, 1) #this piece of data can be stripped of based on the fieldtype
return datetime.datetime.combine(d, value) #mongodb doesn't has a time object and so it must datetime, string or integer
elif fieldtype == 'list:string' or fieldtype == 'list:integer' or fieldtype == 'list:reference':
return value #raise SyntaxError, "Not Supported"
return value
#Safe determines whether a asynchronious request is done or a synchronious action is done
#For safety, we use by default synchronious requests
def insert(self,table,fields,safe=None):
if safe==None:
safe=self.safe
ctable = self.connection[table._tablename]
values = dict((k.name,self.represent(v,table[k.name].type)) for k,v in fields)
ctable.insert(values,safe=safe)
return int(str(values['_id']), 16)
def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None, isCapped=False):
if isCapped:
raise RuntimeError, "Not implemented"
else:
pass
def count(self,query,distinct=None,snapshot=True):
if distinct:
raise RuntimeError, "COUNT DISTINCT not supported"
if not isinstance(query,Query):
raise SyntaxError, "Not Supported"
tablename = self.get_table(query)
return int(self.select(query,[self.db[tablename]._id],{},count=True,snapshot=snapshot)['count'])
#Maybe it would be faster if we just implemented the pymongo .count() function which is probably quicker?
# therefor call __select() connection[table].find(query).count() Since this will probably reduce the return set?
def expand(self, expression, field_type=None):
import pymongo.objectid
#if isinstance(expression,Field):
# if expression.type=='id':
# return {_id}"
if isinstance(expression, Query):
print "in expand and this is a query"
# any query using 'id':=
# set name as _id (as per pymongo/mongodb primary key)
# convert second arg to an objectid field (if its not already)
# if second arg is 0 convert to objectid
if isinstance(expression.first,Field) and expression.first.type == 'id':
expression.first.name = '_id'
if expression.second != 0 and not isinstance(expression.second,pymongo.objectid.ObjectId):
if isinstance(expression.second,int):
try:
#Cause the reference field is by default an integer and therefor this must be an integer to be able to work with other databases
expression.second = pymongo.objectid.ObjectId(("%X" % expression.second))
except:
raise SyntaxError, 'The second argument must by an integer that can represent an objectid.'
else:
try:
#But a direct id is also possible
expression.second = pymongo.objectid.ObjectId(expression.second)
except:
raise SyntaxError, 'second argument must be of type bson.objectid.ObjectId or an objectid representable integer'
elif expression.second == 0:
expression.second = pymongo.objectid.ObjectId('000000000000000000000000')
return expression.op(expression.first, expression.second)
if isinstance(expression, Field):
if expression.type=='id':
return "_id"
else:
return expression.name
#return expression
elif isinstance(expression, (Expression, Query)):
if not expression.second is None:
return expression.op(expression.first, expression.second)
elif not expression.first is None:
return expression.op(expression.first)
elif not isinstance(expression.op, str):
return expression.op()
else:
return expression.op
elif field_type:
return str(self.represent(expression,field_type))
elif isinstance(expression,(list,tuple)):
return ','.join(self.represent(item,field_type) for item in expression)
else:
return expression
def _select(self,query,fields,attributes):
from pymongo import son
for key in set(attributes.keys())-set(('limitby','orderby')):
raise SyntaxError, 'invalid select attribute: %s' % key
new_fields=[]
mongosort_list = []
# try an orderby attribute
orderby = attributes.get('orderby', False)
limitby = attributes.get('limitby', False)
#distinct = attributes.get('distinct', False)
if orderby:
#print "in if orderby %s" % orderby
if isinstance(orderby, (list, tuple)):
print "in xorify"
orderby = xorify(orderby)
# !!!! need to add 'random'
for f in self.expand(orderby).split(','):
if f.startswith('-'):
mongosort_list.append((f[1:],-1))
else:
mongosort_list.append((f,1))
print "mongosort_list = %s" % mongosort_list
if limitby:
# a tuple
limitby_skip,limitby_limit = limitby
else:
limitby_skip = 0
limitby_limit = 0
#if distinct:
#print "in distinct %s" % distinct
mongofields_dict = son.SON()
mongoqry_dict = {}
for item in fields:
if isinstance(item,SQLALL):
new_fields += item.table
else:
new_fields.append(item)
fields = new_fields
if isinstance(query,Query):
tablename = self.get_table(query)
elif len(fields) != 0:
tablename = fields[0].tablename
else:
raise SyntaxError, "The table name could not be found in the query nor from the select statement."
fieldnames = [f for f in (fields or self.db[tablename])] # ie table.field
mongoqry_dict = self.expand(query)
for f in fieldnames:
mongofields_dict[f.name] = 1 # ie field=1
return tablename, mongoqry_dict, mongofields_dict, mongosort_list, limitby_limit, limitby_skip
# need to define all the 'sql' methods gt,lt etc....
def select(self,query,fields,attributes,count=False,snapshot=False):
withId=False
tablename, mongoqry_dict , mongofields_dict, mongosort_list, limitby_limit, limitby_skip = self._select(query,fields,attributes)
for key in mongofields_dict.keys():
if key == 'id':
withId = True
break;
try:
print "mongoqry_dict=%s" % mongoqry_dict
except:
pass
print "mongofields_dict=%s" % mongofields_dict
ctable = self.connection[tablename]
if count:
return {'count' : ctable.find(mongoqry_dict,mongofields_dict,skip=limitby_skip, limit=limitby_limit, sort=mongosort_list,snapshot=snapshot).count()}
else:
mongo_list_dicts = ctable.find(mongoqry_dict,mongofields_dict,skip=limitby_skip, limit=limitby_limit, sort=mongosort_list,snapshot=snapshot) # pymongo cursor object
print "mongo_list_dicts=%s" % mongo_list_dicts
#if mongo_list_dicts.count() > 0: #
#colnames = mongo_list_dicts[0].keys() # assuming all docs have same "shape", grab colnames from first dictionary (aka row)
#else:
#colnames = mongofields_dict.keys()
#print "colnames = %s" % colnames
#rows = [row.values() for row in mongo_list_dicts]
rows = []
for record in mongo_list_dicts:
row=[]
for column in record:
if withId and (column == '_id'):
if isinstance(record[column],pymongo.objectid.ObjectId):
row.append( int(str(record[column]),16))
else:
#in case of alternative key
row.append( record[column] )
elif not (column == '_id'):
row.append(record[column])
rows.append(row)
#else the id is not supposed to be included. Work around error. mongo always sends key:(
processor = attributes.get('processor',self.parse)
return processor(rows,fields,mongofields_dict.keys(),False)
def INVERT(self,first):
#print "in invert first=%s" % first
return '-%s' % self.expand(first)
def drop(self, table, mode=''):
ctable = self.connection[table._tablename]
ctable.drop()
def truncate(self,table,mode,safe=None):
if safe==None:
safe=self.safe
ctable = self.connection[table._tablename]
ctable.remove(None, safe=True)
#the update function should return a string
def oupdate(self,tablename,query,fields):
if not isinstance(query,Query):
raise SyntaxError, "Not Supported"
filter = None
if query:
filter = self.expand(query)
f_v = []
modify = { '$set' : dict(((k.name,self.represent(v,k.type)) for k,v in fields)) }
return modify,filter
#TODO implement update
#TODO implement set operator
#TODO implement find and modify
#todo implement complex update
def update(self,tablename,query,fields,safe=None):
if safe==None:
safe=self.safe
#return amount of adjusted rows or zero, but no exceptions related not finding the result
if not isinstance(query,Query):
raise RuntimeError, "Not implemented"
amount = self.count(query,False)
modify,filter = self.oupdate(tablename,query,fields)
try:
if safe:
return self.connection[tablename].update(filter,modify,multi=True,safe=safe).n
else:
amount =self.count(query)
self.connection[tablename].update(filter,modify,multi=True,safe=safe)
return amount
except:
#TODO Reverse update query to verifiy that the query succeded
return 0
"""
An special update operator that enables the update of specific field
return a dict
"""
#this function returns a dict with the where clause and update fields
def _update(self,tablename,query,fields):
return str(self.oupdate(tablename,query,fields))
def bulk_insert(self, table, items):
return [self.insert(table,item) for item in items]
#TODO This will probably not work:(
def NOT(self, first):
result = {}
result["$not"] = self.expand(first)
return result
def AND(self,first,second):
f = self.expand(first)
s = self.expand(second)
f.update(s)
return f
def OR(self,first,second):
# pymongo expects: .find( {'$or' : [{'name':'1'}, {'name':'2'}] } )
result = {}
f = self.expand(first)
s = self.expand(second)
result['$or'] = [f,s]
return result
def BELONGS(self, first, second):
if isinstance(second, str):
return {self.expand(first) : {"$in" : [ second[:-1]]} }
elif second==[] or second==():
return {1:0}
items = [self.expand(item, first.type) for item in second]
return {self.expand(first) : {"$in" : items} }
def LIKE(self, first, second):
#escaping regex operators?
return {self.expand(first) : ('%s' % self.expand(second, 'string').replace('%','/'))}
def STARTSWITH(self, first, second):
#escaping regex operators?
return {self.expand(first) : ('/^%s/' % self.expand(second, 'string'))}
def ENDSWITH(self, first, second):
#escaping regex operators?
return {self.expand(first) : ('/%s^/' % self.expand(second, 'string'))}
def CONTAINS(self, first, second):
#There is a technical difference, but mongodb doesn't support that, but the result will be the same
return {self.expand(first) : ('/%s/' % self.expand(second, 'string'))}
def EQ(self,first,second):
result = {}
#if second is None:
#return '(%s == null)' % self.expand(first)
#return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
result[self.expand(first)] = self.expand(second)
return result
def NE(self, first, second=None):
print "in NE"
result = {}
result[self.expand(first)] = {'$ne': self.expand(second)}
return result
def LT(self,first,second=None):
if second is None:
raise RuntimeError, "Cannot compare %s < None" % first
print "in LT"
result = {}
result[self.expand(first)] = {'$lt': self.expand(second)}
return result
def LE(self,first,second=None):
if second is None:
raise RuntimeError, "Cannot compare %s <= None" % first
print "in LE"
result = {}
result[self.expand(first)] = {'$lte': self.expand(second)}
return result
def GT(self,first,second):
print "in GT"
#import pymongo.objectid
result = {}
#if expanded_first == '_id':
#if expanded_second != 0 and not isinstance(second,pymongo.objectid.ObjectId):
#raise SyntaxError, 'second argument must be of type bson.objectid.ObjectId'
#elif expanded_second == 0:
#expanded_second = pymongo.objectid.ObjectId('000000000000000000000000')
result[self.expand(first)] = {'$gt': self.expand(second)}
return result
def GE(self,first,second=None):
if second is None:
raise RuntimeError, "Cannot compare %s >= None" % first
print "in GE"
result = {}
result[self.expand(first)] = {'$gte': self.expand(second)}
return result
def ADD(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '%s + %s' % (self.expand(first), self.expand(second, first.type))
def SUB(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '(%s - %s)' % (self.expand(first), self.expand(second, first.type))
def MUL(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '(%s * %s)' % (self.expand(first), self.expand(second, first.type))
def DIV(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '(%s / %s)' % (self.expand(first), self.expand(second, first.type))
def MOD(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type))
def AS(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '%s AS %s' % (self.expand(first), second)
#We could implement an option that simulates a full featured SQL database. But I think the option should be set explicit or implemented as another library.
def ON(self, first, second):
raise NotImplementedError, "This is not possible in NoSQL, but can be simulated with a wrapper."
return '%s ON %s' % (self.expand(first), self.expand(second))
def COMMA(self, first, second):
return '%s, %s' % (self.expand(first), self.expand(second))
def bulk_insert(self, table, items):
return [self.insert(table,item) for item in items]
#TODO This will probably not work:(
def NOT(self, first):
result = {}
result["$not"] = self.expand(first)
return result
def AND(self,first,second):
f = self.expand(first)
s = self.expand(second)
f.update(s)
return f
def OR(self,first,second):
# pymongo expects: .find( {'$or' : [{'name':'1'}, {'name':'2'}] } )
result = {}
f = self.expand(first)
s = self.expand(second)
result['$or'] = [f,s]
return result
def BELONGS(self, first, second):
if isinstance(second, str):
return {self.expand(first) : {"$in" : [ second[:-1]]} }
elif second==[] or second==():
return {1:0}
items = [self.expand(item, first.type) for item in second]
return {self.expand(first) : {"$in" : items} }
#TODO verify full compatibilty with official SQL Like operator
def LIKE(self, first, second):
import re
return {self.expand(first) : {'$regex' : re.escape(self.expand(second, 'string')).replace('%','.*')}}
#TODO verify full compatibilty with official SQL Like operator
def STARTSWITH(self, first, second):
#TODO Solve almost the same problem as with endswith
import re
return {self.expand(first) : {'$regex' : '^' + re.escape(self.expand(second, 'string'))}}
#TODO verify full compatibilty with official SQL Like operator
def ENDSWITH(self, first, second):
#escaping regex operators?
#TODO if searched for a name like zsa_corbitt and the function is endswith('a') then this is also returned. Aldo it end with a t
import re
return {self.expand(first) : {'$regex' : re.escape(self.expand(second, 'string')) + '$'}}
#TODO verify full compatibilty with official oracle contains operator
def CONTAINS(self, first, second):
#There is a technical difference, but mongodb doesn't support that, but the result will be the same
#TODO contains operators need to be transformed to Regex
return {self.expand(first) : {' $regex' : ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
def EQ(self,first,second):
result = {}
#if second is None:
#return '(%s == null)' % self.expand(first)
#return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
result[self.expand(first)] = self.expand(second)
return result
def NE(self, first, second=None):
print "in NE"
result = {}
result[self.expand(first)] = {'$ne': self.expand(second)}
return result
def LT(self,first,second=None):
if second is None:
raise RuntimeError, "Cannot compare %s < None" % first
print "in LT"
result = {}
result[self.expand(first)] = {'$lt': self.expand(second)}
return result
def LE(self,first,second=None):
if second is None:
raise RuntimeError, "Cannot compare %s <= None" % first
print "in LE"
result = {}
result[self.expand(first)] = {'$lte': self.expand(second)}
return result
def GT(self,first,second):
print "in GT"
#import pymongo.objectid
result = {}
#if expanded_first == '_id':
#if expanded_second != 0 and not isinstance(second,pymongo.objectid.ObjectId):
#raise SyntaxError, 'second argument must be of type bson.objectid.ObjectId'
#elif expanded_second == 0:
#expanded_second = pymongo.objectid.ObjectId('000000000000000000000000')
result[self.expand(first)] = {'$gt': self.expand(second)}
return result
def GE(self,first,second=None):
if second is None:
raise RuntimeError, "Cannot compare %s >= None" % first
print "in GE"
result = {}
result[self.expand(first)] = {'$gte': self.expand(second)}
return result
#TODO javascript has math
def ADD(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '%s + %s' % (self.expand(first), self.expand(second, first.type))
#TODO javascript has math
def SUB(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '(%s - %s)' % (self.expand(first), self.expand(second, first.type))
#TODO javascript has math
def MUL(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '(%s * %s)' % (self.expand(first), self.expand(second, first.type))
#TODO javascript has math
def DIV(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '(%s / %s)' % (self.expand(first), self.expand(second, first.type))
#TODO javascript has math
def MOD(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type))
#TODO javascript can do this
def AS(self, first, second):
raise NotImplementedError, "This must yet be replaced with javescript in order to accomplish this. Sorry"
return '%s AS %s' % (self.expand(first), second)
#We could implement an option that simulates a full featured SQL database. But I think the option should be set explicit or implemented as another library.
def ON(self, first, second):
raise NotImplementedError, "This is not possible in NoSQL, but can be simulated with a wrapper."
return '%s ON %s' % (self.expand(first), self.expand(second))
#TODO is this used in mongodb?
def COMMA(self, first, second):
return '%s, %s' % (self.expand(first), self.expand(second))
class IMAPAdapter(NoSQLAdapter):
    """ IMAP server adapter

    This class is intended as an interface with
    email IMAP servers to perform simple queries in the
    web2py DAL query syntax, so email read, search and
    other related IMAP mail services (as those implemented
    by brands like Google(r), and Yahoo!(r))
    can be managed from web2py applications.

    The code uses examples by Yuji Tomita on this post:
    http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
    and is based on docs for Python imaplib, python email
    and email IETF's (i.e. RFC2060 and RFC3501)

    This adapter was tested with a small set of operations with Gmail(r). Other
    services requests could raise command syntax and response data issues.

    It creates its table and field names "statically",
    meaning that the developer should leave the table and field
    definitions to the DAL instance by calling the adapter's
    .define_tables() method. The tables are defined with the
    IMAP server mailbox list information.

    Here is a list of supported fields:

    Field        Type           Description
    ################################################################
    uid          string
    answered     boolean        Flag
    created      date
    content      list:string    A list of text or html parts
    to           string
    cc           string
    bcc          string
    size         integer        the amount of octets of the message*
    deleted      boolean        Flag
    draft        boolean        Flag
    flagged      boolean        Flag
    sender       string
    recent       boolean        Flag
    seen         boolean        Flag
    subject      string
    mime         string         The mime header declaration
    email        string         The complete RFC822 message**
    attachments  list:string    Each non text decoded part as string

    *At the application side it is measured as the length of the RFC822
    message string

    WARNING: As row id's are mapped to email sequence numbers,
    make sure your imap client web2py app does not delete messages
    during select or update actions, to prevent
    updating or deleting different messages.
    Sequence numbers change whenever the mailbox is updated.
    To avoid these sequence number issues, it is recommended the use
    of uid fields in query references (although the update and delete
    in separate actions rule still applies).

    # This is the code recommended to start imap support
    # at the app's model:

    imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
    imapdb.define_tables()

    Here is an (incomplete) list of possible imap commands:

    # Count today's unseen messages
    # smaller than 6000 octets from the
    # inbox mailbox

    q = imapdb.INBOX.seen == False
    q &= imapdb.INBOX.created == datetime.date.today()
    q &= imapdb.INBOX.size < 6000
    unread = imapdb(q).count()

    # Fetch last query messages
    rows = imapdb(q).select()

    # it is also possible to filter query select results with limitby and
    # sequences of mailbox fields

    set.select(<fields sequence>, limitby=(<int>, <int>))

    # Mark last query messages as seen
    messages = [row.uid for row in rows]
    seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)

    # Delete messages in the imap database that have mails from mr. Gumby

    deleted = 0
    for mailbox in imapdb.tables:
        deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()

    # It is also possible to mark messages for deletion instead of erasing them
    # directly with set.update(deleted=True)
    """

    # DAL field types mapped onto the Python types used by this adapter
    types = {
                'string': str,
                'text': str,
                'date': datetime.date,
                'datetime': datetime.datetime,
                'id': long,
                'boolean': bool,
                'integer': int,
                'blob': str,
                'list:string': str,
        }

    dbengine = 'imap'
    # imaplib must already have been imported at module level;
    # otherwise driver stays None
    driver = globals().get('imaplib',None)
    def __init__(self,
                 db,
                 uri,
                 pool_size=0,
                 folder=None,
                 db_codec ='UTF-8',
                 credential_decoder=lambda x:x,
                 driver_args={},
                 adapter_args={}):
        """Parse an imap:// uri and set up a pooled IMAP4 connection.

        db uri format: user@example.com:password@imap.server.com:123
        Port 993 switches the connection to IMAP4 over SSL.
        """
        # TODO: max size adapter argument for preventing large mail transfers
        uri = uri.split("://")[1]
        self.db = db
        self.uri = uri
        self.pool_size=pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.credential_decoder = credential_decoder
        self.driver_args = driver_args
        self.adapter_args = adapter_args
        # message count of the currently selected mailbox (set on select)
        self.mailbox_size = None
        self.charset = sys.getfilesystemencoding()
        # imap class (IMAP4 or IMAP4_SSL), chosen at connect time
        self.imap4 = None
        """ MESSAGE is an identifier for sequence number"""

        # flags recognized by the IMAP protocol
        self.flags = ['\\Deleted', '\\Draft', '\\Flagged',
                      '\\Recent', '\\Seen', '\\Answered']
        # mapping from DAL field names to IMAP search keywords;
        # None means the field cannot be searched server-side
        self.search_fields = {
            'id': 'MESSAGE', 'created': 'DATE',
            'uid': 'UID', 'sender': 'FROM',
            'to': 'TO', 'cc': 'CC',
            'bcc': 'BCC', 'content': 'TEXT',
            'size': 'SIZE', 'deleted': '\\Deleted',
            'draft': '\\Draft', 'flagged': '\\Flagged',
            'recent': '\\Recent', 'seen': '\\Seen',
            'subject': 'SUBJECT', 'answered': '\\Answered',
            'mime': None, 'email': None,
            'attachments': None
            }
        db['_lastsql'] = ''

        m = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?$').match(uri)
        user = m.group('user')
        password = m.group('password')
        host = m.group('host')
        # NOTE(review): the port group is optional in the regex above but
        # int() on a missing port would raise TypeError — confirm that a
        # port is always supplied in practice
        port = int(m.group('port'))
        over_ssl = False
        if port==993:
            over_ssl = True
        driver_args.update(dict(host=host,port=port, password=password, user=user))

        def connect(driver_args=driver_args):
            # it is assumed authentication is always successful
            # TODO: support direct connection and login tests
            if over_ssl:
                self.imap4 = self.driver.IMAP4_SSL
            else:
                self.imap4 = self.driver.IMAP4
            connection = self.imap4(driver_args["host"], driver_args["port"])
            data = connection.login(driver_args["user"], driver_args["password"])
            # static mailbox list; filled lazily by get_mailboxes()
            connection.mailbox_names = None
            # dummy cursor function (imaplib has no cursor concept)
            connection.cursor = lambda : True
            return connection
        self.pool_connection(connect)
        # expose define_tables on the DAL so models can build the
        # mailbox-backed tables
        self.db.define_tables = self.define_tables
    def pool_connection(self, f, cursor=True):
        """
        IMAP4 Pool connection method

        imap connection lacks of self cursor command.
        A custom command should be provided as a replacement
        for connection pooling to prevent uncaught remote session
        closing
        """
        if not self.pool_size:
            # pooling disabled: connect directly
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            while True:
                # the pool dict is shared between threads; guard it
                sql_locker.acquire()
                if not uri in ConnectionPool.pools:
                    ConnectionPool.pools[uri] = []
                if ConnectionPool.pools[uri]:
                    # reuse a pooled connection if one is available
                    self.connection = ConnectionPool.pools[uri].pop()
                    sql_locker.release()
                    self.cursor = cursor and self.connection.cursor()
                    if self.cursor and self.check_active_connection:
                        try:
                            # check if connection is alive or close it
                            # (LIST acts as a cheap keepalive probe)
                            result, data = self.connection.list()
                        except:
                            # Possible connection reset error
                            # TODO: read exception class
                            self.connection = f()
                        break
                else:
                    # pool empty: release the lock and open a new session
                    sql_locker.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        if not hasattr(thread,'instances'):
            thread.instances = []
        # register this adapter on the current thread for later cleanup
        thread.instances.append(self)
def get_last_message(self, tablename):
last_message = None
# request mailbox list to the server
# if needed
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
try:
result = self.connection.select(self.connection.mailbox_names[tablename])
last_message = int(result[1][0])
except (IndexError, ValueError, TypeError, KeyError), e:
logger.debug("Error retrieving the last mailbox sequence number. %s" % str(e))
return last_message
def get_uid_bounds(self, tablename):
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
# fetch first and last messages
# return (first, last) messages uid's
last_message = self.get_last_message(tablename)
result, data = self.connection.uid("search", None, "(ALL)")
uid_list = data[0].strip().split()
if len(uid_list) <= 0:
return None
else:
return (uid_list[0], uid_list[-1])
def convert_date(self, date, add=None):
if add is None:
add = datetime.timedelta()
""" Convert a date object to a string
with d-Mon-Y style for IMAP or the inverse
case
add <timedelta> adds to the date object
"""
months = [None, "Jan","Feb","Mar","Apr","May","Jun",
"Jul", "Aug","Sep","Oct","Nov","Dec"]
if isinstance(date, basestring):
# Prevent unexpected date response format
try:
dayname, datestring = date.split(",")
except (ValueError):
logger.debug("Could not parse date text: %s" % date)
return None
date_list = datestring.strip().split()
year = int(date_list[2])
month = months.index(date_list[1])
day = int(date_list[0])
hms = [int(value) for value in date_list[3].split(":")]
return datetime.datetime(year, month, day,
hms[0], hms[1], hms[2]) + add
elif isinstance(date, (datetime.datetime, datetime.date)):
return (date + add).strftime("%d-%b-%Y")
else:
return None
def encode_text(self, text, charset, errors="replace"):
""" convert text for mail to unicode"""
if text is None:
text = ""
else:
if isinstance(text, str):
if charset is not None:
text = unicode(text, charset, errors)
else:
text = unicode(text, "utf-8", errors)
else:
raise Exception("Unsupported mail text type %s" % type(text))
return text.encode("utf-8")
def get_charset(self, message):
charset = message.get_content_charset()
return charset
def reset_mailboxes(self):
self.connection.mailbox_names = None
self.get_mailboxes()
def get_mailboxes(self):
""" Query the mail database for mailbox names """
mailboxes_list = self.connection.list()
self.connection.mailbox_names = dict()
mailboxes = list()
for item in mailboxes_list[1]:
item = item.strip()
if not "NOSELECT" in item.upper():
sub_items = item.split("\"")
sub_items = [sub_item for sub_item in sub_items if len(sub_item.strip()) > 0]
mailbox = sub_items[len(sub_items) - 1]
# remove unwanted characters and store original names
mailbox_name = mailbox.replace("[", "").replace("]", "").replace("/", "_")
mailboxes.append(mailbox_name)
self.connection.mailbox_names[mailbox_name] = mailbox
# print "Mailboxes query", mailboxes
return mailboxes
def get_query_mailbox(self, query):
nofield = True
tablename = None
attr = query
while nofield:
if hasattr(attr, "first"):
attr = attr.first
if isinstance(attr, Field):
return attr.tablename
elif isinstance(attr, Query):
pass
else:
return None
else:
return None
return tablename
def is_flag(self, flag):
if self.search_fields.get(flag, None) in self.flags:
return True
else:
return False
    def define_tables(self):
        """
        Auto create common IMAP fields

        This function creates fields definitions "statically"
        meaning that custom fields as in other adapters should
        not be supported and definitions handled on a service/mode
        basis (local syntax for Gmail(r), Ymail(r))
        """
        if not isinstance(self.connection.mailbox_names, dict):
            self.get_mailboxes()
        mailboxes = self.connection.mailbox_names.keys()
        # one DAL table per mailbox; most fields are read-only because
        # they mirror server-side message data
        for mailbox_name in mailboxes:
            self.db.define_table("%s" % mailbox_name,
                Field("uid", "string", writable=False),
                Field("answered", "boolean"),
                Field("created", "datetime", writable=False),
                Field("content", "list:string", writable=False),
                Field("to", "string", writable=False),
                Field("cc", "string", writable=False),
                Field("bcc", "string", writable=False),
                Field("size", "integer", writable=False),
                Field("deleted", "boolean"),
                Field("draft", "boolean"),
                Field("flagged", "boolean"),
                Field("sender", "string", writable=False),
                Field("recent", "boolean", writable=False),
                Field("seen", "boolean"),
                Field("subject", "string", writable=False),
                Field("mime", "string", writable=False),
                Field("email", "string", writable=False, readable=False),
                Field("attachments", "list:string", writable=False, readable=False),
                )
def create_table(self, *args, **kwargs):
# not implemented
logger.debug("Create table feature is not implemented for %s" % type(self))
    def _select(self,query,fields,attributes):
        """Search and Fetch records and return web2py
        rows

        Translates *query* into an IMAP SEARCH, fetches each matching
        message (RFC822 body plus FLAGS) and maps it onto the static
        field set defined in define_tables().  Returns
        (tablename, rows_as_lists, colnames) for select() to parse.
        """
        if use_common_filters(query):
            query = self.common_filter(query, [self.get_query_mailbox(query),])
        # move this statement elsewhere (upper-level)
        import email
        import email.header
        decode_header = email.header.decode_header
        # get records from imap server with search + fetch
        # convert results to a dictionary
        tablename = None
        fetch_results = list()
        if isinstance(query, (Expression, Query)):
            tablename = self.get_table(query)
            mailbox = self.connection.mailbox_names.get(tablename, None)
            if isinstance(query, Expression):
                pass
            elif isinstance(query, Query):
                if mailbox is not None:
                    # select with readonly
                    selected = self.connection.select(mailbox, True)
                    # EXISTS count reported by the server for the mailbox
                    self.mailbox_size = int(selected[1][0])
                    search_query = "(%s)" % str(query).strip()
                    # print "Query", query
                    # print "Search query", search_query
                    search_result = self.connection.uid("search", None, search_query)
                    # print "Search result", search_result
                    # print search_result
                    # Normal IMAP response OK is assumed (change this)
                    if search_result[0] == "OK":
                        # For "light" remote server responses just get the first
                        # ten records (change for non-experimental implementation)
                        # However, light responses are not guaranteed with this
                        # approach, just fewer messages.
                        # TODO: change limitby single to 2-tuple argument
                        limitby = attributes.get('limitby', None)
                        messages_set = search_result[1][0].split()
                        # descending order
                        messages_set.reverse()
                        if limitby is not None:
                            # TODO: asc/desc attributes
                            messages_set = messages_set[int(limitby[0]):int(limitby[1])]
                        # Partial fetches are not used since the email
                        # library does not seem to support it (it converts
                        # partial messages to mangled message instances)
                        imap_fields = "(RFC822)"
                        if len(messages_set) > 0:
                            # create fetch results object list
                            # fetch each remote message and store it in memmory
                            # (change to multi-fetch command syntax for faster
                            # transactions)
                            for uid in messages_set:
                                # fetch the RFC822 message body
                                typ, data = self.connection.uid("fetch", uid, imap_fields)
                                if typ == "OK":
                                    fr = {"message": int(data[0][0].split()[0]),
                                          "uid": int(uid),
                                          "email": email.message_from_string(data[0][1]),
                                          "raw_message": data[0][1]
                                          }
                                    fr["multipart"] = fr["email"].is_multipart()
                                    # fetch flags for the message
                                    ftyp, fdata = self.connection.uid("fetch", uid, "(FLAGS)")
                                    if ftyp == "OK":
                                        # print "Raw flags", fdata
                                        fr["flags"] = self.driver.ParseFlags(fdata[0])
                                        # print "Flags", fr["flags"]
                                        fetch_results.append(fr)
                                    else:
                                        # error retrieving the flags for this message
                                        pass
                                else:
                                    # error retrieving the message body
                                    pass
        elif isinstance(query, basestring):
            # not implemented
            pass
        else:
            pass
        imapqry_dict = {}
        imapfields_dict = {}
        if len(fields) == 1 and isinstance(fields[0], SQLALL):
            allfields = True
        elif len(fields) == 0:
            allfields = True
        else:
            allfields = False
        if allfields:
            fieldnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
        else:
            fieldnames = ["%s.%s" % (tablename, field.name) for field in fields]
        for k in fieldnames:
            imapfields_dict[k] = k
        imapqry_list = list()
        imapqry_array = list()
        for fr in fetch_results:
            attachments = []
            content = []
            size = 0
            n = int(fr["message"])
            item_dict = dict()
            message = fr["email"]
            uid = fr["uid"]
            charset = self.get_charset(message)
            flags = fr["flags"]
            raw_message = fr["raw_message"]
            # Return messages data mapping static fields
            # and fetched results. Mapping should be made
            # outside the select function (with auxiliary
            # instance methods)
            # pending: search flags states trough the email message
            # instances for correct output
            if "%s.id" % tablename in fieldnames:
                item_dict["%s.id" % tablename] = n
            if "%s.created" % tablename in fieldnames:
                item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
            if "%s.uid" % tablename in fieldnames:
                item_dict["%s.uid" % tablename] = uid
            if "%s.sender" % tablename in fieldnames:
                # If there is no encoding found in the message header
                # force utf-8 replacing characters (change this to
                # module's defaults). Applies to .sender, .to, .cc and .bcc fields
                #############################################################################
                # TODO: External function to manage encoding and decoding of message strings
                #############################################################################
                item_dict["%s.sender" % tablename] = self.encode_text(message["From"], charset)
            if "%s.to" % tablename in fieldnames:
                item_dict["%s.to" % tablename] = self.encode_text(message["To"], charset)
            if "%s.cc" % tablename in fieldnames:
                if "Cc" in message.keys():
                    # print "cc field found"
                    item_dict["%s.cc" % tablename] = self.encode_text(message["Cc"], charset)
                else:
                    item_dict["%s.cc" % tablename] = ""
            if "%s.bcc" % tablename in fieldnames:
                if "Bcc" in message.keys():
                    # print "bcc field found"
                    item_dict["%s.bcc" % tablename] = self.encode_text(message["Bcc"], charset)
                else:
                    item_dict["%s.bcc" % tablename] = ""
            if "%s.deleted" % tablename in fieldnames:
                item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
            if "%s.draft" % tablename in fieldnames:
                item_dict["%s.draft" % tablename] = "\\Draft" in flags
            if "%s.flagged" % tablename in fieldnames:
                item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
            if "%s.recent" % tablename in fieldnames:
                item_dict["%s.recent" % tablename] = "\\Recent" in flags
            if "%s.seen" % tablename in fieldnames:
                item_dict["%s.seen" % tablename] = "\\Seen" in flags
            if "%s.subject" % tablename in fieldnames:
                subject = message["Subject"]
                decoded_subject = decode_header(subject)
                text = decoded_subject[0][0]
                encoding = decoded_subject[0][1]
                if encoding in (None, ""):
                    encoding = charset
                item_dict["%s.subject" % tablename] = self.encode_text(text, encoding)
            if "%s.answered" % tablename in fieldnames:
                item_dict["%s.answered" % tablename] = "\\Answered" in flags
            if "%s.mime" % tablename in fieldnames:
                item_dict["%s.mime" % tablename] = message.get_content_type()
            # Here goes the whole RFC822 body as an email instance
            # for controller side custom processing
            # The message is stored as a raw string
            # >> email.message_from_string(raw string)
            # returns a Message object for enhanced object processing
            if "%s.email" % tablename in fieldnames:
                item_dict["%s.email" % tablename] = self.encode_text(raw_message, charset)
            # Size measure as suggested in a Velocity Reviews post
            # by Tim Williams: "how to get size of email attachment"
            # Note: len() and server RFC822.SIZE reports doesn't match
            # To retrieve the server size for representation would add a new
            # fetch transaction to the process
            for part in message.walk():
                if "%s.attachments" % tablename in fieldnames:
                    if not "text" in part.get_content_maintype():
                        attachments.append(part.get_payload(decode=True))
                if "%s.content" % tablename in fieldnames:
                    if "text" in part.get_content_maintype():
                        payload = self.encode_text(part.get_payload(decode=True), charset)
                        content.append(payload)
                if "%s.size" % tablename in fieldnames:
                    if part is not None:
                        size += len(str(part))
            item_dict["%s.content" % tablename] = bar_encode(content)
            item_dict["%s.attachments" % tablename] = bar_encode(attachments)
            item_dict["%s.size" % tablename] = size
            imapqry_list.append(item_dict)
        # extra object mapping for the sake of rows object
        # creation (sends an array or lists)
        for item_dict in imapqry_list:
            imapqry_array_item = list()
            for fieldname in fieldnames:
                imapqry_array_item.append(item_dict[fieldname])
            imapqry_array.append(imapqry_array_item)
        return tablename, imapqry_array, fieldnames
def select(self,query,fields,attributes):
tablename, imapqry_array , fieldnames = self._select(query,fields,attributes)
# parse result and return a rows object
colnames = fieldnames
processor = attributes.get('processor',self.parse)
return processor(imapqry_array, fields, colnames)
    def update(self, tablename, query, fields):
        """Set/unset IMAP flags on every message matching *query*.

        Only flag-backed fields can be written; the \\Recent flag is
        explicitly excluded from writes.  Returns the number of messages
        for which the (last) STORE command reported OK.
        """
        # print "_update"
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        mark = []
        unmark = []
        rowcount = 0
        query = str(query)
        if query:
            # split the requested updates into flags to add and to remove
            for item in fields:
                field = item[0]
                name = field.name
                value = item[1]
                if self.is_flag(name):
                    flag = self.search_fields[name]
                    if (value is not None) and (flag != "\\Recent"):
                        if value:
                            mark.append(flag)
                        else:
                            unmark.append(flag)
            # print "Selecting mailbox ..."
            result, data = self.connection.select(self.connection.mailbox_names[tablename])
            # print "Retrieving sequence numbers remotely"
            string_query = "(%s)" % query
            # print "string query", string_query
            result, data = self.connection.search(None, string_query)
            # sequence numbers of the matching messages
            store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
            # print "Storing values..."
            # change marked flags
            for number in store_list:
                result = None
                if len(mark) > 0:
                    # print "Marking flags ..."
                    result, data = self.connection.store(number, "+FLAGS", "(%s)" % " ".join(mark))
                if len(unmark) > 0:
                    # print "Unmarking flags ..."
                    result, data = self.connection.store(number, "-FLAGS", "(%s)" % " ".join(unmark))
                if result == "OK":
                    rowcount += 1
        return rowcount
    def count(self,query,distinct=None):
        """Return how many messages match *query* (``distinct`` is ignored)."""
        counter = 0
        tablename = self.get_query_mailbox(query)
        if query and tablename is not None:
            if use_common_filters(query):
                query = self.common_filter(query, [tablename,])
            # print "Selecting mailbox ..."
            result, data = self.connection.select(self.connection.mailbox_names[tablename])
            # print "Retrieving sequence numbers remotely"
            string_query = "(%s)" % query
            result, data = self.connection.search(None, string_query)
            # count the sequence numbers returned by SEARCH
            store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
            counter = len(store_list)
        return counter
    def delete(self, tablename, query):
        """Flag matching messages \\Deleted and EXPUNGE the mailbox.

        Returns the number of messages successfully flagged; EXPUNGE is
        issued only when at least one flag operation succeeded.
        """
        counter = 0
        if query:
            # print "Selecting mailbox ..."
            if use_common_filters(query):
                query = self.common_filter(query, [tablename,])
            result, data = self.connection.select(self.connection.mailbox_names[tablename])
            # print "Retrieving sequence numbers remotely"
            string_query = "(%s)" % query
            result, data = self.connection.search(None, string_query)
            store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
            for number in store_list:
                result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
                # print "Deleting message", result, data
                if result == "OK":
                    counter += 1
            if counter > 0:
                # print "Ereasing permanently"
                result, data = self.connection.expunge()
        return counter
def BELONGS(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
values = [str(val) for val in second if str(val).isdigit()]
result = "%s" % ",".join(values).strip()
elif name == "UID":
values = [str(val) for val in second if str(val).isdigit()]
result = "UID %s" % ",".join(values).strip()
else:
raise Exception("Operation not supported")
# result = "(%s %s)" % (self.expand(first), self.expand(second))
return result
def CONTAINS(self, first, second):
result = None
name = self.search_fields[first.name]
if name in ("FROM", "TO", "SUBJECT", "TEXT"):
result = "%s \"%s\"" % (name, self.expand(second))
else:
if first.name in ("cc", "bcc"):
result = "%s \"%s\"" % (first.name.upper(), self.expand(second))
elif first.name == "mime":
result = "HEADER Content-Type \"%s\"" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def GT(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
last_message = self.get_last_message(first.tablename)
result = "%d:%d" % (int(self.expand(second)) + 1, last_message)
elif name == "UID":
# GT and LT may not return
# expected sets depending on
# the uid format implemented
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError, e:
logger.debug("Error requesting uid bounds: %s", str(e))
return ""
try:
lower_limit = int(self.expand(second)) + 1
except (ValueError, TypeError), e:
raise Exception("Operation not supported (non integer UID)")
result = "UID %s:%s" % (lower_limit, threshold)
elif name == "DATE":
result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
elif name == "SIZE":
result = "LARGER %s" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def GE(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
last_message = self.get_last_message(first.tablename)
result = "%s:%s" % (self.expand(second), last_message)
elif name == "UID":
# GT and LT may not return
# expected sets depending on
# the uid format implemented
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError, e:
logger.debug("Error requesting uid bounds: %s", str(e))
return ""
lower_limit = self.expand(second)
result = "UID %s:%s" % (lower_limit, threshold)
elif name == "DATE":
result = "SINCE %s" % self.convert_date(second)
else:
raise Exception("Operation not supported")
return result
def LT(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
result = "%s:%s" % (1, int(self.expand(second)) - 1)
elif name == "UID":
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError, e:
logger.debug("Error requesting uid bounds: %s", str(e))
return ""
try:
upper_limit = int(self.expand(second)) - 1
except (ValueError, TypeError), e:
raise Exception("Operation not supported (non integer UID)")
result = "UID %s:%s" % (pedestal, upper_limit)
elif name == "DATE":
result = "BEFORE %s" % self.convert_date(second)
elif name == "SIZE":
result = "SMALLER %s" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def LE(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
result = "%s:%s" % (1, self.expand(second))
elif name == "UID":
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError, e:
logger.debug("Error requesting uid bounds: %s", str(e))
return ""
upper_limit = int(self.expand(second))
result = "UID %s:%s" % (pedestal, upper_limit)
elif name == "DATE":
result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
else:
raise Exception("Operation not supported")
return result
def NE(self, first, second):
result = self.NOT(self.EQ(first, second))
result = result.replace("NOT NOT", "").strip()
return result
def EQ(self,first,second):
name = self.search_fields[first.name]
result = None
if name is not None:
if name == "MESSAGE":
# query by message sequence number
result = "%s" % self.expand(second)
elif name == "UID":
result = "UID %s" % self.expand(second)
elif name == "DATE":
result = "ON %s" % self.convert_date(second)
elif name in self.flags:
if second:
result = "%s" % (name.upper()[1:])
else:
result = "NOT %s" % (name.upper()[1:])
else:
raise Exception("Operation not supported")
else:
raise Exception("Operation not supported")
return result
def AND(self, first, second):
result = "%s %s" % (self.expand(first), self.expand(second))
return result
def OR(self, first, second):
result = "OR %s %s" % (self.expand(first), self.expand(second))
return "%s" % result.replace("OR OR", "OR")
def NOT(self, first):
result = "NOT %s" % self.expand(first)
return result
########################################################################
# end of adapters
########################################################################
# Registry mapping the URI scheme (the part before "://", optionally
# with a ":subtype") to the adapter class; DAL.__init__ looks the
# parsed scheme up here to instantiate the connection adapter.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'postgres:psycopg2': PostgreSQLAdapter,
    'postgres:pg8000': PostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    'imap': IMAPAdapter
}
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.
    makes sure the content of a field is in line with the declared
    fieldtype

    Returns a list of validators (or, for reference types, a single
    IS_IN_DB possibly wrapped in IS_EMPTY_OR) to be used as the
    field's default ``requires``.
    """
    if not have_validators:
        # validators module not importable: attach nothing
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    # ff() renders a referenced record id for display, honouring the
    # referenced table's _format attribute (string template or callable)
    def ff(r,id):
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type == 'string':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'text':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'password':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'double':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type == 'integer':
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif field.db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in field.db.tables:
        # 'reference <table>': field_type[10:] is the referenced table name
        referenced = field.db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(field.db,field)
            if field.tablename == field_type[10:]:
                # self-reference: allow empty so the first record can be inserted
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif field.db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in field.db.tables:
        # 'list:reference <table>': field_type[15:] is the referenced table name
        referenced = field.db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = r._db(r._id.belongs(ids)).select(r._id)
            return (refs and ', '.join(str(f(r,ref.id)) for ref in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(field.db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(field.db,field))
    # sff: two-letter prefixes of types that legitimately accept empty
    # values (integer, double/date/datetime, time, decimal, boolean)
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape literal '|' characters in *item* by doubling them."""
    return '||'.join(str(item).split('|'))
def bar_encode(items):
    """Serialize *items* into the '|a|b|' list format: embedded '|' are
    doubled and items that are blank after str() are dropped."""
    kept = [str(x).replace('|', '||') for x in items if str(x).strip()]
    return '|%s|' % '|'.join(kept)
def bar_decode_integer(value):
    """Decode a '|1|2|' encoded string back into a list of ints."""
    chunks = value.split('|')
    return [int(chunk) for chunk in chunks if chunk.strip()]
def bar_decode_string(value):
    # Inverse of bar_encode for strings: strip the outer '|' delimiters,
    # split with string_unpack (a module-level pattern defined elsewhere
    # in this file -- presumably a regex matching single, non-doubled
    # '|' separators; TODO confirm), drop blanks, and un-double the
    # escaped '|' characters.
    return [x.replace('||', '|') for x in string_unpack.split(value[1:-1]) if x.strip()]
class Row(dict):
    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """
    def __getitem__(self, key):
        key=str(key)
        m = regex_table_field.match(key)
        # computed/extra columns from the query are stored under '_extra'
        if key in self.get('_extra',{}):
            return self._extra[key]
        elif m:
            try:
                # "table.field" style access: row['t.f'] -> row['t']['f']
                return dict.__getitem__(self, m.group(1))[m.group(2)]
            except (KeyError,TypeError):
                # fall back to the bare field name
                key = m.group(2)
        return dict.__getitem__(self, key)
    def __call__(self,key):
        # row('a.b') is shorthand for row['a.b']
        return self.__getitem__(key)
    def __setitem__(self, key, value):
        dict.__setitem__(self, str(key), value)
    def __getattr__(self, key):
        # attribute access delegates to item access; note a missing
        # attribute therefore raises KeyError, not AttributeError
        return self[key]
    def __setattr__(self, key, value):
        self[key] = value
    def __repr__(self):
        return '<Row ' + dict.__repr__(self) + '>'
    def __int__(self):
        # a Row with an 'id' column converts to that id
        return dict.__getitem__(self,'id')
    def __eq__(self,other):
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False
    def __ne__(self,other):
        return not (self == other)
    def __copy__(self):
        return Row(dict(self))
    def as_dict(self,datetime_to_str=False):
        """Return a plain-dict copy: nested Rows recursed, References
        converted to int, Decimals to float, datetimes optionally
        stringified, and non-serializable values dropped."""
        SERIALIZABLE_TYPES = (str,unicode,int,long,float,bool,list)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=int(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,SERIALIZABLE_TYPES):
                del d[k]
        return d
def Row_unpickler(data):
    """Recreate a Row from the pickled dict payload made by Row_pickler."""
    return Row(cPickle.loads(data))
def Row_pickler(data):
    """Pickle a Row via its plain-dict form so unpickling needs no DAL state."""
    return Row_unpickler, (cPickle.dumps(data.as_dict(datetime_to_str=False)),)
# register the custom reduction so pickled Rows round-trip as Rows
copy_reg.pickle(Row, Row_pickler, Row_unpickler)
################################################################################
# Everything below should be independent on the specifics of the
# database and should for RDBMs and some NoSQL databases
################################################################################
class SQLCallableList(list):
    """A list that, when called, returns a shallow copy of itself
    (used for db.tables so both ``db.tables`` and ``db.tables()`` work)."""
    def __call__(self):
        return copy.copy(self)
def smart_query(fields,text):
    """Build a DAL Query from a free-text expression such as
    "person.name starts with 'J' and person.age greater than 18".

    ``fields`` is a Field, a Table, or a list of them; their bare and
    table-qualified names become the searchable vocabulary.  Quoted
    strings are protected as constants, English operator phrases are
    normalized to symbols, and the resulting token stream is folded
    into a single Query.  Raises RuntimeError on unknown fields,
    malformed syntax or unsupported operations.

    Fix: the phrase " equals " previously mapped to '!=' (inequality);
    it now maps to '=' like the sibling " equal "/" equal to " phrases.
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both "name" and "table.name" (lowercased) to the Field
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # protect quoted constants from the lowercasing/replacements below
    re_constants = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
    constants = {}
    i = 0
    while True:
        m = re_constants.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize operator spellings to symbols; two-char symbols split by
    # the single-char replacements are re-joined by the regex below
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),  # FIX: was '!=' by mistake
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' is ','=')]:
        if a[0]==' ':
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # fold the token stream (field op value [and|or ...]) into a Query
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # restore a protected quoted constant
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
            # NOTE(review): '=' is coerced to 'like' here, which makes the
            # '=' branch below unreachable; preserved as-is -- confirm
            # whether exact equality was intended for non-text fields
            if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif field.type in ('text','string'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            field = op = neg = logic = None
    return query
class DAL(dict):
"""
an instance of this class represents a database connection
Example::
db = DAL('sqlite://test.db')
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
    @staticmethod
    def set_folder(folder):
        """Set the working folder, delegating to BaseAdapter so all
        adapters share it.

        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
        """
        BaseAdapter.set_folder(folder)
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
instances = enumerate(instances)
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError, \
'distributed transaction not suported by %s' % db._dbname
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError, \
'distributed transaction not suported by %s' % db._dbanme
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError, 'failure to commit distributed transaction'
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
    def __init__(self, uri='sqlite://dummy.db',
                 pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_all=False,
                 decode_credentials=False, driver_args=None,
                 adapter_args=None, attempts=5, auto_import=False):
        """
        Creates a new Database Abstraction Layer instance.
        Keyword arguments:
        :uri: string that contains information for connecting to a database.
               (default: 'sqlite://dummy.db')
        :pool_size: How many open connections to make to the database object.
        :folder: <please update me>
        :db_codec: string encoding of the database (default: 'UTF-8')
        :check_reserved: list of adapters to check tablenames and column names
                         against sql reserved keywords. (Default None)
        * 'common' List of sql keywords that are common to all database types
                such as "SELECT, INSERT". (recommended)
        * 'all' Checks against all known SQL keywords. (not recommended)
                <adaptername> Checks against the specific adapters list of keywords
                (recommended)
        * '<adaptername>_nonreserved' Checks against the specific adapters
                list of nonreserved keywords. (if available)
        :migrate (defaults to True) sets default migrate behavior for all tables
        :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
        :migrate_enabled (defaults to True). If set to False disables ALL migrations
        :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables
        :attempts (defaults to 5). Number of times to attempt connecting
        """
        # optionally URL-decode credentials embedded in the URI
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: urllib.unquote(cred)
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []
        self._pending_references = {}
        self._request_tenant = 'request_tenant'
        self._common_fields = []
        self._referee_name = '%(table)s'
        # guard against bogus attempts values (non-numeric or negative)
        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # a list/tuple of URIs acts as a failover chain
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        # scheme before '://' selects the adapter class
                        self._dbname = regex_dbname.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        args = (self,uri,pool_size,folder,
                                db_codec, credential_decoder,
                                driver_args or {}, adapter_args or {})
                        self._adapter = ADAPTERS[self._dbname](*args)
                        connected = True
                        break
                    except SyntaxError:
                        # unsupported scheme: do not retry
                        raise
                    except Exception, error:
                        tb = traceback.format_exc()
                        sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
                if connected:
                    break
                else:
                    # wait before the next round of attempts
                    time.sleep(1)
            if not connected:
                raise RuntimeError, "Failure to connect, tried %d times:\n%s" % (attempts, tb)
        else:
            # no URI: dummy base adapter, no real connection, migrations off
            args = (self,'None',0,folder,db_codec)
            self._adapter = BaseAdapter(*args)
            migrate = fake_migrate = False
        adapter = self._adapter
        # hash of the URI namespaces the on-disk *.table migration files
        self._uri_hash = hashlib.md5(adapter.uri).hexdigest()
        self._tables = SQLCallableList()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if auto_import:
            self.import_table_definitions(adapter.folder)
    @property
    def tables(self):
        """Read-only accessor for the list of defined table names."""
        return self._tables
    def import_table_definitions(self,path,migrate=False,fake_migrate=False):
        """Recreate table definitions from the pickled <uri_hash>_*.table
        files found in *path* (written by previous migrations)."""
        pattern = os.path.join(path,self._uri_hash+'_*.table')
        for filename in glob.glob(pattern):
            tfile = self._adapter.file_open(filename, 'r')
            try:
                sql_fields = cPickle.load(tfile)
                # the table name sits between the uri-hash prefix and '.table'
                name = filename[len(pattern)-7:-6]
                # pair each field with its 'sortable' rank to restore order
                mf = [(value['sortable'],Field(key,type=value['type'])) \
                         for key, value in sql_fields.items()]
                mf.sort(lambda a,b: cmp(a[0],b[0]))
                self.define_table(name,*[item[1] for item in mf],
                                  **dict(migrate=migrate,fake_migrate=fake_migrate))
            finally:
                self._adapter.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates ``name`` against SQL keywords
Uses self.check_reserve which is a list of
operators to use.
self.check_reserved
['common', 'postgres', 'mysql']
self.check_reserved
['all']
"""
for backend in self.check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper())
def __contains__(self, tablename):
if self.has_key(tablename):
return True
else:
return False
def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True):
    """
    EXAMPLE:

db.define_table('person',Field('name'),Field('info'))
db.define_table('pet',Field('owner',db.person),Field('name'),Field('info'))

@request.restful()
def index():
    def GET(*args,**vars):
        patterns = [
            "/friends[person]",
            "/{friend.name.startswith}",
            "/{friend.name}/:field",
            "/{friend.name}/pets[pet.owner]",
            "/{friend.name}/pet[pet.owner]/{pet.name}",
            "/{friend.name}/pet[pet.owner]/{pet.name}/:field"
            ]
        parser = db.parse_as_rest(patterns,args,vars)
        if parser.status == 200:
            return dict(content=parser.response)
        else:
            raise HTTP(parser.status,parser.error)
    def POST(table_name,**vars):
        if table_name == 'person':
            return db.person.validate_and_insert(**vars)
        elif table_name == 'pet':
            return db.pet.validate_and_insert(**vars)
        else:
            raise HTTP(400)
    return locals()
    """
    db = self
    # re1 matches "{table.field[.op][.not]}" filter tokens;
    # re2 matches "name[table.field]" join tokens
    re1 = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
    re2 = re.compile('^.+\[.+\]$')

    def auto_table(table, base='', depth=0):
        # Generate a pattern list for every readable field of ``table``,
        # recursing into referencing tables up to ``depth`` levels.
        patterns = []
        for field in db[table].fields:
            if base:
                tag = '%s/%s' % (base, field.replace('_', '-'))
            else:
                tag = '/%s/%s' % (table.replace('_', '-'), field.replace('_', '-'))
            f = db[table][field]
            if not f.readable:
                continue
            if f.type == 'id' or 'slug' in field or f.type.startswith('reference'):
                tag += '/{%s.%s}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
            elif f.type.startswith('boolean'):
                tag += '/{%s.%s}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
            elif f.type.startswith('double') or f.type.startswith('integer'):
                # numeric fields are exposed as half-open ranges
                tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table, field, table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
            elif f.type.startswith('list:'):
                tag += '/{%s.%s.contains}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
            elif f.type in ('date', 'datetime'):
                tag += '/{%s.%s.year}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
                tag += '/{%s.%s.month}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
                tag += '/{%s.%s.day}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
            if f.type in ('datetime', 'time'):
                tag += '/{%s.%s.hour}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
                tag += '/{%s.%s.minute}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
                tag += '/{%s.%s.second}' % (table, field)
                patterns.append(tag)
                patterns.append(tag + '/:field')
        if depth > 0:
            for rtable, rfield in db[table]._referenced_by:
                tag += '/%s[%s.%s]' % (rtable, rtable, rfield)
                patterns.append(tag)
                patterns += auto_table(rtable, base=tag, depth=depth - 1)
        return patterns

    if patterns == 'auto':
        patterns = []
        for table in db.tables:
            if not table.startswith('auth_'):
                patterns.append('/%s[%s]' % (table, table))
                patterns += auto_table(table, base='', depth=1)
    else:
        # expand any trailing ":auto[table]" token in user-supplied patterns
        i = 0
        while i < len(patterns):
            pattern = patterns[i]
            tokens = pattern.split('/')
            if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                new_patterns = auto_table(
                    tokens[-1][tokens[-1].find('[') + 1:-1],
                    '/'.join(tokens[:-1]))
                patterns = patterns[:i] + new_patterns + patterns[i + 1:]
                i += len(new_patterns)
            else:
                i += 1
    if '/'.join(args) == 'patterns':
        return Row({'status': 200, 'pattern': 'list',
                    'error': None, 'response': patterns})
    for pattern in patterns:
        otable = table = None
        if not isinstance(queries, dict):
            dbset = db(queries)
        i = 0
        tags = pattern[1:].split('/')
        if len(tags) != len(args):
            continue
        for tag in tags:
            if re1.match(tag):
                # a "{table.field.op}" filter token
                tokens = tag[1:-1].split('.')
                table, field = tokens[0], tokens[1]
                if not otable or table == otable:
                    if len(tokens) == 2 or tokens[2] == 'eq':
                        query = db[table][field] == args[i]
                    elif tokens[2] == 'ne':
                        query = db[table][field] != args[i]
                    elif tokens[2] == 'lt':
                        query = db[table][field] < args[i]
                    elif tokens[2] == 'gt':
                        query = db[table][field] > args[i]
                    elif tokens[2] == 'ge':
                        query = db[table][field] >= args[i]
                    elif tokens[2] == 'le':
                        query = db[table][field] <= args[i]
                    elif tokens[2] == 'year':
                        query = db[table][field].year() == args[i]
                    elif tokens[2] == 'month':
                        query = db[table][field].month() == args[i]
                    elif tokens[2] == 'day':
                        query = db[table][field].day() == args[i]
                    elif tokens[2] == 'hour':
                        query = db[table][field].hour() == args[i]
                    elif tokens[2] == 'minute':
                        query = db[table][field].minutes() == args[i]
                    elif tokens[2] == 'second':
                        query = db[table][field].seconds() == args[i]
                    elif tokens[2] == 'startswith':
                        query = db[table][field].startswith(args[i])
                    elif tokens[2] == 'contains':
                        query = db[table][field].contains(args[i])
                    else:
                        raise RuntimeError("invalid pattern: %s" % pattern)
                    if len(tokens) == 4 and tokens[3] == 'not':
                        query = ~query
                    elif len(tokens) >= 4:
                        raise RuntimeError("invalid pattern: %s" % pattern)
                    if not otable and isinstance(queries, dict):
                        dbset = db(queries[table])
                    dbset = dbset(query)
                else:
                    raise RuntimeError("missing relation in pattern: %s" % pattern)
            elif re2.match(tag) and args[i] == tag[:tag.find('[')]:
                # a "name[table.field]" join token
                ref = tag[tag.find('[') + 1:-1]
                if '.' in ref and otable:
                    table, field = ref.split('.')
                    if nested_select:
                        try:
                            dbset = db(db[table][field].belongs(dbset._select(db[otable]._id)))
                        except ValueError:
                            return Row({'status': 400, 'pattern': pattern,
                                        'error': 'invalid path', 'response': None})
                    else:
                        items = [item.id for item in dbset.select(db[otable]._id)]
                        dbset = db(db[table][field].belongs(items))
                else:
                    table = ref
                    if not otable and isinstance(queries, dict):
                        dbset = db(queries[table])
                    dbset = dbset(db[table])
            elif tag == ':field' and table:
                # single-field lookup: return the value of args[i]
                field = args[i]
                if not field in db[table]:
                    break
                try:
                    item = dbset.select(db[table][field], limitby=(0, 1)).first()
                except ValueError:
                    return Row({'status': 400, 'pattern': pattern,
                                'error': 'invalid path', 'response': None})
                if not item:
                    return Row({'status': 404, 'pattern': pattern,
                                'error': 'record not found', 'response': None})
                else:
                    return Row({'status': 200, 'response': item[field],
                                'pattern': pattern})
            elif tag != args[i]:
                break
            otable = table
            i += 1
            if i == len(tags) and table:
                # full pattern matched: run the final select
                ofields = vars.get('order', db[table]._id.name).split('|')
                try:
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except KeyError:
                    return Row({'status': 400, 'error': 'invalid orderby', 'response': None})
                fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = int(vars.get('offset', None) or 0)
                    limits = (offset, int(vars.get('limit', None) or 1000) + offset)
                except ValueError:
                    # BUGFIX: the error Row was built but never returned,
                    # leaving ``limits`` unbound below
                    return Row({'status': 400, 'error': 'invalid limits', 'response': None})
                if count > limits[1] - limits[0]:
                    # BUGFIX: same missing ``return`` as above
                    return Row({'status': 400, 'error': 'too many records', 'response': None})
                try:
                    response = dbset.select(limitby=limits, orderby=orderby, *fields)
                except ValueError:
                    return Row({'status': 400, 'pattern': pattern,
                                'error': 'invalid path', 'response': None})
                return Row({'status': 200, 'response': response, 'pattern': pattern})
    return Row({'status': 400, 'error': 'no matching pattern', 'response': None})
def define_table(self, tablename, *fields, **args):
    """Define (or load) a table on this DAL instance and return it.

    Accepted keyword arguments: migrate, primarykey, fake_migrate,
    format, singular, plural, trigger_name, sequence_name,
    common_filter, polymodel, table_class.

    :raises SyntaxError: on unknown keyword arguments, invalid or
        duplicate table names, or reserved SQL keywords.
    """
    for key in args:
        if key not in [
                'migrate',
                'primarykey',
                'fake_migrate',
                'format',
                'singular',
                'plural',
                'trigger_name',
                'sequence_name',
                'common_filter',
                'polymodel',
                'table_class']:
            raise SyntaxError('invalid table "%s" attribute: %s'
                              % (tablename, key))
    if not isinstance(tablename, str):
        raise SyntaxError("missing table name")
    tablename = cleanup(tablename)

    migrate = self._migrate_enabled and args.get('migrate', self._migrate)
    fake_migrate = self._fake_migrate_all or args.get('fake_migrate',
                                                      self._fake_migrate)
    table_class = args.get('table_class', Table)
    format = args.get('format', None)
    trigger_name = args.get('trigger_name', None)
    sequence_name = args.get('sequence_name', None)
    primarykey = args.get('primarykey', None)
    polymodel = args.get('polymodel', None)
    singular = args.get('singular', tablename.replace('_', ' ').capitalize())
    plural = args.get('plural', pluralize(singular.lower()).capitalize())

    lowertablename = tablename.lower()
    if tablename.startswith('_') or hasattr(self, lowertablename) or \
            regex_python_keywords.match(tablename):
        raise SyntaxError('invalid table name: %s' % tablename)
    elif lowertablename in self.tables:
        raise SyntaxError('table already defined: %s' % tablename)
    elif self.check_reserved:
        self.check_reserved_keyword(tablename)

    if self._common_fields:
        fields = [f for f in fields] + [f for f in self._common_fields]
    common_filter = args.get('common_filter', None)

    t = self[tablename] = table_class(self, tablename, *fields,
                                      **dict(primarykey=primarykey,
                                             trigger_name=trigger_name,
                                             sequence_name=sequence_name,
                                             common_filter=common_filter))
    # db magic: DAL(None) is a model-only database, skip backend work
    if self._uri in (None, 'None'):
        return t

    t._create_references()

    if migrate or self._adapter.dbengine == 'google:datastore':
        try:
            # serialize migrations across threads
            sql_locker.acquire()
            self._adapter.create_table(t, migrate=migrate,
                                       fake_migrate=fake_migrate,
                                       polymodel=polymodel)
        finally:
            sql_locker.release()
    else:
        t._dbt = None

    self.tables.append(tablename)
    t._format = format
    t._singular = singular
    t._plural = plural
    t._actual = True
    return t
def __iter__(self):
    """Yield each defined Table object, in definition order."""
    for tablename in self.tables:
        yield self[tablename]

def __getitem__(self, key):
    """Dict access by (stringified) table name."""
    return dict.__getitem__(self, str(key))

def __setitem__(self, key, value):
    """Store ``value`` under the stringified key."""
    dict.__setitem__(self, str(key), value)

def __getattr__(self, key):
    """Attribute access falls back to item access: db.tablename."""
    return self[key]
def __setattr__(self, key, value):
    """Store non-underscore attributes as items; refuse to silently
    redefine an existing table/key.

    :raises SyntaxError: if ``key`` already exists on this DAL.
    """
    if key[:1] != '_' and key in self:
        raise SyntaxError('Object %s exists and cannot be redefined' % key)
    self[key] = value
def __repr__(self):
    """Debug representation showing the underlying table dict."""
    return '<DAL %s>' % dict.__repr__(self)
def smart_query(self, fields, text):
    """Build a Set from a free-form query string over ``fields``
    (delegates parsing to the module-level ``smart_query`` helper)."""
    parsed = smart_query(fields, text)
    return Set(self, parsed)
def __call__(self, query=None, ignore_common_filters=None):
    """Return a Set for ``query``; db(table) selects all records,
    db(field) selects rows where the field is not NULL."""
    if isinstance(query,Table):
        query = query._id>0
    elif isinstance(query,Field):
        # NOTE: '!=' is deliberate -- Field.__ne__ builds a Query
        # (field IS NOT NULL); 'is not None' would break this overload
        query = query!=None
    return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
    """Commit the current transaction on the underlying adapter."""
    adapter = self._adapter
    adapter.commit()
def rollback(self):
    """Roll back the current transaction on the underlying adapter."""
    adapter = self._adapter
    adapter.rollback()
def executesql(self, query, placeholders=None, as_dict=False):
    """
    Execute a raw SQL statement on the adapter.

    ``placeholders`` is optional and will always be None when using DAL.
    If using raw SQL with placeholders, it may be a sequence of values
    to be substituted in, or (if supported by the DB driver) a dictionary
    with keys matching named placeholders in your SQL.

    If ``as_dict`` is True the results cursor returned by the DB driver
    will be converted to a sequence of dictionaries keyed with the db
    field names (taken from ``cursor.description``, part of the Python
    DB-API 2.0 spec). Results returned with as_dict=True are the same as
    those returned when applying .to_list() to a DAL query:
    [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}]
    """
    if placeholders:
        self._adapter.execute(query, placeholders)
    else:
        self._adapter.execute(query)
    if as_dict:
        if not hasattr(self._adapter.cursor, 'description'):
            raise RuntimeError("database does not support executesql(...,as_dict=True)")
        # cursor.description is a sequence of 7-item sequences, one per
        # column; the first item is always the column name (DB-API 2.0)
        columns = self._adapter.cursor.description
        fields = [f[0] for f in columns]
        data = self._adapter.cursor.fetchall()
        # row['field_name'] is easier to work with than row[0]
        return [dict(zip(fields, row)) for row in data]
    # see if any results returned from database; statements with no
    # result set (DDL, updates) may raise here - best effort by design
    try:
        return self._adapter.cursor.fetchall()
    except Exception:
        return None
def _update_referenced_by(self, other):
    """Remove references to table ``other`` from every table's
    _referenced_by back-link list (in place)."""
    for tablename in self.tables:
        backlinks = self[tablename]._referenced_by
        backlinks[:] = [entry for entry in backlinks if entry[0] != other]
def export_to_csv_file(self, ofile, *args, **kwargs):
    """Dump every table to ``ofile`` as sections of CSV, each headed by
    'TABLE name' and terminated by a blank line; the file ends with 'END'.

    Rows are fetched in batches of ``max_fetch_rows`` (default 500);
    ``write_colnames`` controls the header row of the first batch only.
    """
    # BUGFIX: the kwargs key used to be 'max_fetch_rows,' (trailing
    # comma typo), so the option was silently ignored
    step = int(kwargs.get('max_fetch_rows', 500))
    write_colnames = kwargs['write_colnames'] = \
        kwargs.get("write_colnames", True)
    for table in self.tables:
        ofile.write('TABLE %s\r\n' % table)
        query = self[table]._id > 0
        nrows = self(query).count()
        kwargs['write_colnames'] = write_colnames
        for k in range(0, nrows, step):
            self(query).select(limitby=(k, k + step)).export_to_csv_file(
                ofile, *args, **kwargs)
            # only the first batch of a table carries the header row
            kwargs['write_colnames'] = False
        ofile.write('\r\n\r\n')
    ofile.write('END')
def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                         unique='uuid', *args, **kwargs):
    """Restore tables from a multi-table CSV dump (the format produced
    by ``export_to_csv_file``): 'TABLE name' headers delimit sections,
    'END' terminates the file.

    :raises SyntaxError: when a section header is malformed or names an
        undefined table.
    """
    if id_map is None:
        id_map = {}
    for line in ifile:
        line = line.strip()
        if not line:
            continue
        elif line == 'END':
            return
        elif not line.startswith('TABLE ') or not line[6:] in self.tables:
            raise SyntaxError('invalid file format')
        else:
            tablename = line[6:]
            # delegate this section to the table-level importer, which
            # consumes rows from ``ifile`` until the next blank line
            self[tablename].import_from_csv_file(ifile, id_map, null,
                                                 unique, *args, **kwargs)
class SQLALL(object):
    """
    Renders as the comma-separated list of all of a table's field names
    (each prefixed by the table name and '.').

    Normally only instantiated internally (gluon.sql).
    """

    def __init__(self, table):
        self.table = table

    def __str__(self):
        return ', '.join(str(field) for field in self.table)
class Reference(int):
    """An int subclass holding a record id that lazily fetches and
    caches the referenced record (used by recursive selects)."""

    def __allocate(self):
        # fetch and cache the referenced record on first access
        if not self._record:
            self._record = self._table[int(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"
                % (self._table, int(self)))

    def __getattr__(self, key):
        if key == 'id':
            # the id is the integer value itself; no fetch needed
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def get(self, key):
        """Dict-style access to the referenced record's fields."""
        return self.__getattr__(key)

    def __setattr__(self, key, value):
        if key.startswith('_'):
            # private attributes (_record, _table) bypass the record
            int.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self, key, value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    """Inverse of Reference_pickler: rebuild the integer id from its
    marshalled byte representation."""
    return marshal.loads(data)
def Reference_pickler(data):
    """Pickle helper: reduce a Reference to its integer id (marshalled),
    paired with Reference_unpickler for reconstruction."""
    try:
        payload = marshal.dumps(int(data))
    except AttributeError:
        # platforms without marshal.dumps: hand-build a marshal 'int'
        # record ('i' tag + little-endian int32)
        payload = 'i%s' % struct.pack('<i', int(data))
    return (Reference_unpickler, (payload,))

copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler)
class Table(dict):
    """
    an instance of this class represents a database table

    Example::

        db = DAL(...)
        db.define_table('users', Field('name'))
        db.users.insert(name='me') # print db.users._insert(...) to see SQL
        db.users.drop()
    """
def __init__(self, db, tablename, *fields, **args):
    """
    Initializes the table and performs checking on the provided fields.

    Each table will have automatically an 'id'.

    If a field is of type Table, the fields (excluding 'id') from that
    table will be used instead.

    :raises SyntaxError: when a supplied field is of incorrect type,
        field names are reserved/duplicated, or primarykey is invalid.
    """
    self._actual = False  # set to True by define_table()
    self._tablename = tablename
    self._sequence_name = args.get('sequence_name', None) or \
        db and db._adapter.sequence_name(tablename)
    self._trigger_name = args.get('trigger_name', None) or \
        db and db._adapter.trigger_name(tablename)
    self._common_filter = args.get('common_filter', None)
    # insert/update/delete callback hooks
    self._before_insert = []
    self._before_update = [lambda self, fs: self.delete_uploaded_files(fs)]
    self._before_delete = [lambda self: self.delete_uploaded_files()]
    self._after_insert = []
    self._after_update = []
    self._after_delete = []
    primarykey = args.get('primarykey', None)
    fieldnames, newfields = set(), []
    if primarykey:
        if not isinstance(primarykey, list):
            raise SyntaxError(
                "primarykey must be a list of fields from table '%s'"
                % tablename)
        self._primarykey = primarykey
    elif not [f for f in fields if isinstance(f, Field) and f.type == 'id']:
        # no explicit 'id' field supplied: create the implicit auto-id
        field = Field('id', 'id')
        newfields.append(field)
        fieldnames.add('id')
        self._id = field
    for field in fields:
        if not isinstance(field, (Field, Table)):
            raise SyntaxError(
                'define_table argument is not a Field or Table: %s' % field)
        elif isinstance(field, Field) and not field.name in fieldnames:
            if hasattr(field, '_db'):
                # field already bound to another table: work on a copy
                field = copy.copy(field)
            newfields.append(field)
            fieldnames.add(field.name)
            if field.type == 'id':
                self._id = field
        elif isinstance(field, Table):
            # inherit fields (except 'id') from another table definition
            table = field
            for field in table:
                if not field.name in fieldnames and not field.type == 'id':
                    field = copy.copy(field)
                    # correct self references
                    if not table._actual and field.type == 'reference ' + table._tablename:
                        field.type = 'reference ' + self._tablename
                    newfields.append(field)
                    fieldnames.add(field.name)
        else:
            # let's ignore new fields with duplicated names!!!
            pass
    fields = newfields
    self._db = db
    self._fields = SQLCallableList()
    self.virtualfields = []
    fields = list(fields)

    if db and self._db._adapter.uploads_in_blob == True:
        # adapters that keep uploads in the db get a companion blob field
        for field in fields:
            if isinstance(field, Field) and field.type == 'upload' \
                    and field.uploadfield is True:
                tmp = field.uploadfield = '%s_blob' % field.name
                fields.append(self._db.Field(tmp, 'blob', default=''))

    lower_fieldnames = set()
    reserved = dir(Table) + ['fields']
    for field in fields:
        if db and db.check_reserved:
            db.check_reserved_keyword(field.name)
        elif field.name in reserved:
            raise SyntaxError("field name %s not allowed" % field.name)
        if field.name.lower() in lower_fieldnames:
            raise SyntaxError("duplicate field %s in table %s"
                              % (field.name, tablename))
        else:
            lower_fieldnames.add(field.name.lower())
        self.fields.append(field.name)
        self[field.name] = field
        if field.type == 'id':
            self['id'] = field
        # bind the field to this table/db
        field.tablename = field._tablename = tablename
        field.table = field._table = self
        field.db = field._db = self._db
        if self._db and not field.type in ('text', 'blob') and \
                self._db._adapter.maxcharlength < field.length:
            field.length = self._db._adapter.maxcharlength
        if field.requires is DEFAULT:
            field.requires = sqlhtml_validators(field)
    self.ALL = SQLALL(self)

    if hasattr(self, '_primarykey'):
        for k in self._primarykey:
            if k not in self.fields:
                # BUGFIX: message previously had a broken closing quote
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'"
                    % tablename)
            else:
                # primary-key members are implicitly NOT NULL
                self[k].notnull = True
@property
def fields(self):
    """Ordered SQLCallableList of this table's field names."""
    return self._fields
def update(self, *args, **kwargs):
    """Disabled dict.update: tables are not updated this way.

    :raises RuntimeError: always.
    """
    raise RuntimeError("Syntax Not Supported")
def _validate(self, **vars):
    """Run each named field's validators over the given values; return
    a Row mapping field name -> first validation error (empty if clean)."""
    errors = Row()
    for fieldname, raw in vars.items():
        cleaned, failure = self[fieldname].validate(raw)
        if failure:
            errors[fieldname] = failure
    return errors
def _create_references(self):
    """Resolve this table's 'reference ...' field types into
    ``_referenced_by`` back-links on the target tables, parking
    references to not-yet-defined tables in db._pending_references.

    :raises SyntaxError: on empty references, keyed-table misuse, or
        unknown referenced fields.
    """
    pr = self._db._pending_references
    self._referenced_by = []
    for fieldname in self.fields:
        field = self[fieldname]
        if isinstance(field.type, str) and field.type[:10] == 'reference ':
            ref = field.type[10:].strip()
            if not ref.split():
                raise SyntaxError('Table: reference to nothing: %s' % ref)
            refs = ref.split('.')
            rtablename = refs[0]
            if not rtablename in self._db:
                # target not defined yet: defer until it is
                pr[rtablename] = pr.get(rtablename, []) + [field]
                continue
            rtable = self._db[rtablename]
            if len(refs) == 2:
                # 'reference table.field' form, keyed tables only
                rfieldname = refs[1]
                if not hasattr(rtable, '_primarykey'):
                    raise SyntaxError(
                        'keyed tables can only reference other keyed tables (for now)')
                if rfieldname not in rtable.fields:
                    raise SyntaxError(
                        "invalid field '%s' for referenced table '%s' in table '%s'"
                        % (rfieldname, rtablename, self._tablename))
            rtable._referenced_by.append((self._tablename, field.name))
    # adopt references to this table that were parked earlier
    for referee in pr.get(self._tablename, []):
        self._referenced_by.append((referee._tablename, referee.name))
def _filter_fields(self, record, id=False):
    """Return the subset of ``record`` whose keys are fields of this
    table; 'id'-type fields are dropped unless ``id`` is True."""
    kept = {}
    for name, value in record.items():
        if name in self.fields and (id or self[name].type != 'id'):
            kept[name] = value
    return kept
def _build_query(self, key):
    """For keyed tables only: AND together equality queries for the
    primary-key dict ``key`` and return the combined Query.

    :raises SyntaxError: if ``key`` names a non-primary-key field.
    """
    query = None
    for k, v in key.iteritems():
        if k in self._primarykey:
            if query:
                query = query & (self[k] == v)
            else:
                query = (self[k] == v)
        else:
            raise SyntaxError(
                'Field %s is not part of the primary key of %s'
                % (k, self._tablename))
    return query
def __getitem__(self, key):
    """Record/field access: a dict key does a keyed-table lookup, a
    numeric key fetches the record by id, a string key returns the
    Field (or attribute) of that name."""
    if not key:
        return None
    if isinstance(key, dict):
        # keyed table: lookup by primary-key dict
        rows = self._db(self._build_query(key)).select()
        if rows:
            return rows[0]
        return None
    if str(key).isdigit():
        return self._db(self._id == key).select(limitby=(0, 1)).first()
    return dict.__getitem__(self, str(key))
def __call__(self, key=DEFAULT, **kwargs):
    """Fetch a single record (or None) by Query, by id, or by matching
    the given field=value pairs; '_for_update' requests a row lock."""
    for_update = kwargs.pop('_for_update', False)
    if key is not DEFAULT:
        if isinstance(key, Query):
            record = self._db(key).select(
                limitby=(0, 1), for_update=for_update).first()
        elif str(key).isdigit():
            record = self._db(self._id == key).select(
                limitby=(0, 1), for_update=for_update).first()
        else:
            record = None
        if record:
            # any extra kwargs must match the fetched record exactly
            for name, expected in kwargs.items():
                if record[name] != expected:
                    return None
        return record
    if kwargs:
        query = reduce(lambda a, b: a & b,
                       [self[k] == v for k, v in kwargs.items()])
        return self._db(query).select(limitby=(0, 1),
                                      for_update=for_update).first()
    return None
def __setitem__(self, key, value):
    """Upsert by primary-key dict (keyed tables), insert/update by
    integer id, or store a Field/attribute under a string key.

    :raises SyntaxError: on incomplete primary keys, missing records,
        or a dict key paired with a non-dict value.
    """
    if isinstance(key, dict) and isinstance(value, dict):
        # keyed table: insert, falling back to update on conflict
        if set(key.keys()) == set(self._primarykey):
            value = self._filter_fields(value)
            kv = {}
            kv.update(value)
            kv.update(key)
            if not self.insert(**kv):
                query = self._build_query(key)
                self._db(query).update(**self._filter_fields(value))
        else:
            raise SyntaxError(
                'key must have all fields from primary key: %s'
                % (self._primarykey))
    elif str(key).isdigit():
        if key == 0:
            # id 0 means "insert a new record"
            self.insert(**self._filter_fields(value))
        elif self._db(self._id == key)\
                .update(**self._filter_fields(value)) is None:
            raise SyntaxError('No such record: %s' % key)
    else:
        if isinstance(key, dict):
            raise SyntaxError(
                'value must be a dictionary: %s' % value)
        dict.__setitem__(self, str(key), value)
def __delitem__(self, key):
    """Delete by primary-key dict (keyed tables) or by integer id.

    :raises SyntaxError: when no matching record exists.
    """
    if isinstance(key, dict):
        query = self._build_query(key)
        if not self._db(query).delete():
            raise SyntaxError('No such record: %s' % key)
    elif not str(key).isdigit() or not self._db(self._id == key).delete():
        raise SyntaxError('No such record: %s' % key)
def __getattr__(self, key):
    """Attribute access falls back to item access: table.fieldname."""
    return self[key]
def __setattr__(self, key, value):
    """Store non-underscore attributes as items; refuse to silently
    redefine an existing field/key.

    :raises SyntaxError: if ``key`` already exists on this table.
    """
    if key[:1] != '_' and key in self:
        raise SyntaxError('Object exists and cannot be redefined: %s' % key)
    self[key] = value
def __iter__(self):
    """Yield this table's Field objects in definition order."""
    for name in self.fields:
        yield self[name]
def __repr__(self):
    """Debug representation including the underlying field dict."""
    return '<Table %s>' % dict.__repr__(self)
def __str__(self):
    """SQL name of the table; aliased tables render as 'orig AS alias'."""
    original = self.get('_ot', None)
    if original:
        return '%s AS %s' % (original, self._tablename)
    return self._tablename
def _drop(self, mode = ''):
    """Return the DROP TABLE SQL without executing it."""
    return self._db._adapter._drop(self, mode)

def drop(self, mode = ''):
    """Drop this table on the backend."""
    return self._db._adapter.drop(self,mode)
def _listify(self, fields, update=False):
    """Turn a {fieldname: value} dict into [(Field, value), ...],
    filling in defaults (insert) or update values for omitted fields
    and evaluating computed fields.

    :raises SyntaxError: for unknown field names, or (on insert) a
        missing required field.
    """
    new_fields = []
    new_fields_names = []
    # explicitly supplied values ('id' is silently ignored)
    for name in fields:
        if not name in self.fields:
            if name != 'id':
                raise SyntaxError(
                    'Field %s does not belong to the table' % name)
        else:
            new_fields.append((self[name], fields[name]))
            new_fields_names.append(name)
    # defaults / update values for omitted fields
    for ofield in self:
        if not ofield.name in new_fields_names:
            if not update and not ofield.default is None:
                new_fields.append((ofield, ofield.default))
            elif update and not ofield.update is None:
                new_fields.append((ofield, ofield.update))
    # computed fields and (on insert) required-field check
    for ofield in self:
        if not ofield.name in new_fields_names and ofield.compute:
            try:
                new_fields.append((ofield, ofield.compute(Row(fields))))
            except KeyError:
                pass
        if not update and ofield.required and not ofield.name in new_fields_names:
            raise SyntaxError('Table: missing required field: %s' % ofield.name)
    return new_fields
def _insert(self, **fields):
    """Return the INSERT SQL for ``fields`` without executing it."""
    return self._db._adapter._insert(self,self._listify(fields))
def insert(self, **fields):
    """Insert a row; _before_insert callbacks may veto (returning 0),
    _after_insert callbacks fire only on success."""
    for callback in self._before_insert:
        if callback(fields):
            return 0
    ret = self._db._adapter.insert(self, self._listify(fields))
    if ret:
        for callback in self._after_insert:
            callback(fields)
    return ret
def validate_and_insert(self, **fields):
    """Validate every supplied value; insert only if all pass.
    Returns a Row with .id (None on failure) and .errors."""
    response = Row()
    response.errors = Row()
    new_fields = copy.copy(fields)
    for name, raw in fields.items():
        cleaned, failure = self[name].validate(raw)
        if failure:
            response.errors[name] = failure
        else:
            new_fields[name] = cleaned
    if response.errors:
        response.id = None
    else:
        response.id = self.insert(**new_fields)
    return response
def update_or_insert(self, _key=DEFAULT, **values):
    """Update the record matching ``_key`` (or ``values`` when no key
    is given), or insert a new one. Returns the new id on insert and
    None on update."""
    if _key is DEFAULT:
        record = self(**values)
    else:
        record = self(_key)
    if record:
        record.update_record(**values)
        return None
    return self.insert(**values)
def bulk_insert(self, items):
    """Insert many rows at once; ``items`` is a list of field dicts.
    Any _before_insert veto aborts the whole batch (returns 0)."""
    listified = [self._listify(item) for item in items]
    for item in listified:
        for callback in self._before_insert:
            if callback(item):
                return 0
    ret = self._db._adapter.bulk_insert(self, listified)
    if ret:
        for callback in self._after_insert:
            for item in listified:
                callback(item)
    return ret
def _truncate(self, mode = None):
    """Return the TRUNCATE SQL without executing it."""
    return self._db._adapter._truncate(self, mode)

def truncate(self, mode = None):
    """Empty this table on the backend (TRUNCATE); the table survives."""
    return self._db._adapter.truncate(self, mode)
def import_from_csv_file(
    self,
    csvfile,
    id_map=None,
    null='<NULL>',
    unique='uuid',
    *args, **kwargs
    ):
    """
    import records from csv file. Column headers must have same names as
    table fields. field 'id' is ignored. If column names read 'table.file'
    the 'table.' prefix is ignored.
    'unique' argument is a field which must be unique
    (typically a uuid field)
    """
    delimiter = kwargs.get('delimiter', ',')
    quotechar = kwargs.get('quotechar', '"')
    quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
    reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting)
    colnames = None
    if isinstance(id_map, dict):
        # per-table map: old csv id -> new db id (used to remap references)
        if not self._tablename in id_map:
            id_map[self._tablename] = {}
        id_map_self = id_map[self._tablename]

    def fix(field, value, id_map):
        # convert a csv cell back into the python value for ``field``;
        # reference ids are remapped through ``id_map`` when available
        list_reference_s='list:reference'
        if value == null:
            value = None
        elif field.type=='blob':
            value = base64.b64decode(value)
        elif field.type=='double':
            if not value.strip():
                value = None
            else:
                value = float(value)
        elif field.type=='integer':
            if not value.strip():
                value = None
            else:
                value = int(value)
        elif field.type.startswith('list:string'):
            value = bar_decode_string(value)
        elif field.type.startswith(list_reference_s):
            ref_table = field.type[len(list_reference_s):].strip()
            value = [id_map[ref_table][int(v)] \
                         for v in bar_decode_string(value)]
        elif field.type.startswith('list:'):
            value = bar_decode_integer(value)
        elif id_map and field.type.startswith('reference'):
            try:
                value = id_map[field.type[9:].strip()][int(value)]
            except KeyError:
                pass
        return (field.name, value)

    def is_id(colname):
        # True when ``colname`` is this table's 'id'-type field
        if colname in self:
            return self[colname].type == 'id'
        else:
            return False

    for line in reader:
        if not line:
            # blank line terminates this table's section
            break
        if not colnames:
            # first row: header; note column indexes of id and unique
            colnames = [x.split('.',1)[-1] for x in line][:len(line)]
            cols, cid = [], []
            for i,colname in enumerate(colnames):
                if is_id(colname):
                    cid = i
                else:
                    cols.append(i)
                if colname == unique:
                    unique_idx = i
        else:
            items = [fix(self[colnames[i]], line[i], id_map) \
                         for i in cols if colnames[i] in self.fields]
            # Validation. Check for duplicate of 'unique' &,
            # if present, update instead of insert.
            if not unique or unique not in colnames:
                new_id = self.insert(**dict(items))
            else:
                unique_value = line[unique_idx]
                query = self._db[self][unique] == unique_value
                record = self._db(query).select().first()
                if record:
                    record.update_record(**dict(items))
                    new_id = record[self._id.name]
                else:
                    new_id = self.insert(**dict(items))
            if id_map and cid != []:
                # remember old->new id for later reference remapping
                id_map_self[int(line[cid])] = new_id
def with_alias(self, alias):
    """Return an aliased copy of this table (SQL 'AS'), for self-joins."""
    return self._db._adapter.alias(self,alias)

def on(self, query):
    """Build the JOIN ... ON expression used by select(join=...)."""
    return Expression(self._db,self._db._adapter.ON,self,query)
class Expression(object):
    """A node of a composable SQL expression tree (fields, aggregates,
    arithmetic, string/date helpers, GIS functions).

    ``op`` is an adapter constant/method; ``first``/``second`` are the
    operands; ``type`` is the web2py field type of the result (inherited
    from ``first`` when not given). Comparison operators build Query
    objects rather than booleans.
    """

    def __init__(self, db, op, first=None, second=None, type=None):
        self.db = db
        self.op = op
        self.first = first
        self.second = second
        ### self._tablename = first._tablename ## CHECK
        if not type and first and hasattr(first, 'type'):
            # inherit the operand's type when none is given
            self.type = first.type
        else:
            self.type = type

    # --- aggregates and scalar SQL functions ---

    def sum(self):
        return Expression(self.db, self.db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        return Expression(self.db, self.db._adapter.AGGREGATE, self, 'LENGTH', 'integer')

    def lower(self):
        return Expression(self.db, self.db._adapter.LOWER, self, None, self.type)

    def upper(self):
        return Expression(self.db, self.db._adapter.UPPER, self, None, self.type)

    # --- date/time component extraction ---

    def year(self):
        return Expression(self.db, self.db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        return Expression(self.db, self.db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        return Expression(self.db, self.db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        return Expression(self.db, self.db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        return Expression(self.db, self.db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self, *others):
        return Expression(self.db, self.db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        return Expression(self.db, self.db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        return Expression(self.db, self.db._adapter.EXTRACT, self, 'second', 'integer')

    def __getslice__(self, start, stop):
        # SQL SUBSTRING with python-style (possibly negative) bounds;
        # py2-only protocol, invoked via __getitem__ below
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1
        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(self.db, self.db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        # single-char access as a one-element slice
        return self[i:i + 1]

    def __str__(self):
        return self.db._adapter.expand(self, self.type)

    def __or__(self, other):  # for use in sortby
        return Expression(self.db, self.db._adapter.COMMA, self, other, self.type)

    def __invert__(self):
        # NOTE(review): hasattr(self,'_op') is always False (the attribute
        # is 'op'), so the double-invert shortcut looks unreachable;
        # preserved as-is to avoid changing behavior
        if hasattr(self, '_op') and self.op == self.db._adapter.INVERT:
            return self.first
        return Expression(self.db, self.db._adapter.INVERT, self, type=self.type)

    def __add__(self, other):
        return Expression(self.db, self.db._adapter.ADD, self, other, self.type)

    def __sub__(self, other):
        if self.type == 'integer':
            result_type = 'integer'
        elif self.type in ['date', 'time', 'datetime', 'double']:
            result_type = 'double'
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(self.db, self.db._adapter.SUB, self, other,
                          result_type)

    def __mul__(self, other):
        return Expression(self.db, self.db._adapter.MUL, self, other, self.type)

    def __div__(self, other):
        return Expression(self.db, self.db._adapter.DIV, self, other, self.type)

    def __mod__(self, other):
        return Expression(self.db, self.db._adapter.MOD, self, other, self.type)

    # --- comparisons build Query objects, not booleans ---

    def __eq__(self, value):
        return Query(self.db, self.db._adapter.EQ, self, value)

    def __ne__(self, value):
        return Query(self.db, self.db._adapter.NE, self, value)

    def __lt__(self, value):
        return Query(self.db, self.db._adapter.LT, self, value)

    def __le__(self, value):
        return Query(self.db, self.db._adapter.LE, self, value)

    def __gt__(self, value):
        return Query(self.db, self.db._adapter.GT, self, value)

    def __ge__(self, value):
        return Query(self.db, self.db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        op = case_sensitive and self.db._adapter.LIKE or self.db._adapter.ILIKE
        return Query(self.db, op, self, value)

    def regexp(self, value):
        return Query(self.db, self.db._adapter.REGEXP, self, value)

    def belongs(self, value):
        if isinstance(value, Query):
            # a Query operand becomes a nested select of the table's id
            value = self.db(value)._select(value.first._table._id)
        return Query(self.db, self.db._adapter.BELONGS, self, value)

    def startswith(self, value):
        if not self.type in ('string', 'text'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(self.db, self.db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        if not self.type in ('string', 'text'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(self.db, self.db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False):
        if isinstance(value, (list, tuple)):
            # AND/OR together one contains() per element
            subqueries = [self.contains(str(v).strip()) for v in value if str(v).strip()]
            return reduce(all and AND or OR, subqueries)
        if not self.type in ('string', 'text') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(self.db, self.db._adapter.CONTAINS, self, value)

    def with_alias(self, alias):
        return Expression(self.db, self.db._adapter.AS, self, alias, self.type)

    # GIS functions

    def st_asgeojson(self, precision=15, options=0, version=1):
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options, version=version), 'dict')

    def st_astext(self):
        return Expression(self.db, self.db._adapter.ST_ASTEXT, self)

    def st_contained(self, value):
        return Query(self.db, self.db._adapter.ST_CONTAINS, value, self)

    def st_contains(self, value):
        return Query(self.db, self.db._adapter.ST_CONTAINS, self, value)

    def st_distance(self, other):
        return Expression(self.db, self.db._adapter.ST_DISTANCE, self, other, self.type)

    def st_equals(self, value):
        return Query(self.db, self.db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        return Query(self.db, self.db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        return Query(self.db, self.db._adapter.ST_OVERLAPS, self, value)

    def st_simplify(self, value):
        return Expression(self.db, self.db._adapter.ST_SIMPLIFY, self, value)

    def st_touches(self, value):
        return Query(self.db, self.db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        return Query(self.db, self.db._adapter.ST_WITHIN, self, value)
# for use in both Query and sortby
class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):
        # encoder/decoder default to the identity function when not given
        self.type = type
        self.native = native
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        # _class is the display name used by __str__; defaults to `type`
        self._class = _class or type

    def startswith(self, text=None):
        # NOTE(review): this passes `self` (a SQLCustomType) as the first
        # argument of self.type.startswith; for a plain-string `type` this
        # raises TypeError and so returns False. Looks like deliberate
        # duck-typing so Field code can call .startswith safely -- verify
        # before changing.
        try:
            return self.type.startswith(self, text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        # Python-2-only slicing hook; slicing a custom type yields None
        return None

    def __getitem__(self, i):
        # indexing a custom type yields None (mirrors __getslice__)
        return None

    def __str__(self):
        return self._class
class FieldVirtual(object):
    """Marker wrapper for a (non-lazy) virtual-field callable."""

    def __init__(self, f):
        # `f` computes the virtual value from a row
        self.f = f
class FieldLazy(object):
    """Marker wrapper for a lazy virtual-field callable."""

    def __init__(self, f, handler=None):
        self.f = f
        # optional handler invoked when the lazy value is materialized
        self.handler = handler
class Field(Expression):
    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            uploadfield=True,   # True means store on disk,
                                # 'a_field_name' means store in this field in db
                                # False means file content will be discarded.
            widget=None, label=None, comment=None,
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False) # upload to separate directories by uuid_keys
                                  # first 2 character and tablename.fieldname
                                  # False - old behavior
                                  # True - put uploaded file in
                                  #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                  # directory)

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password

    strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql)
    fields should have a default or they will be required in SQLFORMs
    the requires argument is used to validate the field input in SQLFORMs

    """
    # the docstring above must be the first statement of the class body,
    # otherwise Field.__doc__ is never set (it previously followed these
    # two attribute assignments and was a dead string literal)
    Virtual = FieldVirtual
    Lazy = FieldLazy

    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=DEFAULT,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_delete=None,
        ):
        self.db = None
        self.op = None
        self.first = None
        self.second = None
        if not isinstance(fieldname, str):
            # call form of raise: works on Python 2 and 3 alike
            raise SyntaxError("missing field name")
        self.name = fieldname = cleanup(fieldname)
        # reject names that would shadow Table attributes or Python keywords
        if hasattr(Table, fieldname) or fieldname[0] == '_' or \
                regex_python_keywords.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        if isinstance(type, Table):
            type = 'reference ' + type._tablename
        self.type = type  # 'string', 'integer'
        self.length = (length is None) and DEFAULTLENGTH.get(type, 512) or length
        if default is DEFAULT:
            self.default = update or None
        else:
            self.default = default
        self.required = required  # is this field required
        self.ondelete = ondelete.upper()  # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.widget = widget
        if label is DEFAULT:
            self.label = fieldname.replace('_', ' ').title()
        else:
            self.label = label or ''
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        if not represent and type in ('list:integer', 'list:string'):
            represent = lambda x, r=None: ', '.join(str(y) for y in x or [])
        self.represent = represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_delete = custom_delete
        # NOTE(review): self.label cannot be None at this point (it is always
        # assigned above), so this branch looks unreachable; kept defensively.
        if self.label is None:
            self.label = fieldname.replace('_', ' ').title()
        if requires is None:
            self.requires = []
        else:
            self.requires = requires

    def store(self, file, filename=None, path=None):
        """Persist an uploaded file and return the generated safe filename.

        Depending on self.uploadfield the payload goes to another Field
        (blob in db), or to disk under `path`/self.uploadfolder.
        """
        if self.custom_store:
            return self.custom_store(file, filename, path)
        if isinstance(file, cgi.FieldStorage):
            file = file.file
            filename = filename or file.filename
        elif not filename:
            filename = file.name
        filename = os.path.basename(filename.replace('/', os.sep)
                                    .replace('\\', os.sep))
        m = re.compile('\.(?P<e>\w{1,5})$').search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        # the original name is b16-encoded into the stored name so that
        # retrieve() can recover it
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
        if isinstance(self.uploadfield, Field):
            # store the bytes in another table's blob field
            blob_uploadfield_name = self.uploadfield.uploadfield
            keys = {self.uploadfield.name: newfilename,
                    blob_uploadfield_name: file.read()}
            self.uploadfield.table.insert(**keys)
        elif self.uploadfield == True:
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = os.path.join(self.db._adapter.folder, '..', 'uploads')
            else:
                # call form of raise: works on Python 2 and 3 alike
                raise RuntimeError("you must specify a Field(...,uploadfolder=...)")
            if self.uploadseparate:
                path = os.path.join(path, "%s.%s" % (self._tablename, self.name), uuid_key[:2])
            if not os.path.exists(path):
                os.makedirs(path)
            pathfilename = os.path.join(path, newfilename)
            dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError('Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename)
            finally:
                # always release the handle (the old code leaked it when
                # copyfileobj raised)
                dest_file.close()
        return newfilename

    def retrieve(self, name, path=None):
        """Fetch a previously stored upload as a (filename, stream) pair."""
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        if self.authorize or isinstance(self.uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
            if self.authorize and not self.authorize(row):
                raise http.HTTP(403)
        try:
            # recover the original filename encoded by store()
            m = regex_content.match(name)
            if not m or not self.isattachment:
                # call form of raise: works on Python 2 and 3 alike
                raise TypeError('Can\'t retrieve %s' % name)
            filename = base64.b16decode(m.group('name'), True)
            filename = regex_cleanup_fn.sub('_', filename)
        except (TypeError, AttributeError):
            filename = name
        if isinstance(self.uploadfield, str):  # ## if file is in DB
            return (filename, cStringIO.StringIO(row[self.uploadfield] or ''))
        elif isinstance(self.uploadfield, Field):
            blob_uploadfield_name = self.uploadfield.uploadfield
            query = self.uploadfield == name
            data = self.uploadfield.table(query)[blob_uploadfield_name]
            return (filename, cStringIO.StringIO(data))
        else:
            # ## if file is on filesystem
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            else:
                path = os.path.join(self.db._adapter.folder, '..', 'uploads')
            if self.uploadseparate:
                t = m.group('table')
                f = m.group('field')
                u = m.group('uuidkey')
                path = os.path.join(path, "%s.%s" % (t, f), u[:2])
            return (filename, open(os.path.join(path, name), 'rb'))

    def formatter(self, value):
        """Run the validators' formatters (in reverse order) over `value`."""
        if value is None or not self.requires:
            return value
        if not isinstance(self.requires, (list, tuple)):
            requires = [self.requires]
        elif isinstance(self.requires, tuple):
            requires = list(self.requires)
        else:
            requires = copy.copy(self.requires)
        requires.reverse()
        for item in requires:
            if hasattr(item, 'formatter'):
                value = item.formatter(value)
        return value

    def validate(self, value):
        """Apply all validators; return (value, error) with error=None on success."""
        if not self.requires:
            return (value, None)
        requires = self.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for validator in requires:
            (value, error) = validator(value)
            if error:
                # stop at the first failing validator
                return (value, error)
        return (value, None)

    def count(self):
        """Expression: COUNT aggregate over this field."""
        return Expression(self.db, self.db._adapter.AGGREGATE, self, 'COUNT', 'integer')

    def __nonzero__(self):
        # a Field is always truthy (Python 2 truth protocol)
        return True

    def __str__(self):
        try:
            return '%s.%s' % (self.tablename, self.name)
        except:
            return '<no table>.%s' % self.name
def raw(s): return Expression(None,s)
class Query(object):
    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters=False,
        ):
        # _db is kept as an alias of db for backward compatibility
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters

    def __str__(self):
        # SQL rendering is delegated to the adapter
        return self.db._adapter.expand(self)

    def __and__(self, other):
        adapter = self.db._adapter
        return Query(self.db, adapter.AND, self, other)

    def __or__(self, other):
        adapter = self.db._adapter
        return Query(self.db, adapter.OR, self, other)

    def __invert__(self):
        adapter = self.db._adapter
        if self.op == adapter.NOT:
            # double negation: unwrap instead of nesting NOT(NOT(q))
            return self.first
        return Query(self.db, adapter.NOT, self)
regex_quotes = re.compile("'[^']*'")
def xorify(orderby):
    """Fold a sequence of orderby expressions into one with `|`.

    Returns None for an empty or falsy input.
    """
    if not orderby:
        return None
    items = iter(orderby)
    combined = next(items)
    for expr in items:
        combined = combined | expr
    return combined
def use_common_filters(query):
    """Tell whether `query` should have the common filters applied.

    Mirrors the short-circuit expression it replaces: a falsy query is
    returned as-is; an object without the attribute yields False.
    """
    if not query:
        return query
    if not hasattr(query, 'ignore_common_filters'):
        return False
    return not query.ignore_common_filters
class Set(object):
    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
       set = db(db.users.name=='Max')
    you can:
       set.update(db.users.name='Massimo')
       set.delete() # all elements in the set
       set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
       subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters=None):
        self.db = db
        self._db = db  # for backward compatibility
        # copy the query before flipping ignore_common_filters so the
        # caller's Query object is not mutated
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __call__(self, query, ignore_common_filters=False):
        """Narrow this Set with another condition (Table/str/Field coerced)."""
        if isinstance(query, Table):
            query = query._id > 0
        elif isinstance(query, str):
            query = raw(query)
        elif isinstance(query, Field):
            query = query != None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self, distinct=None):
        """SQL string for count() without executing it."""
        return self.db._adapter._count(self.query, distinct)

    def _select(self, *fields, **attributes):
        """SQL string for select() without executing it."""
        adapter = self.db._adapter
        fields = adapter.expand_all(fields, adapter.tables(self.query))
        return adapter._select(self.query, fields, attributes)

    def _delete(self):
        """SQL string for delete() without executing it."""
        tablename = self.db._adapter.get_table(self.query)
        return self.db._adapter._delete(tablename, self.query)

    def _update(self, **update_fields):
        """SQL string for update() without executing it."""
        tablename = self.db._adapter.get_table(self.query)
        fields = self.db[tablename]._listify(update_fields, update=True)
        return self.db._adapter._update(tablename, self.query, fields)

    def isempty(self):
        return not self.select(limitby=(0, 1))

    def count(self, distinct=None):
        return self.db._adapter.count(self.query, distinct)

    def select(self, *fields, **attributes):
        adapter = self.db._adapter
        fields = adapter.expand_all(fields, adapter.tables(self.query))
        return adapter.select(self.query, fields, attributes)

    def delete(self):
        """Delete the records; runs _before_delete/_after_delete callbacks.

        Returns 0 (vetoed) or the adapter's deletion result.
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        if any(f(self) for f in table._before_delete):
            return 0
        ret = self.db._adapter.delete(tablename, self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        """Update the records; runs _before_update/_after_update callbacks."""
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        if any(f(self, update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields, update=True)
        if not fields:
            # call form of raise (the comma form is Python-2-only syntax)
            raise SyntaxError("No fields to update")
        ret = self.db._adapter.update(tablename, self.query, fields)
        ret and [f(self, update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields, update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = self.db._adapter.update(tablename, self.query, fields)
        return ret

    def validate_and_update(self, **update_fields):
        """Validate each value, then update; errors collected in response.errors."""
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key, value in update_fields.items():
            value, error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self, new_fields) for f in table._before_update):
                fields = table._listify(new_fields, update=True)
                if not fields:
                    raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename, self.query, fields)
                ret and [f(self, new_fields) for f in table._after_update]
            else:
                ret = 0
            # NOTE(review): success is reported as response.update while the
            # error path sets response.updated -- asymmetric, but callers may
            # depend on it, so it is preserved.
            response.update = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        """Remove files belonging to autodelete 'upload' fields of this Set.

        Always returns False (pre-existing behavior, preserved).
        """
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                  and table[f].uploadfield == True
                  and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    # value unchanged: keep the file
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = os.path.join(self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        items = oldname.split('.')
                        uploadfolder = os.path.join(uploadfolder,
                                                    "%s.%s" % (items[0], items[1]),
                                                    items[2][:2])
                    oldpath = os.path.join(uploadfolder, oldname)
                    if os.path.exists(oldpath):
                        os.unlink(oldpath)
        return False
def update_record(pack, a=None):
    """Write updated column values back to the database row.

    `pack` is a (colset, table, id) triple; `a`, when given, supplies the
    values to write instead of the current colset contents. The written
    values are mirrored back into colset.
    """
    (colset, table, id) = pack
    source = a or dict(colset)
    # keep only real, non-id columns of this table
    updatable = dict((k, v) for (k, v) in source.items()
                     if k in table.fields and table[k].type != 'id')
    table._db(table._id == id).update(**updatable)
    for (k, v) in updatable.items():
        colset[k] = v
class VirtualCommand(object):
    """Bind a virtual-field `method` to a fixed `row` (lazy evaluation)."""

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        # invoke with the stored row as the implicit first argument
        return self.method(self.row, *args, **kwargs)
def lazy_virtualfield(f):
    """Decorator: mark `f` as a lazy virtual field and return it."""
    setattr(f, '__lazy__', True)
    return f
class Rows(object):
    """
    A wrapper for the return value of a select. It basically represents a table.
    It has an iterator and each row is represented as a dictionary.
    """

    # ## TODO: this class still needs some work to care for ID/OID

    def __init__(
        self,
        db=None,
        records=None,
        colnames=None,
        compact=True,
        rawrows=None
        ):
        """Store the result set.

        records/colnames default to fresh lists: the previous `[]` defaults
        were shared across all instances and were mutated in place by
        exclude(), corrupting later Rows objects.
        """
        self.db = db
        self.records = [] if records is None else records
        self.colnames = [] if colnames is None else colnames
        self.compact = compact
        self.response = rawrows

    def setvirtualfields(self, **keyed_virtualfields):
        """
        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @staticmethod)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename, virtualfields) in keyed_virtualfields.items():
                attributes = dir(virtualfields)
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields, attribute)
                        if hasattr(method, '__lazy__'):
                            # lazy fields are wrapped; evaluated on demand
                            box[attribute] = VirtualCommand(method, row)
                        elif type(method) == types.MethodType:
                            if not updated:
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute] = method()
        return self

    def __and__(self, other):
        if self.colnames != other.colnames:
            # call form of raise (the comma form is Python-2-only syntax)
            raise Exception('Cannot & incompatible Rows objects')
        records = self.records + other.records
        return Rows(self.db, records, self.colnames)

    def __or__(self, other):
        if self.colnames != other.colnames:
            raise Exception('Cannot | incompatible Rows objects')
        # build a fresh list: the old `records = self.records; records += ...`
        # extended self.records in place, so `r1 | r2` silently mutated r1
        records = self.records + [record for record in other.records
                                  if not record in self.records]
        return Rows(self.db, records, self.colnames)

    def __nonzero__(self):
        # Python 2 truth protocol: truthy iff there are records
        if len(self.records):
            return 1
        return 0

    def __len__(self):
        return len(self.records)

    def __getslice__(self, a, b):
        return Rows(self.db, self.records[a:b], self.colnames)

    def __getitem__(self, i):
        row = self.records[i]
        keys = row.keys()
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            # reuse `keys` instead of calling row.keys() a second time
            return row[keys[0]]
        return row

    def __iter__(self):
        """
        iterator over records
        """
        for i in xrange(len(self)):
            yield self[i]

    def __str__(self):
        """
        serializes the table into a csv file
        """
        s = cStringIO.StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()

    def first(self):
        if not self.records:
            return None
        return self[0]

    def last(self):
        if not self.records:
            return None
        return self[-1]

    def find(self, f):
        """
        returns a new Rows object, a subset of the original object,
        filtered by the function f
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        records = []
        for i in range(0, len(self)):
            row = self[i]
            if f(row):
                records.append(self.records[i])
        return Rows(self.db, records, self.colnames)

    def exclude(self, f):
        """
        removes elements from the calling Rows object, filtered by the function f,
        and returns a new Rows object containing the removed elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        removed = []
        i = 0
        while i < len(self):
            row = self[i]
            if f(row):
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames)

    def sort(self, f, reverse=False):
        """
        returns a list of sorted elements (not sorted in place)
        """
        return Rows(self.db, sorted(self, key=f, reverse=reverse), self.colnames)

    def group_by_value(self, field):
        """
        regroups the rows, by one of the fields
        """
        if not self.records:
            return {}
        key = str(field)
        grouped_row_group = dict()
        for row in self:
            value = row[key]
            if not value in grouped_row_group:
                grouped_row_group[value] = [row]
            else:
                grouped_row_group[value].append(row)
        return grouped_row_group

    def as_list(self,
                compact=True,
                storage_to_dict=True,
                datetime_to_str=True):
        """
        returns the data as a list or dictionary.
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default True)
        """
        (oc, self.compact) = (self.compact, compact)
        if storage_to_dict:
            items = [item.as_dict(datetime_to_str) for item in self]
        else:
            items = [item for item in self]
        # restore the caller's compact mode (`oc` was saved but never used
        # before, leaving self.compact permanently set to the argument)
        self.compact = oc
        return items

    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=True):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id
        :param compact: ? (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default True)
        """
        rows = self.as_list(compact, storage_to_dict, datetime_to_str)
        if isinstance(key, str) and key.count('.') == 1:
            (table, field) = key.split('.')
            return dict([(r[table][field], r) for r in rows])
        elif isinstance(key, str):
            return dict([(r[key], r) for r in rows])
        else:
            return dict([(key(r), r) for r in rows])

    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)

        This will only work when exporting rows objects!!!!
        DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames', True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value, Reference):
                return int(value)
            elif hasattr(value, 'isoformat'):
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list, tuple)):  # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                if not regex_table_field.match(col):
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row, dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type == 'blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)

    def xml(self):
        """
        serializes the table using sqlhtml.SQLTABLE (if present)
        """
        import sqlhtml
        return sqlhtml.SQLTABLE(self).xml()

    def json(self, mode='object', default=None):
        """
        serializes the table to a JSON list of objects
        """
        mode = mode.lower()
        if not mode in ['object', 'array']:
            # call form of raise (the comma form is Python-2-only syntax)
            raise SyntaxError('Invalid JSON serialization mode: %s' % mode)

        def inner_loop(record, col):
            (t, f) = col.split('.')
            res = None
            if not regex_table_field.match(col):
                key = col
                res = record._extra[col]
            else:
                key = f
                if isinstance(record.get(t, None), Row):
                    res = record[t][f]
                else:
                    res = record[f]
            if mode == 'object':
                return (key, res)
            else:
                return res

        if mode == 'object':
            items = [dict([inner_loop(record, col) for col in
                           self.colnames]) for record in self]
        else:
            items = [[inner_loop(record, col) for col in self.colnames]
                     for record in self]
        if have_serializers:
            return serializers.json(items, default=default or serializers.custom_json)
        else:
            import simplejson
            return simplejson.dumps(items)
def Rows_unpickler(data):
    """Rebuild the pickled Rows payload (inverse of Rows_pickler)."""
    restored = cPickle.loads(data)
    return restored
def Rows_pickler(data):
    """Pickle support: reduce a Rows object to (unpickler, payload)."""
    payload = cPickle.dumps(data.as_list(storage_to_dict=False,
                                         datetime_to_str=False))
    return Rows_unpickler, (payload,)
copy_reg.pickle(Rows, Rows_pickler, Rows_unpickler)
################################################################################
# dummy function used to define some doctests
################################################################################
def test_all():
    # Doctest suite exercising the DAL end to end (define_table, CRUD,
    # joins, aggregates, CSV export). The docstring below IS the test
    # content, so it is kept verbatim.
    """

    >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

   Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                       uploadf=None, integerf=5, doublef=3.14,\
                       datef=datetime.date(2001, 1, 1),\
                       timef=datetime.time(12, 30, 15),\
                       datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name=\"Max\")
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)
    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)
    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
               Field('name'),\
               Field('birth','date'),\
               Field('owner',db.person),\
               migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
                            migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
                            migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
            Field('author_id', db.author),\
            Field('paper_id', db.paper),\
            migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber.id>0).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber.id>0).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
################################################################################
# deprecated since the new DAL; here only for backward compatibility
################################################################################
# Legacy names kept so pre-DAL applications keep importing/working.
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = Row
SQLDB = DAL
GQLDB = DAL
DAL.Field = Field  # was necessary in gluon/globals.py session.connect
DAL.Table = Table  # was necessary in gluon/globals.py session.connect
################################################################################
# run tests
################################################################################
if __name__ == '__main__':
    # Run the embedded doctests (e.g. test_all) when executed as a script.
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import __builtin__
import os
import re
import sys
import threading
# Install the new import function:
def custom_import_install(web2py_path):
    """Install the web2py import hook as the builtin __import__ (idempotent)."""
    global _web2py_importer
    global _web2py_path
    if isinstance(__builtin__.__import__, _Web2pyImporter):
        # already installed: nothing to do
        return
    _web2py_path = web2py_path
    _web2py_importer = _Web2pyImporter(web2py_path)
    __builtin__.__import__ = _web2py_importer
def is_tracking_changes():
    """
    @return: True: neo_importer is tracking changes made to Python source
    files. False: neo_import does not reload Python modules.
    """
    # reading a module global needs no `global` declaration
    return _is_tracking_changes
def track_changes(track=True):
    """
    Tell neo_importer to start/stop tracking changes made to Python modules.
    @param track: True: Start tracking changes. False: Stop tracking changes.
    """
    global _is_tracking_changes
    global _web2py_importer
    global _web2py_date_tracker_importer
    # NOTE(review): `assert` is stripped under `python -O`; this is an input
    # check only, not a runtime guarantee.
    assert track is True or track is False, "Boolean expected."
    if track == _is_tracking_changes:
        return  # no state change requested
    if track:
        # lazily create the date-tracking importer, then install it
        if not _web2py_date_tracker_importer:
            _web2py_date_tracker_importer = \
                _Web2pyDateTrackerImporter(_web2py_path)
        __builtin__.__import__ = _web2py_date_tracker_importer
    else:
        # restore the plain (non-reloading) web2py importer
        __builtin__.__import__ = _web2py_importer
    _is_tracking_changes = track
# Module-level importer state shared by the functions above.
_STANDARD_PYTHON_IMPORTER = __builtin__.__import__  # Keep standard importer
_web2py_importer = None  # The standard web2py importer
_web2py_date_tracker_importer = None  # The web2py importer with date tracking
_web2py_path = None  # Absolute path of the web2py directory
_is_tracking_changes = False  # The tracking mode
class _BaseImporter(object):
"""
The base importer. Dispatch the import the call to the standard Python
importer.
"""
def begin(self):
"""
Many imports can be made for a single import statement. This method
help the management of this aspect.
"""
def __call__(self, name, globals=None, locals=None,
fromlist=None, level=-1):
"""
The import method itself.
"""
return _STANDARD_PYTHON_IMPORTER(name,
globals,
locals,
fromlist,
level)
def end(self):
"""
Needed for clean up.
"""
class _DateTrackerImporter(_BaseImporter):
    """
    An importer tracking the date of the module files and reloading them when
    they have changed.
    """

    # Path suffix identifying a package entry file, e.g. "/__init__.py".
    _PACKAGE_PATH_SUFFIX = os.path.sep + "__init__.py"
    def __init__(self):
        """Initialise the date bookkeeping and the per-thread reload guard."""
        super(_DateTrackerImporter, self).__init__()
        self._import_dates = {}  # Import dates of the files of the modules
        # Avoid reloading cause by file modifications of reload:
        self._tl = threading.local()
        # None means "no import statement in progress" (see __call__)
        self._tl._modules_loaded = None
    def begin(self):
        """Start of an import statement: reset the set of modules seen."""
        self._tl._modules_loaded = set()
def __call__(self, name, globals=None, locals=None,
fromlist=None, level=-1):
"""
The import method itself.
"""
globals = globals or {}
locals = locals or {}
fromlist = fromlist or []
call_begin_end = self._tl._modules_loaded is None
if call_begin_end:
self.begin()
try:
self._tl.globals = globals
self._tl.locals = locals
self._tl.level = level
# Check the date and reload if needed:
self._update_dates(name, fromlist)
# Try to load the module and update the dates if it works:
result = super(_DateTrackerImporter, self) \
.__call__(name, globals, locals, fromlist, level)
# Module maybe loaded for the 1st time so we need to set the date
self._update_dates(name, fromlist)
return result
except Exception, e:
raise e # Don't hide something that went wrong
finally:
if call_begin_end:
self.end()
def _update_dates(self, name, fromlist):
"""
Update all the dates associated to the statement import. A single
import statement may import many modules.
"""
self._reload_check(name)
if fromlist:
for fromlist_name in fromlist:
self._reload_check("%s.%s" % (name, fromlist_name))
def _reload_check(self, name):
"""
Update the date associated to the module and reload the module if
the file has changed.
"""
module = sys.modules.get(name)
file = self._get_module_file(module)
if file:
date = self._import_dates.get(file)
new_date = None
reload_mod = False
mod_to_pack = False # Module turning into a package? (special case)
try:
new_date = os.path.getmtime(file)
except:
self._import_dates.pop(file, None) # Clean up
# Handle module changing in package and
#package changing in module:
if file.endswith(".py"):
# Get path without file ext:
file = os.path.splitext(file)[0]
reload_mod = os.path.isdir(file) \
and os.path.isfile(file+self._PACKAGE_PATH_SUFFIX)
mod_to_pack = reload_mod
else: # Package turning into module?
file += ".py"
reload_mod = os.path.isfile(file)
if reload_mod:
new_date = os.path.getmtime(file) # Refresh file date
if reload_mod or not date or new_date > date:
self._import_dates[file] = new_date
if reload_mod or (date and new_date > date):
if module not in self._tl._modules_loaded:
if mod_to_pack:
# Module turning into a package:
mod_name = module.__name__
del sys.modules[mod_name] # Delete the module
# Reload the module:
super(_DateTrackerImporter, self).__call__ \
(mod_name, self._tl.globals, self._tl.locals, [],
self._tl.level)
else:
reload(module)
self._tl._modules_loaded.add(module)
def end(self):
self._tl._modules_loaded = None
@classmethod
def _get_module_file(cls, module):
"""
Get the absolute path file associated to the module or None.
"""
file = getattr(module, "__file__", None)
if file:
# Make path absolute if not:
#file = os.path.join(cls.web2py_path, file)
file = os.path.splitext(file)[0]+".py" # Change .pyc for .py
if file.endswith(cls._PACKAGE_PATH_SUFFIX):
file = os.path.dirname(file) # Track dir for packages
return file
class _Web2pyImporter(_BaseImporter):
    """
    The standard web2py importer. Like the standard Python importer but it
    tries to transform import statements as something like
    "import applications.app_name.modules.x". If the import failed, fall back
    on _BaseImporter.
    """
    _RE_ESCAPED_PATH_SEP = re.escape(os.path.sep) # os.path.sep escaped for re

    def __init__(self, web2py_path):
        """
        @param web2py_path: The absolute path of the web2py installation.
        """
        super(_Web2pyImporter, self).__init__()
        self.web2py_path = web2py_path
        self.__web2py_path_os_path_sep = self.web2py_path+os.path.sep
        self.__web2py_path_os_path_sep_len = len(self.__web2py_path_os_path_sep)
        # Matches relative paths living under "applications/<app_name>/",
        # capturing "applications/<app_name>" in group 1.
        self.__RE_APP_DIR = re.compile(
            self._RE_ESCAPED_PATH_SEP.join( \
            ( \
            #"^" + re.escape(web2py_path), # Not working with Python 2.5
            "^(" + "applications",
            "[^",
            "]+)",
            "",
            ) ))

    def _matchAppDir(self, file_path):
        """
        Is the file in a directory inside the "applications" directory?
        Returns the regex match object (or False) so the caller can read
        the captured "applications/<app_name>" prefix.
        """
        if file_path.startswith(self.__web2py_path_os_path_sep):
            file_path = file_path[self.__web2py_path_os_path_sep_len:]
            return self.__RE_APP_DIR.match(file_path)
        return False

    def __call__(self, name, globals=None, locals=None,
                 fromlist=None, level=-1):
        """
        The import method itself.
        """
        globals = globals or {}
        locals = locals or {}
        fromlist = fromlist or []
        self.begin()
        try:
            # Only rewrite absolute imports made from inside an application
            # directory that are not already application-qualified:
            if not name.startswith(".") and level <= 0 \
                    and not name.startswith("applications.") \
                    and isinstance(globals, dict):
                # Get the name of the file doing the import:
                caller_file_name = os.path.join(self.web2py_path, \
                        globals.get("__file__", ""))
                # Is the path in an application directory?
                match_app_dir = self._matchAppDir(caller_file_name)
                if match_app_dir:
                    try:
                        # Prefix to add for the import
                        # (like applications.app_name.modules):
                        modules_prefix = \
                            ".".join((match_app_dir.group(1). \
                            replace(os.path.sep, "."), "modules"))
                        if not fromlist:
                            # import like "import x" or "import x.y"
                            return self.__import__dot(modules_prefix, name,
                                    globals, locals, fromlist, level)
                        else:
                            # import like "from x import a, b, ..."
                            return super(_Web2pyImporter, self) \
                                .__call__(modules_prefix+"."+name,
                                          globals, locals, fromlist, level)
                    except ImportError:
                        # Fall back on the standard import below.
                        pass
            return super(_Web2pyImporter, self).__call__(name, globals,
                    locals, fromlist, level)
        finally:
            # Always balance the begin() above.  (Bug fix: end() used to sit
            # after the return statement and was therefore never executed,
            # leaving _DateTrackerImporter's per-statement state stale.)
            self.end()

    def __import__dot(self, prefix, name, globals, locals, fromlist,
            level):
        """
        Here we will import x.y.z as many imports like:
        from applications.app_name.modules import x
        from applications.app_name.modules.x import y
        from applications.app_name.modules.x.y import z.
        x will be the module returned.
        """
        result = None
        for part in name.split("."):
            new_mod = super(_Web2pyImporter, self).__call__(prefix, globals,
                    locals, [part], level)
            try:
                # Keep the first (outermost) module as the result, like the
                # builtin __import__ does for "import x.y.z".
                result = result or new_mod.__dict__[part]
            except KeyError:
                raise ImportError()
            prefix += "." + part
        return result
class _Web2pyDateTrackerImporter(_Web2pyImporter, _DateTrackerImporter):
    """
    Like _Web2pyImporter but using a _DateTrackerImporter.
    The MRO makes _Web2pyImporter rewrite the import name first, then
    _DateTrackerImporter's __call__/begin/end handle file-date tracking.
    """
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Basic caching classes and methods
=================================
- Cache - The generic caching object interfacing with the others
- CacheInRam - providing caching in ram
- CacheInDisk - provides caches on disk
Memcache is also available via a different module (see gluon.contrib.memcache)
When web2py is running on Google App Engine,
caching will be provided by the GAE memcache
(see gluon.contrib.gae_memcache)
"""
import time
import portalocker
import shelve
import thread
import os
import logging
import re
try:
import settings
have_settings = True
except ImportError:
have_settings = False
logger = logging.getLogger("web2py.cache")
__all__ = ['Cache']
DEFAULT_TIME_EXPIRE = 300
class CacheAbstract(object):
    """
    Abstract class for cache implementations.
    Main function is now to provide referenced api documentation.
    Use CacheInRam or CacheOnDisk instead which are derived from this class.
    """

    # Reserved storage key under which implementations keep their
    # hit/miss statistics dictionary.
    cache_stats_name = 'web2py_cache_statistics'

    def __init__(self, request=None):
        """
        Parameters
        ----------
        request:
            the global request object
        """
        raise NotImplementedError

    def __call__(self, key, f,
                 time_expire = DEFAULT_TIME_EXPIRE):
        """
        Tries to retrieve the value corresponding to `key` from the cache if
        the object exists and if it did not expire, else it calls the
        function `f` and stores the output in the cache corresponding to
        `key`.  In either case the output of the function is returned.

        :param key: the key of the object to be stored or retrieved
        :param f: the function, whose output is to be cached; if `None`,
            the cache entry for `key` is cleared and `None` is returned
        :param time_expire: expiration of the cache in seconds

        - `time_expire` is used to compare the current time with the time
          when the requested object was last saved in cache. It does not
          affect future requests.
        - Setting `time_expire` to 0 or negative value forces the cache to
          refresh.
        """
        raise NotImplementedError

    def clear(self, regex=None):
        """
        Clears the cache of all keys that match the provided regular
        expression. If no regular expression is provided, it clears all
        entries in cache.

        Parameters
        ----------
        regex:
            if provided, only keys matching the regex will be cleared.
            Otherwise all keys are cleared.
        """
        raise NotImplementedError

    def increment(self, key, value=1):
        """
        Increments the cached value for the given key by the amount in value.

        Parameters
        ----------
        key:
            key for the cached object to be incremented
        value:
            amount of the increment (defaults to 1, can be negative)
        """
        raise NotImplementedError

    def _clear(self, storage, regex):
        """
        Auxiliary function called by `clear` to search and clear cache entries
        """
        r = re.compile(regex)
        # Snapshot the keys before deleting: removing entries from a mapping
        # while iterating it directly is unsafe (the old items() loop only
        # worked because Python 2 returns a list).  Iterating keys also
        # avoids loading values (e.g. unpickling shelve entries) we never use.
        for key in list(storage.keys()):
            if r.match(str(key)):
                del storage[key]
class CacheInRam(CacheAbstract):
    """
    Ram based caching
    This is implemented as global (per process, shared by all threads)
    dictionary.
    A mutex-lock mechanism avoid conflicts.
    """
    # Class-level lock and storage: shared by every CacheInRam instance in
    # the process, so all threads see the same cached values.
    locker = thread.allocate_lock()
    meta_storage = {}
    def __init__(self, request=None):
        # request.application selects the per-application storage dict;
        # with no request all instances share the '' storage.
        self.locker.acquire()
        self.request = request
        if request:
            app = request.application
        else:
            app = ''
        if not app in self.meta_storage:
            # First cache for this app: create storage with zeroed stats.
            self.storage = self.meta_storage[app] = {CacheAbstract.cache_stats_name: {
                'hit_total': 0,
                'misses': 0,
            }}
        else:
            self.storage = self.meta_storage[app]
        self.locker.release()
    def clear(self, regex=None):
        # Drop all keys (or only those matching regex), then recreate the
        # statistics entry if it was wiped along the way.
        self.locker.acquire()
        storage = self.storage
        if regex is None:
            storage.clear()
        else:
            self._clear(storage, regex)
        if not CacheAbstract.cache_stats_name in storage.keys():
            storage[CacheAbstract.cache_stats_name] = {
                'hit_total': 0,
                'misses': 0,
            }
        self.locker.release()
    def __call__(self, key, f,
                 time_expire = DEFAULT_TIME_EXPIRE):
        """
        Attention! cache.ram does not copy the cached object. It just stores a reference to it.
        Turns out the deepcopying the object has some problems:
        1) would break backward compatibility
        2) would be limiting because people may want to cache live objects
        3) would work unless we deepcopy no storage and retrival which would make things slow.
        Anyway. You can deepcopy explicitly in the function generating the value to be cached.
        """
        dt = time_expire
        self.locker.acquire()
        item = self.storage.get(key, None)
        # f is None means "clear this entry".
        if item and f is None:
            del self.storage[key]
        # hit_total counts every call; 'misses' below counts only the calls
        # that had to recompute the value.
        self.storage[CacheAbstract.cache_stats_name]['hit_total'] += 1
        self.locker.release()
        if f is None:
            return None
        # Entry is fresh (or never expires when dt is None): return it.
        if item and (dt is None or item[0] > time.time() - dt):
            return item[1]
        # Expired or absent: recompute outside the lock (f may be slow),
        # then store the (timestamp, value) pair.
        value = f()
        self.locker.acquire()
        self.storage[key] = (time.time(), value)
        self.storage[CacheAbstract.cache_stats_name]['misses'] += 1
        self.locker.release()
        return value
    def increment(self, key, value=1):
        # Add `value` to the cached number under `key` while holding the
        # lock; initializes the entry to `value` if the key is absent.
        self.locker.acquire()
        try:
            if key in self.storage:
                value = self.storage[key][1] + value
            self.storage[key] = (time.time(), value)
        except BaseException, e:
            # Release the lock before propagating so we never deadlock.
            self.locker.release()
            raise e
        self.locker.release()
        return value
class CacheOnDisk(CacheAbstract):
    """
    Disk based cache
    This is implemented as a shelve object and it is shared by multiple web2py
    processes (and threads) as long as they share the same filesystem.
    The file is locked when accessed.
    Disk cache provides persistance when web2py is started/stopped but it is
    slower than `CacheInRam`
    Values stored in disk cache must be pickable.
    """
    # Folders whose statistics entry has already been verified; lets the
    # (slow) check-and-initialize step in __init__ run once per folder.
    speedup_checks = set()
    def __init__(self, request, folder=None):
        self.request = request
        # Lets test if the cache folder exists, if not
        # we are going to create it
        folder = folder or os.path.join(request.folder, 'cache')
        if not os.path.exists(folder):
            os.mkdir(folder)
        ### we need this because of a possible bug in shelve that may
        ### or may not lock
        self.locker_name = os.path.join(folder,'cache.lock')
        self.shelve_name = os.path.join(folder,'cache.shelve')
        locker, locker_locked = None, False
        speedup_key = (folder,CacheAbstract.cache_stats_name)
        # Initialize the statistics entry the first time we see this folder
        # (or whenever the shelve file is missing).
        if not speedup_key in self.speedup_checks or \
                not os.path.exists(self.shelve_name):
            try:
                locker = open(self.locker_name, 'a')
                portalocker.lock(locker, portalocker.LOCK_EX)
                locker_locked = True
                storage = shelve.open(self.shelve_name)
                try:
                    if not storage.has_key(CacheAbstract.cache_stats_name):
                        storage[CacheAbstract.cache_stats_name] = {
                            'hit_total': 0,
                            'misses': 0,
                        }
                    storage.sync()
                finally:
                    storage.close()
                self.speedup_checks.add(speedup_key)
            except ImportError:
                pass # no module _bsddb, ignoring exception now so it makes a ticket only if used
            except:
                # Any other failure means the shelve file is unusable:
                # remove it so it can be rebuilt on next access.
                logger.error('corrupted file %s, will try delete it!' \
                                 % self.shelve_name)
                try:
                    os.unlink(self.shelve_name)
                except IOError:
                    logger.warn('unable to delete file %s' % self.shelve_name)
                except OSError:
                    logger.warn('unable to delete file %s' % self.shelve_name)
            if locker_locked:
                portalocker.unlock(locker)
            if locker:
                locker.close()
    def clear(self, regex=None):
        # Clear all keys (or only those matching regex) under the file lock,
        # then make sure the statistics entry still exists.
        locker = open(self.locker_name,'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        storage = shelve.open(self.shelve_name)
        try:
            if regex is None:
                storage.clear()
            else:
                self._clear(storage, regex)
            if not CacheAbstract.cache_stats_name in storage.keys():
                storage[CacheAbstract.cache_stats_name] = {
                    'hit_total': 0,
                    'misses': 0,
                }
            storage.sync()
        finally:
            storage.close()
            portalocker.unlock(locker)
            locker.close()
    def __call__(self, key, f,
                 time_expire = DEFAULT_TIME_EXPIRE):
        # See CacheAbstract.__call__ for the contract; entries are stored
        # as (timestamp, value) pairs in the shelve.
        dt = time_expire
        locker = open(self.locker_name,'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        storage = shelve.open(self.shelve_name)
        item = storage.get(key, None)
        # f is None means "clear this entry".
        if item and f is None:
            del storage[key]
        # hit_total counts every call; 'misses' below counts only the calls
        # that had to recompute the value.
        storage[CacheAbstract.cache_stats_name] = {
            'hit_total': storage[CacheAbstract.cache_stats_name]['hit_total'] + 1,
            'misses': storage[CacheAbstract.cache_stats_name]['misses']
        }
        storage.sync()
        portalocker.unlock(locker)
        locker.close()
        # NOTE(review): on the two early returns below the shelve object is
        # left open -- presumably relying on GC-time close; confirm.
        if f is None:
            return None
        if item and (dt is None or item[0] > time.time() - dt):
            return item[1]
        # Expired or absent: recompute outside the lock (f may be slow),
        # then re-lock and store the new (timestamp, value) pair.
        value = f()
        locker = open(self.locker_name,'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        storage[key] = (time.time(), value)
        storage[CacheAbstract.cache_stats_name] = {
            'hit_total': storage[CacheAbstract.cache_stats_name]['hit_total'],
            'misses': storage[CacheAbstract.cache_stats_name]['misses'] + 1
        }
        storage.sync()
        storage.close()
        portalocker.unlock(locker)
        locker.close()
        return value
    def increment(self, key, value=1):
        # Add `value` to the cached number under `key` while holding the
        # file lock; initializes the entry to `value` if the key is absent.
        locker = open(self.locker_name,'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        storage = shelve.open(self.shelve_name)
        try:
            if key in storage:
                value = storage[key][1] + value
            storage[key] = (time.time(), value)
            storage.sync()
        finally:
            storage.close()
            portalocker.unlock(locker)
            locker.close()
        return value
class Cache(object):
    """
    Sets up generic caching, creating an instance of both CacheInRam and
    CacheOnDisk.
    In case of GAE will make use of gluon.contrib.gae_memcache.
    - self.ram is an instance of CacheInRam
    - self.disk is an instance of CacheOnDisk
    """

    def __init__(self, request):
        """
        Parameters
        ----------
        request:
            the global request object
        """
        # GAE will have a special caching
        if have_settings and settings.global_settings.web2py_runtime_gae:
            from contrib.gae_memcache import MemcacheClient
            self.ram = self.disk = MemcacheClient(request)
        else:
            # Otherwise use ram (and try also disk)
            self.ram = CacheInRam(request)
            try:
                self.disk = CacheOnDisk(request)
            except IOError:
                logger.warning('no cache.disk (IOError)')
            except AttributeError:
                # normally not expected anymore, as GAE has already
                # been accounted for
                logger.warning('no cache.disk (AttributeError)')

    def __call__(self,
                 key = None,
                 time_expire = DEFAULT_TIME_EXPIRE,
                 cache_model = None):
        """
        Decorator function that can be used to cache any function/method.

        Example::

            @cache('key', 5000, cache.ram)
            def f():
                return time.ctime()

        When the function f is called, web2py tries to retrieve
        the value corresponding to `key` from the cache if the
        object exists and if it did not expire, else it calls the function `f`
        and stores the output in the cache corresponding to `key`. In either
        case the output of the function is returned.

        :param key: the key of the object to be stored or retrieved
        :param time_expire: expiration of the cache in seconds
        :param cache_model: `cache.ram`, `cache.disk`, or other
            (like `cache.memcache` if defined). It defaults to `cache.ram`.

        Notes
        -----
        `time_expire` is used to compare the current time with the time when
        the requested object was last saved in cache. It does not affect
        future requests.
        Setting `time_expire` to 0 or negative value forces the cache to
        refresh.
        If the function `f` is an action, we suggest using
        `request.env.path_info` as key.
        """
        if not cache_model:
            cache_model = self.ram
        def tmp(func):
            def action():
                return cache_model(key, func, time_expire)
            # Bug fix: this used to read ``action.__name___`` (three
            # underscores), which created a junk attribute and left the
            # wrapper's real __name__ untouched.
            action.__name__ = func.__name__
            action.__doc__ = func.__doc__
            return action
        return tmp
| Python |
#!/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Contains:
- wsgibase: the gluon wsgi application
"""
import gc
import cgi
import cStringIO
import Cookie
import os
import re
import copy
import sys
import time
import thread
import datetime
import signal
import socket
import tempfile
import random
import string
from fileutils import abspath, write_file, parse_version
from settings import global_settings
from admin import add_path_first, create_missing_folders, create_missing_app_folders
from globals import current
from custom_import import custom_import_install
# Remarks:
# calling script has inserted path to script directory into sys.path
# applications_parent (path to applications/, site-packages/ etc)
# defaults to that directory set sys.path to
# ("", gluon_parent/site-packages, gluon_parent, ...)
#
# this is wrong:
# web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# because we do not want the path to this file which may be Library.zip
# gluon_parent is the directory containing gluon, web2py.py, logging.conf
# and the handlers.
# applications_parent (web2py_path) is the directory containing applications/
# and routes.py
# The two are identical unless web2py_path is changed via the web2py.py -f folder option
# main.web2py_path is the same as applications_parent (for backward compatibility)
# GAE's sandbox has no os.mkdir: force database-backed sessions there.
if not hasattr(os, 'mkdir'):
    global_settings.db_sessions = True
if global_settings.db_sessions is not True:
    global_settings.db_sessions = set()
# Resolve the installation and applications directories (see the long
# comment above for the gluon_parent / applications_parent distinction).
global_settings.gluon_parent = os.environ.get('web2py_path', os.getcwd())
global_settings.applications_parent = global_settings.gluon_parent
web2py_path = global_settings.applications_parent # backward compatibility
global_settings.app_folders = set()
global_settings.debugging = False
# Install the custom application-aware importer and make sure the expected
# directory layout exists before anything else is imported.
custom_import_install(web2py_path)
create_missing_folders()
# set up logging for subsequent imports
import logging
import logging.config
# Configure logging from logging.conf when present, otherwise fall back to
# logging.basicConfig (see "set up logging for subsequent imports" above).
logpath = abspath("logging.conf")
if os.path.exists(logpath):
    logging.config.fileConfig(abspath("logging.conf"))
else:
    logging.basicConfig()
logger = logging.getLogger("web2py")
from restricted import RestrictedError
from http import HTTP, redirect
from globals import Request, Response, Session
from compileapp import build_environment, run_models_in, \
run_controller_in, run_view_in
from fileutils import copystream
from contenttype import contenttype
from dal import BaseAdapter
from settings import global_settings
from validators import CRYPT
from cache import Cache
from html import URL as Url
import newcron
import rewrite
__all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer']
requests = 0 # gc timer
# Security Checks: validate URL and session_id here,
# accept_language is validated in languages
# pattern used to validate client address
regex_client = re.compile('[\w\-:]+(\.[\w\-]+)*\.?') # ## to account for IPV6
# Read the version string shipped in the VERSION file once at import time.
version_info = open(abspath('VERSION', gluon=True), 'r')
web2py_version = parse_version(version_info.read().strip())
version_info.close()
global_settings.web2py_version = web2py_version
# The Rocket web server is optional (e.g. absent on GAE).
try:
    import rocket
except:
    if not global_settings.web2py_runtime_gae:
        logger.warn('unable to import Rocket')
rewrite.load()
def get_client(env):
    """
    guess the client address from the environment variables
    first tries 'http_x_forwarded_for', secondly 'remote_addr'
    if all fails assume '127.0.0.1' (running locally)
    """
    for header in ('http_x_forwarded_for', 'remote_addr'):
        found = regex_client.search(env.get(header, ''))
        if found:
            return found.group()
    return '127.0.0.1'
def copystream_progress(request, chunk_size= 10**5):
    """
    copies request.env.wsgi_input into request.body
    and stores progress upload status in cache.ram
    X-Progress-ID:length and X-Progress-ID:uploaded
    """
    # No declared body: nothing to copy.
    if not request.env.content_length:
        return cStringIO.StringIO()
    source = request.env.wsgi_input
    size = int(request.env.content_length)
    dest = tempfile.TemporaryFile()
    if not 'X-Progress-ID' in request.vars:
        # No progress tracking requested: plain bulk copy.
        copystream(source, dest, size, chunk_size)
        return dest
    cache_key = 'X-Progress-ID:'+request.vars['X-Progress-ID']
    cache = Cache(request)
    # Publish the total length and a zeroed upload counter
    # (time_expire=0 forces a refresh of any stale entry).
    cache.ram(cache_key+':length', lambda: size, 0)
    cache.ram(cache_key+':uploaded', lambda: 0, 0)
    while size > 0:
        # Copy chunk by chunk, bumping the uploaded counter as we go.
        if size < chunk_size:
            data = source.read(size)
            cache.ram.increment(cache_key+':uploaded', size)
        else:
            data = source.read(chunk_size)
            cache.ram.increment(cache_key+':uploaded', chunk_size)
        length = len(data)
        # Never copy more than the declared content length.
        if length > size:
            (data, length) = (data[:size], size)
        size -= length
        if length == 0:
            break
        dest.write(data)
        if length < chunk_size:
            break
    dest.seek(0)
    # Passing f=None clears the progress entries from the cache.
    cache.ram(cache_key+':length', None)
    cache.ram(cache_key+':uploaded', None)
    return dest
def serve_controller(request, response, session):
    """
    this function is used to generate a dynamic page.
    It first runs all models, then runs the function in the controller,
    and then tries to render the output using a view/template.
    this function must run from the [application] folder.
    A typical example would be the call to the url
    /[application]/[controller]/[function] that would result in a call
    to [function]() in applications/[application]/[controller].py
    rendered by applications/[application]/views/[controller]/[function].html
    """
    # ##################################################
    # build environment for controller and view
    # ##################################################
    environment = build_environment(request, response, session)
    # set default view, controller can override it
    response.view = '%s/%s.%s' % (request.controller,
                                  request.function,
                                  request.extension)
    # also, make sure the flash is passed through
    # ##################################################
    # process models, controller and view (if required)
    # ##################################################
    run_models_in(environment)
    response._view_environment = copy.copy(environment)
    page = run_controller_in(request.controller, request.function, environment)
    if isinstance(page, dict):
        # Controller returned a dict: expose its entries to the view and
        # render the page through the template engine.
        response._vars = page
        for key in page:
            response._view_environment[key] = page[key]
        run_view_in(response._view_environment)
        page = response.body.getvalue()
    # logic to garbage collect after exec, not always, once every 100 requests
    global requests
    requests = ('requests' in globals()) and (requests+1) % 100 or 0
    if not requests: gc.collect()
    # end garbage collection logic
    # The finished page is delivered by raising HTTP: the normal,
    # non-error control flow for a successful request.
    raise HTTP(response.status, page, **response.headers)
def start_response_aux(status, headers, exc_info, response=None):
    """
    in controller you can use::

        - request.wsgi.environ
        - request.wsgi.start_response

    to call third party WSGI applications
    """
    # Keep only the numeric code from e.g. "200 OK".
    response.status = str(status).split(' ',1)[0]
    response.headers = dict(headers)

    def writer(*args, **kargs):
        # Write unescaped: the wrapped WSGI app produces final output.
        return response.write(escape=False, *args, **kargs)
    return writer
def middleware_aux(request, response, *middleware_apps):
    """
    In your controller use::

        @request.wsgi.middleware(middleware1, middleware2, ...)

    to decorate actions with WSGI middleware. actions must return strings.
    uses a simulated environment so it may have weird behavior in some cases
    """
    def middleware(f):
        # Wrap the action f as a minimal WSGI app: call it, announce
        # status/headers, and return the body as an iterable.
        def app(environ, start_response):
            data = f()
            start_response(response.status,response.headers.items())
            if isinstance(data,list):
                return data
            return [data]
        # Chain the supplied middleware around the app, innermost first.
        for item in middleware_apps:
            app=item(app)
        def caller(app):
            return app(request.wsgi.environ,request.wsgi.start_response)
        # Bind caller/app as defaults to freeze their current values
        # (avoids the late-binding closure pitfall).
        return lambda caller=caller, app=app: caller(app)
    return middleware
def environ_aux(environ, request):
    """
    Return a shallow copy of *environ* rewired so that a nested WSGI app
    reads the already-parsed request body instead of the raw socket.
    """
    wsgi_env = copy.copy(environ)
    wsgi_env['wsgi.input'] = request.body
    wsgi_env['wsgi.version'] = 1
    return wsgi_env
def parse_get_post_vars(request, environ):
    """
    Fill request.get_vars, request.post_vars and the merged request.vars
    from the query string and (for POST/PUT/BOTH methods) the request body.
    """
    # always parse variables in URL for GET, POST, PUT, DELETE, etc. in get_vars
    dget = cgi.parse_qsl(request.env.query_string or '', keep_blank_values=1)
    for (key, value) in dget:
        if key in request.get_vars:
            # Repeated query-string keys accumulate into a list.
            if isinstance(request.get_vars[key], list):
                request.get_vars[key] += [value]
            else:
                request.get_vars[key] = [request.get_vars[key]] + [value]
        else:
            request.get_vars[key] = value
        request.vars[key] = request.get_vars[key]
    # parse POST variables on POST, PUT, BOTH only in post_vars
    request.body = copystream_progress(request) ### stores request body
    if (request.body and request.env.request_method in ('POST', 'PUT', 'BOTH')):
        dpost = cgi.FieldStorage(fp=request.body,environ=environ,keep_blank_values=1)
        # The same detection used by FieldStorage to detect multipart POSTs
        is_multipart = dpost.type[:10] == 'multipart/'
        # Rewind so the body can be re-read later (e.g. by wsgi hooks).
        request.body.seek(0)
        isle25 = sys.version_info[1] <= 5
        def listify(a):
            return (not isinstance(a,list) and [a]) or a
        try:
            keys = sorted(dpost)
        except TypeError:
            keys = []
        for key in keys:
            dpk = dpost[key]
            # if an element is not a file, replace it with its value;
            # else leave it alone (file uploads stay FieldStorage objects)
            if isinstance(dpk, list):
                if not dpk[0].filename:
                    value = [x.value for x in dpk]
                else:
                    value = [x for x in dpk]
            elif not dpk.filename:
                value = dpk.value
            else:
                value = dpk
            pvalue = listify(value)
            if key in request.vars:
                # Key present in both GET and POST: compensate for how
                # FieldStorage folds query-string values into the result
                # (differs for Python <= 2.5 and for multipart bodies).
                gvalue = listify(request.vars[key])
                if isle25:
                    value = pvalue + gvalue
                elif is_multipart:
                    pvalue = pvalue[len(gvalue):]
                else:
                    pvalue = pvalue[:-len(gvalue)]
            request.vars[key] = value
            if len(pvalue):
                request.post_vars[key] = (len(pvalue)>1 and pvalue) or pvalue[0]
def wsgibase(environ, responder):
"""
this is the gluon wsgi application. the first function called when a page
is requested (static or dynamic). it can be called by paste.httpserver
or by apache mod_wsgi.
- fills request with info
- the environment variables, replacing '.' with '_'
- adds web2py path and version info
- compensates for fcgi missing path_info and query_string
- validates the path in url
The url path must be either:
1. for static pages:
- /<application>/static/<file>
2. for dynamic pages:
- /<application>[/<controller>[/<function>[/<sub>]]][.<extension>]
- (sub may go several levels deep, currently 3 levels are supported:
sub1/sub2/sub3)
The naming conventions are:
- application, controller, function and extension may only contain
[a-zA-Z0-9_]
- file and sub may also contain '-', '=', '.' and '/'
"""
current.__dict__.clear()
request = Request()
response = Response()
session = Session()
request.env.web2py_path = global_settings.applications_parent
request.env.web2py_version = web2py_version
request.env.update(global_settings)
static_file = False
try:
try:
try:
# ##################################################
# handle fcgi missing path_info and query_string
# select rewrite parameters
# rewrite incoming URL
# parse rewritten header variables
# parse rewritten URL
# serve file if static
# ##################################################
if not environ.get('PATH_INFO',None) and \
environ.get('REQUEST_URI',None):
# for fcgi, get path_info and query_string from request_uri
items = environ['REQUEST_URI'].split('?')
environ['PATH_INFO'] = items[0]
if len(items) > 1:
environ['QUERY_STRING'] = items[1]
else:
environ['QUERY_STRING'] = ''
if not environ.get('HTTP_HOST',None):
environ['HTTP_HOST'] = '%s:%s' % (environ.get('SERVER_NAME'),
environ.get('SERVER_PORT'))
(static_file, environ) = rewrite.url_in(request, environ)
if static_file:
if environ.get('QUERY_STRING', '')[:10] == 'attachment':
response.headers['Content-Disposition'] = 'attachment'
response.stream(static_file, request=request)
# ##################################################
# fill in request items
# ##################################################
http_host = request.env.http_host.split(':',1)[0]
local_hosts = [http_host,'::1','127.0.0.1','::ffff:127.0.0.1']
if not global_settings.web2py_runtime_gae:
local_hosts += [socket.gethostname(),
socket.gethostbyname(http_host)]
request.client = get_client(request.env)
request.folder = abspath('applications',
request.application) + os.sep
x_req_with = str(request.env.http_x_requested_with).lower()
request.ajax = x_req_with == 'xmlhttprequest'
request.cid = request.env.http_web2py_component_element
request.is_local = request.env.remote_addr in local_hosts
request.is_https = request.env.wsgi_url_scheme \
in ['https', 'HTTPS'] or request.env.https == 'on'
# ##################################################
# compute a request.uuid to be used for tickets and toolbar
# ##################################################
response.uuid = request.compute_uuid()
# ##################################################
# access the requested application
# ##################################################
if not os.path.exists(request.folder):
if request.application == \
rewrite.thread.routes.default_application \
and request.application != 'welcome':
request.application = 'welcome'
redirect(Url(r=request))
elif rewrite.thread.routes.error_handler:
_handler = rewrite.thread.routes.error_handler
redirect(Url(_handler['application'],
_handler['controller'],
_handler['function'],
args=request.application))
else:
raise HTTP(404, rewrite.thread.routes.error_message \
% 'invalid request',
web2py_error='invalid application')
elif not request.is_local and \
os.path.exists(os.path.join(request.folder,'DISABLED')):
raise HTTP(200, "<html><body><h1>Down for maintenance</h1></body></html>")
request.url = Url(r=request, args=request.args,
extension=request.raw_extension)
# ##################################################
# build missing folders
# ##################################################
create_missing_app_folders(request)
# ##################################################
# get the GET and POST data
# ##################################################
parse_get_post_vars(request, environ)
# ##################################################
# expose wsgi hooks for convenience
# ##################################################
request.wsgi.environ = environ_aux(environ,request)
request.wsgi.start_response = \
lambda status='200', headers=[], \
exec_info=None, response=response: \
start_response_aux(status, headers, exec_info, response)
request.wsgi.middleware = \
lambda *a: middleware_aux(request,response,*a)
# ##################################################
# load cookies
# ##################################################
if request.env.http_cookie:
try:
request.cookies.load(request.env.http_cookie)
except Cookie.CookieError, e:
pass # invalid cookies
# ##################################################
# try load session or create new session file
# ##################################################
session.connect(request, response)
# ##################################################
# set no-cache headers
# ##################################################
response.headers['Content-Type'] = \
contenttype('.'+request.extension)
response.headers['Cache-Control'] = \
'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'
response.headers['Expires'] = \
time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
response.headers['Pragma'] = 'no-cache'
# ##################################################
# run controller
# ##################################################
if global_settings.debugging and request.application != "admin":
import gluon.debug
# activate the debugger and wait to reach application code
gluon.debug.dbg.do_debug(mainpyfile=request.folder)
serve_controller(request, response, session)
except HTTP, http_response:
if static_file:
return http_response.to(responder)
if request.body:
request.body.close()
# ##################################################
# on success, try store session in database
# ##################################################
session._try_store_in_db(request, response)
# ##################################################
# on success, commit database
# ##################################################
if response.do_not_commit is True:
BaseAdapter.close_all_instances(None)
elif response._custom_commit:
response._custom_commit()
else:
BaseAdapter.close_all_instances('commit')
# ##################################################
# if session not in db try store session on filesystem
# this must be done after trying to commit database!
# ##################################################
session._try_store_on_disk(request, response)
# ##################################################
# store cookies in headers
# ##################################################
if request.cid:
if response.flash and not 'web2py-component-flash' in http_response.headers:
http_response.headers['web2py-component-flash'] = \
str(response.flash).replace('\n','')
if response.js and not 'web2py-component-command' in http_response.headers:
http_response.headers['web2py-component-command'] = \
response.js.replace('\n','')
if session._forget and \
response.session_id_name in response.cookies:
del response.cookies[response.session_id_name]
elif session._secure:
response.cookies[response.session_id_name]['secure'] = True
if len(response.cookies)>0:
http_response.headers['Set-Cookie'] = \
[str(cookie)[11:] for cookie in response.cookies.values()]
ticket=None
except RestrictedError, e:
if request.body:
request.body.close()
# ##################################################
# on application error, rollback database
# ##################################################
ticket = e.log(request) or 'unknown'
if response._custom_rollback:
response._custom_rollback()
else:
BaseAdapter.close_all_instances('rollback')
http_response = \
HTTP(500, rewrite.thread.routes.error_message_ticket % \
dict(ticket=ticket),
web2py_error='ticket %s' % ticket)
except:
if request.body:
request.body.close()
# ##################################################
# on application error, rollback database
# ##################################################
try:
if response._custom_rollback:
response._custom_rollback()
else:
BaseAdapter.close_all_instances('rollback')
except:
pass
e = RestrictedError('Framework', '', '', locals())
ticket = e.log(request) or 'unrecoverable'
http_response = \
HTTP(500, rewrite.thread.routes.error_message_ticket \
% dict(ticket=ticket),
web2py_error='ticket %s' % ticket)
finally:
if response and hasattr(response, 'session_file') \
and response.session_file:
response.session_file.close()
session._unlock(response)
http_response, new_environ = rewrite.try_rewrite_on_error(
http_response, request, environ, ticket)
if not http_response:
return wsgibase(new_environ,responder)
if global_settings.web2py_crontype == 'soft':
newcron.softcron(global_settings.applications_parent).start()
return http_response.to(responder)
def save_password(password, port):
    """
    used by main() to save the password in the parameters_port.py file.

    The password argument may be a literal password, '<random>' (generate
    and CRYPT-hash a new 8-char password, printing it to the console),
    '<recycle>' (keep the existing parameters file if present), or
    '<pam_user:...>' (delegate authentication to PAM for that user).
    Writes ``password="<hash>"`` or ``password=None`` to parameters_<port>.py.
    """
    password_file = abspath('parameters_%i.py' % port)
    if password == '<random>':
        # make up a new password
        chars = string.letters + string.digits
        password = ''.join([random.choice(chars) for i in range(8)])
        cpassword = CRYPT()(password)[0]
        # the plaintext is shown once on the console; only the hash is stored
        print '******************* IMPORTANT!!! ************************'
        print 'your admin password is "%s"' % password
        print '*********************************************************'
    elif password == '<recycle>':
        # reuse the current password if any
        if os.path.exists(password_file):
            return
        else:
            password = ''
    elif password.startswith('<pam_user:'):
        # use the pam password for specified user
        # stored verbatim minus the angle brackets, e.g. 'pam_user:bob'
        cpassword = password[1:-1]
    else:
        # use provided password
        cpassword = CRYPT()(password)[0]
    fp = open(password_file, 'w')
    if password:
        fp.write('password="%s"\n' % cpassword)
    else:
        # empty password disables the admin interface
        fp.write('password=None\n')
    fp.close()
def appfactory(wsgiapp=wsgibase,
               logfilename='httpserver.log',
               profilerfilename='profiler.log'):
    """
    generates a wsgi application that does logging and profiling and calls
    wsgibase

    .. function:: gluon.main.appfactory(
            [wsgiapp=wsgibase
             [, logfilename='httpserver.log'
             [, profilerfilename='profiler.log']]])

    logfilename may be None (log to stdout), a filename string, or any
    object with a write() method. When profilerfilename is set, requests
    are serialized behind a lock and profiled with cProfile.
    """
    # start each run with a fresh profiler log
    if profilerfilename and os.path.exists(profilerfilename):
        os.unlink(profilerfilename)
    locker = thread.allocate_lock()

    def app_with_logging(environ, responder):
        """
        a wsgi app that does logging and profiling and calls wsgibase
        """
        status_headers = []

        def responder2(s, h):
            """
            wsgi responder app

            Captures the status and headers so the log line below can
            report the HTTP status code, then delegates to the real
            responder.
            """
            status_headers.append(s)
            status_headers.append(h)
            return responder(s, h)

        time_in = time.time()
        # one-element list so the exec'ed profiler statement can assign to it
        ret = [0]
        if not profilerfilename:
            ret[0] = wsgiapp(environ, responder2)
        else:
            import cProfile
            import pstats
            logger.warn('profiler is on. this makes web2py slower and serial')
            # the lock serializes requests so profiles do not interleave
            locker.acquire()
            cProfile.runctx('ret[0] = wsgiapp(environ, responder2)',
                            globals(), locals(), profilerfilename+'.tmp')
            stat = pstats.Stats(profilerfilename+'.tmp')
            stat.stream = cStringIO.StringIO()
            stat.strip_dirs().sort_stats("time").print_stats(80)
            profile_out = stat.stream.getvalue()
            profile_file = open(profilerfilename, 'a')
            profile_file.write('%s\n%s\n%s\n%s\n\n' % \
                                   ('='*60, environ['PATH_INFO'], '='*60, profile_out))
            profile_file.close()
            locker.release()
        try:
            # apache-style access log line: addr, timestamp, method, path,
            # protocol, status, elapsed seconds
            line = '%s, %s, %s, %s, %s, %s, %f\n' % (
                environ['REMOTE_ADDR'],
                datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
                environ['REQUEST_METHOD'],
                environ['PATH_INFO'].replace(',', '%2C'),
                environ['SERVER_PROTOCOL'],
                (status_headers[0])[:3],
                time.time() - time_in,
                )
            if not logfilename:
                sys.stdout.write(line)
            elif isinstance(logfilename, str):
                write_file(logfilename, line, 'a')
            else:
                # assumed to be a file-like object
                logfilename.write(line)
        except:
            # logging must never break request handling
            pass
        return ret[0]

    return app_with_logging
class HttpServer(object):
    """
    the web2py web server (Rocket)

    Thin wrapper around rocket.Rocket: validates options, persists the
    admin password, configures SSL socket tuples and wires in the logging
    wsgi app produced by appfactory().
    """

    def __init__(
        self,
        ip='127.0.0.1',
        port=8000,
        password='',
        pid_filename='httpserver.pid',
        log_filename='httpserver.log',
        profiler_filename=None,
        ssl_certificate=None,
        ssl_private_key=None,
        ssl_ca_certificate=None,
        min_threads=None,
        max_threads=None,
        server_name=None,
        request_queue_size=5,
        timeout=10,
        socket_timeout = 1,
        shutdown_timeout=None, # Rocket does not use a shutdown timeout
        path=None,
        interfaces=None # Rocket is able to use several interfaces - must be list of socket-tuples as string
        ):
        """
        starts the web server.
        """
        if interfaces:
            # if interfaces is specified, it must be tested for rocket parameter correctness
            # not necessarily completely tested (e.g. content of tuples or ip-format)
            import types
            if isinstance(interfaces,types.ListType):
                for i in interfaces:
                    if not isinstance(i,types.TupleType):
                        # NOTE(review): raising a string is a TypeError on
                        # Python 2.6+; should be a real exception class
                        raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/"
            else:
                raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/"
        if path:
            # if a path is specified change the global variables so that web2py
            # runs from there instead of cwd or os.environ['web2py_path']
            global web2py_path
            path = os.path.normpath(path)
            web2py_path = path
            global_settings.applications_parent = path
            os.chdir(path)
            [add_path_first(p) for p in (path, abspath('site-packages'), "")]
            custom_import_install(web2py_path)
            if os.path.exists("logging.conf"):
                logging.config.fileConfig("logging.conf")
        save_password(password, port)
        self.pid_filename = pid_filename
        if not server_name:
            server_name = socket.gethostname()
        logger.info('starting web server...')
        rocket.SERVER_NAME = server_name
        rocket.SOCKET_TIMEOUT = socket_timeout
        # Rocket expects (ip, port[, key, cert[, ca]]) tuples; SSL entries
        # are only appended when both files exist and the ssl module loads
        sock_list = [ip, port]
        if not ssl_certificate or not ssl_private_key:
            logger.info('SSL is off')
        elif not rocket.ssl:
            logger.warning('Python "ssl" module unavailable. SSL is OFF')
        elif not os.path.exists(ssl_certificate):
            logger.warning('unable to open SSL certificate. SSL is OFF')
        elif not os.path.exists(ssl_private_key):
            logger.warning('unable to open SSL private key. SSL is OFF')
        else:
            sock_list.extend([ssl_private_key, ssl_certificate])
            if ssl_ca_certificate:
                sock_list.append(ssl_ca_certificate)
            logger.info('SSL is ON')
        app_info = {'wsgi_app': appfactory(wsgibase,
                                           log_filename,
                                           profiler_filename) }
        self.server = rocket.Rocket(interfaces or tuple(sock_list),
                                    method='wsgi',
                                    app_info=app_info,
                                    min_threads=min_threads,
                                    max_threads=max_threads,
                                    queue_size=int(request_queue_size),
                                    timeout=int(timeout),
                                    handle_signals=False,
                                    )

    def start(self):
        """
        start the web server

        Writes the pid file and blocks in Rocket's serve loop. SIGTERM and
        SIGINT are mapped to self.stop() where the platform allows it.
        """
        try:
            signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop())
            signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop())
        except:
            # signal handlers can only be set in the main thread
            pass
        write_file(self.pid_filename, str(os.getpid()))
        self.server.start()

    def stop(self, stoplogging=False):
        """
        stop cron and the web server

        Removes the pid file on the way out; a missing file is ignored.
        """
        newcron.stopcron()
        self.server.stop(stoplogging)
        try:
            os.unlink(self.pid_filename)
        except:
            pass
| Python |
# this file exists for backward compatibility
__all__ = ['DAL','Field','drivers']
from dal import DAL, Field, Table, Query, Set, Expression, Row, Rows, drivers, BaseAdapter, SQLField, SQLTable, SQLXorable, SQLQuery, SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB, SQLALL, SQLCustomType
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
::
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496942
# Title: Cross-site scripting (XSS) defense
# Submitter: Josh Goldfoot (other recipes)
# Last Updated: 2006/08/05
# Version no: 1.0
"""
from htmllib import HTMLParser
from cgi import escape
from urlparse import urlparse
from formatter import AbstractFormatter
from htmlentitydefs import entitydefs
from xml.sax.saxutils import quoteattr
__all__ = ['sanitize']
def xssescape(text):
    """Gets rid of < and > and & and, for good measure, :

    HTML-escapes *text* (quotes included) and additionally replaces every
    colon with its numeric character reference, so scheme prefixes such as
    ``javascript:`` cannot survive in escaped output.
    """
    # BUGFIX: the previous code did .replace(':', ':') which is a no-op;
    # the docstring's promise to neutralize ':' requires the '&#58;' entity.
    return escape(text, quote=True).replace(':', '&#58;')
class XssCleaner(HTMLParser):
    """HTML parser that whitelists tags/attributes and escapes or strips
    everything else, accumulating the cleaned markup in self.result."""

    def __init__(
        self,
        permitted_tags=[
            'a',
            'b',
            'blockquote',
            'br/',
            'i',
            'li',
            'ol',
            'ul',
            'p',
            'cite',
            'code',
            'pre',
            'img/',
            ],
        allowed_attributes={'a': ['href', 'title'], 'img': ['src', 'alt'
                            ], 'blockquote': ['type']},
        fmt=AbstractFormatter,
        strip_disallowed = False
        ):
        HTMLParser.__init__(self, fmt)
        self.result = ''
        # stack (most-recent first) of permitted tags still awaiting </tag>
        self.open_tags = []
        # tags ending in '/' in permitted_tags are self-closing (br/, img/)
        self.permitted_tags = [i for i in permitted_tags if i[-1] != '/']
        self.requires_no_close = [i[:-1] for i in permitted_tags
                                  if i[-1] == '/']
        self.permitted_tags += self.requires_no_close
        self.allowed_attributes = allowed_attributes
        # The only schemes allowed in URLs (for href and src attributes).
        # Adding "javascript" or "vbscript" to this list would not be smart.
        self.allowed_schemes = ['http', 'https', 'ftp']
        #to strip or escape disallowed tags?
        self.strip_disallowed = strip_disallowed
        # True while inside a disallowed tag in strip mode; content is dropped
        self.in_disallowed = False

    def handle_data(self, data):
        """Escape text content unless it is inside a stripped region."""
        if data and not self.in_disallowed:
            self.result += xssescape(data)

    def handle_charref(self, ref):
        """Pass through short numeric character references; escape the rest."""
        if self.in_disallowed:
            return
        elif len(ref) < 7 and ref.isdigit():
            self.result += '&#%s;' % ref
        else:
            self.result += xssescape('&#%s' % ref)

    def handle_entityref(self, ref):
        """Pass through known named entities; escape unknown ones."""
        if self.in_disallowed:
            return
        elif ref in entitydefs:
            self.result += '&%s;' % ref
        else:
            self.result += xssescape('&%s' % ref)

    def handle_comment(self, comment):
        """HTML comments are re-emitted escaped, never as live markup."""
        if self.in_disallowed:
            return
        elif comment:
            self.result += xssescape('<!--%s-->' % comment)

    def handle_starttag(
        self,
        tag,
        method,
        attrs,
        ):
        """Rebuild a permitted start tag keeping only whitelisted attributes;
        disallowed tags are escaped, or suppressed in strip mode."""
        if tag not in self.permitted_tags:
            if self.strip_disallowed:
                # strip mode: drop everything until the matching end tag
                self.in_disallowed = True
            else:
                self.result += xssescape('<%s>' % tag)
        else:
            bt = '<' + tag
            if tag in self.allowed_attributes:
                attrs = dict(attrs)
                self.allowed_attributes_here = [x for x in
                        self.allowed_attributes[tag] if x in attrs
                        and len(attrs[x]) > 0]
                for attribute in self.allowed_attributes_here:
                    if attribute in ['href', 'src', 'background']:
                        # URL-bearing attributes must pass the scheme check
                        if self.url_is_acceptable(attrs[attribute]):
                            bt += ' %s="%s"' % (attribute,
                                    attrs[attribute])
                    else:
                        bt += ' %s=%s' % (xssescape(attribute),
                                          quoteattr(attrs[attribute]))
                # drop <a>/<img> that retained no acceptable attributes
                if bt == '<a' or bt == '<img':
                    return
            if tag in self.requires_no_close:
                bt += ' /'
            bt += '>'
            self.result += bt
            self.open_tags.insert(0, tag)

    def handle_endtag(self, tag, attrs):
        """Emit the end tag only if the tag is permitted and currently open."""
        bracketed = '</%s>' % tag
        if tag not in self.permitted_tags:
            if self.strip_disallowed:
                # leaving a stripped region
                self.in_disallowed = False
            else:
                self.result += xssescape(bracketed)
        elif tag in self.open_tags:
            self.result += bracketed
            self.open_tags.remove(tag)

    def unknown_starttag(self, tag, attributes):
        self.handle_starttag(tag, None, attributes)

    def unknown_endtag(self, tag):
        self.handle_endtag(tag, None)

    def url_is_acceptable(self, url):
        """
        Accepts relative and absolute urls

        Absolute URLs must use an allowed scheme and have a dotted host;
        relative URLs must be schemeless and start with '/'.
        """
        parsed = urlparse(url)
        return (parsed[0] in self.allowed_schemes and '.' in parsed[1]) \
            or (parsed[0] == '' and parsed[2].startswith('/'))

    def strip(self, rawstring, escape=True):
        """
        Returns the argument stripped of potentially harmful
        HTML or Javascript code

        @type escape: boolean
        @param escape: If True (default) it escapes the potentially harmful
          content, otherwise remove it

        Also normalizes '<tag/>' to '<tag />' for self-closing tags and
        appends closing tags for anything left open after parsing.
        """
        if not isinstance(rawstring, str): return str(rawstring)
        for tag in self.requires_no_close:
            rawstring = rawstring.replace("<%s/>" % tag, "<%s />" % tag)
        if not escape:
            self.strip_disallowed = True
        self.result = ''
        self.feed(rawstring)
        # close any tags the input left dangling
        for endtag in self.open_tags:
            if endtag not in self.requires_no_close:
                self.result += '</%s>' % endtag
        return self.result

    def xtags(self):
        """
        Returns a printable string informing the user which tags are allowed
        """
        tg = ''
        for x in sorted(self.permitted_tags):
            tg += '<' + x
            if x in self.allowed_attributes:
                for y in self.allowed_attributes[x]:
                    tg += ' %s=""' % y
            tg += '> '
        return xssescape(tg.strip())
def sanitize(text, permitted_tags=[
    'a',
    'b',
    'blockquote',
    'br/',
    'i',
    'li',
    'ol',
    'ul',
    'p',
    'cite',
    'code',
    'pre',
    'img/',
    'h1','h2','h3','h4','h5','h6',
    'table','tr','td','div',
    ],
    allowed_attributes = {
    'a': ['href', 'title'],
    'img': ['src', 'alt'],
    'blockquote': ['type'],
    'td': ['colspan'],
    },
    escape=True):
    """Return *text* with disallowed HTML escaped (escape=True) or removed.

    Non-string input is returned as its str() representation untouched.
    """
    if not isinstance(text, str):
        return str(text)
    cleaner = XssCleaner(permitted_tags=permitted_tags,
                         allowed_attributes=allowed_attributes)
    return cleaner.strip(text, escape)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by Attila Csipa <web2py@csipa.in.rs>
Modified by Massimo Di Pierro <mdipierro@cs.depaul.edu>
"""
import sys
import os
import threading
import logging
import time
import sched
import re
import datetime
import platform
import portalocker
import fileutils
import cPickle
from settings import global_settings
logger = logging.getLogger("web2py.cron")
_cron_stopping = False
def absolute_path_link(path):
    """
    Return an absolute path for the destination of a symlink
    (or simply the absolute form of *path* when it is not a symlink).
    """
    if not os.path.islink(path):
        return os.path.abspath(path)
    target = os.readlink(path)
    if os.path.isabs(target):
        return target
    # relative link targets are resolved against the link's own directory
    return os.path.join(os.path.dirname(path), target)
def stopcron():
    "graceful shutdown of cron"
    # Flip the module-level flag polled by extcron/hardcron/softcron and
    # crondance; once True no further cron work is scheduled.
    global _cron_stopping
    _cron_stopping = True
class extcron(threading.Thread):
    """One-shot cron driver meant to be triggered externally
    (e.g. by the operating system's own crontab)."""

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.setDaemon(False)
        self.path = applications_parent
        # immediate pass at construction time (handles @reboot entries)
        crondance(self.path, 'external', startup=True)

    def run(self):
        if not _cron_stopping:
            logger.debug('external cron invocation')
            crondance(self.path, 'external', startup=False)
class hardcron(threading.Thread):
    """Daemon thread that fires crondance at the top of every minute."""

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.path = applications_parent
        # startup pass (handles @reboot entries)
        crondance(self.path, 'hard', startup=True)

    def launch(self):
        if not _cron_stopping:
            logger.debug('hard cron invocation')
            crondance(self.path, 'hard', startup = False)

    def run(self):
        s = sched.scheduler(time.time, time.sleep)
        logger.info('Hard cron daemon started')
        while not _cron_stopping:
            now = time.time()
            # align the next launch with the start of the next minute
            s.enter(60 - now % 60, 1, self.launch, ())
            s.run()
class softcron(threading.Thread):
    """Cron driver piggy-backing on incoming web requests (no daemon)."""

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.path = applications_parent
        # startup pass (handles @reboot entries)
        crondance(self.path, 'soft', startup=True)

    def run(self):
        if not _cron_stopping:
            logger.debug('soft cron invocation')
            crondance(self.path, 'soft', startup=False)
class Token(object):
    """File-based lock (cron.master) ensuring at most one cron pass per
    minute across processes sharing the same applications folder."""

    def __init__(self,path):
        self.path = os.path.join(path, 'cron.master')
        if not os.path.exists(self.path):
            fileutils.write_file(self.path, '', 'wb')
        self.master = None          # open file handle while lock is held
        self.now = time.time()      # timestamp identifying this attempt

    def acquire(self,startup=False):
        """
        returns the time when the lock is acquired or
        None if cron already running

        lock is implemented by writing a pickle (start, stop) in cron.master
        start is time when cron job starts and stop is time when cron completed
        stop == 0 if job started but did not yet complete
        if a cron job started within less than 60 seconds, acquire returns None
        if a cron job started before 60 seconds and did not stop,
        a warning is issue "Stale cron.master detected"
        """
        if portalocker.LOCK_EX is None:
            logger.warning('WEB2PY CRON: Disabled because no file locking')
            return None
        self.master = open(self.path,'rb+')
        try:
            ret = None
            portalocker.lock(self.master,portalocker.LOCK_EX)
            try:
                (start, stop) = cPickle.load(self.master)
            except:
                # empty or corrupt cron.master: treat as "previous run done"
                (start, stop) = (0, 1)
            if startup or self.now - start > 59.99:
                ret = self.now
                if not stop:
                    # this happens if previous cron job longer than 1 minute
                    logger.warning('WEB2PY CRON: Stale cron.master detected')
                logger.debug('WEB2PY CRON: Acquiring lock')
                self.master.seek(0)
                # stop=0 marks the run as started-but-not-finished
                cPickle.dump((self.now,0),self.master)
        finally:
            portalocker.unlock(self.master)
        if not ret:
            # do this so no need to release
            self.master.close()
        return ret

    def release(self):
        """
        this function writes into cron.master the time when cron job
        was completed
        """
        if not self.master.closed:
            portalocker.lock(self.master,portalocker.LOCK_EX)
            logger.debug('WEB2PY CRON: Releasing cron lock')
            self.master.seek(0)
            (start, stop) = cPickle.load(self.master)
            if start == self.now: # if this is my lock
                self.master.seek(0)
                cPickle.dump((self.now,time.time()),self.master)
            portalocker.unlock(self.master)
            self.master.close()
def rangetolist(s, period='min'):
    """Expand a crontab step expression like '0-10/2' or '*/15' into the
    list of matching integers for the given field type.

    Returns [] when the (possibly '*'-expanded) string is not of the
    form 'a-b/c'.
    """
    # '*' expands to the full value span of the field type
    spans = {'min': '0-59', 'hr': '0-23', 'dom': '1-31',
             'mon': '1-12', 'dow': '0-6'}
    if s.startswith('*') and period in spans:
        s = s.replace('*', spans[period], 1)
    match = re.compile(r'(\d+)-(\d+)/(\d+)').match(s)
    if not match:
        return []
    lo = int(match.group(1))
    hi = int(match.group(2))
    step = int(match.group(3))
    # NOTE: membership is i % step == 0 (multiples of step),
    # not an offset from the range start
    return [i for i in range(lo, hi + 1) if i % step == 0]
def parsecronline(line):
    """Parse one crontab line into a task dict.

    Returns a dict with optional keys 'min', 'hr', 'dom', 'mon', 'dow'
    (each a list of matching ints; a missing key means "every"), plus
    'user' and 'cmd'. Returns None for lines with fewer than 7 fields.
    '@reboot' is encoded as min == [-1]; the other @-shortcuts expand to
    their classic five-field equivalents.
    """
    task = {}
    if line.startswith('@reboot'):
        line=line.replace('@reboot', '-1 * * * *')
    elif line.startswith('@yearly'):
        line=line.replace('@yearly', '0 0 1 1 *')
    elif line.startswith('@annually'):
        line=line.replace('@annually', '0 0 1 1 *')
    elif line.startswith('@monthly'):
        line=line.replace('@monthly', '0 0 1 * *')
    elif line.startswith('@weekly'):
        line=line.replace('@weekly', '0 0 * * 0')
    elif line.startswith('@daily'):
        line=line.replace('@daily', '0 0 * * *')
    elif line.startswith('@midnight'):
        line=line.replace('@midnight', '0 0 * * *')
    elif line.startswith('@hourly'):
        line=line.replace('@hourly', '0 * * * *')
    # split into 5 time fields + user + command (command keeps its spaces)
    params = line.strip().split(None, 6)
    if len(params) < 7:
        return None
    daysofweek={'sun':0,'mon':1,'tue':2,'wed':3,'thu':4,'fri':5,'sat':6}
    for (s, id) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']):
        if not s in [None, '*']:
            task[id] = []
            vals = s.split(',')
            for val in vals:
                if val != '-1' and '-' in val and '/' not in val:
                    # a plain range 'a-b' means 'a-b/1'
                    val = '%s/1' % val
                if '/' in val:
                    task[id] += rangetolist(val, id)
                elif val.isdigit() or val=='-1':
                    task[id].append(int(val))
                elif id=='dow' and val[:3].lower() in daysofweek:
                    # BUGFIX: daysofweek is a dict and must be indexed;
                    # daysofweek(...) raised TypeError for symbolic names
                    # like 'mon' or 'sunday'.
                    task[id].append(daysofweek[val[:3].lower()])
    task['user'] = params[5]
    task['cmd'] = params[6]
    return task
class cronlauncher(threading.Thread):
    """Thread that runs one cron command in a subprocess and logs its
    exit status and output."""

    def __init__(self, cmd, shell=True):
        threading.Thread.__init__(self)
        # on Windows the command is passed through unjoined with shell=False;
        # elsewhere a list command is flattened into a single shell string
        if platform.system() == 'Windows':
            shell = False
        elif isinstance(cmd,list):
            cmd = ' '.join(cmd)
        self.cmd = cmd
        self.shell = shell

    def run(self):
        import subprocess
        proc = subprocess.Popen(self.cmd,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=self.shell)
        # blocks until the child exits, collecting both streams
        (stdoutdata,stderrdata) = proc.communicate()
        if proc.returncode != 0:
            logger.warning(
                'WEB2PY CRON Call returned code %s:\n%s' % \
                    (proc.returncode, stdoutdata+stderrdata))
        else:
            logger.debug('WEB2PY CRON Call returned success:\n%s' \
                             % stdoutdata)
def crondance(applications_parent, ctype='soft', startup=False):
    """Run one cron pass: read each app's cron/crontab, pick the tasks due
    at the current minute and launch them via cronlauncher.

    ctype tags log lines ('soft'/'hard'/'external'); startup=True runs
    @reboot tasks and bypasses the once-per-minute lock window.
    """
    apppath = os.path.join(applications_parent,'applications')
    cron_path = os.path.join(applications_parent)
    token = Token(cron_path)
    cronmaster = token.acquire(startup=startup)
    if not cronmaster:
        # another process ran cron within the last minute
        return
    now_s = time.localtime()
    # (field name, current value) pairs checked against each task;
    # tm_wday is Monday=0, crontab wants Sunday=0, hence the +1 mod 7
    checks=(('min',now_s.tm_min),
            ('hr',now_s.tm_hour),
            ('mon',now_s.tm_mon),
            ('dom',now_s.tm_mday),
            ('dow',(now_s.tm_wday+1)%7))
    apps = [x for x in os.listdir(apppath)
            if os.path.isdir(os.path.join(apppath, x))]
    full_apath_links = set()
    for app in apps:
        if _cron_stopping:
            break;
        apath = os.path.join(apppath,app)
        # if app is a symbolic link to other app, skip it
        full_apath_link = absolute_path_link(apath)
        if full_apath_link in full_apath_links:
            continue
        else:
            full_apath_links.add(full_apath_link)
        cronpath = os.path.join(apath, 'cron')
        crontab = os.path.join(cronpath, 'crontab')
        if not os.path.exists(crontab):
            continue
        try:
            cronlines = fileutils.readlines_file(crontab, 'rt')
            lines = [x.strip() for x in cronlines if x.strip() and not x.strip().startswith('#')]
            tasks = [parsecronline(cline) for cline in lines]
        except Exception, e:
            logger.error('WEB2PY CRON: crontab read error %s' % e)
            continue

        for task in tasks:
            if _cron_stopping:
                break;
            commands = [sys.executable]
            w2p_path = fileutils.abspath('web2py.py', gluon=True)
            if os.path.exists(w2p_path):
                commands.append(w2p_path)
            if global_settings.applications_parent != global_settings.gluon_parent:
                commands.extend(('-f', global_settings.applications_parent))
            # citems[i] is True when field i is constrained and does NOT
            # match the current time; any True means the task is not due
            citems = [(k in task and not v in task[k]) for k,v in checks]
            task_min= task.get('min',[])
            if not task:
                continue
            elif not startup and task_min == [-1]:
                # @reboot tasks only run on the startup pass
                continue
            elif task_min != [-1] and reduce(lambda a,b: a or b, citems):
                continue
            logger.info('WEB2PY CRON (%s): %s executing %s in %s at %s' \
                            % (ctype, app, task.get('cmd'),
                               os.getcwd(), datetime.datetime.now()))
            # '**cmd' = run inside web2py without models, '*cmd' = with -M
            action, command, models = False, task['cmd'], ''
            if command.startswith('**'):
                (action,models,command) = (True,'',command[2:])
            elif command.startswith('*'):
                (action,models,command) = (True,'-M',command[1:])
            else:
                action=False
            if action and command.endswith('.py'):
                commands.extend(('-J',                # cron job
                                 models,              # import models?
                                 '-S', app,           # app name
                                 '-a', '"<recycle>"', # password
                                 '-R', command))      # command
                shell = True
            elif action:
                commands.extend(('-J',                   # cron job
                                 models,                 # import models?
                                 '-S', app+'/'+command,  # app name
                                 '-a', '"<recycle>"'))   # password
                shell = True
            else:
                # plain system command, run as-is
                commands = command
                shell = False
            try:
                cronlauncher(commands, shell=shell).start()
            except Exception, e:
                logger.warning(
                    'WEB2PY CRON: Execution error for %s: %s' \
                        % (task.get('cmd'), e))
    token.release()
| Python |
#### WORK IN PROGRESS... NOT SUPPOSED TO WORK YET
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.scheduler import Scheduler
def demo1(*args,**vars):
print 'you passed args=%s and vars=%s' % (args, vars)
return 'done!'
def demo2():
1/0
scheduler = Scheduler(db,dict(demo1=demo1,demo2=demo2))
## run worker nodes with:
cd web2py
python gluon/scheduler.py -u sqlite://storage.sqlite \
-f applications/myapp/databases/ \
-t mytasks.py
(-h for info)
python scheduler.py -h
## schedule jobs using
http://127.0.0.1:8000/scheduler/appadmin/insert/db/scheduler_task
## monitor scheduled jobs
http://127.0.0.1:8000/scheduler/appadmin/select/db?query=db.scheduler_task.id>0
## view completed jobs
http://127.0.0.1:8000/scheduler/appadmin/select/db?query=db.scheduler_run.id>0
## view workers
http://127.0.0.1:8000/scheduler/appadmin/select/db?query=db.scheduler_worker.id>0
## Comments
"""
import os
import time
import multiprocessing
import sys
import cStringIO
import threading
import traceback
import signal
import socket
import datetime
import logging
import optparse
try:
from gluon.contrib.simplejson import loads, dumps
except:
from simplejson import loads, dumps
if 'WEB2PY_PATH' in os.environ:
sys.path.append(os.environ['WEB2PY_PATH'])
else:
os.environ['WEB2PY_PATH'] = os.getcwd()
from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET
from gluon.utils import web2py_uuid
# Lifecycle states shared by the scheduler_task / scheduler_run /
# scheduler_worker tables defined further down.
QUEUED = 'QUEUED'
ASSIGNED = 'ASSIGNED'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
TIMEOUT = 'TIMEOUT'
STOPPED = 'STOPPED'
ACTIVE = 'ACTIVE'
INACTIVE = 'INACTIVE'
DISABLED = 'DISABLED'
# heartbeat period (seconds) used by worker liveness updates
SECONDS = 1
HEARTBEAT = 3*SECONDS
class Task(object):
    """In-memory description of one scheduled job.

    args and vars are kept as JSON-encoded strings; any extra keyword
    (task_id, run_id, ...) becomes an attribute of the instance.
    """
    def __init__(self, app, function, timeout, args='[]', vars='{}', **kwargs):
        logging.debug(' new task allocated: %s.%s' % (app, function))
        self.app = app
        self.function = function
        self.timeout = timeout
        self.args = args    # json
        self.vars = vars    # json
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        return '<Task: %s>' % self.function
class TaskReport(object):
    """Outcome of one task run: a status plus the result, the captured
    stdout and (on failure) the formatted traceback."""
    def __init__(self, status, result=None, output=None, tb=None):
        self.status = status
        self.result = result
        self.output = output
        self.tb = tb
        logging.debug(' new task report: %s' % status)
        # log the traceback when present, otherwise the result
        logging.debug((' traceback: %s' % tb) if tb else (' result: %s' % result))

    def __str__(self):
        return '<TaskReport: %s>' % self.status
def demo_function(*argv,**kwargs):
    """ test function """
    # prints one line per second, argv[0] times, then reports completion
    for i in range(argv[0]):
        print 'click',i
        time.sleep(1)
    return 'done'
#the two functions below deal with simplejson decoding as unicode, esp for the dict decode
#and subsequent usage as function Keyword arguments unicode variable names won't work!
#borrowed from http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-unicode-ones-from-json-in-python
def _decode_list(lst):
    """Recursively utf-8 encode the unicode items of a simplejson-decoded
    list (Python 2: unicode strings cannot be used as **keyword names)."""
    newlist = []
    for i in lst:
        if isinstance(i, unicode):
            i = i.encode('utf-8')
        elif isinstance(i, list):
            i = _decode_list(i)
        newlist.append(i)
    return newlist
def _decode_dict(dct):
    """utf-8 encode the unicode keys and values of a simplejson-decoded
    dict so the dict can be splatted as **kwargs on Python 2; list values
    are handled via _decode_list."""
    newdict = {}
    for k, v in dct.iteritems():
        if isinstance(k, unicode):
            k = k.encode('utf-8')
        if isinstance(v, unicode):
            v = v.encode('utf-8')
        elif isinstance(v, list):
            v = _decode_list(v)
        newdict[k] = v
    return newdict
def executor(queue,task):
    """ the background process

    Runs *task* in this (child) process with stdout captured, and puts a
    single TaskReport (COMPLETED or FAILED) on *queue* when done.
    """
    logging.debug(' task started')
    # capture everything the task prints so it can be stored with the report
    stdout, sys.stdout = sys.stdout, cStringIO.StringIO()
    try:
        if task.app:
            # real mode: load the web2py app environment and look the
            # function up in the scheduler's registered task table
            os.chdir(os.environ['WEB2PY_PATH'])
            from gluon.shell import env
            from gluon.dal import BaseAdapter
            from gluon import current
            # silence model-import logging while building the environment
            level = logging.getLogger().getEffectiveLevel()
            logging.getLogger().setLevel(logging.WARN)
            _env = env(task.app,import_models=True)
            logging.getLogger().setLevel(level)
            scheduler = current._scheduler
            scheduler_tasks = current._scheduler.tasks
            _function = scheduler_tasks[task.function]
            globals().update(_env)
            args = loads(task.args)
            vars = loads(task.vars, object_hook=_decode_dict)
            result = dumps(_function(*args,**vars))
        else:
            ### for testing purpose only
            result = eval(task.function)(
                *loads(task.args, object_hook=_decode_dict),
                **loads(task.vars, object_hook=_decode_dict))
        # restore stdout before reporting so getvalue() has the full capture
        stdout, sys.stdout = sys.stdout, stdout
        queue.put(TaskReport(COMPLETED, result,stdout.getvalue()))
    except BaseException,e:
        sys.stdout = stdout
        tb = traceback.format_exc()
        queue.put(TaskReport(FAILED,tb=tb))
class MetaScheduler(threading.Thread):
    """Base scheduler: pops tasks, runs each in a child process with a
    timeout, and (as a thread) emits heartbeats while alive. Subclasses
    override pop_task/report_task/send_heartbeat/sleep."""

    def __init__(self):
        threading.Thread.__init__(self)
        self.process = None # the backround process
        self.have_heartbeat = True # set to False to kill

    # NOTE(review): 'async' is a reserved keyword on Python >= 3.7;
    # this module targets Python 2.
    def async(self,task):
        """
        starts the background process and returns:
        ('ok',result,output)
        ('error',exception,None)
        ('timeout',None,None)
        ('terminated',None,None)
        """
        # maxsize=1: the child puts exactly one TaskReport
        queue = multiprocessing.Queue(maxsize=1)
        p = multiprocessing.Process(target=executor,args=(queue,task))
        self.process = p
        logging.debug(' task starting')
        p.start()
        try:
            # wait up to the task's own timeout for the child to finish
            p.join(task.timeout)
        except:
            p.terminate()
            p.join()
            self.have_heartbeat = False
            logging.debug(' task stopped')
            return TaskReport(STOPPED)
        if p.is_alive():
            # still running after the timeout: kill it
            p.terminate()
            p.join()
            logging.debug(' task timeout')
            return TaskReport(TIMEOUT)
        elif queue.empty():
            # child died without reporting (e.g. killed externally)
            self.have_heartbeat = False
            logging.debug(' task stopped')
            return TaskReport(STOPPED)
        else:
            logging.debug(' task completed or failed')
            return queue.get()

    def die(self):
        logging.info('die!')
        self.have_heartbeat = False
        self.terminate_process()

    def terminate_process(self):
        try:
            self.process.terminate()
        except:
            pass # no process to terminate

    def run(self):
        """ the thread that sends heartbeat """
        counter = 0
        while self.have_heartbeat:
            self.send_heartbeat(counter)
            counter += 1

    def start_heartbeats(self):
        self.start()

    def send_heartbeat(self,counter):
        # demo implementation; subclasses write scheduler_worker records
        print 'thum'
        time.sleep(1)

    def pop_task(self):
        # demo implementation; subclasses fetch from the database
        return Task(
            app = None,
            function = 'demo_function',
            timeout = 7,
            args = '[2]',
            vars = '{}')

    def report_task(self,task,task_report):
        # demo implementation; subclasses persist the TaskReport
        print 'reporting task'
        pass

    def sleep(self):
        pass

    def loop(self):
        """Main loop: run tasks as long as the heartbeat flag holds."""
        try:
            self.start_heartbeats()
            while True and self.have_heartbeat:
                logging.debug('looping...')
                task = self.pop_task()
                if task:
                    self.report_task(task,self.async(task))
                else:
                    logging.debug('sleeping...')
                    self.sleep()
        except KeyboardInterrupt:
            self.die()
# Allowed values for scheduler_task.status, scheduler_run.status and
# scheduler_worker.status respectively (used by IS_IN_SET validators below).
TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED)
RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED)
WORKER_STATUS = (ACTIVE,INACTIVE,DISABLED)
class TYPE(object):
    """
    validator that check whether field is valid json and validate its type
    """
    def __init__(self, myclass=list, parse=False):
        self.myclass = myclass
        self.parse = parse

    def __call__(self, value):
        from gluon import current
        # invalid JSON is rejected outright
        try:
            decoded = loads(value)
        except:
            return (value, current.T('invalid json'))
        # valid JSON of the wrong python type is rejected too
        if not isinstance(decoded, self.myclass):
            return (value, current.T('Not of type: %s') % self.myclass)
        # parse=True returns the decoded object, otherwise the raw string
        return (decoded if self.parse else value, None)
class Scheduler(MetaScheduler):
def __init__(self,db,tasks={},migrate=True,
worker_name=None,group_names=None,heartbeat=HEARTBEAT):
MetaScheduler.__init__(self)
self.db = db
self.db_thread = None
self.tasks = tasks
self.group_names = group_names or ['main']
self.heartbeat = heartbeat
self.worker_name = worker_name or socket.gethostname()+'#'+str(web2py_uuid())
from gluon import current
current._scheduler = self
self.define_tables(db,migrate=migrate)
def define_tables(self,db,migrate):
from gluon import current
logging.debug('defining tables (migrate=%s)' % migrate)
now = datetime.datetime.now()
db.define_table(
'scheduler_task',
Field('application_name',requires=IS_NOT_EMPTY(),
default=None,writable=False),
Field('task_name',requires=IS_NOT_EMPTY()),
Field('group_name',default='main',writable=False),
Field('status',requires=IS_IN_SET(TASK_STATUS),
default=QUEUED,writable=False),
Field('function_name',
requires=IS_IN_SET(sorted(self.tasks.keys()))),
Field('args','text',default='[]',requires=TYPE(list)),
Field('vars','text',default='{}',requires=TYPE(dict)),
Field('enabled','boolean',default=True),
Field('start_time','datetime',default=now),
Field('next_run_time','datetime',default=now),
Field('stop_time','datetime',default=now+datetime.timedelta(days=1)),
Field('repeats','integer',default=1,comment="0=unlimted"),
Field('period','integer',default=60,comment='seconds'),
Field('timeout','integer',default=60,comment='seconds'),
Field('times_run','integer',default=0,writable=False),
Field('last_run_time','datetime',writable=False,readable=False),
Field('assigned_worker_name',default='',writable=False),
migrate=migrate,format='%(task_name)s')
if hasattr(current,'request'):
db.scheduler_task.application_name.default=current.request.application
db.define_table(
'scheduler_run',
Field('scheduler_task','reference scheduler_task'),
Field('status',requires=IS_IN_SET(RUN_STATUS)),
Field('start_time','datetime'),
Field('stop_time','datetime'),
Field('output','text'),
Field('result','text'),
Field('traceback','text'),
Field('worker_name',default=self.worker_name),
migrate=migrate)
db.define_table(
'scheduler_worker',
Field('worker_name'),
Field('first_heartbeat','datetime'),
Field('last_heartbeat','datetime'),
Field('status',requires=IS_IN_SET(WORKER_STATUS)),
migrate=migrate)
db.commit()
    def loop(self,worker_name=None):
        """Enter the worker main loop (delegates to MetaScheduler.loop).

        NOTE(review): ``worker_name`` is accepted but ignored here; the
        parent loop uses ``self.worker_name`` set in the constructor --
        confirm this is intended.
        """
        MetaScheduler.loop(self)
def pop_task(self):
now = datetime.datetime.now()
db, ts = self.db, self.db.scheduler_task
try:
logging.debug(' grabbing all queued tasks')
all_available = db(ts.status.belongs((QUEUED,RUNNING)))\
((ts.times_run<ts.repeats)|(ts.repeats==0))\
(ts.start_time<=now)\
(ts.stop_time>now)\
(ts.next_run_time<=now)\
(ts.enabled==True)\
(ts.group_name.belongs(self.group_names))\
(ts.assigned_worker_name.belongs((None,'',self.worker_name))) #None?
number_grabbed = all_available.update(
assigned_worker_name=self.worker_name,status=ASSIGNED)
db.commit()
except:
number_grabbed = None
db.rollback()
if number_grabbed:
logging.debug(' grabbed %s tasks' % number_grabbed)
grabbed = db(ts.assigned_worker_name==self.worker_name)\
(ts.status==ASSIGNED)
task = grabbed.select(limitby=(0,1), orderby=ts.next_run_time).first()
logging.debug(' releasing all but one (running)')
if task:
task.update_record(status=RUNNING,last_run_time=now)
grabbed.update(assigned_worker_name='',status=QUEUED)
db.commit()
else:
return None
next_run_time = task.last_run_time + datetime.timedelta(seconds=task.period)
times_run = task.times_run + 1
if times_run < task.repeats or task.repeats==0:
run_again = True
else:
run_again = False
logging.debug(' new scheduler_run record')
while True:
try:
run_id = db.scheduler_run.insert(
scheduler_task = task.id,
status=RUNNING,
start_time=now,
worker_name=self.worker_name)
db.commit()
break
except:
db.rollback
logging.info('new task %(id)s "%(task_name)s" %(application_name)s.%(function_name)s' % task)
return Task(
app = task.application_name,
function = task.function_name,
timeout = task.timeout,
args = task.args, #in json
vars = task.vars, #in json
task_id = task.id,
run_id = run_id,
run_again = run_again,
next_run_time=next_run_time,
times_run = times_run)
    def report_task(self,task,task_report):
        """Persist the outcome of a finished task execution.

        Updates the scheduler_run row with status/result/output/traceback
        and moves the scheduler_task (still marked RUNNING) either back to
        QUEUED, when it should run again, or to its terminal status.

        task -- the Task that was executed
        task_report -- object carrying status, result, output and tb
        """
        logging.debug(' recording task report in db (%s)' % task_report.status)
        db = self.db
        db(db.scheduler_run.id==task.run_id).update(
            status = task_report.status,
            stop_time = datetime.datetime.now(),
            result = task_report.result,
            output = task_report.output,
            traceback = task_report.tb)
        if task_report.status == COMPLETED:
            # completed: requeue when more runs are due, else mark done;
            # always release the worker assignment
            d = dict(status = task.run_again and QUEUED or COMPLETED,
                     next_run_time = task.next_run_time,
                     times_run = task.times_run,
                     assigned_worker_name = '')
        else:
            # FAILED/TIMEOUT stay terminal; STOPPED goes back to QUEUED
            d = dict(
                assigned_worker_name = '',
                status = {'FAILED':'FAILED',
                          'TIMEOUT':'TIMEOUT',
                          'STOPPED':'QUEUED'}[task_report.status])
        db(db.scheduler_task.id==task.task_id)\
            (db.scheduler_task.status==RUNNING).update(**d)
        db.commit()
        logging.info('task completed (%s)' % task_report.status)
    def send_heartbeat(self,counter):
        """Record this worker's liveness and periodically reap dead workers.

        Runs in the heartbeat thread with its own DAL connection (built
        lazily on first call so the connection is owned by this thread).
        Every 10th call it also requeues tasks held by workers whose
        heartbeat expired and deletes those worker rows. Sleeps one
        heartbeat interval before returning.

        counter -- increasing call count supplied by the caller
        """
        if not self.db_thread:
            logging.debug('thread building own DAL object')
            self.db_thread = DAL(self.db._uri,folder = self.db._adapter.folder)
            self.define_tables(self.db_thread,migrate=False)
        try:
            db = self.db_thread
            sw, st = db.scheduler_worker, db.scheduler_task
            now = datetime.datetime.now()
            # a worker is considered dead after 3 missed heartbeats
            expiration = now-datetime.timedelta(seconds=self.heartbeat*3)
            # record heartbeat
            logging.debug('........recording heartbeat')
            # update-or-insert: update returns 0 when no row matched
            if not db(sw.worker_name==self.worker_name)\
                    .update(last_heartbeat = now, status = ACTIVE):
                sw.insert(status = ACTIVE,worker_name = self.worker_name,
                          first_heartbeat = now,last_heartbeat = now)
            if counter % 10 == 0:
                # deallocate jobs assigned to inactive workers and requeue them
                logging.debug(' freeing workers that have not sent heartbeat')
                inactive_workers = db(sw.last_heartbeat<expiration)
                db(st.assigned_worker_name.belongs(
                    inactive_workers._select(sw.worker_name)))\
                    (st.status.belongs((RUNNING,ASSIGNED,QUEUED)))\
                    .update(assigned_worker_name='',status=QUEUED)
                inactive_workers.delete()
            db.commit()
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit -- consider narrowing to Exception.
            db.rollback()
        time.sleep(self.heartbeat)
def sleep(self):
time.sleep(self.heartbeat) # should only sleep until next available task
def main():
"""
allows to run worker without python web2py.py .... by simply python this.py
"""
parser = optparse.OptionParser()
parser.add_option(
"-w", "--worker_name", dest="worker_name", default=None,
help="start a worker with name")
parser.add_option(
"-b", "--heartbeat",dest="heartbeat", default = 10,
help="heartbeat time in seconds (default 10)")
parser.add_option(
"-L", "--logger_level",dest="logger_level",
default = 'INFO',
help="level of logging (DEBUG, INFO, WARNING, ERROR)")
parser.add_option(
"-g", "--group_names",dest="group_names",
default = 'main',
help="comma separated list of groups to be picked by the worker")
parser.add_option(
"-f", "--db_folder",dest="db_folder",
default = '/Users/mdipierro/web2py/applications/scheduler/databases',
help="location of the dal database folder")
parser.add_option(
"-u", "--db_uri",dest="db_uri",
default = 'sqlite://storage.sqlite',
help="database URI string (web2py DAL syntax)")
parser.add_option(
"-t", "--tasks",dest="tasks",default=None,
help="file containing task files, must define" + \
"tasks = {'task_name':(lambda: 'output')} or similar set of tasks")
(options, args) = parser.parse_args()
if not options.tasks or not options.db_uri:
print USAGE
if options.tasks:
path,filename = os.path.split(options.tasks)
if filename.endswith('.py'):
filename = filename[:-3]
sys.path.append(path)
print 'importing tasks...'
tasks = __import__(filename, globals(), locals(), [], -1).tasks
print 'tasks found: '+', '.join(tasks.keys())
else:
tasks = {}
group_names = [x.strip() for x in options.group_names.split(',')]
logging.getLogger().setLevel(options.logger_level)
print 'groups for this worker: '+', '.join(group_names)
print 'connecting to database in folder: ' + options.db_folder or './'
print 'using URI: '+options.db_uri
db = DAL(options.db_uri,folder=options.db_folder)
print 'instantiating scheduler...'
scheduler=Scheduler(db = db,
worker_name = options.worker_name,
tasks = tasks,
migrate = True,
group_names = group_names,
heartbeat = options.heartbeat)
print 'starting main worker loop...'
scheduler.loop()
# Run as a standalone scheduler worker when executed directly.
if __name__=='__main__':
    main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework (Copyrighted, 2007-2011).
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Author: Thadeus Burgess
Contributors:
- Thank you to Massimo Di Pierro for creating the original gluon/template.py
- Thank you to Jonathan Lundell for extensively testing the regex on Jython.
- Thank you to Limodou (creater of uliweb) who inspired the block-element support for web2py.
"""
import os
import re
import cgi
import cStringIO
import logging
try:
    from restricted import RestrictedError
# BUGFIX: narrowed from a bare ``except:`` which would also hide
# unrelated errors (including KeyboardInterrupt) raised during import.
except ImportError:
    # Standalone fallback: outside web2py there is no ``restricted``
    # module; log the error details and hand back RuntimeError so that
    # ``raise RestrictedError(...)`` still raises something sensible.
    def RestrictedError(a,b,c):
        logging.error(str(a)+':'+str(b)+':'+str(c))
        return RuntimeError
class Node(object):
    """Smallest unit of parsed template content.

    Holds a single ``value`` (typically a chunk of generated python or
    raw html) plus a flag recording whether the node was parsed before
    any ``{{extend}}`` directive.
    """

    def __init__(self, value=None, pre_extend=False):
        self.value = value
        self.pre_extend = pre_extend

    def __str__(self):
        # Render as the payload's own string form.
        return str(self.value)
class SuperNode(Node):
    """Placeholder for a ``{{super}}`` directive.

    ``value`` stays None until the parser resolves it against a block
    defined by the parent template; rendering an unresolved node is a
    template syntax error.
    """

    def __init__(self, name='', pre_extend=False):
        self.name = name
        self.value = None  # filled in later during parsing
        self.pre_extend = pre_extend

    def __str__(self):
        if not self.value:
            raise SyntaxError(
                "Undefined parent block ``%s``. \n" % self.name +
                "You must define a block before referencing it.\n"
                "Make sure you have not left out an ``{{end}}`` tag.")
        return str(self.value)

    def __repr__(self):
        return "%s->%s" % (self.name, self.value)
class BlockNode(Node):
    """Container node for ``{{block name}} ... {{end}}`` sections.

    Child nodes render hierarchically, in the order they were added::

        {{ block test }}
        This is default block test
        {{ end }}
    """

    def __init__(self, name='', pre_extend=False, delimiters=('{{', '}}')):
        """
        name - Name of this Node.
        """
        self.nodes = []
        self.name = name
        self.pre_extend = pre_extend
        self.left, self.right = delimiters

    def __repr__(self):
        # Reconstruct the block in template syntax.
        parts = ['%sblock %s%s' % (self.left, self.name, self.right)]
        parts.extend(str(node) for node in self.nodes)
        parts.append('%send%s' % (self.left, self.right))
        return ''.join(parts)

    def __str__(self):
        """Render only this block's own content, skipping child blocks."""
        return ''.join(str(node) for node in self.nodes
                       if not isinstance(node, BlockNode))

    def append(self, node):
        """Append ``node`` (a string or Node) to this block's children."""
        if not isinstance(node, (str, Node)):
            raise TypeError("Invalid type; must be instance of ``str`` or ``BlockNode``. %s" % node)
        self.nodes.append(node)

    def extend(self, other):
        """Absorb the child nodes of another BlockNode."""
        if not isinstance(other, BlockNode):
            raise TypeError("Invalid type; must be instance of ``BlockNode``. %s" % other)
        self.nodes.extend(other.nodes)

    def output(self, blocks):
        """Render this block to a single string.

        blocks -- dict of overriding BlockNodes from extending templates;
        a child block whose name appears there renders the override
        instead of its default content.
        """
        parts = []
        for node in self.nodes:
            if isinstance(node, BlockNode):
                # Use the override when one exists, else the default.
                chosen = blocks.get(node.name, node)
                parts.append(chosen.output(blocks))
            else:
                parts.append(str(node))
        return ''.join(parts)
class Content(BlockNode):
    """
    Parent Container -- Used as the root level BlockNode.
    Contains functions that operate as such.
    """
    def __init__(self, name="ContentBlock", pre_extend=False):
        """
        name -- Unique name for this BlockNode
        """
        self.name = name
        self.nodes = []     # ordered children (strings / Nodes / BlockNodes)
        self.blocks = {}    # block name -> BlockNode, for override lookup
        self.pre_extend = pre_extend

    def __str__(self):
        """Render the whole tree, honouring registered block overrides."""
        lines = []
        for node in self.nodes:
            if isinstance(node, BlockNode):
                if node.name in self.blocks:
                    # Use the overriding output.
                    lines.append(self.blocks[node.name].output(self.blocks))
                else:
                    # Otherwise we just use the node's own output.
                    lines.append(node.output(self.blocks))
            else:
                # It is just a string, so include it.
                lines.append(str(node))
        return ''.join(lines)

    def _insert(self, other, index=0):
        """Insert a single string or Node at ``index``."""
        if isinstance(other, (str, Node)):
            self.nodes.insert(index, other)
        else:
            raise TypeError("Invalid type, must be instance of ``str`` or ``Node``.")

    def insert(self, other, index=0):
        """
        Insert an object at ``index``; a list or tuple is inserted
        item by item, preserving its order.
        """
        if isinstance(other, (list, tuple)):
            # Iterate in reverse so repeated inserts at the same index
            # keep the input order.
            # BUGFIX: previously this called ``other.reverse()``, which
            # mutated the caller's list as a side effect and crashed with
            # AttributeError when a tuple (explicitly accepted above) was
            # passed. ``reversed`` handles both without mutation.
            for item in reversed(other):
                self._insert(item, index)
        else:
            self._insert(other, index)

    def append(self, node):
        """
        Add a node to the list. BlockNodes are also registered in
        ``self.blocks`` so they can be overridden later.
        """
        if isinstance(node, (str, Node)):
            self.nodes.append(node)
            if isinstance(node, BlockNode):
                self.blocks[node.name] = node
        else:
            raise TypeError("Invalid type, must be instance of ``str`` or ``BlockNode``. %s" % node)

    def extend(self, other):
        """
        Extend this object's nodes and block registry with another
        BlockNode's (in practice another Content).
        """
        if isinstance(other, BlockNode):
            self.nodes.extend(other.nodes)
            self.blocks.update(other.blocks)
        else:
            raise TypeError("Invalid type; must be instance of ``BlockNode``. %s" % other)

    def clear_content(self):
        # Drop all children; the block registry is kept (matches the
        # original behavior).
        self.nodes = []
class TemplateParser(object):
    """Parse web2py template text into executable python source.

    The input is split on the delimiters; literal text becomes writer
    calls, ``{{...}}`` code is re-indented, and the directives
    ``block``/``end``/``super``/``include``/``extend`` build a tree of
    Node/BlockNode objects rooted at ``self.content``.
    """
    default_delimiters = ('{{','}}')
    r_tag = re.compile(r'(\{\{.*?\}\})', re.DOTALL)
    # matches triple-quoted strings so embedded newlines can be escaped
    r_multiline = re.compile(r'(""".*?""")|(\'\'\'.*?\'\'\')', re.DOTALL)
    # These are used for re-indentation.
    # Indent + 1
    re_block = re.compile('^(elif |else:|except:|except |finally:).*$',
                          re.DOTALL)
    # Indent - 1
    re_unblock = re.compile('^(return|continue|break|raise)( .*)?$', re.DOTALL)
    # Indent - 1
    re_pass = re.compile('^pass( .*)?$', re.DOTALL)
    def __init__(self, text,
                 name = "ParserContainer",
                 context = dict(),
                 path = 'views/',
                 writer = 'response.write',
                 lexers = {},
                 delimiters = ('{{','}}'),
                 _super_nodes = [],
                 ):
        """
        text -- text to parse
        context -- context to parse in
        path -- folder path to templates
        writer -- string of writer class to use
        lexers -- dict of custom lexers to use.
        delimiters -- for example ('{{','}}')
        _super_nodes -- a list of nodes to check for inclusion
                        this should only be set by "self.extend"
                        It contains a list of SuperNodes from a child
                        template that need to be handled.

        NOTE(review): context/lexers/_super_nodes are mutable default
        arguments shared across calls; pass fresh objects to be safe.
        """
        # Keep a root level name.
        self.name = name
        # Raw text to start parsing.
        self.text = text
        # Writer to use (refer to the default for an example).
        # This will end up as
        # "%s(%s, escape=False)" % (self.writer, value)
        self.writer = writer
        # Dictionary of custom name lexers to use.
        if isinstance(lexers, dict):
            self.lexers = lexers
        else:
            self.lexers = {}
        # Path of templates
        self.path = path
        # Context for templates.
        self.context = context
        # allow optional alternative delimiters
        self.delimiters = delimiters
        if delimiters != self.default_delimiters:
            escaped_delimiters = (re.escape(delimiters[0]),re.escape(delimiters[1]))
            self.r_tag = re.compile(r'(%s.*?%s)' % escaped_delimiters, re.DOTALL)
        # py2-only dict.has_key -- replace with ``in`` when porting to py3
        elif context.has_key('response') and hasattr(context['response'],'delimiters'):
            if context['response'].delimiters != self.default_delimiters:
                escaped_delimiters = (re.escape(context['response'].delimiters[0]),
                                      re.escape(context['response'].delimiters[1]))
                self.r_tag = re.compile(r'(%s.*?%s)' % escaped_delimiters,re.DOTALL)
        # Create a root level Content that everything will go into.
        self.content = Content(name=name)
        # Stack will hold our current stack of nodes.
        # As we descend into a node, it will be added to the stack
        # And when we leave, it will be removed from the stack.
        # self.content should stay on the stack at all times.
        self.stack = [self.content]
        # This variable will hold a reference to every super block
        # that we come across in this template.
        self.super_nodes = []
        # This variable will hold a reference to the child
        # super nodes that need handling.
        self.child_super_nodes = _super_nodes
        # This variable will hold a reference to every block
        # that we come across in this template
        self.blocks = {}
        # Begin parsing.
        self.parse(text)
    def to_string(self):
        """
        Return the parsed template with correct indentation.
        Used to make it easier to port to python3.
        """
        return self.reindent(str(self.content))
    def __str__(self):
        "Make sure str works exactly the same as python 3"
        return self.to_string()
    def __unicode__(self):
        "Make sure str works exactly the same as python 3"
        return self.to_string()
    def reindent(self, text):
        """
        Reindents a string of unindented python code.

        Raises (via _raise_error) when the indentation does not balance
        back to zero, i.e. a missing or extra "pass" in the view.
        """
        # Get each of our lines into an array.
        lines = text.split('\n')
        # Our new lines
        new_lines = []
        # Keeps track of how many indents we have.
        # Used for when we need to drop a level of indentation
        # only to reindent on the next line.
        credit = 0
        # Current indentation
        k = 0
        #################
        # THINGS TO KNOW
        #################
        # k += 1 means indent
        # k -= 1 means unindent
        # credit = 1 means unindent on the next line.
        for raw_line in lines:
            line = raw_line.strip()
            # ignore empty lines
            if not line:
                continue
            # If we have a line that contains python code that
            # should be unindented for this line of code.
            # and then reindented for the next line.
            if TemplateParser.re_block.match(line):
                k = k + credit - 1
            # We obviously can't have a negative indentation
            k = max(k,0)
            # Add the indentation!
            new_lines.append(' '*(4*k)+line)
            # Bank account back to 0 again :(
            credit = 0
            # If we are a pass block, we obviously de-dent.
            if TemplateParser.re_pass.match(line):
                k -= 1
            # If we are any of the following, de-dent.
            # However, we should stay on the same level
            # But the line right after us will be de-dented.
            # So we add one credit to keep us at the level
            # while moving back one indentation level.
            if TemplateParser.re_unblock.match(line):
                credit = 1
                k -= 1
            # If we are an if statement, a try, or a semi-colon we
            # probably need to indent the next line.
            if line.endswith(':') and not line.startswith('#'):
                k += 1
        # This must come before so that we can raise an error with the
        # right content.
        new_text = '\n'.join(new_lines)
        if k > 0:
            self._raise_error('missing "pass" in view', new_text)
        elif k < 0:
            self._raise_error('too many "pass" in view', new_text)
        return new_text
    def _raise_error(self, message='', text=None):
        """
        Raise an error using itself as the filename and textual content.
        """
        raise RestrictedError(self.name, text or self.text, message)
    def _get_file_text(self, filename):
        """
        Attempt to open ``filename`` and retrieve its text.
        This will use self.path to search for the file.
        """
        # If they didn't specify a filename, how can we find one!
        if not filename.strip():
            self._raise_error('Invalid template filename')
        # Get the filename; filename looks like ``"template.html"``.
        # We need to eval to remove the quotes and get the string type.
        # NOTE(review): eval of a template-supplied expression -- only
        # safe for trusted template sources.
        filename = eval(filename, self.context)
        # Get the path of the file on the system.
        filepath = self.path and os.path.join(self.path, filename) or filename
        # try to read the text.
        try:
            fileobj = open(filepath, 'rb')
            text = fileobj.read()
            fileobj.close()
        except IOError:
            self._raise_error('Unable to open included view file: ' + filepath)
        return text
    def include(self, content, filename):
        """
        Include ``filename`` here.
        """
        text = self._get_file_text(filename)
        t = TemplateParser(text,
                           name = filename,
                           context = self.context,
                           path = self.path,
                           writer = self.writer,
                           delimiters = self.delimiters)
        content.append(t.content)
    def extend(self, filename):
        """
        Extend ``filename``. Anything not declared in a block defined by the
        parent will be placed in the parent templates ``{{include}}`` block.
        """
        text = self._get_file_text(filename)
        # Create out nodes list to send to the parent
        super_nodes = []
        # We want to include any non-handled nodes.
        super_nodes.extend(self.child_super_nodes)
        # And our nodes as well.
        super_nodes.extend(self.super_nodes)
        t = TemplateParser(text,
                           name = filename,
                           context = self.context,
                           path = self.path,
                           writer = self.writer,
                           delimiters = self.delimiters,
                           _super_nodes = super_nodes)
        # Make a temporary buffer that is unique for parent
        # template.
        buf = BlockNode(name='__include__' + filename, delimiters=self.delimiters)
        pre = []
        # Iterate through each of our nodes
        for node in self.content.nodes:
            # If a node is a block
            if isinstance(node, BlockNode):
                # That happens to be in the parent template
                if node.name in t.content.blocks:
                    # Do not include it
                    continue
            if isinstance(node, Node):
                # Or if the node was before the extension
                # we should not include it
                if node.pre_extend:
                    pre.append(node)
                    continue
            # Otherwise, it should go int the
            # Parent templates {{include}} section.
                buf.append(node)
            else:
                buf.append(node)
        # Clear our current nodes. We will be replacing this with
        # the parent nodes.
        self.content.nodes = []
        # Set our include, unique by filename
        t.content.blocks['__include__' + filename] = buf
        # Make sure our pre_extended nodes go first
        t.content.insert(pre)
        # Then we extend our blocks
        t.content.extend(self.content)
        # Work off the parent node.
        self.content = t.content
    def parse(self, text):
        """Tokenize ``text`` on the delimiters and build the node tree.

        Called from __init__; also resolves child super-nodes and, last,
        triggers ``extend`` if an {{extend}} directive was seen.
        """
        # Basically, r_tag.split will split the text into
        # an array containing, 'non-tag', 'tag', 'non-tag', 'tag'
        # so if we alternate this variable, we know
        # what to look for. This is alternate to
        # line.startswith("{{")
        in_tag = False
        extend = None
        pre_extend = True
        # Use a list to store everything in
        # This is because later the code will "look ahead"
        # for missing strings or brackets.
        ij = self.r_tag.split(text)
        # j = current index
        # i = current item
        for j in range(len(ij)):
            i = ij[j]
            if i:
                if len(self.stack) == 0:
                    self._raise_error('The "end" tag is unmatched, please check if you have a starting "block" tag')
                # Our current element in the stack.
                top = self.stack[-1]
                if in_tag:
                    line = i
                    # If we are missing any strings!!!!
                    # This usually happens with the following example
                    # template code
                    #
                    # {{a = '}}'}}
                    # or
                    # {{a = '}}blahblah{{'}}
                    #
                    # This will fix these
                    # This is commented out because the current template
                    # system has this same limitation. Since this has a
                    # performance hit on larger templates, I do not recommend
                    # using this code on production systems. This is still here
                    # for "i told you it *can* be fixed" purposes.
                    #
                    #
                    # if line.count("'") % 2 != 0 or line.count('"') % 2 != 0:
                    #
                    #     # Look ahead
                    #     la = 1
                    #     nextline = ij[j+la]
                    #
                    #     # As long as we have not found our ending
                    #     # brackets keep going
                    #     while '}}' not in nextline:
                    #         la += 1
                    #         nextline += ij[j+la]
                    #         # clear this line, so we
                    #         # don't attempt to parse it
                    #         # this is why there is an "if i"
                    #         # around line 530
                    #         ij[j+la] = ''
                    #
                    #     # retrieve our index.
                    #     index = nextline.index('}}')
                    #
                    #     # Everything before the new brackets
                    #     before = nextline[:index+2]
                    #
                    #     # Everything after
                    #     after = nextline[index+2:]
                    #
                    #     # Make the next line everything after
                    #     # so it parses correctly, this *should* be
                    #     # all html
                    #     ij[j+1] = after
                    #
                    #     # Add everything before to the current line
                    #     line += before
                    # Get rid of '{{' and '}}'
                    line = line[2:-2].strip()
                    # This is bad juju, but let's do it anyway
                    if not line:
                        continue
                    # We do not want to replace the newlines in code,
                    # only in block comments.
                    def remove_newline(re_val):
                        # Take the entire match and replace newlines with
                        # escaped newlines.
                        return re_val.group(0).replace('\n', '\\n')
                    # Perform block comment escaping.
                    # This performs escaping ON anything
                    # in between """ and """
                    line = re.sub(TemplateParser.r_multiline,
                                  remove_newline,
                                  line)
                    if line.startswith('='):
                        # IE: {{=response.title}}
                        name, value = '=', line[1:].strip()
                    else:
                        v = line.split(' ', 1)
                        if len(v) == 1:
                            # Example
                            # {{ include }}
                            # {{ end }}
                            name = v[0]
                            value = ''
                        else:
                            # Example
                            # {{ block pie }}
                            # {{ include "layout.html" }}
                            # {{ for i in range(10): }}
                            name = v[0]
                            value = v[1]
                    # This will replace newlines in block comments
                    # with the newline character. This is so that they
                    # retain their formatting, but squish down to one
                    # line in the rendered template.
                    # First check if we have any custom lexers
                    if name in self.lexers:
                        # Pass the information to the lexer
                        # and allow it to inject in the environment
                        # You can define custom names such as
                        # '{{<<variable}}' which could potentially
                        # write unescaped version of the variable.
                        self.lexers[name](parser = self,
                                          value = value,
                                          top = top,
                                          stack = self.stack,)
                    elif name == '=':
                        # So we have a variable to insert into
                        # the template
                        buf = "\n%s(%s)" % (self.writer, value)
                        top.append(Node(buf, pre_extend = pre_extend))
                    elif name == 'block' and not value.startswith('='):
                        # Make a new node with name.
                        node = BlockNode(name = value.strip(),
                                         pre_extend = pre_extend,
                                         delimiters = self.delimiters)
                        # Append this node to our active node
                        top.append(node)
                        # Make sure to add the node to the stack.
                        # so anything after this gets added
                        # to this node. This allows us to
                        # "nest" nodes.
                        self.stack.append(node)
                    elif name == 'end' and not value.startswith('='):
                        # We are done with this node.
                        # Save an instance of it
                        self.blocks[top.name] = top
                        # Pop it.
                        self.stack.pop()
                    elif name == 'super' and not value.startswith('='):
                        # Get our correct target name
                        # If they just called {{super}} without a name
                        # attempt to assume the top blocks name.
                        if value:
                            target_node = value
                        else:
                            target_node = top.name
                        # Create a SuperNode instance
                        node = SuperNode(name = target_node,
                                         pre_extend = pre_extend)
                        # Add this to our list to be taken care of
                        self.super_nodes.append(node)
                        # And put in in the tree
                        top.append(node)
                    elif name == 'include' and not value.startswith('='):
                        # If we know the target file to include
                        if value:
                            self.include(top, value)
                        # Otherwise, make a temporary include node
                        # That the child node will know to hook into.
                        else:
                            include_node = BlockNode(name = '__include__' + self.name,
                                                     pre_extend = pre_extend,
                                                     delimiters = self.delimiters)
                            top.append(include_node)
                    elif name == 'extend' and not value.startswith('='):
                        # We need to extend the following
                        # template.
                        extend = value
                        pre_extend = False
                    else:
                        # If we don't know where it belongs
                        # we just add it anyways without formatting.
                        if line and in_tag:
                            # Split on the newlines >.<
                            tokens = line.split('\n')
                            # We need to look for any instances of
                            # for i in range(10):
                            #   = i
                            # pass
                            # So we can properly put a response.write() in place.
                            continuation = False
                            len_parsed = 0
                            for k in range(len(tokens)):
                                tokens[k] = tokens[k].strip()
                                len_parsed += len(tokens[k])
                                if tokens[k].startswith('='):
                                    if tokens[k].endswith('\\'):
                                        continuation = True
                                        tokens[k] = "\n%s(%s" % (self.writer, tokens[k][1:].strip())
                                    else:
                                        tokens[k] = "\n%s(%s)" % (self.writer, tokens[k][1:].strip())
                                elif continuation:
                                    tokens[k] += ')'
                                    continuation = False
                            buf = "\n%s" % '\n'.join(tokens)
                            top.append(Node(buf, pre_extend = pre_extend))
                else:
                    # It is HTML so just include it.
                    buf = "\n%s(%r, escape=False)" % (self.writer, i)
                    top.append(Node(buf, pre_extend = pre_extend))
            # Remember: tag, not tag, tag, not tag
            in_tag = not in_tag
        # Make a list of items to remove from child
        to_rm = []
        # Go through each of the children nodes
        for node in self.child_super_nodes:
            # If we declared a block that this node wants to include
            if node.name in self.blocks:
                # Go ahead and include it!
                node.value = self.blocks[node.name]
                # Since we processed this child, we don't need to
                # pass it along to the parent
                to_rm.append(node)
        # Remove some of the processed nodes
        for node in to_rm:
            # Since this is a pointer, it works beautifully.
            # Sometimes I miss C-Style pointers... I want my asterisk...
            self.child_super_nodes.remove(node)
        # If we need to extend a template.
        if extend:
            self.extend(extend)
# We need this for integration with gluon
def parse_template(filename,
                   path = 'views/',
                   context = None,
                   lexers = None,
                   delimiters = ('{{','}}')
                   ):
    """
    filename can be a view filename in the views folder or an input stream
    path is the path of a views folder
    context is a dictionary of symbols used to render the template
    lexers is a dictionary of custom lexer functions
    delimiters is the tag delimiter pair, e.g. ('{{','}}')

    Returns the generated python source of the template as a string.
    """
    # BUGFIX: context/lexers used to default to shared mutable literals
    # ({} / dict()); TemplateParser's eval() pollutes the context dict,
    # so every call now gets a fresh one instead of a shared default.
    if context is None:
        context = {}
    if lexers is None:
        lexers = {}
    # First, if we have a str try to open the file
    if isinstance(filename, str):
        try:
            fp = open(os.path.join(path, filename), 'rb')
            text = fp.read()
            fp.close()
        except IOError:
            raise RestrictedError(filename, '', 'Unable to find the file')
    else:
        text = filename.read()
    # Use the file contents to get a parsed template and return it.
    return str(TemplateParser(text, context=context, path=path, lexers=lexers, delimiters=delimiters))
def get_parsed(text):
    """Return the indented python code generated from ``text``.

    Parses with default settings; useful for unit testing.
    """
    parser = TemplateParser(text)
    return str(parser)
# And this is a generic render function.
# Here for integration with gluon.
def render(content = "hello world",
stream = None,
filename = None,
path = None,
context = {},
lexers = {},
delimiters = ('{{','}}')
):
"""
>>> render()
'hello world'
>>> render(content='abc')
'abc'
>>> render(content='abc\\'')
"abc'"
>>> render(content='a"\\'bc')
'a"\\'bc'
>>> render(content='a\\nbc')
'a\\nbc'
>>> render(content='a"bcd"e')
'a"bcd"e'
>>> render(content="'''a\\nc'''")
"'''a\\nc'''"
>>> render(content="'''a\\'c'''")
"'''a\'c'''"
>>> render(content='{{for i in range(a):}}{{=i}}<br />{{pass}}', context=dict(a=5))
'0<br />1<br />2<br />3<br />4<br />'
>>> render(content='{%for i in range(a):%}{%=i%}<br />{%pass%}', context=dict(a=5),delimiters=('{%','%}'))
'0<br />1<br />2<br />3<br />4<br />'
>>> render(content="{{='''hello\\nworld'''}}")
'hello\\nworld'
>>> render(content='{{for i in range(3):\\n=i\\npass}}')
'012'
"""
# Here to avoid circular Imports
try:
from globals import Response
except:
# Working standalone. Build a mock Response object.
class Response():
def __init__(self):
self.body = cStringIO.StringIO()
def write(self, data, escape=True):
if not escape:
self.body.write(str(data))
elif hasattr(data,'xml') and callable(data.xml):
self.body.write(data.xml())
else:
# make it a string
if not isinstance(data, (str, unicode)):
data = str(data)
elif isinstance(data, unicode):
data = data.encode('utf8', 'xmlcharrefreplace')
data = cgi.escape(data, True).replace("'","'")
self.body.write(data)
# A little helper to avoid escaping.
class NOESCAPE():
def __init__(self, text):
self.text = text
def xml(self):
return self.text
# Add it to the context so we can use it.
context['NOESCAPE'] = NOESCAPE
# If we don't have anything to render, why bother?
if not content and not stream and not filename:
raise SyntaxError, "Must specify a stream or filename or content"
# Here for legacy purposes, probably can be reduced to something more simple.
close_stream = False
if not stream:
if filename:
stream = open(filename, 'rb')
close_stream = True
elif content:
stream = cStringIO.StringIO(content)
# Get a response class.
context['response'] = Response()
# Execute the template.
code = str(TemplateParser(stream.read(), context=context, path=path, lexers=lexers, delimiters=delimiters))
try:
exec(code) in context
except Exception:
# for i,line in enumerate(code.split('\n')): print i,line
raise
if close_stream:
stream.close()
# Returned the rendered content.
return context['response'].body.getvalue()
# Run the module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| Python |
import codecs, encodings
"""Caller will hand this library a buffer and ask it to either convert
it or auto-detect the type.
Based on http://code.activestate.com/recipes/52257/
Licensed under the PSF License
"""
# None represents a potentially variable byte. "##" in the XML spec...
# Maps a 4-byte prefix (BOM and/or the first characters of "<?xm") to an
# encoding name; patterns with None wildcard the variable bytes.
autodetect_dict = {  # bytepattern : "name"
    (0x00, 0x00, 0xFE, 0xFF): "ucs4_be",
    (0xFF, 0xFE, 0x00, 0x00): "ucs4_le",
    (0xFE, 0xFF, None, None): "utf_16_be",
    (0xFF, 0xFE, None, None): "utf_16_le",
    (0x00, 0x3C, 0x00, 0x3F): "utf_16_be",
    (0x3C, 0x00, 0x3F, 0x00): "utf_16_le",
    (0x3C, 0x3F, 0x78, 0x6D): "utf_8",
    (0x4C, 0x6F, 0xA7, 0x94): "EBCDIC",
}
def autoDetectXMLEncoding(buffer):
    """ buffer -> encoding_name
    The buffer should be at least 4 bytes long.
    Falls back to "utf_8" (the XML default) when nothing better can be
    detected. Note that the returned encoding_name might not have an
    installed decoder (e.g. EBCDIC), in which case codecs.lookup raises.
    """
    # a more efficient implementation would not decode the whole
    # buffer at once but otherwise we'd have to decode a character at
    # a time looking for the quote character...that's a pain
    encoding = "utf_8"  # according to the XML spec, this is the default
    # this code successively tries to refine the default;
    # whenever it fails to refine, it falls back to
    # the last place encoding was set.
    if len(buffer) >= 4:
        # BUGFIX: was ``tuple(map(ord, buffer[0:4]))`` which breaks on
        # python3 byte strings (their items are already ints);
        # bytearray yields ints on both python 2 and 3.
        first_bytes = tuple(bytearray(buffer[0:4]))
        enc_info = autodetect_dict.get(first_bytes, None)
        if not enc_info:
            # try autodetection again removing potentially variable bytes
            first_bytes = (first_bytes[0], first_bytes[1], None, None)
            enc_info = autodetect_dict.get(first_bytes)
    else:
        enc_info = None
    if enc_info:
        # we've got a guess... this is the new default
        encoding = enc_info
    # try to find a more precise encoding using the xml declaration
    secret_decoder_ring = codecs.lookup(encoding)[1]
    (decoded, length) = secret_decoder_ring(buffer)
    first_line = decoded.split("\n")[0]
    if first_line and first_line.startswith(u"<?xml"):
        encoding_pos = first_line.find(u"encoding")
        if encoding_pos != -1:
            # look for double quote
            quote_pos = first_line.find('"', encoding_pos)
            if quote_pos == -1:  # look for single quote
                quote_pos = first_line.find("'", encoding_pos)
            if quote_pos > -1:
                quote_char = first_line[quote_pos]
                rest = first_line[quote_pos + 1:]
                encoding = rest[:rest.find(quote_char)]
    return encoding
def decoder(buffer):
    """Re-encode the XML byte string *buffer* as UTF-8, using the
    encoding sniffed by autoDetectXMLEncoding()."""
    detected = autoDetectXMLEncoding(buffer)
    return buffer.decode(detected).encode('utf8')
| Python |
# encoding utf-8
__author__ = "Thadeus Burgess <thadeusb@thadeusb.com>"
# We classify as "non-reserved" those key words that are explicitly known
# to the parser but are allowed as column or table names.  Some key words
# that are otherwise non-reserved cannot be used as function or data type
# names and are in the nonreserved list.  (Most of these words represent
# built-in functions or data types with special syntax.  The function
# or type is still available but it cannot be redefined by the user.)
# Labeled "reserved" are those tokens that are not allowed as column or
# table names.  Some reserved key words are allowable as names for
# functions or data types.
# Note: at the bottom of the list is a dict (ADAPTERS) containing
# references to the sets; if you add a list, don't forget to remove its
# default mapping to COMMON.
# Keywords that are adapter specific, such as a list of "postgresql"
# or "mysql" keywords.
# These are keywords that are common to all SQL dialects, and should
# never be used as a table or column name.  Even if you use one of these,
# the cursor will throw an OperationalError for the SQL syntax.
# Keywords shared by every SQL dialect; never safe as a table or column
# name on any backend.
COMMON = set("""
    SELECT INSERT DELETE UPDATE DROP CREATE ALTER WHERE FROM INNER JOIN
    AND OR LIKE ON IN SET BY GROUP ORDER LEFT OUTER IF END THEN LOOP
    AS ELSE FOR CASE WHEN MIN MAX DISTINCT
    """.split())
# Words PostgreSQL reserves outright: not usable as column or table names.
POSTGRESQL = set("""
    FALSE TRUE ALL ANALYSE ANALYZE AND ANY ARRAY AS ASC ASYMMETRIC
    AUTHORIZATION BETWEEN BIGINT BINARY BIT BOOLEAN BOTH CASE CAST CHAR
    CHARACTER CHECK COALESCE COLLATE COLUMN CONSTRAINT CREATE CROSS
    CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA CURRENT_TIME
    CURRENT_TIMESTAMP CURRENT_USER DEC DECIMAL DEFAULT DEFERRABLE DESC
    DISTINCT DO ELSE END EXCEPT EXISTS EXTRACT FETCH FLOAT FOR FOREIGN
    FREEZE FROM FULL GRANT GREATEST GROUP HAVING ILIKE IN INITIALLY INNER
    INOUT INT INTEGER INTERSECT INTERVAL INTO IS ISNULL JOIN LEADING LEAST
    LEFT LIKE LIMIT LOCALTIME LOCALTIMESTAMP NATIONAL NATURAL NCHAR NEW
    NONE NOT NOTNULL NULL NULLIF NUMERIC OFF OFFSET OLD ON ONLY OR ORDER
    OUT OUTER OVERLAPS OVERLAY PLACING POSITION PRECISION PRIMARY REAL
    REFERENCES RETURNING RIGHT ROW SELECT SESSION_USER SETOF SIMILAR
    SMALLINT SOME SUBSTRING SYMMETRIC TABLE THEN TIME TIMESTAMP TO
    TRAILING TREAT TRIM UNION UNIQUE USER USING VALUES VARCHAR VARIADIC
    VERBOSE WHEN WHERE WITH XMLATTRIBUTES XMLCONCAT XMLELEMENT XMLFOREST
    XMLPARSE XMLPI XMLROOT XMLSERIALIZE
    """.split())
# Words PostgreSQL knows but still allows as column/table names (mostly
# built-in functions and data types with special syntax).
# Deliberately excluded (kept usable as field names): ID, NAME, PASSWORD,
# ROLE, TABLE_NAME.
POSTGRESQL_NONRESERVED = set("""
    A ABORT ABS ABSENT ABSOLUTE ACCESS ACCORDING ACTION ADA ADD ADMIN
    AFTER AGGREGATE ALIAS ALLOCATE ALSO ALTER ALWAYS ARE ARRAY_AGG
    ASENSITIVE ASSERTION ASSIGNMENT AT ATOMIC ATTRIBUTE ATTRIBUTES AVG
    BACKWARD BASE64 BEFORE BEGIN BERNOULLI BIT_LENGTH BITVAR BLOB BOM
    BREADTH BY C CACHE CALL CALLED CARDINALITY CASCADE CASCADED CATALOG
    CATALOG_NAME CEIL CEILING CHAIN CHAR_LENGTH CHARACTER_LENGTH
    CHARACTER_SET_CATALOG CHARACTER_SET_NAME CHARACTER_SET_SCHEMA
    CHARACTERISTICS CHARACTERS CHECKED CHECKPOINT CLASS CLASS_ORIGIN CLOB
    CLOSE CLUSTER COBOL COLLATION COLLATION_CATALOG COLLATION_NAME
    COLLATION_SCHEMA COLLECT COLUMN_NAME COLUMNS COMMAND_FUNCTION
    COMMAND_FUNCTION_CODE COMMENT COMMIT COMMITTED COMPLETION CONCURRENTLY
    CONDITION CONDITION_NUMBER CONFIGURATION CONNECT CONNECTION
    CONNECTION_NAME CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA
    CONSTRAINTS CONSTRUCTOR CONTAINS CONTENT CONTINUE CONVERSION CONVERT
    COPY CORR CORRESPONDING COST COUNT COVAR_POP COVAR_SAMP CREATEDB
    CREATEROLE CREATEUSER CSV CUBE CUME_DIST CURRENT
    CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH
    CURRENT_TRANSFORM_GROUP_FOR_TYPE CURSOR CURSOR_NAME CYCLE DATA
    DATABASE DATE DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION DAY
    DEALLOCATE DECLARE DEFAULTS DEFERRED DEFINED DEFINER DEGREE DELETE
    DELIMITER DELIMITERS DENSE_RANK DEPTH DEREF DERIVED DESCRIBE
    DESCRIPTOR DESTROY DESTRUCTOR DETERMINISTIC DIAGNOSTICS DICTIONARY
    DISABLE DISCARD DISCONNECT DISPATCH DOCUMENT DOMAIN DOUBLE DROP
    DYNAMIC DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE EACH ELEMENT EMPTY
    ENABLE ENCODING ENCRYPTED END-EXEC ENUM EQUALS ESCAPE EVERY EXCEPTION
    EXCLUDE EXCLUDING EXCLUSIVE EXEC EXECUTE EXISTING EXP EXPLAIN EXTERNAL
    FAMILY FILTER FINAL FIRST FIRST_VALUE FLAG FLOOR FOLLOWING FORCE
    FORTRAN FORWARD FOUND FREE FUNCTION FUSION G GENERAL GENERATED GET
    GLOBAL GO GOTO GRANTED GROUPING HANDLER HEADER HEX HIERARCHY HOLD HOST
    HOUR IDENTITY IF IGNORE IMMEDIATE IMMUTABLE IMPLEMENTATION IMPLICIT
    INCLUDING INCREMENT INDENT INDEX INDEXES INDICATOR INFIX INHERIT
    INHERITS INITIALIZE INPUT INSENSITIVE INSERT INSTANCE INSTANTIABLE
    INSTEAD INTERSECTION INVOKER ISOLATION ITERATE K KEY KEY_MEMBER
    KEY_TYPE LAG LANCOMPILER LANGUAGE LARGE LAST LAST_VALUE LATERAL
    LC_COLLATE LC_CTYPE LEAD LENGTH LESS LEVEL LIKE_REGEX LISTEN LN LOAD
    LOCAL LOCATION LOCATOR LOCK LOGIN LOWER M MAP MAPPING MATCH MATCHED
    MAX MAX_CARDINALITY MAXVALUE MEMBER MERGE MESSAGE_LENGTH
    MESSAGE_OCTET_LENGTH MESSAGE_TEXT METHOD MIN MINUTE MINVALUE MOD MODE
    MODIFIES MODIFY MODULE MONTH MORE MOVE MULTISET MUMPS NAMES NAMESPACE
    NCLOB NESTING NEXT NFC NFD NFKC NFKD NIL NO NOCREATEDB NOCREATEROLE
    NOCREATEUSER NOINHERIT NOLOGIN NORMALIZE NORMALIZED NOSUPERUSER
    NOTHING NOTIFY NOWAIT NTH_VALUE NTILE NULLABLE NULLS NUMBER OBJECT
    OCCURRENCES_REGEX OCTET_LENGTH OCTETS OF OIDS OPEN OPERATION OPERATOR
    OPTION OPTIONS ORDERING ORDINALITY OTHERS OUTPUT OVER OVERRIDING OWNED
    OWNER P PAD PARAMETER PARAMETER_MODE PARAMETER_NAME
    PARAMETER_ORDINAL_POSITION PARAMETER_SPECIFIC_CATALOG
    PARAMETER_SPECIFIC_NAME PARAMETER_SPECIFIC_SCHEMA PARAMETERS PARSER
    PARTIAL PARTITION PASCAL PASSING PATH PERCENT_RANK PERCENTILE_CONT
    PERCENTILE_DISC PLANS PLI POSITION_REGEX POSTFIX POWER PRECEDING
    PREFIX PREORDER PREPARE PREPARED PRESERVE PRIOR PRIVILEGES PROCEDURAL
    PROCEDURE PUBLIC QUOTE RANGE RANK READ READS REASSIGN RECHECK
    RECURSIVE REF REFERENCING REGR_AVGX REGR_AVGY REGR_COUNT
    REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY REGR_SYY REINDEX
    RELATIVE RELEASE RENAME REPEATABLE REPLACE REPLICA RESET RESPECT
    RESTART RESTRICT RESULT RETURN RETURNED_CARDINALITY RETURNED_LENGTH
    RETURNED_OCTET_LENGTH RETURNED_SQLSTATE RETURNS REVOKE ROLLBACK ROLLUP
    ROUTINE ROUTINE_CATALOG ROUTINE_NAME ROUTINE_SCHEMA ROW_COUNT
    ROW_NUMBER ROWS RULE SAVEPOINT SCALE SCHEMA SCHEMA_NAME SCOPE
    SCOPE_CATALOG SCOPE_NAME SCOPE_SCHEMA SCROLL SEARCH SECOND SECTION
    SECURITY SELF SENSITIVE SEQUENCE SERIALIZABLE SERVER SERVER_NAME
    SESSION SET SETS SHARE SHOW SIMPLE SIZE SOURCE SPACE SPECIFIC
    SPECIFIC_NAME SPECIFICTYPE SQL SQLCODE SQLERROR SQLEXCEPTION SQLSTATE
    SQLWARNING SQRT STABLE STANDALONE START STATE STATEMENT STATIC
    STATISTICS STDDEV_POP STDDEV_SAMP STDIN STDOUT STORAGE STRICT STRIP
    STRUCTURE STYLE SUBCLASS_ORIGIN SUBLIST SUBMULTISET SUBSTRING_REGEX
    SUM SUPERUSER SYSID SYSTEM SYSTEM_USER T TABLESAMPLE TABLESPACE TEMP
    TEMPLATE TEMPORARY TERMINATE TEXT THAN TIES TIMEZONE_HOUR
    TIMEZONE_MINUTE TOP_LEVEL_COUNT TRANSACTION TRANSACTION_ACTIVE
    TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK TRANSFORM TRANSFORMS
    TRANSLATE TRANSLATE_REGEX TRANSLATION TRIGGER TRIGGER_CATALOG
    TRIGGER_NAME TRIGGER_SCHEMA TRIM_ARRAY TRUNCATE TRUSTED TYPE UESCAPE
    UNBOUNDED UNCOMMITTED UNDER UNENCRYPTED UNKNOWN UNLISTEN UNNAMED
    UNNEST UNTIL UNTYPED UPDATE UPPER URI USAGE USER_DEFINED_TYPE_CATALOG
    USER_DEFINED_TYPE_CODE USER_DEFINED_TYPE_NAME USER_DEFINED_TYPE_SCHEMA
    VACUUM VALID VALIDATOR VALUE VAR_POP VAR_SAMP VARBINARY VARIABLE
    VARYING VERSION VIEW VOLATILE WHENEVER WHITESPACE WIDTH_BUCKET WINDOW
    WITHIN WITHOUT WORK WRAPPER WRITE XML XMLAGG XMLBINARY XMLCAST
    XMLCOMMENT XMLDECLARATION XMLDOCUMENT XMLEXISTS XMLITERATE
    XMLNAMESPACES XMLQUERY XMLSCHEMA XMLTABLE XMLTEXT XMLVALIDATE YEAR YES
    ZONE
    """.split())
# Thanks villas
# Reserved words for Firebird.  PASSWORD and ROLE intentionally omitted so
# they stay usable as field names.
FIREBIRD = set("""
    ABS ACTIVE ADMIN AFTER ASCENDING AUTO AUTODDL BASED BASENAME BASE_NAME
    BEFORE BIT_LENGTH BLOB BLOBEDIT BOOLEAN BOTH BUFFER CACHE CHAR_LENGTH
    CHARACTER_LENGTH CHECK_POINT_LEN CHECK_POINT_LENGTH CLOSE COMMITTED
    COMPILETIME COMPUTED CONDITIONAL CONNECT CONTAINING CROSS CSTRING
    CURRENT_CONNECTION CURRENT_ROLE CURRENT_TRANSACTION CURRENT_USER
    DATABASE DB_KEY DEBUG DESCENDING DISCONNECT DISPLAY DO ECHO EDIT
    ENTRY_POINT EVENT EXIT EXTERN FALSE FETCH FILE FILTER FREE_IT FUNCTION
    GDSCODE GENERATOR GEN_ID GLOBAL GROUP_COMMIT_WAIT
    GROUP_COMMIT_WAIT_TIME HELP IF INACTIVE INDEX INIT INPUT_TYPE
    INSENSITIVE ISQL LC_MESSAGES LC_TYPE LEADING LENGTH LEV LOGFILE
    LOG_BUFFER_SIZE LOG_BUF_SIZE LONG LOWER MANUAL MAXIMUM MAXIMUM_SEGMENT
    MAX_SEGMENT MERGE MESSAGE MINIMUM MODULE_NAME NOAUTO NUM_LOG_BUFS
    NUM_LOG_BUFFERS OCTET_LENGTH OPEN OUTPUT_TYPE OVERFLOW PAGE PAGELENGTH
    PAGES PAGE_SIZE PARAMETER PLAN POST_EVENT QUIT RAW_PARTITIONS
    RDB$DB_KEY RECORD_VERSION RECREATE RECURSIVE RELEASE RESERV RESERVING
    RETAIN RETURN RETURNING_VALUES RETURNS ROW_COUNT ROWS RUNTIME
    SAVEPOINT SECOND SENSITIVE SHADOW SHARED SHELL SHOW SINGULAR SNAPSHOT
    SORT STABILITY START STARTING STARTS STATEMENT STATIC STATISTICS
    SUB_TYPE SUSPEND TERMINATOR TRAILING TRIGGER TRIM TRUE TYPE
    UNCOMMITTED UNKNOWN USING VARIABLE VERSION WAIT WEEKDAY WHILE YEARDAY
    """.split())
# Non-reserved Firebird words (built-in functions etc.); the original list
# repeated several entries, which a set collapses anyway.
FIREBIRD_NONRESERVED = set("""
    BACKUP BLOCK COALESCE COLLATION COMMENT DELETING DIFFERENCE IIF
    INSERTING LAST LEAVE LOCK NEXT NULLIF NULLS RESTART RETURNING
    SCALAR_ARRAY SEQUENCE STATEMENT UPDATING ABS ACCENT ACOS ALWAYS
    ASCII_CHAR ASCII_VAL ASIN ATAN ATAN2 BIN_AND BIN_OR BIN_SHL BIN_SHR
    BIN_XOR CEIL CEILING COS COSH COT DATEADD DATEDIFF DECODE EXP FLOOR
    GEN_UUID GENERATED HASH LIST LN LOG LOG10 LPAD MATCHED MATCHING
    MAXVALUE MILLISECOND MINVALUE MOD OVERLAY PAD PI PLACING POWER
    PRESERVE RAND REPLACE REVERSE ROUND RPAD SIGN SIN SINH SPACE SQRT TAN
    TANH TEMPORARY TRUNC WEEK
    """.split())
# Thanks Jonathan Lundell
# Reserved words for MySQL (duplicates in the original collapse in a set).
MYSQL = set("""
    ACCESSIBLE ADD ALL ALTER ANALYZE AND AS ASC ASENSITIVE BEFORE BETWEEN
    BIGINT BINARY BLOB BOTH BY CALL CASCADE CASE CHANGE CHAR CHARACTER
    CHECK COLLATE COLUMN CONDITION CONSTRAINT CONTINUE CONVERT CREATE
    CROSS CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR
    DATABASE DATABASES DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND DEC
    DECIMAL DECLARE DEFAULT DELAYED DELETE DESC DESCRIBE DETERMINISTIC
    DISTINCT DISTINCTROW DIV DOUBLE DROP DUAL EACH ELSE ELSEIF ENCLOSED
    ESCAPED EXISTS EXIT EXPLAIN FALSE FETCH FLOAT FLOAT4 FLOAT8 FOR FORCE
    FOREIGN FROM FULLTEXT GRANT GROUP HAVING HIGH_PRIORITY
    HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND IF IGNORE IGNORE_SERVER_IDS
    IN INDEX INFILE INNER INOUT INSENSITIVE INSERT INT INT1 INT2 INT3 INT4
    INT8 INTEGER INTERVAL INTO IS ITERATE JOIN KEY KEYS KILL LEADING LEAVE
    LEFT LIKE LIMIT LINEAR LINES LOAD LOCALTIME LOCALTIMESTAMP LOCK LONG
    LONGBLOB LONGTEXT LOOP LOW_PRIORITY MASTER_HEARTBEAT_PERIOD
    MASTER_SSL_VERIFY_SERVER_CERT MATCH MAXVALUE MEDIUMBLOB MEDIUMINT
    MEDIUMTEXT MIDDLEINT MINUTE_MICROSECOND MINUTE_SECOND MOD MODIFIES
    NATURAL NO_WRITE_TO_BINLOG NOT NULL NUMERIC ON OPTIMIZE OPTION
    OPTIONALLY OR ORDER OUT OUTER OUTFILE PRECISION PRIMARY PROCEDURE
    PURGE RANGE READ READ_WRITE READS REAL REFERENCES REGEXP RELEASE
    RENAME REPEAT REPLACE REQUIRE RESIGNAL RESTRICT RETURN REVOKE RIGHT
    RLIKE SCHEMA SCHEMAS SECOND_MICROSECOND SELECT SENSITIVE SEPARATOR SET
    SHOW SIGNAL SMALLINT SPATIAL SPECIFIC SQL SQL_BIG_RESULT
    SQL_CALC_FOUND_ROWS SQL_SMALL_RESULT SQLEXCEPTION SQLSTATE SQLWARNING
    SSL STARTING STRAIGHT_JOIN TABLE TERMINATED THEN TINYBLOB TINYINT
    TINYTEXT TO TRAILING TRIGGER TRUE UNDO UNION UNIQUE UNLOCK UNSIGNED
    UPDATE USAGE USE USING UTC_DATE UTC_TIME UTC_TIMESTAMP VALUES
    VARBINARY VARCHAR VARCHARACTER VARYING WHEN WHERE WHILE WITH WRITE XOR
    YEAR_MONTH ZEROFILL
    """.split())
# Reserved words for Microsoft SQL Server.
# BUG FIX: the original list contained 'NATIONAL ' with a trailing space,
# which could never match an identifier being checked against this set;
# it is stored as 'NATIONAL' here.
MSSQL = set("""
    ADD ALL ALTER AND ANY AS ASC AUTHORIZATION BACKUP BEGIN BETWEEN BREAK
    BROWSE BULK BY CASCADE CASE CHECK CHECKPOINT CLOSE CLUSTERED COALESCE
    COLLATE COLUMN COMMIT COMPUTE CONSTRAINT CONTAINS CONTAINSTABLE
    CONTINUE CONVERT CREATE CROSS CURRENT CURRENT_DATE CURRENT_TIME
    CURRENT_TIMESTAMP CURRENT_USER CURSOR DATABASE DBCC DEALLOCATE DECLARE
    DEFAULT DELETE DENY DESC DISK DISTINCT DISTRIBUTED DOUBLE DROP DUMMY
    DUMP ELSE END ERRLVL ESCAPE EXCEPT EXEC EXECUTE EXISTS EXIT FETCH FILE
    FILLFACTOR FOR FOREIGN FREETEXT FREETEXTTABLE FROM FULL FUNCTION GOTO
    GRANT GROUP HAVING HOLDLOCK IDENTITY IDENTITY_INSERT IDENTITYCOL IF IN
    INDEX INNER INSERT INTERSECT INTO IS JOIN KEY KILL LEFT LIKE LINENO
    LOAD NATIONAL NOCHECK NONCLUSTERED NOT NULL NULLIF OF OFF OFFSETS ON
    OPEN OPENDATASOURCE OPENQUERY OPENROWSET OPENXML OPTION OR ORDER OUTER
    OVER PERCENT PLAN PRECISION PRIMARY PRINT PROC PROCEDURE PUBLIC
    RAISERROR READ READTEXT RECONFIGURE REFERENCES REPLICATION RESTORE
    RESTRICT RETURN REVOKE RIGHT ROLLBACK ROWCOUNT ROWGUIDCOL RULE SAVE
    SCHEMA SELECT SESSION_USER SET SETUSER SHUTDOWN SOME STATISTICS
    SYSTEM_USER TABLE TEXTSIZE THEN TO TOP TRAN TRANSACTION TRIGGER
    TRUNCATE TSEQUAL UNION UNIQUE UPDATE UPDATETEXT USE USER VALUES
    VARYING VIEW WAITFOR WHEN WHERE WHILE WITH WRITETEXT
    """.split())
# Reserved words for Oracle.
ORACLE = set("""
    ACCESS ADD ALL ALTER AND ANY AS ASC AUDIT BETWEEN BY CHAR CHECK
    CLUSTER COLUMN COMMENT COMPRESS CONNECT CREATE CURRENT DATE DECIMAL
    DEFAULT DELETE DESC DISTINCT DROP ELSE EXCLUSIVE EXISTS FILE FLOAT FOR
    FROM GRANT GROUP HAVING IDENTIFIED IMMEDIATE IN INCREMENT INDEX
    INITIAL INSERT INTEGER INTERSECT INTO IS LEVEL LIKE LOCK LONG
    MAXEXTENTS MINUS MLSLABEL MODE MODIFY NOAUDIT NOCOMPRESS NOT NOWAIT
    NULL NUMBER OF OFFLINE ON ONLINE OPTION OR ORDER PCTFREE PRIOR
    PRIVILEGES PUBLIC RAW RENAME RESOURCE REVOKE ROW ROWID ROWNUM ROWS
    SELECT SESSION SET SHARE SIZE SMALLINT START SUCCESSFUL SYNONYM
    SYSDATE TABLE THEN TO TRIGGER UID UNION UNIQUE UPDATE USER VALIDATE
    VALUES VARCHAR VARCHAR2 VIEW WHENEVER WHERE WITH
    """.split())
# Reserved words for SQLite.
SQLITE = set("""
    ABORT ACTION ADD AFTER ALL ALTER ANALYZE AND AS ASC ATTACH
    AUTOINCREMENT BEFORE BEGIN BETWEEN BY CASCADE CASE CAST CHECK COLLATE
    COLUMN COMMIT CONFLICT CONSTRAINT CREATE CROSS CURRENT_DATE
    CURRENT_TIME CURRENT_TIMESTAMP DATABASE DEFAULT DEFERRABLE DEFERRED
    DELETE DESC DETACH DISTINCT DROP EACH ELSE END ESCAPE EXCEPT EXCLUSIVE
    EXISTS EXPLAIN FAIL FOR FOREIGN FROM FULL GLOB GROUP HAVING IF IGNORE
    IMMEDIATE IN INDEX INDEXED INITIALLY INNER INSERT INSTEAD INTERSECT
    INTO IS ISNULL JOIN KEY LEFT LIKE LIMIT MATCH NATURAL NO NOT NOTNULL
    NULL OF OFFSET ON OR ORDER OUTER PLAN PRAGMA PRIMARY QUERY RAISE
    REFERENCES REGEXP REINDEX RELEASE RENAME REPLACE RESTRICT RIGHT
    ROLLBACK ROW SAVEPOINT SELECT SET TABLE TEMP TEMPORARY THEN TO
    TRANSACTION TRIGGER UNION UNIQUE UPDATE USING VACUUM VALUES VIEW
    VIRTUAL WHEN WHERE
    """.split())
# remove from here when you add a list.
JDBCSQLITE = SQLITE
DB2 = INFORMIX = INGRES = JDBCPOSTGRESQL = COMMON
# Lookup table: DAL adapter name -> reserved-keyword set for that backend.
ADAPTERS = {
    'sqlite': SQLITE,
    'mysql': MYSQL,
    'postgres': POSTGRESQL,
    'postgres_nonreserved': POSTGRESQL_NONRESERVED,
    'oracle': ORACLE,
    'mssql': MSSQL,
    'mssql2': MSSQL,
    'db2': DB2,
    'informix': INFORMIX,
    'firebird': FIREBIRD,
    'firebird_embedded': FIREBIRD,
    'firebird_nonreserved': FIREBIRD_NONRESERVED,
    'ingres': INGRES,
    'ingresu': INGRES,
    'jdbc:sqlite': JDBCSQLITE,
    'jdbc:postgres': JDBCPOSTGRESQL,
    'common': COMMON,
}
# 'all' is the union of every adapter-specific keyword set.
ADAPTERS['all'] = set().union(*ADAPTERS.values())
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import os
import sys
import code
import logging
import types
import re
import optparse
import glob
import traceback
import fileutils
import settings
from utils import web2py_uuid
from compileapp import build_environment, read_pyc, run_models_in
from restricted import RestrictedError
from globals import Request, Response, Session
from storage import Storage
from admin import w2p_unpack
from dal import BaseAdapter
logger = logging.getLogger("web2py")
def exec_environment(
    pyfile='',
    request=None,
    response=None,
    session=None,
    ):
    """
    .. function:: gluon.shell.exec_environment([pyfile=''[, request=Request()
        [, response=Response[, session=Session()]]]])

    Environment builder and module loader.

    Builds a web2py environment and optionally executes a Python
    file into the environment.
    A Storage dictionary containing the resulting environment is returned.
    The working directory must be web2py root -- this is the web2py default.
    """
    # Create fresh request/response/session objects unless supplied.
    if request is None: request = Request()
    if response is None: response = Response()
    if session is None: session = Session()
    if request.folder is None:
        # Derive the application folder from a path that contains
        # ".../applications/<appname>/...".
        mo = re.match(r'(|.*/)applications/(?P<appname>[^/]+)', pyfile)
        if mo:
            appname = mo.group('appname')
            request.folder = os.path.join('applications', appname)
        else:
            request.folder = ''
    env = build_environment(request, response, session, store_current=False)
    if pyfile:
        # Prefer the compiled bytecode file (pyfile + 'c') when it exists.
        pycfile = pyfile + 'c'
        if os.path.isfile(pycfile):
            exec read_pyc(pycfile) in env
        else:
            execfile(pyfile, env)
    return Storage(env)
def env(
    a,
    import_models=False,
    c=None,
    f=None,
    dir='',
    extra_request={},
    ):
    """
    Return a web2py execution environment for application (a),
    controller (c), function (f).
    If import_models is True, exec all application models into the
    environment; on a model error the process exits with status 1.
    extra_request allows you to pass along any extra
    variables to the request object before your models
    get executed. This was mainly done to support
    web2py_utils.test_runner, however you can use it
    with any wrapper scripts that need access to the
    web2py environment.
    """
    # NOTE(review): extra_request uses a mutable default ({}); it is only
    # iterated here, never mutated, so this is currently harmless.
    request = Request()
    response = Response()
    session = Session()
    request.application = a
    # Populate the dummy environment with sensible defaults.
    if not dir:
        request.folder = os.path.join('applications', a)
    else:
        request.folder = dir
    request.controller = c or 'default'
    request.function = f or 'index'
    response.view = '%s/%s.html' % (request.controller,
                                    request.function)
    # Fake a local HTTP request so code depending on request.env works.
    request.env.path_info = '/%s/%s/%s' % (a, c, f)
    request.env.http_host = '127.0.0.1:8000'
    request.env.remote_addr = '127.0.0.1'
    request.env.web2py_runtime_gae = settings.global_settings.web2py_runtime_gae
    # Caller-supplied overrides are applied last.
    for k, v in extra_request.items():
        request[k] = v
    # Monkey patch so credentials checks pass.
    def check_credentials(request, other_application='admin'):
        return True
    fileutils.check_credentials = check_credentials
    environment = build_environment(request, response, session)
    if import_models:
        try:
            run_models_in(environment)
        except RestrictedError, e:
            # A failing model is fatal in shell mode.
            sys.stderr.write(e.traceback+'\n')
            sys.exit(1)
    environment['__name__'] = '__main__'
    return environment
def exec_pythonrc():
    """Execute the user's $PYTHONSTARTUP file, if any, swallowing the
    NameError some startup files raise when run outside the REPL."""
    startup = os.environ.get('PYTHONSTARTUP')
    if not startup or not os.path.isfile(startup):
        return
    try:
        execfile(startup)
    except NameError:
        pass
def run(
    appname,
    plain=False,
    import_models=False,
    startfile=None,
    bpython=False,
    python_code=False
    ):
    """
    Start an interactive shell or run a Python script (startfile) in the
    web2py controller environment. appname is formatted like:

    a       web2py application name
    a/c     exec the controller c into the application environment

    plain forces the basic python shell; bpython prefers the bpython
    shell; python_code, when given, is a code string executed instead of
    starting a shell.
    """
    (a, c, f) = parse_path_info(appname)
    errmsg = 'invalid application name: %s' % appname
    if not a:
        die(errmsg)
    adir = os.path.join('applications', a)
    if not os.path.exists(adir):
        # Offer to scaffold a new application from welcome.w2p.
        if raw_input('application %s does not exist, create (y/n)?'
                     % a).lower() in ['y', 'yes']:
            os.mkdir(adir)
            w2p_unpack('welcome.w2p', adir)
            for subfolder in ['models','views','controllers', 'databases',
                              'modules','cron','errors','sessions',
                              'languages','static','private','uploads']:
                subpath = os.path.join(adir,subfolder)
                if not os.path.exists(subpath):
                    os.mkdir(subpath)
            db = os.path.join(adir,'models/db.py')
            if os.path.exists(db):
                # Replace the placeholder secret key with a fresh one.
                data = fileutils.read_file(db)
                data = data.replace('<your secret key>','sha512:'+web2py_uuid())
                fileutils.write_file(db, data)
    if c:
        # Running a controller implies the models must be loaded.
        import_models = True
    _env = env(a, c=c, import_models=import_models)
    if c:
        # Prefer the source controller; fall back to compiled bytecode.
        cfile = os.path.join('applications', a, 'controllers', c + '.py')
        if not os.path.isfile(cfile):
            cfile = os.path.join('applications', a, 'compiled', "controllers_%s_%s.pyc" % (c,f))
            if not os.path.isfile(cfile):
                die(errmsg)
            else:
                exec read_pyc(cfile) in _env
        else:
            execfile(cfile, _env)
    if f:
        # Call the requested function and print its result (py2 exec).
        exec ('print %s()' % f, _env)
    elif startfile:
        exec_pythonrc()
        try:
            execfile(startfile, _env)
            # Commit open DB connections on success, roll back on error.
            if import_models: BaseAdapter.close_all_instances('commit')
        except Exception, e:
            print traceback.format_exc()
            if import_models: BaseAdapter.close_all_instances('rollback')
    elif python_code:
        exec_pythonrc()
        try:
            exec(python_code, _env)
            if import_models: BaseAdapter.close_all_instances('commit')
        except Exception, e:
            print traceback.format_exc()
            if import_models: BaseAdapter.close_all_instances('rollback')
    else:
        # Interactive mode: try bpython or IPython, then plain python.
        if not plain:
            if bpython:
                try:
                    import bpython
                    bpython.embed(locals_=_env)
                    return
                except:
                    logger.warning(
                        'import bpython error; trying ipython...')
            else:
                try:
                    import IPython
                    if IPython.__version__ >= '0.11':
                        from IPython.frontend.terminal.embed import InteractiveShellEmbed
                        shell = InteractiveShellEmbed(user_ns=_env)
                        shell()
                        return
                    else:
                        # following 2 lines fix a problem with
                        # IPython; thanks Michael Toomim
                        if '__builtins__' in _env:
                            del _env['__builtins__']
                        shell = IPython.Shell.IPShell(argv=[],user_ns=_env)
                        shell.mainloop()
                        return
                except:
                    logger.warning(
                        'import IPython error; use default python shell')
        # Plain shell: enable tab completion when readline is available.
        try:
            import readline
            import rlcompleter
        except ImportError:
            pass
        else:
            readline.set_completer(rlcompleter.Completer(_env).complete)
            readline.parse_and_bind('tab:complete')
        exec_pythonrc()
        code.interact(local=_env)
def parse_path_info(path_info):
    """Parse a path formatted like ``a/c/f`` (c and f optional, leading
    ``/`` accepted) into the tuple (a, c, f).

    If path_info is invalid, a is None; omitted components are None.
    """
    found = re.match(r'^/?(?P<a>\w+)(/(?P<c>\w+)(/(?P<f>\w+))?)?$',
                     path_info)
    if not found:
        return (None, None, None)
    return (found.group('a'), found.group('c'), found.group('f'))
def die(msg):
    """Write msg to stderr and terminate the process with exit status 1."""
    sys.stderr.write('%s\n' % msg)
    sys.exit(1)
def test(testpath, import_models=True, verbose=False):
    """
    Run doctests in the web2py environment. testpath is formatted like:

    a       tests all controllers in application a
    a/c     tests controller c in application a
    a/c/f   test function f in controller c, application a

    Where a, c and f are application, controller and function names
    respectively. If the testpath is a file name, the file is tested.
    If a controller is specified, models are executed by default.
    """
    import doctest
    if os.path.isfile(testpath):
        # A concrete file: infer the application from its path.
        mo = re.match(r'(|.*/)applications/(?P<a>[^/]+)', testpath)
        if not mo:
            die('test file is not in application directory: %s'
                % testpath)
        a = mo.group('a')
        c = f = None
        files = [testpath]
    else:
        # An a[/c[/f]] spec: resolve it to one or more controller files.
        (a, c, f) = parse_path_info(testpath)
        errmsg = 'invalid test path: %s' % testpath
        if not a:
            die(errmsg)
        cdir = os.path.join('applications', a, 'controllers')
        if not os.path.isdir(cdir):
            die(errmsg)
        if c:
            cfile = os.path.join(cdir, c + '.py')
            if not os.path.isfile(cfile):
                die(errmsg)
            files = [cfile]
        else:
            files = glob.glob(os.path.join(cdir, '*.py'))
    for testfile in files:
        globs = env(a, import_models)
        # Names already present before the test file runs are skipped.
        ignores = globs.keys()
        execfile(testfile, globs)

        def doctest_object(name, obj):
            """doctest obj and enclosed methods and classes."""
            if type(obj) in (types.FunctionType, types.TypeType,
                             types.ClassType, types.MethodType,
                             types.UnboundMethodType):
                # Reload environment before each test.
                globs = env(a, c=c, f=f, import_models=import_models)
                execfile(testfile, globs)
                doctest.run_docstring_examples(obj, globs=globs,
                    name='%s: %s' % (os.path.basename(testfile),
                                     name), verbose=verbose)
                if type(obj) in (types.TypeType, types.ClassType):
                    # Recurse into the members of classes as well.
                    for attr_name in dir(obj):
                        # Execute . operator so decorators are executed.
                        o = eval('%s.%s' % (name, attr_name), globs)
                        doctest_object(attr_name, o)
        for (name, obj) in globs.items():
            if name not in ignores and (f is None or f == name):
                doctest_object(name, obj)
def get_usage():
    """Return the usage banner shown by the shell's option parser."""
    return """
    %prog [options] pythonfile
    """
def execute_from_command_line(argv=None):
    """Command-line entry point: parse options and invoke run().

    Fixes two defects of the original version: the "no arguments" check
    compared against sys.argv even when an explicit argv was passed, and
    the parsed -M/--import_models and -R/--run options were silently
    ignored instead of being forwarded to run().
    """
    if argv is None:
        argv = sys.argv
    parser = optparse.OptionParser(usage=get_usage())
    parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME',
                      help='run web2py in interactive shell or IPython(if installed) ' + \
                      'with specified appname')
    msg = 'run web2py in interactive shell or bpython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += '\n Use combined with --shell'
    parser.add_option(
        '-B',
        '--bpython',
        action='store_true',
        default=False,
        dest='bpython',
        help=msg,
        )
    parser.add_option(
        '-P',
        '--plain',
        action='store_true',
        default=False,
        dest='plain',
        help='only use plain python shell, should be used with --shell option',
        )
    parser.add_option(
        '-M',
        '--import_models',
        action='store_true',
        default=False,
        dest='import_models',
        help='auto import model files, default is False, ' + \
            ' should be used with --shell option',
        )
    parser.add_option(
        '-R',
        '--run',
        dest='run',
        metavar='PYTHON_FILE',
        default='',
        help='run PYTHON_FILE in web2py environment, ' + \
            'should be used with --shell option',
        )
    (options, args) = parser.parse_args(argv[1:])

    # BUG FIX: check the argv actually being parsed, not sys.argv.
    if len(argv) == 1:
        parser.print_help()
        sys.exit(0)

    # A positional file argument wins; otherwise honor -R/--run, which the
    # original parsed but never used ('' means "no start file").
    if len(args) > 0:
        startfile = args[0]
    else:
        startfile = options.run

    # BUG FIX: forward -M/--import_models (previously dropped).
    run(options.shell, plain=options.plain,
        import_models=options.import_models,
        startfile=startfile, bpython=options.bpython)
# Allow this module to be executed directly as a script.
if __name__ == '__main__':
    execute_from_command_line()
| Python |
# -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2011 Timothy Farrell
# Import System Modules
import sys
import errno
import socket
import logging
import platform
import traceback
# Server identity strings.
VERSION = '1.2.4'
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = 'Rocket %s' % VERSION
HTTP_SERVER_SOFTWARE = '%s Python/%s' % (SERVER_SOFTWARE,
                                         sys.version.split(' ')[0])

# I/O and threading tunables.
BUF_SIZE = 16384
SOCKET_TIMEOUT = 1  # in secs
# How often (in secs) threads check for a server stop message.
THREAD_STOP_CHECK_INTERVAL = 1
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = {
    'LISTEN_QUEUE_SIZE': DEFAULT_LISTEN_QUEUE_SIZE,
    'MIN_THREADS': DEFAULT_MIN_THREADS,
    'MAX_THREADS': DEFAULT_MAX_THREADS,
}

# Platform/interpreter detection.
IS_JYTHON = platform.system() == 'Java'  # Handle special cases for Jython
PY3K = sys.version_info[0] > 2

# Socket errors that are expected (and ignored) while closing a connection.
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record, preventing
    'no handler' errors from the logging library."""

    def emit(self, record):
        # Deliberately a no-op.
        pass
if PY3K:
    def b(val):
        """Coerce str to bytes (pass anything else through) so the same
        code runs on Python 2.x and 3.x."""
        return val.encode() if isinstance(val, str) else val

    def u(val, encoding="us-ascii"):
        """Coerce bytes to str (pass anything else through) so the same
        code runs on Python 2.x and 3.x."""
        return val.decode(encoding) if isinstance(val, bytes) else val
else:
    def b(val):
        """Coerce unicode to a byte string (pass anything else through) so
        the same code runs on Python 2.x and 3.x."""
        return val.encode() if isinstance(val, unicode) else val

    def u(val, encoding="us-ascii"):
        """Coerce a byte string to unicode (pass anything else through) so
        the same code runs on Python 2.x and 3.x."""
        return val.decode(encoding) if isinstance(val, str) else val
# Import Package Modules
# package imports removed in monolithic build
# Public names exported by "from rocket import *".  Rocket and
# CherryPyWSGIServer are presumably defined later in this monolithic
# build (not visible in this module section).
__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE',
           'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u',
           'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler']
# Monolithic build...end of module: rocket\__init__.py
# Monolithic build...start of module: rocket\connection.py
# Import System Modules
import sys
import time
import socket
try:
import ssl
has_ssl = True
except ImportError:
has_ssl = False
# Import Package Modules
# package imports removed in monolithic build
# TODO - This part is still very experimental.
#from .filelike import FileLikeSocket
class Connection(object):
    """Wrapper around one accepted client socket.

    Binds the frequently-used socket methods directly onto the instance
    (hence the method names in __slots__) so Worker threads avoid an extra
    attribute hop, and records bookkeeping: client address/port, the
    server-side port, the accept time, and whether the socket is SSL.
    """
    __slots__ = [
        'setblocking',
        'sendall',
        'shutdown',
        'makefile',
        'fileno',
        'client_addr',
        'client_port',
        'server_port',
        'socket',
        'start_time',
        'ssl',
        'secure',
        'recv',
        'send',
        'read',
        'write'
    ]

    def __init__(self, sock_tuple, port, secure=False):
        # sock_tuple is (socket, (client_addr, client_port)) as returned by
        # socket.accept(); port is the server-side listening port.
        self.client_addr, self.client_port = sock_tuple[1]
        self.server_port = port
        self.socket = sock_tuple[0]
        self.start_time = time.time()
        self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
        self.secure = secure
        if IS_JYTHON:
            # In Jython we must set TCP_NODELAY here since it does not
            # inherit from the listening socket.
            # See: http://bugs.jython.org/issue1309
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.socket.settimeout(SOCKET_TIMEOUT)
        # Expose the raw socket operations directly on the connection.
        self.sendall = self.socket.sendall
        self.shutdown = self.socket.shutdown
        self.fileno = self.socket.fileno
        self.setblocking = self.socket.setblocking
        self.recv = self.socket.recv
        self.send = self.socket.send
        self.makefile = self.socket.makefile

    # FIXME - this is not ready for prime-time yet.
    # def makefile(self, buf_size=BUF_SIZE):
    #     return FileLikeSocket(self, buf_size)

    def close(self):
        """Close the underlying socket, tolerating an already-closed one."""
        import errno  # EBADF lives in errno, not in the socket module
        if hasattr(self.socket, '_sock'):
            # Python 2 socket objects wrap a real _socket; close it first.
            try:
                self.socket._sock.close()
            except socket.error:
                info = sys.exc_info()
                # Bug fix: the original compared against socket.EBADF, which
                # does not exist and raised AttributeError whenever any
                # non-EBADF error occurred here.
                if info[1].args[0] != errno.EBADF:
                    raise info[1]
                else:
                    pass
        self.socket.close()
# Monolithic build...end of module: rocket\connection.py
# Monolithic build...start of module: rocket\filelike.py
# Import System Modules
import socket
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Import Package Modules
# package imports removed in monolithic build
class FileLikeSocket(object):
    """Adapt a Connection to a (partial) file-like read interface.

    The read strategy (blocking vs. non-blocking) is chosen once, at
    construction time, from the socket's timeout setting.
    """

    def __init__(self, conn, buf_size=BUF_SIZE):
        self.conn = conn
        self.buf_size = buf_size
        self.buffer = StringIO()
        # Optional read limit consulted by blocking_read when set by callers.
        self.content_length = None
        if self.conn.socket.gettimeout() == 0.0:
            self.read = self.non_blocking_read
        else:
            self.read = self.blocking_read

    def __iter__(self):
        return self

    def recv(self, size):
        while True:
            try:
                return self.conn.recv(size)
            except socket.error:
                exc = sys.exc_info()
                e = exc[1]
                # FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr
                if (e.args[0] not in set()):
                    raise

    def next(self):
        data = self.readline()
        # Bug fix: the original compared bytes against '' which never matches
        # on Python 3; use truthiness so iteration terminates on both 2 and 3.
        if not data:
            raise StopIteration
        return data

    # Bug fix: Python 3's iterator protocol uses __next__; the original only
    # defined the Python 2 name, which broke iteration under Python 3.
    __next__ = next

    def non_blocking_read(self, size=None):
        # Shamelessly adapted from Cherrypy!
        bufr = self.buffer
        bufr.seek(0, 2)
        if size is None:
            # Drain the socket completely.
            while True:
                data = self.recv(self.buf_size)
                if not data:
                    break
                bufr.write(data)
            self.buffer = StringIO()
            return bufr.getvalue()
        else:
            buf_len = self.buffer.tell()
            if buf_len >= size:
                # Enough data already buffered; serve from the buffer.
                bufr.seek(0)
                data = bufr.read(size)
                self.buffer = StringIO(bufr.read())
                return data
            self.buffer = StringIO()
            while True:
                remaining = size - buf_len
                data = self.recv(remaining)
                if not data:
                    break
                n = len(data)
                if n == size and not buf_len:
                    # One recv satisfied the whole request; skip the buffer.
                    return data
                if n == remaining:
                    bufr.write(data)
                    del data
                    break
                bufr.write(data)
                buf_len += n
                del data
            return bufr.getvalue()

    def blocking_read(self, length=None):
        # Default to content_length when set, otherwise a single byte.
        if length is None:
            if self.content_length is not None:
                length = self.content_length
            else:
                length = 1
        try:
            data = self.conn.recv(length)
        except:
            data = b('')
        return data

    def readline(self):
        """Return one line, including its trailing newline if present."""
        data = b("")
        char = self.read(1)
        # Bug fix: the original used 'char is not b('')', relying on CPython
        # interning empty bytes; compare by equality instead.  The dead
        # 'line = repr(char)' statement was removed.
        while char != b('\n') and char != b(''):
            data += char
            char = self.read(1)
        data += char
        return data

    def readlines(self, hint="ignored"):
        return list(self)

    def close(self):
        self.conn = None
        self.content_length = None
# Monolithic build...end of module: rocket\filelike.py
# Monolithic build...start of module: rocket\futures.py
# Import System Modules
import time
# Prefer the real 'concurrent.futures' package (stdlib on Py3, backport on
# Py2); otherwise install inert stubs so the class definitions below still
# import.  has_futures gates every real use of the executor machinery.
try:
    from concurrent.futures import Future, ThreadPoolExecutor
    from concurrent.futures.thread import _WorkItem
    has_futures = True
except ImportError:
    has_futures = False

    class Future:
        # Placeholder base so WSGIFuture can be defined without futures.
        pass

    class ThreadPoolExecutor:
        # Placeholder base so WSGIExecutor can be defined without futures.
        pass

    class _WorkItem:
        # Placeholder; replaced by the real _WorkItem subclass below.
        pass
class WSGIFuture(Future):
    """A Future that can be 'remembered' by name in a shared dictionary.

    The dictionary (f_dict) is shared per-executor; remembered futures are
    also given a lifespan after which they refuse to start running.
    """

    def __init__(self, f_dict, *args, **kwargs):
        Future.__init__(self, *args, **kwargs)
        self.timeout = None
        self._mem_dict = f_dict
        self._lifespan = 30  # seconds a future may wait before it expires
        self._name = None
        self._start_time = time.time()

    def set_running_or_notify_cancel(self):
        # Expired futures cancel themselves instead of starting.
        expired = (time.time() - self._start_time) >= self._lifespan
        if not expired:
            return super(WSGIFuture, self).set_running_or_notify_cancel()
        self.cancel()

    def remember(self, name, lifespan=None):
        """Register this future under *name*; returns self for chaining."""
        if lifespan:
            self._lifespan = lifespan
        if name in self._mem_dict:
            raise NameError('Cannot remember future by name "%s". ' % name + \
                            'A future already exists with that name.' )
        self._name = name
        self._mem_dict[name] = self
        return self

    def forget(self):
        """Drop this future's registration, if it is still the one stored."""
        if self._mem_dict.get(self._name) is self:
            del self._mem_dict[self._name]
            self._name = None
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class WSGIExecutor(ThreadPoolExecutor):
    """ThreadPoolExecutor that produces WSGIFuture objects and keeps a
    shared name->future dictionary for the 'remember' feature."""
    multithread = True
    multiprocess = False

    def __init__(self, *args, **kwargs):
        ThreadPoolExecutor.__init__(self, *args, **kwargs)
        # Shared registry handed to every WSGIFuture created by submit().
        self.futures = dict()

    def submit(self, fn, *args, **kwargs):
        """Schedule fn(*args, **kwargs) and return its WSGIFuture.

        Raises RuntimeError if called after shutdown().
        """
        # Bug fix: the original acquired _shutdown_lock manually and only
        # released it on the success path, leaking the lock if queueing or
        # _adjust_thread_count() raised.  A with-block guarantees release.
        # (The old 'return False when acquire fails' branch was dead code:
        # a blocking acquire always returns True.)
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('Cannot schedule new futures after shutdown')
            f = WSGIFuture(self.futures)
            w = _WorkItem(f, fn, args, kwargs)
            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
class FuturesMiddleware(object):
    """WSGI middleware exposing a futures executor via the environ.

    Downstream applications find the executor under 'wsgiorg.executor'
    and its remembered-futures registry under 'wsgiorg.futures'.
    """

    def __init__(self, app, threads=5):
        self.app = app
        self.executor = WSGIExecutor(threads)

    def __call__(self, environ, start_response):
        environ.update({
            "wsgiorg.executor": self.executor,
            "wsgiorg.futures": self.executor.futures,
        })
        return self.app(environ, start_response)
# Monolithic build...end of module: rocket\futures.py
# Monolithic build...start of module: rocket\listener.py
# Import System Modules
import os
import socket
import logging
import traceback
from threading import Thread
try:
import ssl
from ssl import SSLError
has_ssl = True
except ImportError:
has_ssl = False
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
class Listener(Thread):
    """The Listener class is a class responsible for accepting connections
    and queuing them to be processed by a worker thread."""

    def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
        # interface: (addr, port[, keyfile, certfile[, ca_certs]]) -- a
        # 4- or 5-tuple means HTTPS; queue_size: listen() backlog;
        # active_queue: queue consumed by Worker threads.
        Thread.__init__(self, *args, **kwargs)
        # Instance variables
        self.active_queue = active_queue
        self.interface = interface
        self.addr = interface[0]
        self.port = interface[1]
        self.secure = len(interface) >= 4
        self.clientcert_req = (len(interface) == 5 and interface[4])
        self.thread = None
        # self.ready stays False if any setup step below fails; start()
        # refuses to run in that case.
        self.ready = False
        # Error Log
        self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port)
        self.err_log.addHandler(NullHandler())
        # Build the socket
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if not listener:
            self.err_log.error("Failed to get socket.")
            return
        if self.secure:
            # Validate that every file the TLS setup needs actually exists.
            if not has_ssl:
                self.err_log.error("ssl module required to serve HTTPS.")
                return
            elif not os.path.exists(interface[2]):
                data = (interface[2], interface[0], interface[1])
                self.err_log.error("Cannot find key file "
                                   "'%s'. Cannot bind to %s:%s" % data)
                return
            elif not os.path.exists(interface[3]):
                data = (interface[3], interface[0], interface[1])
                self.err_log.error("Cannot find certificate file "
                                   "'%s'. Cannot bind to %s:%s" % data)
                return
            if self.clientcert_req and not os.path.exists(interface[4]):
                data = (interface[4], interface[0], interface[1])
                self.err_log.error("Cannot find root ca certificate file "
                                   "'%s'. Cannot bind to %s:%s" % data)
                return
        # Set socket options
        try:
            listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except:
            msg = "Cannot share socket. Using %s:%i exclusively."
            self.err_log.warning(msg % (self.addr, self.port))
        try:
            if not IS_JYTHON:
                listener.setsockopt(socket.IPPROTO_TCP,
                                    socket.TCP_NODELAY,
                                    1)
        except:
            msg = "Cannot set TCP_NODELAY, things might run a little slower"
            self.err_log.warning(msg)
        try:
            listener.bind((self.addr, self.port))
        except:
            msg = "Socket %s:%i in use by other process and it won't share."
            self.err_log.error(msg % (self.addr, self.port))
        else:
            # We want socket operations to timeout periodically so we can
            # check if the server is shutting down
            listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
            # Listen for new connections allowing queue_size number of
            # connections to wait before rejecting a connection.
            listener.listen(queue_size)
            self.listener = listener
            self.ready = True

    def wrap_socket(self, sock):
        """Wrap an accepted socket in TLS using the configured key/cert.

        NOTE(review): ssl.wrap_socket was deprecated in Python 3.7 and
        removed in 3.12; an ssl.SSLContext migration is needed there.
        """
        try:
            if self.clientcert_req:
                ca_certs = self.interface[4]
                cert_reqs = ssl.CERT_OPTIONAL
                sock = ssl.wrap_socket(sock,
                                       keyfile = self.interface[2],
                                       certfile = self.interface[3],
                                       server_side = True,
                                       cert_reqs = cert_reqs,
                                       ca_certs = ca_certs,
                                       ssl_version = ssl.PROTOCOL_SSLv23)
            else:
                sock = ssl.wrap_socket(sock,
                                       keyfile = self.interface[2],
                                       certfile = self.interface[3],
                                       server_side = True,
                                       ssl_version = ssl.PROTOCOL_SSLv23)
        except SSLError:
            # Generally this happens when an HTTP request is received on a
            # secure socket. We don't do anything because it will be detected
            # by Worker and dealt with appropriately.
            # self.err_log.error('SSL Error: %s' % traceback.format_exc())
            pass
        return sock

    def start(self):
        """Spawn the background accept-loop thread (replaces Thread.start)."""
        if not self.ready:
            self.err_log.warning('Listener started when not ready.')
            return
        # NOTE(review): Thread.isAlive() was removed in Python 3.9;
        # is_alive() is the modern spelling -- verify target version.
        if self.thread is not None and self.thread.isAlive():
            self.err_log.warning('Listener already running.')
            return
        self.thread = Thread(target=self.listen, name="Port" + str(self.port))
        self.thread.start()

    def isAlive(self):
        # Reports on the inner accept-loop thread, not on this object.
        if self.thread is None:
            return False
        return self.thread.isAlive()

    def join(self):
        if self.thread is None:
            return
        # Dropping self.ready makes the accept loop exit on its next timeout.
        self.ready = False
        self.thread.join()
        del self.thread
        self.thread = None
        self.ready = True

    def listen(self):
        """Accept-loop body: queue each accepted connection for the workers."""
        if __debug__:
            self.err_log.debug('Entering main loop.')
        while True:
            try:
                sock, addr = self.listener.accept()
                if self.secure:
                    sock = self.wrap_socket(sock)
                self.active_queue.put(((sock, addr),
                                       self.interface[1],
                                       self.secure))
            except socket.timeout:
                # socket.timeout will be raised every THREAD_STOP_CHECK_INTERVAL
                # seconds. When that happens, we check if it's time to die.
                if not self.ready:
                    if __debug__:
                        self.err_log.debug('Listener exiting.')
                    return
                else:
                    continue
            except:
                self.err_log.error(traceback.format_exc())
# Monolithic build...end of module: rocket\listener.py
# Monolithic build...start of module: rocket\main.py
# Import System Modules
import sys
import time
import socket
import logging
import traceback
from threading import Lock
try:
from queue import Queue
except ImportError:
from Queue import Queue
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
# Module-level logger for the server; the NullHandler keeps it silent
# unless the embedding application configures handlers.
log = logging.getLogger('Rocket')
log.addHandler(NullHandler())
class Rocket(object):
    """The Rocket class is responsible for handling threads and accepting and
    dispatching connections."""

    def __init__(self,
                 interfaces = ('127.0.0.1', 8000),
                 method = 'wsgi',
                 app_info = None,
                 min_threads = None,
                 max_threads = None,
                 queue_size = None,
                 timeout = 600,
                 handle_signals = True):
        # interfaces: one (addr, port[, key, cert]) tuple or a list of them.
        # method: 'wsgi' or 'fs' (see get_method).  timeout: seconds a
        # kept-alive connection may idle before the Monitor closes it.
        self.handle_signals = handle_signals
        self.startstop_lock = Lock()
        self.timeout = timeout
        if not isinstance(interfaces, list):
            self.interfaces = [interfaces]
        else:
            self.interfaces = interfaces
        if min_threads is None:
            min_threads = DEFAULTS['MIN_THREADS']
        if max_threads is None:
            max_threads = DEFAULTS['MAX_THREADS']
        if not queue_size:
            if hasattr(socket, 'SOMAXCONN'):
                queue_size = socket.SOMAXCONN
            else:
                queue_size = DEFAULTS['LISTEN_QUEUE_SIZE']
        # A backlog larger than the pool could serve is pointless; clamp it
        # (max_threads == 0 means "unlimited", so no clamp then).
        if max_threads and queue_size > max_threads:
            queue_size = max_threads
        if isinstance(app_info, dict):
            app_info['server_software'] = SERVER_SOFTWARE
        self.monitor_queue = Queue()
        self.active_queue = Queue()
        self._threadpool = ThreadPool(get_method(method),
                                      app_info = app_info,
                                      active_queue = self.active_queue,
                                      monitor_queue = self.monitor_queue,
                                      min_threads = min_threads,
                                      max_threads = max_threads)
        # Build our socket listeners
        self.listeners = [Listener(i, queue_size, self.active_queue) for i in self.interfaces]
        # Prune listeners whose socket setup failed; iterate backwards so
        # deletions do not shift unvisited indexes.
        # NOTE(review): the range stops at 1, so index 0 is never pruned --
        # confirm whether a failed first listener should survive here.
        for ndx in range(len(self.listeners)-1, 0, -1):
            if not self.listeners[ndx].ready:
                del self.listeners[ndx]
        if not self.listeners:
            log.critical("No interfaces to listen on...closing.")
            sys.exit(1)

    def _sigterm(self, signum, frame):
        log.info('Received SIGTERM')
        self.stop()

    def _sighup(self, signum, frame):
        log.info('Received SIGHUP')
        self.restart()

    def start(self, background=False):
        """Start pool, monitor and listeners.

        With background=True, returns immediately; otherwise blocks until
        the monitor dies or Ctrl-C, then stops the server.
        """
        log.info('Starting %s' % SERVER_SOFTWARE)
        self.startstop_lock.acquire()
        try:
            # Set up our shutdown signals
            if self.handle_signals:
                try:
                    import signal
                    signal.signal(signal.SIGTERM, self._sigterm)
                    # NOTE(review): SIGUSR1 (not SIGHUP) is bound to the
                    # restart handler -- confirm this is intended.
                    signal.signal(signal.SIGUSR1, self._sighup)
                except:
                    log.debug('This platform does not support signals.')
            # Start our worker threads
            self._threadpool.start()
            # Start our monitor thread
            self._monitor = Monitor(self.monitor_queue,
                                    self.active_queue,
                                    self.timeout,
                                    self._threadpool)
            self._monitor.setDaemon(True)
            self._monitor.start()
            # I know that EXPR and A or B is bad but I'm keeping it for Py2.4
            # compatibility.
            str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '')
            msg = 'Listening on sockets: '
            msg += ', '.join(['%s:%i%s' % str_extract(l) for l in self.listeners])
            log.info(msg)
            for l in self.listeners:
                l.start()
        finally:
            self.startstop_lock.release()
        if background:
            return
        # Foreground mode: idle until the monitor exits or the console user
        # interrupts, then shut everything down.
        while self._monitor.isAlive():
            try:
                time.sleep(THREAD_STOP_CHECK_INTERVAL)
            except KeyboardInterrupt:
                # Capture a keyboard interrupt when running from a console
                break
            except:
                if self._monitor.isAlive():
                    log.error(traceback.format_exc())
                    continue
        return self.stop()

    def stop(self, stoplogging = False):
        """Stop listeners, monitor and worker pool (in that order)."""
        log.info('Stopping %s' % SERVER_SOFTWARE)
        self.startstop_lock.acquire()
        try:
            # Stop listeners
            for l in self.listeners:
                l.ready = False
            # Encourage a context switch
            time.sleep(0.01)
            for l in self.listeners:
                if l.isAlive():
                    l.join()
            # Stop Monitor
            self._monitor.stop()
            if self._monitor.isAlive():
                self._monitor.join()
            # Stop Worker threads
            self._threadpool.stop()
            if stoplogging:
                # Deprecated path: kept for callers still passing True.
                logging.shutdown()
                msg = "Calling logging.shutdown() is now the responsibility of \
                       the application developer. Please update your \
                       applications to no longer call rocket.stop(True)"
                try:
                    import warnings
                    raise warnings.DeprecationWarning(msg)
                except ImportError:
                    raise RuntimeError(msg)
        finally:
            self.startstop_lock.release()

    def restart(self):
        self.stop()
        self.start()
def CherryPyWSGIServer(bind_addr,
                       wsgi_app,
                       numthreads = 10,
                       server_name = None,
                       max = -1,
                       request_queue_size = 5,
                       timeout = 10,
                       shutdown_timeout = 5):
    """Build a Rocket instance from CherryPy wsgiserver-style arguments.

    server_name and shutdown_timeout are accepted only for signature
    compatibility and are not used.  A negative max (CherryPy's
    'unlimited') maps to Rocket's 0, which also means unbounded.
    """
    # 'max' shadows the builtin, but the name is part of the public API.
    thread_ceiling = max if max >= 0 else 0
    return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app},
                  min_threads = numthreads,
                  max_threads = thread_ceiling,
                  queue_size = request_queue_size,
                  timeout = timeout)
# Monolithic build...end of module: rocket\main.py
# Monolithic build...start of module: rocket\monitor.py
# Import System Modules
import time
import logging
import select
from threading import Thread
# Import Package Modules
# package imports removed in monolithic build
class Monitor(Thread):
    # Monitor worker class.
    # Watches kept-alive (idle) connections with select(); readable ones go
    # back to the worker queue, stale ones are closed after self.timeout.

    def __init__(self,
                 monitor_queue,
                 active_queue,
                 timeout,
                 threadpool,
                 *args,
                 **kwargs):
        Thread.__init__(self, *args, **kwargs)
        self._threadpool = threadpool
        # Instance Variables
        # Workers push idle connections onto monitor_queue; connections that
        # become readable are fed back to workers through active_queue.
        self.monitor_queue = monitor_queue
        self.active_queue = active_queue
        self.timeout = timeout
        self.log = logging.getLogger('Rocket.Monitor')
        self.log.addHandler(NullHandler())
        self.connections = set()
        self.active = False

    def run(self):
        self.active = True
        conn_list = list()
        list_changed = False
        # We need to make sure the queue is empty before we start
        while not self.monitor_queue.empty():
            self.monitor_queue.get()
        if __debug__:
            self.log.debug('Entering monitor loop.')
        # Enter thread main loop
        while self.active:
            # Move the queued connections to the selection pool
            while not self.monitor_queue.empty():
                if __debug__:
                    self.log.debug('In "receive timed-out connections" loop.')
                c = self.monitor_queue.get()
                if c is None:
                    # A non-client is a signal to die
                    if __debug__:
                        self.log.debug('Received a death threat.')
                    self.stop()
                    break
                self.log.debug('Received a timed out connection.')
                if __debug__:
                    assert(c not in self.connections)
                if IS_JYTHON:
                    # Jython requires a socket to be in Non-blocking mode in
                    # order to select on it.
                    c.setblocking(False)
                if __debug__:
                    self.log.debug('Adding connection to monitor list.')
                self.connections.add(c)
                list_changed = True
            # Wait on those connections
            if list_changed:
                # Rebuild the select() list only when membership changed.
                conn_list = list(self.connections)
                list_changed = False
            try:
                if len(conn_list):
                    readable = select.select(conn_list,
                                             [],
                                             [],
                                             THREAD_STOP_CHECK_INTERVAL)[0]
                else:
                    # Nothing to watch; sleep instead of spinning.
                    time.sleep(THREAD_STOP_CHECK_INTERVAL)
                    readable = []
                if not self.active:
                    break
                # If we have any readable connections, put them back
                for r in readable:
                    if __debug__:
                        self.log.debug('Restoring readable connection')
                    if IS_JYTHON:
                        # Jython requires a socket to be in Non-blocking mode in
                        # order to select on it, but the rest of the code requires
                        # that it be in blocking mode.
                        r.setblocking(True)
                    # Reset the idle clock before handing it back to workers.
                    r.start_time = time.time()
                    self.active_queue.put(r)
                    self.connections.remove(r)
                    list_changed = True
            except:
                # During shutdown select() may fail on closed sockets;
                # otherwise the error is unexpected and re-raised.
                if self.active:
                    raise
                else:
                    break
            # If we have any stale connections, kill them off.
            if self.timeout:
                now = time.time()
                stale = set()
                for c in self.connections:
                    if (now - c.start_time) >= self.timeout:
                        stale.add(c)
                for c in stale:
                    if __debug__:
                        # "EXPR and A or B" kept for Py2.4 compatibility
                        data = (c.client_addr, c.server_port, c.ssl and '*' or '')
                        self.log.debug('Flushing stale connection: %s:%i%s' % data)
                    self.connections.remove(c)
                    list_changed = True
                    try:
                        c.close()
                    finally:
                        del c
            # Dynamically resize the threadpool to adapt to our changing needs.
            self._threadpool.dynamic_resize()

    def stop(self):
        """Stop the loop and close every watched or queued connection."""
        self.active = False
        if __debug__:
            self.log.debug('Flushing waiting connections')
        while self.connections:
            c = self.connections.pop()
            try:
                c.close()
            finally:
                del c
        if __debug__:
            self.log.debug('Flushing queued connections')
        while not self.monitor_queue.empty():
            c = self.monitor_queue.get()
            if c is None:
                continue
            try:
                c.close()
            finally:
                del c
        # Place a None sentry value to cause the monitor to die.
        self.monitor_queue.put(None)
# Monolithic build...end of module: rocket\monitor.py
# Monolithic build...start of module: rocket\threadpool.py
# Import System Modules
import logging
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
# Dedicated error logger for the thread pool (silent by default).
log = logging.getLogger('Rocket.Errors.ThreadPool')
log.addHandler(NullHandler())
class ThreadPool:
    """The ThreadPool class is a container class for all the worker threads. It
    manages the number of actively running threads."""

    def __init__(self,
                 method,
                 app_info,
                 active_queue,
                 monitor_queue,
                 min_threads=DEFAULTS['MIN_THREADS'],
                 max_threads=DEFAULTS['MAX_THREADS'],
                 ):
        # method: the Worker subclass to instantiate (despite the name).
        # max_threads == 0 means no upper bound.
        if __debug__:
            log.debug("Initializing ThreadPool.")
        # Number of threads asked to die that have not been reaped yet.
        self.check_for_dead_threads = 0
        self.active_queue = active_queue
        self.worker_class = method
        self.min_threads = min_threads
        self.max_threads = max_threads
        self.monitor_queue = monitor_queue
        self.stop_server = False
        self.alive = False
        # TODO - Optimize this based on some real-world usage data
        self.grow_threshold = int(max_threads/10) + 2
        if not isinstance(app_info, dict):
            app_info = dict()
        if has_futures and app_info.get('futures'):
            app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'],
                                                     2]))
        app_info.update(max_threads=max_threads,
                        min_threads=min_threads)
        self.min_threads = min_threads
        self.app_info = app_info
        self.threads = set()

    def start(self):
        self.stop_server = False
        if __debug__:
            log.debug("Starting threads.")
        # grow() runs before self.alive is set, so the runtime clamp in
        # grow() does not apply to this initial spawn.
        self.grow(self.min_threads)
        self.alive = True

    def stop(self):
        self.alive = False
        if __debug__:
            log.debug("Stopping threads.")
        self.stop_server = True
        # Prompt the threads to die
        self.shrink(len(self.threads))
        # Stop futures initially
        if has_futures and self.app_info.get('futures'):
            if __debug__:
                log.debug("Future executor is present. Python will not "
                          "exit until all jobs have finished.")
            self.app_info['executor'].shutdown(wait=False)
        # Give them the gun
        #active_threads = [t for t in self.threads if t.isAlive()]
        #while active_threads:
        #    t = active_threads.pop()
        #    t.kill()
        # Wait until they pull the trigger
        for t in self.threads:
            if t.isAlive():
                t.join()
        # Clean up the mess
        self.bring_out_your_dead()

    def bring_out_your_dead(self):
        # Remove dead threads from the pool
        dead_threads = [t for t in self.threads if not t.isAlive()]
        for t in dead_threads:
            if __debug__:
                log.debug("Removing dead thread: %s." % t.getName())
            try:
                # Py2.4 complains here so we put it in a try block
                self.threads.remove(t)
            except:
                pass
        self.check_for_dead_threads -= len(dead_threads)

    def grow(self, amount=None):
        """Spawn up to *amount* new worker threads (default: max_threads)."""
        if self.stop_server:
            return
        if not amount:
            amount = self.max_threads
        if self.alive:
            # NOTE(review): with max_threads == 0 ("unlimited") this clamp
            # makes amount negative once the pool is running, so the pool
            # never grows at runtime -- confirm whether that is intended.
            amount = min([amount, self.max_threads - len(self.threads)])
        if __debug__:
            log.debug("Growing by %i." % amount)
        for x in range(amount):
            worker = self.worker_class(self.app_info,
                                       self.active_queue,
                                       self.monitor_queue)
            worker.setDaemon(True)
            self.threads.add(worker)
            worker.start()

    def shrink(self, amount=1):
        """Ask *amount* workers to exit by queueing None sentinels."""
        if __debug__:
            log.debug("Shrinking by %i." % amount)
        self.check_for_dead_threads += amount
        for x in range(amount):
            self.active_queue.put(None)

    def dynamic_resize(self):
        # Called periodically by the Monitor; grows on backlog, shrinks
        # when idle above the minimum.
        if (self.max_threads > self.min_threads or self.max_threads == 0):
            if self.check_for_dead_threads > 0:
                self.bring_out_your_dead()
            queueSize = self.active_queue.qsize()
            threadCount = len(self.threads)
            if __debug__:
                log.debug("Examining ThreadPool. %i threads and %i Q'd conxions"
                          % (threadCount, queueSize))
            if queueSize == 0 and threadCount > self.min_threads:
                self.shrink()
            elif queueSize > self.grow_threshold:
                self.grow(queueSize)
# Monolithic build...end of module: rocket\threadpool.py
# Monolithic build...start of module: rocket\worker.py
# Import System Modules
import re
import sys
import socket
import logging
import traceback
from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from ssl import SSLError
except ImportError:
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
# Matches literal '%2F' so encoded slashes survive path unquoting.
re_SLASH = re.compile('%2F', re.IGNORECASE)
# Verbose-mode parser for the HTTP request line (method, optional
# scheme://host, path, optional query string, protocol).
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) # Request Method
\ # (single space)
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]+))? # Query String
\ # (single space)
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""", re.X)
# Apache-ish access-log line format.
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
# Minimal response template used by Worker.send_response.
# NOTE(review): as transcribed there is no blank line between the headers
# and the body, so no CRLFCRLF terminator is produced -- confirm against
# the original source (blank lines may have been lost in this copy).
RESPONSE = '''\
HTTP/1.1 %s
Content-Length: %i
Content-Type: %s
%s
'''
if IS_JYTHON:
    # Used by the procedural (non-regex) request-line parser on Jython.
    HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT'])
class Worker(Thread):
    """The Worker class is a base class responsible for receiving connections
    and (a subclass) will run an application to process the the connection """

    def __init__(self,
                 app_info,
                 active_queue,
                 monitor_queue,
                 *args,
                 **kwargs):
        Thread.__init__(self, *args, **kwargs)
        # Instance Variables
        self.app_info = app_info
        self.active_queue = active_queue
        self.monitor_queue = monitor_queue
        # Per-request state, overwritten as each request is handled.
        self.size = 0
        self.status = "200 OK"
        self.closeConnection = True
        self.request_line = ""
        # Request Log
        self.req_log = logging.getLogger('Rocket.Requests')
        self.req_log.addHandler(NullHandler())
        # Error Log
        self.err_log = logging.getLogger('Rocket.Errors.'+self.getName())
        self.err_log.addHandler(NullHandler())

    def _handleError(self, typ, val, tb):
        """Classify an exception raised while serving a request.

        Returns True to continue the connection-serve loop and False to
        stop serving this connection.
        """
        # NOTE(review): exact type comparisons ('==') mean subclasses of
        # these exception types fall through to the generic handler.
        if typ == SSLError:
            if 'timed out' in val.args[0]:
                typ = SocketTimeout
        if typ == SocketTimeout:
            if __debug__:
                self.err_log.debug('Socket timed out')
            # Idle keep-alive connection: hand it to the Monitor to watch.
            self.monitor_queue.put(self.conn)
            return True
        if typ == SocketClosed:
            self.closeConnection = True
            if __debug__:
                self.err_log.debug('Client closed socket')
            return False
        if typ == BadRequest:
            self.closeConnection = True
            if __debug__:
                self.err_log.debug('Client sent a bad request')
            return True
        if typ == socket.error:
            self.closeConnection = True
            if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
                if __debug__:
                    self.err_log.debug('Ignorable socket Error received...'
                                       'closing connection.')
                return False
            else:
                self.status = "999 Utter Server Failure"
                tb_fmt = traceback.format_exception(typ, val, tb)
                self.err_log.error('Unhandled Error when serving '
                                   'connection:\n' + '\n'.join(tb_fmt))
                return False
        # Anything else: log it, tell the client 500, stop the connection.
        self.closeConnection = True
        tb_fmt = traceback.format_exception(typ, val, tb)
        self.err_log.error('\n'.join(tb_fmt))
        self.send_response('500 Server Error')
        return False

    def run(self):
        if __debug__:
            self.err_log.debug('Entering main loop.')
        # Enter thread main loop
        while True:
            conn = self.active_queue.get()
            if not conn:
                # A non-client is a signal to die
                if __debug__:
                    self.err_log.debug('Received a death threat.')
                return conn
            if isinstance(conn, tuple):
                # Fresh from a Listener: wrap the raw accept() data.
                conn = Connection(*conn)
            self.conn = conn
            if conn.ssl != conn.secure:
                self.err_log.info('Received HTTP connection on HTTPS port.')
                self.send_response('400 Bad Request')
                self.closeConnection = True
                conn.close()
                continue
            else:
                if __debug__:
                    self.err_log.debug('Received a connection.')
                self.closeConnection = False
            # Enter connection serve loop
            while True:
                if __debug__:
                    self.err_log.debug('Serving a request')
                try:
                    self.run_app(conn)
                    log_info = dict(client_ip = conn.client_addr,
                                    time = datetime.now().strftime('%c'),
                                    status = self.status.split(' ')[0],
                                    size = self.size,
                                    request_line = self.request_line)
                    self.req_log.info(LOG_LINE % log_info)
                except:
                    exc = sys.exc_info()
                    handled = self._handleError(*exc)
                    if handled:
                        # Connection was handed off (e.g. to the Monitor).
                        break
                    else:
                        if self.request_line:
                            log_info = dict(client_ip = conn.client_addr,
                                            time = datetime.now().strftime('%c'),
                                            status = self.status.split(' ')[0],
                                            size = self.size,
                                            request_line = self.request_line + ' - not stopping')
                            self.req_log.info(LOG_LINE % log_info)
                if self.closeConnection:
                    try:
                        conn.close()
                    except:
                        self.err_log.error(str(traceback.format_exc()))
                    break

    def run_app(self, conn):
        # Must be overridden with a method reads the request from the socket
        # and sends a response.
        self.closeConnection = True
        raise NotImplementedError('Overload this method!')

    def send_response(self, status):
        """Send a minimal text/plain response whose body is the status text."""
        stat_msg = status.split(' ', 1)[1]
        msg = RESPONSE % (status,
                          len(stat_msg),
                          'text/plain',
                          stat_msg)
        try:
            self.conn.sendall(b(msg))
        except socket.error:
            self.closeConnection = True
            self.err_log.error('Tried to send "%s" to client but received socket'
                               ' error' % status)

    #def kill(self):
    #    if self.isAlive() and hasattr(self, 'conn'):
    #        try:
    #            self.conn.shutdown(socket.SHUT_RDWR)
    #        except socket.error:
    #            info = sys.exc_info()
    #            if info[1].args[0] != socket.EBADF:
    #                self.err_log.debug('Error on shutdown: '+str(info))

    def read_request_line(self, sock_file):
        """Read and parse the HTTP request line.

        Returns a dict with keys method, scheme, host, path, query_string,
        protocol.  Raises SocketTimeout, SocketClosed or BadRequest.
        """
        self.request_line = ''
        try:
            # Grab the request line
            d = sock_file.readline()
            if PY3K:
                d = d.decode('ISO-8859-1')
            if d == '\r\n':
                # Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
                if __debug__:
                    self.err_log.debug('Client sent newline')
                d = sock_file.readline()
                if PY3K:
                    d = d.decode('ISO-8859-1')
        except socket.timeout:
            raise SocketTimeout("Socket timed out before request.")
        d = d.strip()
        if not d:
            if __debug__:
                self.err_log.debug('Client did not send a recognizable request.')
            raise SocketClosed('Client closed socket.')
        self.request_line = d
        # NOTE: I've replaced the traditional method of procedurally breaking
        # apart the request line with a (rather unsightly) regular expression.
        # However, Java's regexp support sucks so bad that it actually takes
        # longer in Jython to process the regexp than procedurally. So I've
        # left the old code here for Jython's sake...for now.
        if IS_JYTHON:
            return self._read_request_line_jython(d)
        match = re_REQUEST_LINE.match(d)
        if not match:
            self.send_response('400 Bad Request')
            raise BadRequest
        req = match.groupdict()
        for k,v in req.items():
            if not v:
                req[k] = ""
            if k == 'path':
                # Unquote the path but keep literal '%2F' sequences encoded
                # so they are not mistaken for path separators.
                req['path'] = r'%2F'.join([unquote(x) for x in re_SLASH.split(v)])
        return req

    def _read_request_line_jython(self, d):
        """Procedural request-line parser kept for Jython (faster there
        than the regular expression above)."""
        d = d.strip()
        try:
            method, uri, proto = d.split(' ')
            if not proto.startswith('HTTP') or \
               proto[-3:] not in ('1.0', '1.1') or \
               method not in HTTP_METHODS:
                self.send_response('400 Bad Request')
                raise BadRequest
        except ValueError:
            self.send_response('400 Bad Request')
            raise BadRequest
        req = dict(method=method, protocol = proto)
        scheme = ''
        host = ''
        if uri == '*' or uri.startswith('/'):
            path = uri
        elif '://' in uri:
            scheme, rest = uri.split('://')
            host, path = rest.split('/', 1)
            path = '/' + path
        else:
            self.send_response('400 Bad Request')
            raise BadRequest
        query_string = ''
        if '?' in path:
            path, query_string = path.split('?', 1)
        path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)])
        req.update(path=path,
                   query_string=query_string,
                   scheme=scheme.lower(),
                   host=host)
        return req

    def read_headers(self, sock_file):
        """Read request headers into a dict keyed by upper-cased names with
        '-' replaced by '_' (e.g. CONTENT_LENGTH)."""
        try:
            headers = dict()
            l = sock_file.readline()
            lname = None
            lval = None
            while True:
                if PY3K:
                    try:
                        l = str(l, 'ISO-8859-1')
                    except UnicodeDecodeError:
                        self.err_log.warning('Client sent invalid header: ' + repr(l))
                if l == '\r\n':
                    break
                if l[0] in ' \t' and lname:
                    # Some headers take more than one line
                    # NOTE(review): continuation lines are joined with ','
                    # rather than a space -- confirm consumers expect that.
                    lval += ',' + l.strip()
                else:
                    # HTTP header values are latin-1 encoded
                    l = l.split(':', 1)
                    # HTTP header names are us-ascii encoded
                    lname = l[0].strip().upper().replace('-', '_')
                    lval = l[-1].strip()
                headers[str(lname)] = str(lval)
                l = sock_file.readline()
        except socket.timeout:
            raise SocketTimeout("Socket timed out before request.")
        return headers
class SocketTimeout(Exception):
    """Raised when a connection idles past its timeout between requests."""
    pass
class BadRequest(Exception):
    """Raised when the client's request cannot be parsed."""
    pass
class SocketClosed(Exception):
    """Raised when the client has closed its end of the connection."""
    pass
class ChunkedReader(object):
    """Decode an HTTP 'Transfer-Encoding: chunked' body into plain bytes.

    sock_file must provide read(n) and readline() returning bytes.
    """

    def __init__(self, sock_file):
        self.stream = sock_file
        # Bytes remaining in the chunk currently being consumed.
        self.chunk_size = 0

    def _read_header(self):
        """Read the next chunk-size line and return it as an int.

        Skips blank lines (the CRLF terminating the previous chunk's data)
        and returns 0 at EOF or on a malformed size line, which callers
        treat as end-of-stream.
        """
        try:
            chunk_len = b''
            # Bug fix: the original looped on '"" == chunk_len', a str/bytes
            # comparison that never matches on Python 3, so blank separator
            # lines between chunks were not skipped there.  Looping on
            # truthiness works for both str (Py2) and bytes (Py3).
            while not chunk_len:
                line = self.stream.readline()
                if not line:
                    # EOF: no further chunks.
                    return 0
                chunk_len = line.strip()
            return int(chunk_len, 16)
        except ValueError:
            return 0

    def read(self, size):
        """Return up to *size* decoded bytes (fewer only at end of stream)."""
        data = b''
        chunk_size = self.chunk_size
        while size:
            if not chunk_size:
                chunk_size = self._read_header()
            if size < chunk_size:
                # The request ends inside the current chunk.
                data += self.stream.read(size)
                chunk_size -= size
                break
            else:
                if not chunk_size:
                    # Zero-size header == last-chunk marker (or EOF).
                    break
                data += self.stream.read(chunk_size)
                size -= chunk_size
                chunk_size = 0
        self.chunk_size = chunk_size
        return data

    def readline(self):
        """Return one decoded line, including its newline when present."""
        data = b''
        c = self.read(1)
        while c and c != b'\n':
            data += c
            c = self.read(1)
        data += c
        return data

    def readlines(self):
        """Yield every remaining decoded line.

        Bug fix: the original yielded only the first line and stopped.
        """
        line = self.readline()
        while line:
            yield line
            line = self.readline()
def get_method(method):
    """Resolve a serving-method name to its Worker class.

    method: 'wsgi' or 'fs' (case-insensitive); any other value raises
    KeyError.
    """
    return dict(wsgi=WSGIWorker, fs=FileSystemWorker)[method.lower()]
# Monolithic build...end of module: rocket\worker.py
# Monolithic build...start of module: rocket\methods\__init__.py
# Monolithic build...end of module: rocket\methods\__init__.py
# Monolithic build...start of module: rocket\methods\fs.py
# Import System Modules
import os
import time
import mimetypes
from email.utils import formatdate
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
CHUNK_SIZE = 2**16 # 64 Kilobyte chunks
# Raw header block template used when emitting a response head directly.
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
# HTML scaffolding for auto-generated directory-index pages.
INDEX_HEADER = '''\
<html>
<head><title>Directory Index: %(path)s</title>
<style> .parent { margin-bottom: 1em; }</style>
</head>
<body><h1>Directory Index: %(path)s</h1>
<table>
<tr><th>Directories</th></tr>
'''
INDEX_ROW = '''<tr><td><div class="%(cls)s"><a href="/%(link)s">%(name)s</a></div></td></tr>'''
INDEX_FOOTER = '''</table></body></html>\r\n'''
class LimitingFileWrapper(FileWrapper):
    """FileWrapper that caps the total number of bytes handed out by read().

    Used for 206 Partial Content responses, where only ``limit`` bytes of
    the underlying file may be served.
    """
    def __init__(self, limit=None, *args, **kwargs):
        # Remaining byte budget; decremented on every read().
        self.limit = limit
        FileWrapper.__init__(self, *args, **kwargs)
    def read(self, amt):
        # Clamp the request to the remaining budget before delegating.
        allowed = min(amt, self.limit)
        self.limit -= allowed
        return FileWrapper.read(self, allowed)
class FileSystemWorker(Worker):
    """Worker that serves static files (and optional directory indexes)
    straight off the filesystem, rooted at app_info['document_root']."""
    def __init__(self, *args, **kwargs):
        """Builds some instance variables that will last the life of the
        thread."""
        Worker.__init__(self, *args, **kwargs)
        self.root = os.path.abspath(self.app_info['document_root'])
        self.display_index = self.app_info['display_index']
    def serve_file(self, filepath, headers):
        """Prepare self.status/self.data to serve one regular file, honouring
        If-Modified-Since and (partial) Range requests."""
        filestat = os.stat(filepath)
        self.size = filestat.st_size
        modtime = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                time.gmtime(filestat.st_mtime))
        self.headers.add_header('Last-Modified', modtime)
        if headers.get('if_modified_since') == modtime:
            # The browser cache is up-to-date, send a 304.
            self.status = "304 Not Modified"
            self.data = []
            return
        ct = mimetypes.guess_type(filepath)[0]
        self.content_type = ct if ct else 'text/plain'
        try:
            f = open(filepath, 'rb')
            self.headers['Pragma'] = 'cache'
            self.headers['Cache-Control'] = 'private'
            self.headers['Content-Length'] = str(self.size)
            if self.etag:
                self.headers.add_header('Etag', self.etag)
            if self.expires:
                self.headers.add_header('Expires', self.expires)
            try:
                # Implement 206 partial file support.
                start, end = headers['range'].split('-')
                start = 0 if not start.isdigit() else int(start)
                end = self.size if not end.isdigit() else int(end)
                if self.size < end or start < 0:
                    # FIX: was the invalid status "214 Unsatisfiable Range
                    # Requested"; RFC 7233 defines 416 for this case.
                    self.status = "416 Requested Range Not Satisfiable"
                    self.data = FileWrapper(f, CHUNK_SIZE)
                else:
                    f.seek(start)
                    self.data = LimitingFileWrapper(f, CHUNK_SIZE, limit=end)
                    self.status = "206 Partial Content"
            except (KeyError, ValueError):
                # No Range header (KeyError) or a malformed one (ValueError):
                # fall back to serving the whole file.  Previously a bare
                # except; narrowed so unrelated errors propagate.
                self.data = FileWrapper(f, CHUNK_SIZE)
        except IOError:
            self.status = "403 Forbidden"
    def serve_dir(self, pth, rpth):
        """Render a simple HTML directory listing of absolute path ``pth``,
        with links built relative to the request path ``rpth``."""
        def rel_path(path):
            # Strip the document root so generated hrefs stay inside the site.
            return os.path.normpath(path[len(self.root):] if path.startswith(self.root) else path)
        if not self.display_index:
            # Index pages disabled: pretend the directory does not exist.
            self.status = '404 File Not Found'
            return b('')
        else:
            self.content_type = 'text/html'
            dir_contents = [os.path.join(pth, x) for x in os.listdir(os.path.normpath(pth))]
            dir_contents.sort()
            dirs = [rel_path(x)+'/' for x in dir_contents if os.path.isdir(x)]
            files = [rel_path(x) for x in dir_contents if os.path.isfile(x)]
            self.data = [INDEX_HEADER % dict(path='/'+rpth)]
            if rpth:
                # Link back to the parent directory.
                self.data += [INDEX_ROW % dict(name='(parent directory)', cls='dir parent', link='/'.join(rpth[:-1].split('/')[:-1]))]
            self.data += [INDEX_ROW % dict(name=os.path.basename(x[:-1]), link=os.path.join(rpth, os.path.basename(x[:-1])).replace('\\', '/'), cls='dir') for x in dirs]
            self.data += ['<tr><th>Files</th></tr>']
            self.data += [INDEX_ROW % dict(name=os.path.basename(x), link=os.path.join(rpth, os.path.basename(x)).replace('\\', '/'), cls='file') for x in files]
            self.data += [INDEX_FOOTER]
            self.headers['Content-Length'] = self.size = str(sum([len(x) for x in self.data]))
            self.status = '200 OK'
    def run_app(self, conn):
        """Handle one request on ``conn``: resolve the path under the
        document root and dispatch to serve_file/serve_dir."""
        # Per-request state, reset every time.
        self.status = "200 OK"
        self.size = 0
        self.expires = None
        self.etag = None
        self.content_type = 'text/plain'
        self.content_length = None
        if __debug__:
            self.err_log.debug('Getting sock_file')
        # Build our file-like object
        sock_file = conn.makefile('rb',BUF_SIZE)
        request = self.read_request_line(sock_file)
        if request['method'].upper() not in ('GET', ):
            self.status = "501 Not Implemented"
        try:
            # Get our file path
            headers = dict([(str(k.lower()), v) for k, v in self.read_headers(sock_file).items()])
            rpath = request.get('path', '').lstrip('/')
            filepath = os.path.join(self.root, rpath)
            filepath = os.path.abspath(filepath)
            if __debug__:
                self.err_log.debug('Request for path: %s' % filepath)
            self.closeConnection = headers.get('connection', 'close').lower() == 'close'
            self.headers = Headers([('Date', formatdate(usegmt=True)),
                                    ('Server', HTTP_SERVER_SOFTWARE),
                                    ('Connection', headers.get('connection', 'close')),
                                    ])
            if not filepath.lower().startswith(self.root.lower()):
                # File must be within our root directory
                self.status = "400 Bad Request"
                self.closeConnection = True
            elif not os.path.exists(filepath):
                self.status = "404 File Not Found"
                self.closeConnection = True
            elif os.path.isdir(filepath):
                self.serve_dir(filepath, rpath)
            elif os.path.isfile(filepath):
                self.serve_file(filepath, headers)
            else:
                # It exists but it's not a file or a directory????
                # What is it then?
                self.status = "501 Not Implemented"
                self.closeConnection = True
            h = self.headers
            statcode, statstr = self.status.split(' ', 1)
            statcode = int(statcode)
            if statcode >= 400:
                # Error responses carry the status text as their body.
                h.add_header('Content-Type', self.content_type)
                self.data = [statstr]
            # Build our output headers
            header_data = HEADER_RESPONSE % (self.status, str(h))
            # Send the headers
            if __debug__:
                self.err_log.debug('Sending Headers: %s' % repr(header_data))
            self.conn.sendall(b(header_data))
            for data in self.data:
                self.conn.sendall(b(data))
            if hasattr(self.data, 'close'):
                self.data.close()
        finally:
            if __debug__:
                self.err_log.debug('Finally closing sock_file')
            sock_file.close()
# Monolithic build...end of module: rocket\methods\fs.py
# Monolithic build...start of module: rocket\methods\wsgi.py
# Import System Modules
import sys
import socket
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
if PY3K:
    from email.utils import formatdate
else:
    # Caps Utils for Py2.4 compatibility
    from email.Utils import formatdate
# Define Constants
NEWLINE = b('\r\n')
# Template for the response status line followed by the serialized headers.
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
# Static part of the WSGI environ shared by every request; per-request
# values are layered on top in WSGIWorker.build_environ().
BASE_ENV = {'SERVER_NAME': SERVER_NAME,
            'SCRIPT_NAME': '',  # Direct call WSGI does not need a name
            'wsgi.errors': sys.stderr,
            'wsgi.version': (1, 0),
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'wsgi.file_wrapper': FileWrapper
            }
class WSGIWorker(Worker):
    """Worker that runs each parsed HTTP request through a WSGI application."""
    def __init__(self, *args, **kwargs):
        """Builds some instance variables that will last the life of the
        thread."""
        Worker.__init__(self, *args, **kwargs)
        if isinstance(self.app_info, dict):
            # A single-threaded pool lets the app assume no concurrency.
            multithreaded = self.app_info.get('max_threads') != 1
        else:
            multithreaded = False
        self.base_environ = dict({'SERVER_SOFTWARE': self.app_info['server_software'],
                                  'wsgi.multithread': multithreaded,
                                  })
        self.base_environ.update(BASE_ENV)
        # Grab our application
        self.app = self.app_info.get('wsgi_app')
        if not hasattr(self.app, "__call__"):
            raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app))
        # Enable futures
        if has_futures and self.app_info.get('futures'):
            executor = self.app_info['executor']
            self.base_environ.update({"wsgiorg.executor": executor,
                                      "wsgiorg.futures": executor.futures})
    def build_environ(self, sock_file, conn):
        """ Build the execution environment. """
        # Grab the request line
        request = self.read_request_line(sock_file)
        # Copy the Base Environment
        environ = self.base_environ.copy()
        # Grab the headers
        for k, v in self.read_headers(sock_file).items():
            environ[str('HTTP_'+k)] = v
        # Add CGI Variables
        environ['REQUEST_METHOD'] = request['method']
        environ['PATH_INFO'] = request['path']
        environ['SERVER_PROTOCOL'] = request['protocol']
        environ['SERVER_PORT'] = str(conn.server_port)
        environ['REMOTE_PORT'] = str(conn.client_port)
        environ['REMOTE_ADDR'] = str(conn.client_addr)
        environ['QUERY_STRING'] = request['query_string']
        # CGI spells these two without the HTTP_ prefix.
        if 'HTTP_CONTENT_LENGTH' in environ:
            environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
        if 'HTTP_CONTENT_TYPE' in environ:
            environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
        # Save the request method for later
        self.request_method = environ['REQUEST_METHOD']
        # Add Dynamic WSGI Variables
        if conn.ssl:
            environ['wsgi.url_scheme'] = 'https'
            environ['HTTPS'] = 'on'
        else:
            environ['wsgi.url_scheme'] = 'http'
        if conn.ssl:
            try:
                # Expose the client certificate (PEM) when one was presented.
                peercert = conn.socket.getpeercert(binary_form=True)
                environ['SSL_CLIENT_RAW_CERT'] = \
                    peercert and ssl.DER_cert_to_PEM_cert(peercert)
            except Exception,e:
                print e
        if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
            # Wrap the input so the app reads a decoded body transparently.
            environ['wsgi.input'] = ChunkedReader(sock_file)
        else:
            environ['wsgi.input'] = sock_file
        return environ
    def send_headers(self, data, sections):
        # Serialize and send self.header_set, filling in Date/Server/
        # Content-Length/Transfer-Encoding/Connection where the app did not.
        h_set = self.header_set
        # Does the app want us to send output chunked?
        self.chunked = h_set.get('transfer-encoding', '').lower() == 'chunked'
        # Add a Date header if it's not there already
        if not 'date' in h_set:
            h_set['Date'] = formatdate(usegmt=True)
        # Add a Server header if it's not there already
        if not 'server' in h_set:
            h_set['Server'] = HTTP_SERVER_SOFTWARE
        if 'content-length' in h_set:
            self.size = int(h_set['content-length'])
        else:
            s = int(self.status.split(' ')[0])
            if s < 200 or s not in (204, 205, 304):
                if not self.chunked:
                    if sections == 1:
                        # Add a Content-Length header if it's not there already
                        h_set['Content-Length'] = str(len(data))
                        self.size = len(data)
                    else:
                        # If they sent us more than one section, we blow chunks
                        h_set['Transfer-Encoding'] = 'Chunked'
                        self.chunked = True
                        if __debug__:
                            self.err_log.debug('Adding header...'
                                               'Transfer-Encoding: Chunked')
        if 'connection' not in h_set:
            # If the application did not provide a connection header, fill it in
            client_conn = self.environ.get('HTTP_CONNECTION', '').lower()
            if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1':
                # HTTP = 1.1 defaults to keep-alive connections
                if client_conn:
                    h_set['Connection'] = client_conn
                else:
                    h_set['Connection'] = 'keep-alive'
            else:
                # HTTP < 1.1 supports keep-alive but it's quirky so we don't support it
                h_set['Connection'] = 'close'
        # Close our connection if we need to.
        self.closeConnection = h_set.get('connection', '').lower() == 'close'
        # Build our output headers
        header_data = HEADER_RESPONSE % (self.status, str(h_set))
        # Send the headers
        if __debug__:
            self.err_log.debug('Sending Headers: %s' % repr(header_data))
        self.conn.sendall(b(header_data))
        self.headers_sent = True
    def write_warning(self, data, sections=None):
        # Returned by start_response() as the legacy write() callable.
        self.err_log.warning('WSGI app called write method directly. This is '
                             'deprecated behavior. Please update your app.')
        return self.write(data, sections)
    def write(self, data, sections=None):
        """ Write the data to the output socket. """
        if self.error[0]:
            # A previously recorded error replaces the app's output.
            self.status = self.error[0]
            data = b(self.error[1])
        if not self.headers_sent:
            self.send_headers(data, sections)
        if self.request_method != 'HEAD':
            try:
                if self.chunked:
                    self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data)))
                else:
                    self.conn.sendall(data)
            except socket.error:
                # But some clients will close the connection before that
                # resulting in a socket error.
                self.closeConnection = True
    def start_response(self, status, response_headers, exc_info=None):
        """ Store the HTTP status and headers to be sent when self.write is
        called. """
        if exc_info:
            try:
                if self.headers_sent:
                    # Re-raise original exception if headers sent
                    # because this violates WSGI specification.
                    raise
            finally:
                exc_info = None
        elif self.header_set:
            raise AssertionError("Headers already set!")
        if PY3K and not isinstance(status, str):
            self.status = str(status, 'ISO-8859-1')
        else:
            self.status = status
        # Make sure headers are bytes objects
        try:
            self.header_set = Headers(response_headers)
        except UnicodeDecodeError:
            self.error = ('500 Internal Server Error',
                          'HTTP Headers should be bytes')
            self.err_log.error('Received HTTP Headers from client that contain'
                               ' invalid characters for Latin-1 encoding.')
        return self.write_warning
    def run_app(self, conn):
        # Handle one request on ``conn``: build the environ, call the app,
        # stream its output, then release the request resources.
        self.size = 0
        self.header_set = Headers([])
        self.headers_sent = False
        self.error = (None, None)
        self.chunked = False
        sections = None
        output = None
        if __debug__:
            self.err_log.debug('Getting sock_file')
        # Build our file-like object
        if PY3K:
            sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE)
        else:
            sock_file = conn.makefile(BUF_SIZE)
        try:
            # Read the headers and build our WSGI environment
            self.environ = environ = self.build_environ(sock_file, conn)
            # Handle 100 Continue
            if environ.get('HTTP_EXPECT', '') == '100-continue':
                res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n'
                conn.sendall(b(res))
            # Send it to our WSGI application
            output = self.app(environ, self.start_response)
            if not hasattr(output, '__len__') and not hasattr(output, '__iter__'):
                self.error = ('500 Internal Server Error',
                              'WSGI applications must return a list or '
                              'generator type.')
            if hasattr(output, '__len__'):
                sections = len(output)
            for data in output:
                # Don't send headers until body appears
                if data:
                    self.write(data, sections)
            if self.chunked:
                # If chunked, send our final chunk length
                self.conn.sendall(b('0\r\n\r\n'))
            elif not self.headers_sent:
                # Send headers if the body was empty
                self.send_headers('', sections)
            # Don't capture exceptions here. The Worker class handles
            # them appropriately.
        finally:
            if __debug__:
                self.err_log.debug('Finally closing output and sock_file')
            if hasattr(output,'close'):
                output.close()
            sock_file.close()
# Monolithic build...end of module: rocket\methods\wsgi.py
#
# the following code is not part of Rocket but was added in web2py for testing purposes
#
def demo_app(environ, start_response):
    """Minimal WSGI app used to test Rocket: serves files from the global
    ``static_folder`` when it is set, otherwise returns a hello-world page."""
    global static_folder
    import os
    # Map common file extensions to content types for the static branch.
    types = {'htm': 'text/html','html': 'text/html','gif': 'image/gif',
             'jpg': 'image/jpeg','png': 'image/png','pdf': 'applications/pdf'}
    # FIX: demo() normally sets static_folder; default it so calling the app
    # directly (e.g. in tests) no longer raises NameError.
    if 'static_folder' not in globals():
        static_folder = None
    if static_folder:
        if not static_folder.startswith('/'):
            static_folder = os.path.join(os.getcwd(), static_folder)
        path = os.path.join(static_folder, environ['PATH_INFO'][1:] or 'index.html')
        content_type = types.get(path.split('.')[-1], 'text')
        if os.path.exists(path):
            try:
                # FIX: close the file handle (was leaked by open().read()).
                stream = open(path, 'rb')
                try:
                    data = stream.read()
                finally:
                    stream.close()
                start_response('200 OK', [('Content-Type', content_type)])
            except IOError:
                start_response('404 NOT FOUND', [])
                data = '404 NOT FOUND'
        else:
            # NOTE(review): a missing path answers 500 while an IOError
            # answers 404 -- looks inverted, but preserved for compatibility.
            start_response('500 INTERNAL SERVER ERROR', [])
            data = '500 INTERNAL SERVER ERROR'
    else:
        start_response('200 OK', [('Content-Type', 'text/html')])
        data = '<html><body><h1>Hello from Rocket Web Server</h1></body></html>'
    return [data]
def demo():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--ip", dest="ip",default="127.0.0.1",
help="ip address of the network interface")
parser.add_option("-p", "--port", dest="port",default="8000",
help="post where to run web server")
parser.add_option("-s", "--static", dest="static",default=None,
help="folder containing static files")
(options, args) = parser.parse_args()
global static_folder
static_folder = options.static
print 'Rocket running on %s:%s' % (options.ip, options.port)
r=Rocket((options.ip,int(options.port)),'wsgi', {'wsgi_app':demo_app})
r.start()
# Run the demo server when this module is executed directly.
if __name__=='__main__':
    demo()
| Python |
#!/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import base64
import cPickle
import datetime
import thread
import logging
import sys
import glob
import os
import re
import time
import traceback
import smtplib
import urllib
import urllib2
import Cookie
import cStringIO
from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string
from contenttype import contenttype
from storage import Storage, PickleableStorage, StorageList, Settings, Messages
from utils import web2py_uuid
from fileutils import read_file
from gluon import *
import serializers
try:
import json as json_parser # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json_parser # try external module
except:
import contrib.simplejson as json_parser # fallback to pure-Python module
# Public API of this module.
__all__ = ['Mail', 'Auth', 'Recaptcha', 'Crud', 'Service',
           'PluginManager', 'fetch', 'geocode', 'prettydate']
### mind there are two loggers here (logger and crud.settings.logger)!
logger = logging.getLogger("web2py")
# Sentinel default value; presumably compared by identity to distinguish
# "argument not supplied" from an explicit None -- confirm against callers.
DEFAULT = lambda: None
def callback(actions, form, tablename=None):
    """Invoke one callback or a list of callbacks with ``form``.

    ``actions`` may be a single callable, a list/tuple of callables, or a
    dict mapping table names to either; when a dict is given, ``tablename``
    selects which entry to run (a missing name runs nothing).
    """
    if actions:
        if tablename and isinstance(actions, dict):
            actions = actions.get(tablename, [])
        if not isinstance(actions, (list, tuple)):
            actions = [actions]
        # Plain loop instead of the original side-effect list comprehension.
        for action in actions:
            action(form)
def validators(*a):
    """Flatten the given validators into a single list; list/tuple arguments
    are spliced in, everything else is appended as-is."""
    flat = []
    for item in a:
        if isinstance(item, (list, tuple)):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
def call_or_redirect(f, *args):
    """Redirect to ``f(*args)`` when ``f`` is callable, else to ``f`` itself."""
    target = f(*args) if callable(f) else f
    redirect(target)
def replace_id(url, form):
    """Substitute '[id]' in ``url`` with the new record id from ``form``.

    Absolute urls (starting with '/' or 'http') are returned directly after
    substitution; anything else is passed through URL().  FIX: an empty or
    None ``url`` previously crashed on ``url[0]``; it now falls through to
    URL() unchanged.
    """
    if url:
        url = url.replace('[id]', str(form.vars.id))
        if url[0] == '/' or url[:4] == 'http':
            return url
    return URL(url)
class Mail(object):
    """
    Class for configuring and sending emails with alternative text / html
    body, multiple attachments and encryption support
    Works with SMTP and Google App Engine.
    """
    class Attachment(MIMEBase.MIMEBase):
        """
        Email attachment
        Arguments:
            payload: path to file or file-like object with read() method
            filename: name of the attachment stored in message; if set to
                      None, it will be fetched from payload path; file-like
                      object payload must have explicit filename specified
            content_id: id of the attachment; automatically contained within
                        < and >
            content_type: content type of the attachment; if set to None,
                          it will be fetched from filename using gluon.contenttype
                          module
            encoding: encoding of all strings passed to this function (except
                      attachment body)
        Content ID is used to identify attachments within the html body;
        in example, attached image with content ID 'photo' may be used in
        html message as a source of img tag <img src="cid:photo" />.
        Examples:
            #Create attachment from text file:
            attachment = Mail.Attachment('/path/to/file.txt')
            Content-Type: text/plain
            MIME-Version: 1.0
            Content-Disposition: attachment; filename="file.txt"
            Content-Transfer-Encoding: base64
            SOMEBASE64CONTENT=
            #Create attachment from image file with custom filename and cid:
            attachment = Mail.Attachment('/path/to/file.png',
                                         filename='photo.png',
                                         content_id='photo')
            Content-Type: image/png
            MIME-Version: 1.0
            Content-Disposition: attachment; filename="photo.png"
            Content-Id: <photo>
            Content-Transfer-Encoding: base64
            SOMEOTHERBASE64CONTENT=
        """
        def __init__(
            self,
            payload,
            filename=None,
            content_id=None,
            content_type=None,
            encoding='utf-8'):
            # A string payload is a filesystem path; anything else must be a
            # file-like object with a read() method.
            if isinstance(payload, str):
                if filename is None:
                    filename = os.path.basename(payload)
                payload = read_file(payload, 'rb')
            else:
                if filename is None:
                    raise Exception('Missing attachment name')
                payload = payload.read()
            filename = filename.encode(encoding)
            if content_type is None:
                content_type = contenttype(filename)
            # Kept so the Google App Engine send path can rebuild
            # (filename, payload) tuples without re-reading files.
            self.my_filename = filename
            self.my_payload = payload
            MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1))
            self.set_payload(payload)
            self['Content-Disposition'] = 'attachment; filename="%s"' % filename
            if not content_id is None:
                self['Content-Id'] = '<%s>' % content_id.encode(encoding)
            Encoders.encode_base64(self)
    def __init__(self, server=None, sender=None, login=None, tls=True):
        """
        Main Mail object
        Arguments:
            server: SMTP server address in address:port notation
            sender: sender email address
            login: sender login name and password in login:password notation
                   or None if no authentication is required
            tls: enables/disables encryption (True by default)
        In Google App Engine use:
            server='gae'
        For sake of backward compatibility all fields are optional and default
        to None, however, to be able to send emails at least server and sender
        must be specified. They are available under following fields:
            mail.settings.server
            mail.settings.sender
            mail.settings.login
        When server is 'logging', email is logged but not sent (debug mode)
        Optionally you can use PGP encryption or X509:
            mail.settings.cipher_type = None
            mail.settings.sign = True
            mail.settings.sign_passphrase = None
            mail.settings.encrypt = True
            mail.settings.x509_sign_keyfile = None
            mail.settings.x509_sign_certfile = None
            mail.settings.x509_crypt_certfiles = None
            cipher_type : None
                          gpg - need a python-pyme package and gpgme lib
                          x509 - smime
            sign : sign the message (True or False)
            sign_passphrase : passphrase for key signing
            encrypt : encrypt the message
                      ... x509 only ...
            x509_sign_keyfile : the signers private key filename (PEM format)
            x509_sign_certfile: the signers certificate filename (PEM format)
            x509_crypt_certfiles: the certificates file to encrypt the messages
                                  with can be a file name or a list of
                                  file names (PEM format)
        Examples:
            #Create Mail object with authentication data for remote server:
            mail = Mail('example.com:25', 'me@example.com', 'me:password')
        """
        settings = self.settings = Settings()
        settings.server = server
        settings.sender = sender
        settings.login = login
        settings.tls = tls
        settings.ssl = False
        settings.cipher_type = None
        settings.sign = True
        settings.sign_passphrase = None
        settings.encrypt = True
        settings.x509_sign_keyfile = None
        settings.x509_sign_certfile = None
        settings.x509_crypt_certfiles = None
        settings.debug = False
        settings.lock_keys = True
        # Outcome of the last send(); see the send() docstring.
        self.result = {}
        self.error = None
    def send(
        self,
        to,
        subject='None',
        message='None',
        attachments=None,
        cc=None,
        bcc=None,
        reply_to=None,
        encoding='utf-8',
        raw=False,
        headers={}
        ):
        # NOTE(review): the mutable default ``headers={}`` is safe here only
        # because the dict is read, never mutated, below.
        """
        Sends an email using data specified in constructor
        Arguments:
            to: list or tuple of receiver addresses; will also accept single
                object
            subject: subject of the email
            message: email body text; depends on type of passed object:
                     if 2-list or 2-tuple is passed: first element will be
                     source of plain text while second of html text;
                     otherwise: object will be the only source of plain text
                     and html source will be set to None;
                     If text or html source is:
                     None: content part will be ignored,
                     string: content part will be set to it,
                     file-like object: content part will be fetched from
                                       it using it's read() method
            attachments: list or tuple of Mail.Attachment objects; will also
                         accept single object
            cc: list or tuple of carbon copy receiver addresses; will also
                accept single object
            bcc: list or tuple of blind carbon copy receiver addresses; will
                 also accept single object
            reply_to: address to which reply should be composed
            encoding: encoding of all strings passed to this method (including
                      message bodies)
            headers: dictionary of headers to refine the headers just before
                     sending mail, e.g. {'Return-Path' : 'bounces@example.org'}
        Examples:
            #Send plain text message to single address:
            mail.send('you@example.com',
                      'Message subject',
                      'Plain text body of the message')
            #Send html message to single address:
            mail.send('you@example.com',
                      'Message subject',
                      '<html>Plain text body of the message</html>')
            #Send text and html message to three addresses (two in cc):
            mail.send('you@example.com',
                      'Message subject',
                      ('Plain text body', '<html>html body</html>'),
                      cc=['other1@example.com', 'other2@example.com'])
            #Send html only message with image attachment available from
            the message by 'photo' content id:
            mail.send('you@example.com',
                      'Message subject',
                      (None, '<html><img src="cid:photo" /></html>'),
                      Mail.Attachment('/path/to/photo.jpg',
                                      content_id='photo'))
            #Send email with two attachments and no body text
            mail.send('you@example.com',
                      'Message subject',
                      None,
                      [Mail.Attachment('/path/to/fist.file'),
                       Mail.Attachment('/path/to/second.file')])
        Returns True on success, False on failure.
        Before return, method updates two object's fields:
        self.result: return value of smtplib.SMTP.sendmail() or GAE's
                     mail.send_mail() method
        self.error: Exception message or None if above was successful
        """
        def encode_header(key):
            # RFC 2047-encode header values containing non-printable or
            # non-ascii characters; plain ascii passes through untouched.
            if [c for c in key if 32>ord(c) or ord(c)>127]:
                return Header.Header(key.encode('utf-8'),'utf-8')
            else:
                return key
        # encoded or raw text
        def encoded_or_raw(text):
            if raw:
                text = encode_header(text)
            return text
        if not isinstance(self.settings.server, str):
            raise Exception('Server address not specified')
        if not isinstance(self.settings.sender, str):
            raise Exception('Sender address not specified')
        if not raw:
            payload_in = MIMEMultipart.MIMEMultipart('mixed')
        else:
            # no encoding configuration for raw messages
            if isinstance(message, basestring):
                text = message.decode(encoding).encode('utf-8')
            else:
                text = message.read().decode(encoding).encode('utf-8')
            # No charset passed to avoid transport encoding
            # NOTE: some unicode encoded strings will produce
            # unreadable mail contents.
            payload_in = MIMEText.MIMEText(text)
        if to:
            if not isinstance(to, (list,tuple)):
                to = [to]
        else:
            raise Exception('Target receiver address not specified')
        if cc:
            if not isinstance(cc, (list, tuple)):
                cc = [cc]
        if bcc:
            if not isinstance(bcc, (list, tuple)):
                bcc = [bcc]
        # Split message into plain-text and html parts (see docstring).
        if message is None:
            text = html = None
        elif isinstance(message, (list, tuple)):
            text, html = message
        elif message.strip().startswith('<html') and message.strip().endswith('</html>'):
            text = self.settings.server=='gae' and message or None
            html = message
        else:
            text = message
            html = None
        if (not text is None or not html is None) and (not raw):
            attachment = MIMEMultipart.MIMEMultipart('alternative')
            if not text is None:
                if isinstance(text, basestring):
                    text = text.decode(encoding).encode('utf-8')
                else:
                    text = text.read().decode(encoding).encode('utf-8')
                attachment.attach(MIMEText.MIMEText(text,_charset='utf-8'))
            if not html is None:
                if isinstance(html, basestring):
                    html = html.decode(encoding).encode('utf-8')
                else:
                    html = html.read().decode(encoding).encode('utf-8')
                attachment.attach(MIMEText.MIMEText(html, 'html',_charset='utf-8'))
            payload_in.attach(attachment)
        if (attachments is None) or raw:
            pass
        elif isinstance(attachments, (list, tuple)):
            for attachment in attachments:
                payload_in.attach(attachment)
        else:
            payload_in.attach(attachments)
        #######################################################
        #                      CIPHER                         #
        #######################################################
        cipher_type = self.settings.cipher_type
        sign = self.settings.sign
        sign_passphrase = self.settings.sign_passphrase
        encrypt = self.settings.encrypt
        #######################################################
        #                      GPGME                          #
        #######################################################
        if cipher_type == 'gpg':
            if not sign and not encrypt:
                self.error="No sign and no encrypt is set but cipher type to gpg"
                return False
            # need a python-pyme package and gpgme lib
            from pyme import core, errors
            from pyme.constants.sig import mode
            ############################################
            #                   sign                   #
            ############################################
            if sign:
                import string
                core.check_version(None)
                # OpenPGP signatures are computed over CRLF line endings.
                pin=string.replace(payload_in.as_string(),'\n','\r\n')
                plain = core.Data(pin)
                sig = core.Data()
                c = core.Context()
                c.set_armor(1)
                c.signers_clear()
                # search for signing key for From:
                for sigkey in c.op_keylist_all(self.settings.sender, 1):
                    if sigkey.can_sign:
                        c.signers_add(sigkey)
                if not c.signers_enum(0):
                    self.error='No key for signing [%s]' % self.settings.sender
                    return False
                c.set_passphrase_cb(lambda x,y,z: sign_passphrase)
                try:
                    # make a signature
                    c.op_sign(plain,sig,mode.DETACH)
                    sig.seek(0,0)
                    # make it part of the email
                    payload=MIMEMultipart.MIMEMultipart('signed',
                                                        boundary=None,
                                                        _subparts=None,
                                                        **dict(micalg="pgp-sha1",
                                                               protocol="application/pgp-signature"))
                    # insert the origin payload
                    payload.attach(payload_in)
                    # insert the detached signature
                    p=MIMEBase.MIMEBase("application",'pgp-signature')
                    p.set_payload(sig.read())
                    payload.attach(p)
                    # it's just a trick to handle the no encryption case
                    payload_in=payload
                except errors.GPGMEError, ex:
                    self.error="GPG error: %s" % ex.getstring()
                    return False
            ############################################
            #                 encrypt                  #
            ############################################
            if encrypt:
                core.check_version(None)
                plain = core.Data(payload_in.as_string())
                cipher = core.Data()
                c = core.Context()
                c.set_armor(1)
                # collect the public keys for encryption
                recipients=[]
                rec=to[:]
                if cc:
                    rec.extend(cc)
                if bcc:
                    rec.extend(bcc)
                for addr in rec:
                    c.op_keylist_start(addr,0)
                    r = c.op_keylist_next()
                    if r is None:
                        self.error='No key for [%s]' % addr
                        return False
                    recipients.append(r)
                try:
                    # make the encryption
                    c.op_encrypt(recipients, 1, plain, cipher)
                    cipher.seek(0,0)
                    # make it a part of the email
                    payload=MIMEMultipart.MIMEMultipart('encrypted',
                                                        boundary=None,
                                                        _subparts=None,
                                                        **dict(protocol="application/pgp-encrypted"))
                    p=MIMEBase.MIMEBase("application",'pgp-encrypted')
                    p.set_payload("Version: 1\r\n")
                    payload.attach(p)
                    p=MIMEBase.MIMEBase("application",'octet-stream')
                    p.set_payload(cipher.read())
                    payload.attach(p)
                except errors.GPGMEError, ex:
                    self.error="GPG error: %s" % ex.getstring()
                    return False
        #######################################################
        #                       X.509                         #
        #######################################################
        elif cipher_type == 'x509':
            if not sign and not encrypt:
                self.error="No sign and no encrypt is set but cipher type to x509"
                return False
            x509_sign_keyfile=self.settings.x509_sign_keyfile
            if self.settings.x509_sign_certfile:
                x509_sign_certfile=self.settings.x509_sign_certfile
            else:
                # if there is no sign certfile we'll assume the
                # cert is in keyfile
                x509_sign_certfile=self.settings.x509_sign_keyfile
            # crypt certfiles could be a string or a list
            x509_crypt_certfiles=self.settings.x509_crypt_certfiles
            # need m2crypto
            from M2Crypto import BIO, SMIME, X509
            msg_bio = BIO.MemoryBuffer(payload_in.as_string())
            s = SMIME.SMIME()
            # SIGN
            if sign:
                # key for signing
                try:
                    s.load_key(x509_sign_keyfile, x509_sign_certfile, callback=lambda x: sign_passphrase)
                    if encrypt:
                        p7 = s.sign(msg_bio)
                    else:
                        p7 = s.sign(msg_bio,flags=SMIME.PKCS7_DETACHED)
                    msg_bio = BIO.MemoryBuffer(payload_in.as_string()) # Recreate coz sign() has consumed it.
                except Exception,e:
                    self.error="Something went wrong on signing: <%s>" %str(e)
                    return False
            # ENCRYPT
            if encrypt:
                try:
                    sk = X509.X509_Stack()
                    if not isinstance(x509_crypt_certfiles, (list, tuple)):
                        x509_crypt_certfiles = [x509_crypt_certfiles]
                    # make an encryption cert's stack
                    for x in x509_crypt_certfiles:
                        sk.push(X509.load_cert(x))
                    s.set_x509_stack(sk)
                    s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
                    tmp_bio = BIO.MemoryBuffer()
                    if sign:
                        s.write(tmp_bio, p7)
                    else:
                        tmp_bio.write(payload_in.as_string())
                    p7 = s.encrypt(tmp_bio)
                except Exception,e:
                    self.error="Something went wrong on encrypting: <%s>" %str(e)
                    return False
            # Final stage in sign and encryption
            out = BIO.MemoryBuffer()
            if encrypt:
                s.write(out, p7)
            else:
                if sign:
                    s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED)
                else:
                    out.write('\r\n')
                    out.write(payload_in.as_string())
            out.close()
            st=str(out.read())
            payload=message_from_string(st)
        else:
            # no cryptography process as usual
            payload=payload_in
        payload['From'] = encoded_or_raw(self.settings.sender.decode(encoding))
        # Preserve the visible recipient list before Cc/Bcc are folded into
        # the envelope recipients below (used by the GAE branch).
        origTo = to[:]
        if to:
            payload['To'] = encoded_or_raw(', '.join(to).decode(encoding))
        if reply_to:
            payload['Reply-To'] = encoded_or_raw(reply_to.decode(encoding))
        if cc:
            payload['Cc'] = encoded_or_raw(', '.join(cc).decode(encoding))
            to.extend(cc)
        if bcc:
            # Bcc addresses go only to the envelope, never into a header.
            to.extend(bcc)
        payload['Subject'] = encoded_or_raw(subject.decode(encoding))
        payload['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S +0000",
                                        time.gmtime())
        for k,v in headers.iteritems():
            payload[k] = encoded_or_raw(v.decode(encoding))
        result = {}
        try:
            if self.settings.server == 'logging':
                # Debug mode: log the message instead of sending it.
                logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % \
                                ('-'*40,self.settings.sender,
                                 ', '.join(to),subject,
                                 text or html,'-'*40))
            elif self.settings.server == 'gae':
                xcc = dict()
                if cc:
                    xcc['cc'] = cc
                if bcc:
                    xcc['bcc'] = bcc
                if reply_to:
                    xcc['reply_to'] = reply_to
                from google.appengine.api import mail
                attachments = attachments and [(a.my_filename,a.my_payload) for a in attachments if not raw]
                if attachments:
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, html=html,
                                            attachments=attachments, **xcc)
                elif html and (not raw):
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, html=html, **xcc)
                else:
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, **xcc)
            else:
                smtp_args = self.settings.server.split(':')
                if self.settings.ssl:
                    server = smtplib.SMTP_SSL(*smtp_args)
                else:
                    server = smtplib.SMTP(*smtp_args)
                if self.settings.tls and not self.settings.ssl:
                    server.ehlo()
                    server.starttls()
                    server.ehlo()
                if not self.settings.login is None:
                    server.login(*self.settings.login.split(':',1))
                result = server.sendmail(self.settings.sender, to, payload.as_string())
                server.quit()
        except Exception, e:
            logger.warn('Mail.send failure:%s' % e)
            self.result = result
            self.error = e
            return False
        self.result = result
        self.error = None
        return True
class Recaptcha(DIV):
    """
    Form widget for the legacy Google reCAPTCHA (v1) service.

    Subclasses DIV so it can be embedded in web2py forms: ``xml()``
    renders the challenge markup and ``_validate()`` checks the user's
    answer against the remote verify server.
    """

    # Endpoints of the legacy reCAPTCHA v1 API.
    API_SSL_SERVER = 'https://www.google.com/recaptcha/api'
    API_SERVER = 'http://www.google.com/recaptcha/api'
    VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify'

    def __init__(
        self,
        request,
        public_key='',
        private_key='',
        use_ssl=False,
        error=None,
        error_message='invalid',
        label = 'Verify:',
        options = ''
        ):
        # NOTE: DIV.__init__ is deliberately not called; the attributes a
        # DIV instance needs (components, attributes, ...) are set by hand.
        self.remote_addr = request.env.remote_addr
        self.public_key = public_key
        self.private_key = private_key
        self.use_ssl = use_ssl
        self.error = error
        self.errors = Storage()
        self.error_message = error_message
        self.components = []
        self.attributes = {}
        self.label = label
        # javascript RecaptchaOptions body, e.g. "theme:'white'"
        self.options = options
        self.comment = ''

    def _validate(self):
        """Verify the submitted captcha answer against VERIFY_SERVER.

        Returns True on success; on failure records the message in
        ``self.errors.captcha`` and returns False.
        """
        # for local testing:
        recaptcha_challenge_field = \
            self.request_vars.recaptcha_challenge_field
        recaptcha_response_field = \
            self.request_vars.recaptcha_response_field
        private_key = self.private_key
        remoteip = self.remote_addr
        # both fields must be present and non-empty before calling out
        if not (recaptcha_response_field and recaptcha_challenge_field
                and len(recaptcha_response_field)
                and len(recaptcha_challenge_field)):
            self.errors['captcha'] = self.error_message
            return False
        params = urllib.urlencode({
            'privatekey': private_key,
            'remoteip': remoteip,
            'challenge': recaptcha_challenge_field,
            'response': recaptcha_response_field,
            })
        request = urllib2.Request(
            url=self.VERIFY_SERVER,
            data=params,
            headers={'Content-type': 'application/x-www-form-urlencoded',
                     'User-agent': 'reCAPTCHA Python'})
        httpresp = urllib2.urlopen(request)
        # first line of the verify response is "true" or "false"
        return_values = httpresp.read().splitlines()
        httpresp.close()
        return_code = return_values[0]
        if return_code == 'true':
            # success: strip the captcha vars so they are not re-validated
            del self.request_vars.recaptcha_challenge_field
            del self.request_vars.recaptcha_response_field
            self.request_vars.captcha = ''
            return True
        self.errors['captcha'] = self.error_message
        return False

    def xml(self):
        """Render the reCAPTCHA challenge markup (script + noscript
        fallback), appending the error DIV when validation failed."""
        public_key = self.public_key
        use_ssl = self.use_ssl
        error_param = ''
        if self.error:
            error_param = '&error=%s' % self.error
        if use_ssl:
            server = self.API_SSL_SERVER
        else:
            server = self.API_SERVER
        captcha = DIV(
            SCRIPT("var RecaptchaOptions = {%s};" % self.options),
            SCRIPT(_type="text/javascript",
                   _src="%s/challenge?k=%s%s" % (server, public_key, error_param)),
            TAG.noscript(
                IFRAME(_src="%s/noscript?k=%s%s" % (server, public_key, error_param),
                       _height="300", _width="500", _frameborder="0"), BR(),
                INPUT(_type='hidden', _name='recaptcha_response_field',
                      _value='manual_challenge')), _id='recaptcha')
        if not self.errors.captcha:
            return XML(captcha).xml()
        else:
            captcha.append(DIV(self.errors['captcha'], _class='error'))
            return XML(captcha).xml()
def addrow(form, a, b, c, style, _id, position=-1):
    """
    Insert one label/widget/comment row into ``form[0]`` at *position*,
    using the markup dictated by *style*: "divs", "table2cols", "ul",
    or the default three-column table row.
    """
    label_cell = LABEL(a)
    if style == "divs":
        form[0].insert(position, DIV(DIV(label_cell, _class='w2p_fl'),
                                     DIV(b, _class='w2p_fw'),
                                     DIV(c, _class='w2p_fc'),
                                     _id=_id))
    elif style == "table2cols":
        # two stacked rows: label + comment on top, widget underneath
        form[0].insert(position, TR(TD(label_cell, _class='w2p_fl'),
                                    TD(c, _class='w2p_fc')))
        form[0].insert(position + 1, TR(TD(b, _class='w2p_fw'),
                                        _colspan=2, _id=_id))
    elif style == "ul":
        form[0].insert(position, LI(DIV(label_cell, _class='w2p_fl'),
                                    DIV(b, _class='w2p_fw'),
                                    DIV(c, _class='w2p_fc'),
                                    _id=_id))
    else:
        form[0].insert(position, TR(TD(label_cell, _class='w2p_fl'),
                                    TD(b, _class='w2p_fw'),
                                    TD(c, _class='w2p_fc'), _id=_id))
class Auth(object):
"""
Class for authentication, authorization, role based access control.
Includes:
- registration and profile
- login and logout
- username and password retrieval
- event logging
- role creation and assignment
- user defined group/role based permission
Authentication Example:
from contrib.utils import *
mail=Mail()
mail.settings.server='smtp.gmail.com:587'
mail.settings.sender='you@somewhere.com'
mail.settings.login='username:password'
auth=Auth(db)
auth.settings.mailer=mail
# auth.settings....=...
auth.define_tables()
def authentication():
return dict(form=auth())
exposes:
- http://.../{application}/{controller}/authentication/login
- http://.../{application}/{controller}/authentication/logout
- http://.../{application}/{controller}/authentication/register
- http://.../{application}/{controller}/authentication/verify_email
- http://.../{application}/{controller}/authentication/retrieve_username
- http://.../{application}/{controller}/authentication/retrieve_password
- http://.../{application}/{controller}/authentication/reset_password
- http://.../{application}/{controller}/authentication/profile
- http://.../{application}/{controller}/authentication/change_password
On registration a group with role=new_user.id is created
and user is given membership of this group.
You can create a group with:
group_id=auth.add_group('Manager', 'can access the manage action')
auth.add_permission(group_id, 'access to manage')
Here \"access to manage\" is just a user defined string.
You can give access to a user:
auth.add_membership(group_id, user_id)
If user id is omitted, the logged in user is assumed
Then you can decorate any action:
@auth.requires_permission('access to manage')
def manage():
return dict()
You can restrict a permission to a specific table:
auth.add_permission(group_id, 'edit', db.sometable)
@auth.requires_permission('edit', db.sometable)
Or to a specific record:
auth.add_permission(group_id, 'edit', db.sometable, 45)
@auth.requires_permission('edit', db.sometable, 45)
If authorization is not granted calls:
auth.settings.on_failed_authorization
Other options:
auth.settings.mailer=None
auth.settings.expiration=3600 # seconds
...
### these are messages that can be customized
...
"""
@staticmethod
def get_or_create_key(filename=None):
request = current.request
if not filename:
filename = os.path.join(request.folder,'private','auth.key')
if os.path.exists(filename):
key = open(filename,'r').read().strip()
else:
key = web2py_uuid()
open(filename,'w').write(key)
return key
def url(self, f=None, args=None, vars=None):
if args is None: args=[]
if vars is None: vars={}
return URL(c=self.settings.controller, f=f, args=args, vars=vars)
def here(self):
return URL(args=current.request.args,vars=current.request.vars)
    def __init__(self, environment=None, db=None, mailer=True,
                 hmac_key=None, controller='default', function='user', cas_provider=None):
        """
        auth=Auth(db)

        - environment is there for legacy but unused (awful)
        - db has to be the database where to create tables for authentication
        - mailer=Mail(...) or None (no mail) or True (make a mailer)
        - hmac_key can be a hmac_key or hmac_key=Auth.get_or_create_key()
        - controller (where is the user action?)
        - cas_provider (delegate authentication to the URL, CAS2)

        Initializes all default settings and messages; restores the
        logged-in user from the session when it has not expired.
        """
        ## next two lines for backward compatibility
        if not db and environment and isinstance(environment, DAL):
            db = environment
        # NOTE(review): db is assumed non-None further down (db.Table for
        # the signature) -- Auth(None) would fail there; confirm callers.
        self.db = db
        self.environment = current
        request = current.request
        session = current.session
        auth = session.auth
        self.user_groups = auth and auth.user_groups or {}
        # restore the user from the session if it has not expired yet
        if auth and auth.last_visit and auth.last_visit + \
                datetime.timedelta(days=0, seconds=auth.expiration) > request.now:
            self.user = auth.user
            # this is a trick to speed up sessions: only rewrite last_visit
            # (and thus the session) when at least 10% of expiration passed
            if (request.now - auth.last_visit).seconds > (auth.expiration / 10):
                auth.last_visit = request.now
        else:
            self.user = None
            session.auth = None
        settings = self.settings = Settings()

        # ## what happens after login?

        self.next = current.request.vars._next
        if isinstance(self.next, (list, tuple)):
            self.next = self.next[0]

        # ## what happens after registration?

        settings.hideerror = False
        settings.password_min_length = 4
        settings.cas_domains = [request.env.http_host]
        settings.cas_provider = cas_provider
        settings.cas_actions = {'login':'login',
                                'validate':'validate',
                                'servicevalidate':'serviceValidate',
                                'proxyvalidate':'proxyValidate',
                                'logout':'logout'}
        settings.cas_maps = None
        settings.extra_fields = {}
        settings.actions_disabled = []
        settings.reset_password_requires_verification = False
        settings.registration_requires_verification = False
        settings.registration_requires_approval = False
        settings.login_after_registration = False
        settings.alternate_requires_registration = False
        settings.create_user_groups = True

        settings.controller = controller
        settings.function = function
        settings.login_url = self.url(function, args='login')
        settings.logged_url = self.url(function, args='profile')
        settings.download_url = self.url('download')
        # mailer=True builds a default Mail(); None disables e-mail
        settings.mailer = (mailer == True) and Mail() or mailer
        settings.login_captcha = None
        settings.register_captcha = None
        settings.retrieve_username_captcha = None
        settings.retrieve_password_captcha = None
        settings.captcha = None
        settings.expiration = 3600            # one hour
        settings.long_expiration = 3600 * 30 * 24    # one month
        settings.remember_me_form = True
        settings.allow_basic_login = False
        settings.allow_basic_login_only = False
        settings.on_failed_authorization = \
            self.url(function, args='not_authorized')
        settings.on_failed_authentication = lambda x: redirect(x)

        settings.formstyle = 'table3cols'
        settings.label_separator = ': '

        # ## table names to be used

        settings.password_field = 'password'
        settings.table_user_name = 'auth_user'
        settings.table_group_name = 'auth_group'
        settings.table_membership_name = 'auth_membership'
        settings.table_permission_name = 'auth_permission'
        settings.table_event_name = 'auth_event'
        settings.table_cas_name = 'auth_cas'

        # ## if none, they will be created

        settings.table_user = None
        settings.table_group = None
        settings.table_membership = None
        settings.table_permission = None
        settings.table_event = None
        settings.table_cas = None

        # ##

        settings.showid = False

        # ## these should be functions or lambdas

        settings.login_next = self.url('index')
        settings.login_onvalidation = []
        settings.login_onaccept = []
        settings.login_methods = [self]
        settings.login_form = self
        settings.login_email_validate = True
        settings.login_userfield = None

        settings.logout_next = self.url('index')
        settings.logout_onlogout = None

        settings.register_next = self.url('index')
        settings.register_onvalidation = []
        settings.register_onaccept = []
        settings.register_fields = None
        settings.register_verify_password = True

        settings.verify_email_next = self.url(function, args='login')
        settings.verify_email_onaccept = []

        settings.profile_next = self.url('index')
        settings.profile_onvalidation = []
        settings.profile_onaccept = []
        settings.profile_fields = None
        settings.retrieve_username_next = self.url('index')
        settings.retrieve_password_next = self.url('index')
        settings.request_reset_password_next = self.url(function, args='login')
        settings.reset_password_next = self.url(function, args='login')

        settings.change_password_next = self.url('index')
        settings.change_password_onvalidation = []
        settings.change_password_onaccept = []

        settings.retrieve_password_onvalidation = []
        settings.reset_password_onvalidation = []
        settings.reset_password_onaccept = []

        settings.email_case_sensitive = True
        settings.username_case_sensitive = True

        settings.hmac_key = hmac_key
        settings.lock_keys = True

        # ## these are messages that can be customized
        messages = self.messages = Messages(current.T)
        messages.login_button = 'Login'
        messages.register_button = 'Register'
        messages.password_reset_button = 'Request reset password'
        messages.password_change_button = 'Change password'
        messages.profile_save_button = 'Save profile'
        messages.submit_button = 'Submit'
        messages.verify_password = 'Verify Password'
        messages.delete_label = 'Check to delete'
        messages.function_disabled = 'Function disabled'
        messages.access_denied = 'Insufficient privileges'
        messages.registration_verifying = 'Registration needs verification'
        messages.registration_pending = 'Registration is pending approval'
        messages.login_disabled = 'Login disabled by administrator'
        messages.logged_in = 'Logged in'
        messages.email_sent = 'Email sent'
        messages.unable_to_send_email = 'Unable to send email'
        messages.email_verified = 'Email verified'
        messages.logged_out = 'Logged out'
        messages.registration_successful = 'Registration successful'
        messages.invalid_email = 'Invalid email'
        messages.unable_send_email = 'Unable to send email'
        messages.invalid_login = 'Invalid login'
        messages.invalid_user = 'Invalid user'
        messages.invalid_password = 'Invalid password'
        messages.is_empty = "Cannot be empty"
        messages.mismatched_password = "Password fields don't match"
        messages.verify_email = \
            'Click on the link http://' + current.request.env.http_host + \
            URL('default','user',args=['verify_email']) + \
            '/%(key)s to verify your email'
        messages.verify_email_subject = 'Email verification'
        messages.username_sent = 'Your username was emailed to you'
        messages.new_password_sent = 'A new password was emailed to you'
        messages.password_changed = 'Password changed'
        messages.retrieve_username = 'Your username is: %(username)s'
        messages.retrieve_username_subject = 'Username retrieve'
        messages.retrieve_password = 'Your password is: %(password)s'
        messages.retrieve_password_subject = 'Password retrieve'
        messages.reset_password = \
            'Click on the link http://' + current.request.env.http_host + \
            URL('default','user',args=['reset_password']) + \
            '/%(key)s to reset your password'
        messages.reset_password_subject = 'Password reset'
        messages.invalid_reset_password = 'Invalid reset password'
        messages.profile_updated = 'Profile updated'
        messages.new_password = 'New password'
        messages.old_password = 'Old password'
        messages.group_description = \
            'Group uniquely assigned to user %(id)s'

        # event-log templates; a None template disables logging of the event
        messages.register_log = 'User %(id)s Registered'
        messages.login_log = 'User %(id)s Logged-in'
        messages.login_failed_log = None
        messages.logout_log = 'User %(id)s Logged-out'
        messages.profile_log = 'User %(id)s Profile updated'
        messages.verify_email_log = 'User %(id)s Verification email sent'
        messages.retrieve_username_log = 'User %(id)s Username retrieved'
        messages.retrieve_password_log = 'User %(id)s Password retrieved'
        messages.reset_password_log = 'User %(id)s Password reset'
        messages.change_password_log = 'User %(id)s Password changed'
        messages.add_group_log = 'Group %(group_id)s created'
        messages.del_group_log = 'Group %(group_id)s deleted'
        messages.add_membership_log = None
        messages.del_membership_log = None
        messages.has_membership_log = None
        messages.add_permission_log = None
        messages.del_permission_log = None
        messages.has_permission_log = None
        messages.impersonate_log = 'User %(id)s is impersonating %(other_id)s'

        # field labels for the auth tables
        messages.label_first_name = 'First name'
        messages.label_last_name = 'Last name'
        messages.label_username = 'Username'
        messages.label_email = 'E-mail'
        messages.label_password = 'Password'
        messages.label_registration_key = 'Registration key'
        messages.label_reset_password_key = 'Reset Password key'
        messages.label_registration_id = 'Registration identifier'
        messages.label_role = 'Role'
        messages.label_description = 'Description'
        messages.label_user_id = 'User ID'
        messages.label_group_id = 'Group ID'
        messages.label_name = 'Name'
        messages.label_table_name = 'Object or table name'
        messages.label_record_id = 'Record ID'
        messages.label_time_stamp = 'Timestamp'
        messages.label_client_ip = 'Client IP'
        messages.label_origin = 'Origin'
        messages.label_remember_me = "Remember me (for 30 days)"
        messages['T'] = current.T
        messages.verify_password_comment = 'please input your password again'
        messages.lock_keys = True

        # for "remember me" option
        response = current.response
        if auth and auth.remember:  # when user wants to be logged in for longer
            response.cookies[response.session_id_name]["expires"] = \
                auth.expiration

        # audit columns shared by tables that include auth.signature
        def lazy_user(auth=self):
            return auth.user_id
        reference_user = 'reference %s' % settings.table_user_name
        def represent(id, record=None, s=settings):
            # render a user id as "First Last"; fall back to the raw id
            try:
                user = s.table_user(id)
                return '%(first_name)s %(last_name)s' % user
            except:
                return id
        self.signature = db.Table(
            self.db, 'auth_signature',
            Field('is_active', 'boolean', default=True),
            Field('created_on', 'datetime',
                  default=request.now,
                  writable=False, readable=False),
            Field('created_by',
                  reference_user,
                  default=lazy_user, represent=represent,
                  writable=False, readable=False,
                  ),
            Field('modified_on', 'datetime',
                  update=request.now, default=request.now,
                  writable=False, readable=False),
            Field('modified_by',
                  reference_user, represent=represent,
                  default=lazy_user, update=lazy_user,
                  writable=False, readable=False))
def _get_user_id(self):
"accessor for auth.user_id"
return self.user and self.user.id or None
user_id = property(_get_user_id, doc="user.id or None")
    def _HTTP(self, *a, **b):
        """
        Raise an HTTP exception with the given arguments;
        only used in lambda: self._HTTP(404)
        """
        raise HTTP(*a, **b)
    def __call__(self):
        """
        Dispatch request.args[0] to the matching auth action.

        usage:

            def authentication(): return dict(form=auth())
        """
        request = current.request
        args = request.args
        if not args:
            # redirect() raises HTTP, so execution stops here
            redirect(self.url(args='login', vars=request.vars))
        elif args[0] in self.settings.actions_disabled:
            raise HTTP(404)
        if args[0] in ('login', 'logout', 'register', 'verify_email',
                       'retrieve_username', 'retrieve_password',
                       'reset_password', 'request_reset_password',
                       'change_password', 'profile', 'groups',
                       'impersonate', 'not_authorized'):
            # each supported action is a method of the same name
            return getattr(self, args[0])()
        elif args[0] == 'cas' and not self.settings.cas_provider:
            # act as a CAS provider; args(1) is web2py's safe indexed access
            if args(1) == self.settings.cas_actions['login']:
                return self.cas_login(version=2)
            elif args(1) == self.settings.cas_actions['validate']:
                return self.cas_validate(version=1)
            elif args(1) == self.settings.cas_actions['servicevalidate']:
                return self.cas_validate(version=2, proxy=False)
            elif args(1) == self.settings.cas_actions['proxyvalidate']:
                return self.cas_validate(version=2, proxy=True)
            elif args(1) == self.settings.cas_actions['logout']:
                return self.logout(next=request.vars.service or DEFAULT)
        else:
            raise HTTP(404)
    def navbar(self, prefix='Welcome', action=None, separators=(' [ ', ' | ', ' ] ')):
        """
        Build the login/logout navigation SPAN.

        Logged in:  "<prefix> <first_name> [ Logout | Profile | Password ]";
        logged out: links to login/register/username/password recovery.
        Entries listed in settings.actions_disabled are omitted.
        """
        request = current.request
        T = current.T
        if isinstance(prefix, str):
            prefix = T(prefix)
        if not action:
            action = self.url(self.settings.function)
        if prefix:
            prefix = prefix.strip() + ' '
        s1, s2, s3 = separators
        # propagate the current location so auth actions return here,
        # unless the navbar action already is the current URL
        if URL() == action:
            next = ''
        else:
            next = '?_next=' + urllib.quote(URL(args=request.args, vars=request.vars))
        li_next = '?_next=' + urllib.quote(self.settings.login_next)
        lo_next = '?_next=' + urllib.quote(self.settings.logout_next)
        if self.user_id:
            logout = A(T('Logout'), _href=action + '/logout' + lo_next)
            profile = A(T('Profile'), _href=action + '/profile' + next)
            password = A(T('Password'), _href=action + '/change_password' + next)
            bar = SPAN(prefix, self.user.first_name, s1, logout, s3, _class='auth_navbar')
            # optional entries are spliced in at fixed positions
            if not 'profile' in self.settings.actions_disabled:
                bar.insert(4, s2)
                bar.insert(5, profile)
            if not 'change_password' in self.settings.actions_disabled:
                bar.insert(-1, s2)
                bar.insert(-1, password)
        else:
            login = A(T('Login'), _href=action + '/login' + li_next)
            register = A(T('Register'), _href=action + '/register' + next)
            retrieve_username = A(T('forgot username?'),
                                  _href=action + '/retrieve_username' + next)
            lost_password = A(T('Lost password?'),
                              _href=action + '/request_reset_password' + next)
            bar = SPAN(s1, login, s3, _class='auth_navbar')
            if not 'register' in self.settings.actions_disabled:
                bar.insert(2, s2)
                bar.insert(3, register)
            # username recovery only makes sense when the table has usernames
            if 'username' in self.settings.table_user.fields() and \
                    not 'retrieve_username' in self.settings.actions_disabled:
                bar.insert(-1, s2)
                bar.insert(-1, retrieve_username)
            if not 'request_reset_password' in self.settings.actions_disabled:
                bar.insert(-1, s2)
                bar.insert(-1, lost_password)
        return bar
def __get_migrate(self, tablename, migrate=True):
if type(migrate).__name__ == 'str':
return (migrate + tablename + '.table')
elif migrate == False:
return False
else:
return True
def define_tables(self, username=False, migrate=True, fake_migrate=False):
"""
to be called unless tables are defined manually
usages:
# defines all needed tables and table files
# 'myprefix_auth_user.table', ...
auth.define_tables(migrate='myprefix_')
# defines all needed tables without migration/table files
auth.define_tables(migrate=False)
"""
db = self.db
settings = self.settings
if not settings.table_user_name in db.tables:
passfield = settings.password_field
if username or settings.cas_provider:
table = db.define_table(
settings.table_user_name,
Field('first_name', length=128, default='',
label=self.messages.label_first_name),
Field('last_name', length=128, default='',
label=self.messages.label_last_name),
Field('email', length=512, default='',
label=self.messages.label_email),
Field('username', length=128, default='',
label=self.messages.label_username),
Field(passfield, 'password', length=512,
readable=False, label=self.messages.label_password),
Field('registration_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_key),
Field('reset_password_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_reset_password_key),
Field('registration_id', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_id),
*settings.extra_fields.get(settings.table_user_name,[]),
**dict(
migrate=self.__get_migrate(settings.table_user_name,
migrate),
fake_migrate=fake_migrate,
format='%(username)s'))
table.username.requires = \
[IS_MATCH('[\w\.\-]+'),
IS_NOT_IN_DB(db, table.username)]
if not self.settings.username_case_sensitive:
table.username.requires.insert(1,IS_LOWER())
else:
table = db.define_table(
settings.table_user_name,
Field('first_name', length=128, default='',
label=self.messages.label_first_name),
Field('last_name', length=128, default='',
label=self.messages.label_last_name),
Field('email', length=512, default='',
label=self.messages.label_email),
Field(passfield, 'password', length=512,
readable=False, label=self.messages.label_password),
Field('registration_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_key),
Field('reset_password_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_reset_password_key),
Field('registration_id', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_id),
*settings.extra_fields.get(settings.table_user_name,[]),
**dict(
migrate=self.__get_migrate(settings.table_user_name,
migrate),
fake_migrate=fake_migrate,
format='%(first_name)s %(last_name)s (%(id)s)'))
table.first_name.requires = \
IS_NOT_EMPTY(error_message=self.messages.is_empty)
table.last_name.requires = \
IS_NOT_EMPTY(error_message=self.messages.is_empty)
table[passfield].requires = [
CRYPT(key=settings.hmac_key,
min_length=self.settings.password_min_length)]
table.email.requires = \
[IS_EMAIL(error_message=self.messages.invalid_email),
IS_NOT_IN_DB(db, table.email)]
if not self.settings.email_case_sensitive:
table.email.requires.insert(1,IS_LOWER())
table.registration_key.default = ''
settings.table_user = db[settings.table_user_name]
if not settings.table_group_name in db.tables:
table = db.define_table(
settings.table_group_name,
Field('role', length=512, default='',
label=self.messages.label_role),
Field('description', 'text',
label=self.messages.label_description),
*settings.extra_fields.get(settings.table_group_name,[]),
**dict(
migrate=self.__get_migrate(
settings.table_group_name, migrate),
fake_migrate=fake_migrate,
format = '%(role)s (%(id)s)'))
table.role.requires = IS_NOT_IN_DB(db, '%s.role'
% settings.table_group_name)
settings.table_group = db[settings.table_group_name]
if not settings.table_membership_name in db.tables:
table = db.define_table(
settings.table_membership_name,
Field('user_id', settings.table_user,
label=self.messages.label_user_id),
Field('group_id', settings.table_group,
label=self.messages.label_group_id),
*settings.extra_fields.get(settings.table_membership_name,[]),
**dict(
migrate=self.__get_migrate(
settings.table_membership_name, migrate),
fake_migrate=fake_migrate))
table.user_id.requires = IS_IN_DB(db, '%s.id' %
settings.table_user_name,
'%(first_name)s %(last_name)s (%(id)s)')
table.group_id.requires = IS_IN_DB(db, '%s.id' %
settings.table_group_name,
'%(role)s (%(id)s)')
settings.table_membership = db[settings.table_membership_name]
if not settings.table_permission_name in db.tables:
table = db.define_table(
settings.table_permission_name,
Field('group_id', settings.table_group,
label=self.messages.label_group_id),
Field('name', default='default', length=512,
label=self.messages.label_name),
Field('table_name', length=512,
label=self.messages.label_table_name),
Field('record_id', 'integer',default=0,
label=self.messages.label_record_id),
*settings.extra_fields.get(settings.table_permission_name,[]),
**dict(
migrate=self.__get_migrate(
settings.table_permission_name, migrate),
fake_migrate=fake_migrate))
table.group_id.requires = IS_IN_DB(db, '%s.id' %
settings.table_group_name,
'%(role)s (%(id)s)')
table.name.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty)
#table.table_name.requires = IS_EMPTY_OR(IS_IN_SET(self.db.tables))
table.record_id.requires = IS_INT_IN_RANGE(0, 10 ** 9)
settings.table_permission = db[settings.table_permission_name]
if not settings.table_event_name in db.tables:
table = db.define_table(
settings.table_event_name,
Field('time_stamp', 'datetime',
default=current.request.now,
label=self.messages.label_time_stamp),
Field('client_ip',
default=current.request.client,
label=self.messages.label_client_ip),
Field('user_id', settings.table_user, default=None,
label=self.messages.label_user_id),
Field('origin', default='auth', length=512,
label=self.messages.label_origin),
Field('description', 'text', default='',
label=self.messages.label_description),
*settings.extra_fields.get(settings.table_event_name,[]),
**dict(
migrate=self.__get_migrate(
settings.table_event_name, migrate),
fake_migrate=fake_migrate))
table.user_id.requires = IS_IN_DB(db, '%s.id' %
settings.table_user_name,
'%(first_name)s %(last_name)s (%(id)s)')
table.origin.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty)
table.description.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty)
settings.table_event = db[settings.table_event_name]
now = current.request.now
if settings.cas_domains:
if not settings.table_cas_name in db.tables:
table = db.define_table(
settings.table_cas_name,
Field('user_id', settings.table_user, default=None,
label=self.messages.label_user_id),
Field('created_on','datetime',default=now),
Field('service',requires=IS_URL()),
Field('ticket'),
Field('renew', 'boolean', default=False),
*settings.extra_fields.get(settings.table_cas_name,[]),
**dict(
migrate=self.__get_migrate(
settings.table_event_name, migrate),
fake_migrate=fake_migrate))
table.user_id.requires = IS_IN_DB(db, '%s.id' % \
settings.table_user_name,
'%(first_name)s %(last_name)s (%(id)s)')
settings.table_cas = db[settings.table_cas_name]
if settings.cas_provider:
settings.actions_disabled = \
['profile','register','change_password','request_reset_password']
from gluon.contrib.login_methods.cas_auth import CasAuth
maps = self.settings.cas_maps
if not maps:
maps = dict((name,lambda v,n=name:v.get(n,None)) for name in \
settings.table_user.fields if name!='id' \
and settings.table_user[name].readable)
maps['registration_id'] = \
lambda v,p=settings.cas_provider:'%s/%s' % (p,v['user'])
actions = [self.settings.cas_actions['login'],
self.settings.cas_actions['servicevalidate'],
self.settings.cas_actions['logout']]
settings.login_form = CasAuth(
casversion = 2,
urlbase = settings.cas_provider,
actions=actions,
maps=maps)
def log_event(self, description, vars=None, origin='auth'):
"""
usage:
auth.log_event(description='this happened', origin='auth')
"""
if not description:
return
elif self.is_logged_in():
user_id = self.user.id
else:
user_id = None # user unknown
vars = vars or {}
self.settings.table_event.insert(description=description % vars,
origin=origin, user_id=user_id)
    def get_or_create_user(self, keys):
        """
        Used for alternate login methods:
        If the user exists already then password is updated.
        If the user doesn't yet exist, then they are created.

        *keys* is a dict of candidate field values coming from the
        external provider; the match is attempted on registration_id,
        username, then email.
        """
        table_user = self.settings.table_user
        user = None
        checks = []
        # make a guess about who this user is
        for fieldname in ['registration_id', 'username', 'email']:
            if fieldname in table_user.fields() and keys.get(fieldname, None):
                checks.append(fieldname)
                user = user or table_user(**{fieldname: keys[fieldname]})
        # if we think we found the user but registration_id does not match, make new user
        if 'registration_id' in checks and user and user.registration_id \
                and user.registration_id != keys.get('registration_id', None):
            user = None  # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER?
        # externally authenticated users never need local verification
        keys['registration_key'] = ''
        if user:
            user.update_record(**table_user._filter_fields(keys))
        elif checks:
            if not 'first_name' in keys and 'first_name' in table_user.fields:
                # fall back to the local part of username/email as first name
                keys['first_name'] = keys.get(
                    'username', keys.get('email', 'anonymous')).split('@')[0]
            user_id = table_user.insert(**table_user._filter_fields(keys))
            user = self.user = table_user[user_id]
            if self.settings.create_user_groups:
                # every new user gets a personal group "user_<id>"
                group_id = self.add_group("user_%s" % user_id)
                self.add_membership(group_id, user_id)
        return user
def basic(self):
if not self.settings.allow_basic_login:
return (False,False,False)
basic = current.request.env.http_authorization
if not basic or not basic[:6].lower() == 'basic ':
return (True, False, False)
(username, password) = base64.b64decode(basic[6:]).split(':')
return (True, True, self.login_bare(username, password))
    def login_bare(self, username, password):
        """
        logins user

        Programmatic login (no form): runs the stored password validators
        on *password*, compares against the stored hash and, on success,
        stores the user in the session.  Unknown users are tried against
        the alternate settings.login_methods.  Returns the user record
        (or the raw username for alternate methods) or False.
        """
        request = current.request
        session = current.session
        table_user = self.settings.table_user
        if self.settings.login_userfield:
            userfield = self.settings.login_userfield
        elif 'username' in table_user.fields:
            userfield = 'username'
        else:
            userfield = 'email'
        passfield = self.settings.password_field
        user = self.db(table_user[userfield] == username).select().first()
        if user:
            # run the field validators (e.g. CRYPT) to hash the candidate
            password = table_user[passfield].validate(password)[0]
            # an empty registration_key means the account is active
            if not user.registration_key and user[passfield] == password:
                user = Storage(table_user._filter_fields(user, id=True))
                session.auth = Storage(user=user, last_visit=request.now,
                                       expiration=self.settings.expiration,
                                       hmac_key=web2py_uuid())
                self.user = user
                self.update_groups()
                return user
        else:
            # user not in database try other login methods
            for login_method in self.settings.login_methods:
                if login_method != self and login_method(username, password):
                    self.user = username
                    return username
        return False
def cas_login(
self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
version=2,
):
request = current.request
response = current.response
session = current.session
db, table = self.db, self.settings.table_cas
session._cas_service = request.vars.service or session._cas_service
if not request.env.http_host in self.settings.cas_domains or \
not session._cas_service:
raise HTTP(403,'not authorized')
def allow_access(interactivelogin=False):
row = table(service=session._cas_service,user_id=self.user.id)
if row:
ticket = row.ticket
else:
ticket = 'ST-'+web2py_uuid()
table.insert(service=session._cas_service,
user_id=self.user.id,
ticket=ticket,
created_on=request.now,
renew=interactivelogin)
service = session._cas_service
del session._cas_service
if request.vars.has_key('warn') and not interactivelogin:
response.headers['refresh'] = "5;URL=%s"%service+"?ticket="+ticket
return A("Continue to %s"%service,
_href=service+"?ticket="+ticket)
else:
redirect(service+"?ticket="+ticket)
if self.is_logged_in() and not request.vars.has_key('renew'):
return allow_access()
elif not self.is_logged_in() and request.vars.has_key('gateway'):
redirect(service)
def cas_onaccept(form, onaccept=onaccept):
if not onaccept is DEFAULT: onaccept(form)
return allow_access(interactivelogin=True)
return self.login(next,onvalidation,cas_onaccept,log)
    def cas_validate(self, version=2, proxy=False):
        """
        CAS 1.0/2.0 server-side ticket validation endpoint.

        Looks up the service ticket passed in request.vars.ticket,
        consumes it (the matching row is deleted, so tickets are single
        use) and replies via an HTTP 200 exception with a plain
        'yes/no' body (version 1) or a cas:serviceResponse XML document
        (version 2).
        """
        request = current.request
        db, table = self.db, self.settings.table_cas
        current.response.headers['Content-Type']='text'
        ticket = request.vars.ticket
        renew = True if request.vars.has_key('renew') else False
        row = table(ticket=ticket)
        success = False
        if row:
            # pick the field reported back as the CAS user identifier
            if self.settings.login_userfield:
                userfield = self.settings.login_userfield
            elif 'username' in table.fields:
                # NOTE(review): 'table' is the CAS ticket table here, not
                # the user table -- confirm this is intended; as written
                # this branch can never trigger
                userfield = 'username'
            else:
                userfield = 'email'
            # If ticket is a service Ticket and RENEW flag respected
            if ticket[0:3] == 'ST-' and \
                    not ((row.renew and renew) ^ renew):
                user = self.settings.table_user(row.user_id)
                row.delete_record()  # tickets are single use
                success = True
        def build_response(body):
            # wrap the body in the standard cas:serviceResponse envelope
            return '<?xml version="1.0" encoding="UTF-8"?>\n'+\
                TAG['cas:serviceResponse'](
                    body,**{'_xmlns:cas':'http://www.yale.edu/tp/cas'}).xml()
        if success:
            if version == 1:
                message = 'yes\n%s' % user[userfield]
            else: # assume version 2
                username = user.get('username',user[userfield])
                message = build_response(
                    TAG['cas:authenticationSuccess'](
                        TAG['cas:user'](username),
                        *[TAG['cas:'+field.name](user[field.name]) \
                              for field in self.settings.table_user \
                              if field.readable]))
        else:
            if version == 1:
                message = 'no\n'
            elif row:
                message = build_response(TAG['cas:authenticationFailure']())
            else:
                message = build_response(
                    TAG['cas:authenticationFailure'](
                        'Ticket %s not recognized' % ticket,
                        _code='INVALID TICKET'))
        raise HTTP(200,message)
    def login(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a login form
        method: Auth.login([next=DEFAULT [, onvalidation=DEFAULT
        [, onaccept=DEFAULT [, log=DEFAULT]]]])

        Builds and processes the login form. Depending on
        settings.login_form, credentials are checked locally, through
        the alternate settings.login_methods, or delegated to a central
        (CAS-style) provider. On success session.auth is populated,
        group membership is refreshed and the user is redirected to
        ``next``.
        """
        table_user = self.settings.table_user
        # pick the login identifier field: explicit setting first, then
        # 'username' when the table has one, else 'email'
        if self.settings.login_userfield:
            username = self.settings.login_userfield
        elif 'username' in table_user.fields:
            username = 'username'
        else:
            username = 'email'
        if 'username' in table_user.fields or \
                not self.settings.login_email_validate:
            tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty)
        else:
            tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email)
        # temporarily swap the login field's validators; the originals
        # are restored before returning the form
        old_requires = table_user[username].requires
        table_user[username].requires = tmpvalidator
        request = current.request
        response = current.response
        session = current.session
        passfield = self.settings.password_field
        # disable the minimum-length check on the password for login
        try: table_user[passfield].requires[-1].min_length = 0
        except: pass
        ### use session for federated login
        if self.next:
            session._auth_next = self.next
        elif session._auth_next:
            self.next = session._auth_next
        ### pass
        if next is DEFAULT:
            next = self.next or self.settings.login_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.login_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.login_onaccept
        if log is DEFAULT:
            log = self.messages.login_log
        user = None # default
        # do we use our own login form, or from a central source?
        if self.settings.login_form == self:
            form = SQLFORM(
                table_user,
                fields=[username, passfield],
                hidden = dict(_next=next),
                showid=self.settings.showid,
                submit_button=self.messages.login_button,
                delete_label=self.messages.delete_label,
                formstyle=self.settings.formstyle,
                separator=self.settings.label_separator
                )
            if self.settings.remember_me_form:
                ## adds a new input checkbox "remember me for longer"
                addrow(form,XML(" "),
                      DIV(XML(" "),
                          INPUT(_type='checkbox',
                                _class='checkbox',
                                _id="auth_user_remember",
                                _name="remember",
                                ),
                          XML(" "),
                          LABEL(
                            self.messages.label_remember_me,
                            _for="auth_user_remember",
                            )),"",
                      self.settings.formstyle,
                      'auth_user_remember__row')
            captcha = self.settings.login_captcha or \
                (self.settings.login_captcha!=False and self.settings.captcha)
            if captcha:
                addrow(form, captcha.label, captcha, captcha.comment,
                       self.settings.formstyle,'captcha__row')
            accepted_form = False
            if form.accepts(request, session,
                            formname='login', dbio=False,
                            onvalidation=onvalidation,
                            hideerror=self.settings.hideerror):
                accepted_form = True
                # check for username in db
                user = self.db(table_user[username] == form.vars[username]).select().first()
                if user:
                    # user in db, check if registration pending or disabled
                    temp_user = user
                    if temp_user.registration_key == 'pending':
                        response.flash = self.messages.registration_pending
                        return form
                    elif temp_user.registration_key in ('disabled','blocked'):
                        response.flash = self.messages.login_disabled
                        return form
                    elif not temp_user.registration_key is None and \
                            temp_user.registration_key.strip():
                        response.flash = \
                            self.messages.registration_verifying
                        return form
                    # try alternate logins 1st as these have the
                    # current version of the password
                    user = None
                    for login_method in self.settings.login_methods:
                        if login_method != self and \
                                login_method(request.vars[username],
                                             request.vars[passfield]):
                            if not self in self.settings.login_methods:
                                # do not store password in db
                                form.vars[passfield] = None
                            user = self.get_or_create_user(form.vars)
                            break
                    if not user:
                        # alternates have failed, maybe because service inaccessible
                        if self.settings.login_methods[0] == self:
                            # try logging in locally using cached credentials
                            if temp_user[passfield] == form.vars.get(passfield, ''):
                                # success
                                user = temp_user
                else:
                    # user not in db
                    if not self.settings.alternate_requires_registration:
                        # we're allowed to auto-register users from external systems
                        for login_method in self.settings.login_methods:
                            if login_method != self and \
                                    login_method(request.vars[username],
                                                 request.vars[passfield]):
                                if not self in self.settings.login_methods:
                                    # do not store password in db
                                    form.vars[passfield] = None
                                user = self.get_or_create_user(form.vars)
                                break
                if not user:
                    self.log_event(self.settings.login_failed_log,
                                   request.post_vars)
                    # invalid login
                    session.flash = self.messages.invalid_login
                    redirect(self.url(args=request.args,vars=request.get_vars))
        else:
            # use a central authentication server
            cas = self.settings.login_form
            cas_user = cas.get_user()
            if cas_user:
                cas_user[passfield] = None
                user = self.get_or_create_user(table_user._filter_fields(cas_user))
            elif hasattr(cas,'login_form'):
                return cas.login_form()
            else:
                # we need to pass through login again before going on
                next = self.url(self.settings.function, args='login')
                redirect(cas.login_url(next))
        # process authenticated users
        if user:
            user = Storage(table_user._filter_fields(user, id=True))
            # process authenticated users
            # user wants to be logged in for longer
            session.auth = Storage(
                user = user,
                last_visit = request.now,
                expiration = request.vars.get("remember",False) and \
                    self.settings.long_expiration or self.settings.expiration,
                remember = request.vars.has_key("remember"),
                hmac_key = web2py_uuid()
                )
            self.user = user
            self.log_event(log, user)
            session.flash = self.messages.logged_in
            self.update_groups()
        # how to continue
        if self.settings.login_form == self:
            if accepted_form:
                callback(onaccept,form)
                # NOTE(review): this branch sets _auth_next to None while
                # the CAS branch below del-etes it -- confirm intentional
                if next == session._auth_next:
                    session._auth_next = None
                next = replace_id(next, form)
                redirect(next)
            table_user[username].requires = old_requires
            return form
        elif user:
            callback(onaccept,None)
            if next == session._auth_next:
                del session._auth_next
            redirect(next)
    def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT):
        """
        logout and redirects to login
        method: Auth.logout ([next=DEFAULT[, onlogout=DEFAULT[,
        log=DEFAULT]]])

        Runs the optional onlogout callback, logs the event, clears
        session.auth and, when a delegated (CAS) login form is in use,
        redirects through the provider's logout URL.
        """
        if next is DEFAULT:
            next = self.settings.logout_next
        if onlogout is DEFAULT:
            onlogout = self.settings.logout_onlogout
        if onlogout:
            onlogout(self.user)
        if log is DEFAULT:
            log = self.messages.logout_log
        if self.user:
            self.log_event(log, self.user)
        if self.settings.login_form != self:
            # delegated login: also log out of the central provider
            cas = self.settings.login_form
            cas_user = cas.get_user()
            if cas_user:
                next = cas.logout_url(next)
        current.session.auth = None
        current.session.flash = self.messages.logged_out
        redirect(next)
    def register(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a registration form
        method: Auth.register([next=DEFAULT [, onvalidation=DEFAULT
        [, onaccept=DEFAULT [, log=DEFAULT]]]])

        Handles email verification, admin approval and optional
        automatic login after registration, as configured by the
        registration_* and login_after_registration settings.
        """
        table_user = self.settings.table_user
        request = current.request
        response = current.response
        session = current.session
        # an already logged-in user cannot register again
        if self.is_logged_in():
            redirect(self.settings.logged_url)
        if next is DEFAULT:
            next = self.next or self.settings.register_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.register_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.register_onaccept
        if log is DEFAULT:
            log = self.messages.register_log
        passfield = self.settings.password_field
        formstyle = self.settings.formstyle
        form = SQLFORM(table_user,
                       fields = self.settings.register_fields,
                       hidden = dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.register_button,
                       delete_label=self.messages.delete_label,
                       formstyle=formstyle,
                       separator=self.settings.label_separator
                       )
        if self.settings.register_verify_password:
            # inject a second password input that must match the first
            for i, row in enumerate(form[0].components):
                item = row.element('input',_name=passfield)
                if item:
                    form.custom.widget.password_two = \
                        INPUT(_name="password_two", _type="password",
                              requires=IS_EXPR(
                            'value==%s' % \
                                repr(request.vars.get(passfield, None)),
                            error_message=self.messages.mismatched_password))
                    addrow(form, self.messages.verify_password + self.settings.label_separator,
                           form.custom.widget.password_two,
                           self.messages.verify_password_comment,
                           formstyle,
                           '%s_%s__row' % (table_user, 'password_two'),
                           position=i+1)
                    break
        captcha = self.settings.register_captcha or self.settings.captcha
        if captcha:
            addrow(form, captcha.label, captcha, captcha.comment,self.settings.formstyle, 'captcha__row')
        # the registration key doubles as the email verification token
        table_user.registration_key.default = key = web2py_uuid()
        if form.accepts(request, session, formname='register',
                        onvalidation=onvalidation,hideerror=self.settings.hideerror):
            description = self.messages.group_description % form.vars
            if self.settings.create_user_groups:
                # every new user gets a personal group 'user_<id>'
                group_id = self.add_group("user_%s" % form.vars.id, description)
                self.add_membership(group_id, form.vars.id)
            if self.settings.registration_requires_verification:
                if not self.settings.mailer or \
                   not self.settings.mailer.send(to=form.vars.email,
                                                 subject=self.messages.verify_email_subject,
                                                 message=self.messages.verify_email
                                                 % dict(key=key)):
                    # undo the insert when the verification email fails
                    self.db.rollback()
                    response.flash = self.messages.unable_send_email
                    return form
                session.flash = self.messages.email_sent
            if self.settings.registration_requires_approval and \
               not self.settings.registration_requires_verification:
                table_user[form.vars.id] = dict(registration_key='pending')
                session.flash = self.messages.registration_pending
            elif (not self.settings.registration_requires_verification or \
                      self.settings.login_after_registration):
                if not self.settings.registration_requires_verification:
                    table_user[form.vars.id] = dict(registration_key='')
                session.flash = self.messages.registration_successful
                table_user = self.settings.table_user
                # NOTE(review): re-derives the login field here ignoring
                # settings.login_userfield, unlike login() -- confirm
                if 'username' in table_user.fields:
                    username = 'username'
                else:
                    username = 'email'
                user = self.db(table_user[username] == form.vars[username]).select().first()
                user = Storage(table_user._filter_fields(user, id=True))
                session.auth = Storage(user=user, last_visit=request.now,
                                       expiration=self.settings.expiration,
                                       hmac_key = web2py_uuid())
                self.user = user
                self.update_groups()
                session.flash = self.messages.logged_in
            self.log_event(log, form.vars)
            callback(onaccept,form)
            if not next:
                next = self.url(args = request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        return form
def is_logged_in(self):
"""
checks if the user is logged in and returns True/False.
if so user is in auth.user as well as in session.auth.user
"""
if self.user:
return True
return False
    def verify_email(
        self,
        next=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        action user to verify the registration email
        method: Auth.verify_email([next=DEFAULT [, onvalidation=DEFAULT
        [, onaccept=DEFAULT [, log=DEFAULT]]]])

        The verification key is taken from the last URL argument; when
        no user matches it the visitor is sent to the login page.
        Depending on registration_requires_approval the record is then
        marked 'pending' (awaiting approval) or cleared (active).
        """
        key = current.request.args[-1]
        table_user = self.settings.table_user
        user = self.db(table_user.registration_key == key).select().first()
        if not user:
            redirect(self.settings.login_url)
        if self.settings.registration_requires_approval:
            user.update_record(registration_key = 'pending')
            current.session.flash = self.messages.registration_pending
        else:
            user.update_record(registration_key = '')
            current.session.flash = self.messages.email_verified
        # make sure session has same user.registration_key as db record
        if current.session.auth and current.session.auth.user:
            current.session.auth.user.registration_key = user.registration_key
        if log is DEFAULT:
            log = self.messages.verify_email_log
        if next is DEFAULT:
            next = self.settings.verify_email_next
        if onaccept is DEFAULT:
            onaccept = self.settings.verify_email_onaccept
        self.log_event(log, user)
        callback(onaccept,user)
        redirect(next)
    def retrieve_username(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form to retrieve the user username
        (only if there is a username field)
        method: Auth.retrieve_username([next=DEFAULT
        [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]])

        The user enters an email address; when it matches a record the
        username is emailed back. Raises HTTP 404 if the user table has
        no 'username' field, and requires a configured mailer.
        """
        table_user = self.settings.table_user
        if not 'username' in table_user.fields:
            raise HTTP(404)
        request = current.request
        response = current.response
        session = current.session
        captcha = self.settings.retrieve_username_captcha or \
                (self.settings.retrieve_username_captcha!=False and self.settings.captcha)
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next is DEFAULT:
            next = self.next or self.settings.retrieve_username_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.retrieve_username_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.retrieve_username_onaccept
        if log is DEFAULT:
            log = self.messages.retrieve_username_log
        # temporarily require the email to exist in the db; the original
        # validators are restored before returning the form
        old_requires = table_user.email.requires
        table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
            error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden = dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.submit_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if captcha:
            addrow(form, captcha.label, captcha, captcha.comment,self.settings.formstyle, 'captcha__row')
        if form.accepts(request, session,
                        formname='retrieve_username', dbio=False,
                        onvalidation=onvalidation,hideerror=self.settings.hideerror):
            user = self.db(table_user.email == form.vars.email).select().first()
            if not user:
                current.session.flash = \
                    self.messages.invalid_email
                redirect(self.url(args=request.args))
            username = user.username
            self.settings.mailer.send(to=form.vars.email,
                    subject=self.messages.retrieve_username_subject,
                    message=self.messages.retrieve_username
                     % dict(username=username))
            session.flash = self.messages.email_sent
            self.log_event(log, user)
            callback(onaccept,form)
            if not next:
                next = self.url(args = request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        table_user.email.requires = old_requires
        return form
def random_password(self):
import string
import random
password = ''
specials=r'!#$*'
for i in range(0,3):
password += random.choice(string.lowercase)
password += random.choice(string.uppercase)
password += random.choice(string.digits)
password += random.choice(specials)
return ''.join(random.sample(password,len(password)))
    def reset_password_deprecated(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form to reset the user password (deprecated)
        method: Auth.reset_password_deprecated([next=DEFAULT
        [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]])

        Deprecated because it generates a brand-new password and mails
        it to the user in clear text; prefer request_reset_password(),
        which mails a time-limited reset link instead.
        """
        table_user = self.settings.table_user
        request = current.request
        response = current.response
        session = current.session
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next is DEFAULT:
            next = self.next or self.settings.retrieve_password_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.retrieve_password_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.retrieve_password_onaccept
        if log is DEFAULT:
            log = self.messages.retrieve_password_log
        # temporarily require the email to exist in the db; the original
        # validators are restored before returning the form
        old_requires = table_user.email.requires
        table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
            error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden = dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.submit_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if form.accepts(request, session,
                        formname='retrieve_password', dbio=False,
                        onvalidation=onvalidation,hideerror=self.settings.hideerror):
            user = self.db(table_user.email == form.vars.email).select().first()
            if not user:
                current.session.flash = \
                    self.messages.invalid_email
                redirect(self.url(args=request.args))
            elif user.registration_key in ('pending','disabled','blocked'):
                current.session.flash = \
                    self.messages.registration_pending
                redirect(self.url(args=request.args))
            password = self.random_password()
            passfield = self.settings.password_field
            # store the hashed/validated new password and activate account
            d = {passfield: table_user[passfield].validate(password)[0],
                 'registration_key': ''}
            user.update_record(**d)
            if self.settings.mailer and \
               self.settings.mailer.send(to=form.vars.email,
                        subject=self.messages.retrieve_password_subject,
                        message=self.messages.retrieve_password \
                        % dict(password=password)):
                session.flash = self.messages.email_sent
            else:
                session.flash = self.messages.unable_to_send_email
            self.log_event(log, user)
            callback(onaccept,form)
            if not next:
                next = self.url(args = request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        table_user.email.requires = old_requires
        return form
    def reset_password(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form to reset the user password
        method: Auth.reset_password([next=DEFAULT
        [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]])

        The reset key (from ?key= or the last URL argument) has the
        format '<unix-timestamp>-<uuid>' and expires after 24 hours.
        """
        table_user = self.settings.table_user
        request = current.request
        # response = current.response
        session = current.session
        if next is DEFAULT:
            next = self.next or self.settings.reset_password_next
        try:
            key = request.vars.key or request.args[-1]
            # reject keys older than 24 hours (timestamp is the prefix)
            t0 = int(key.split('-')[0])
            if time.time()-t0 > 60*60*24: raise Exception
            user = self.db(table_user.reset_password_key == key).select().first()
            if not user: raise Exception
        except Exception:
            session.flash = self.messages.invalid_reset_password
            redirect(next)
        passfield = self.settings.password_field
        form = SQLFORM.factory(
            Field('new_password', 'password',
                  label=self.messages.new_password,
                  requires=self.settings.table_user[passfield].requires),
            Field('new_password2', 'password',
                  label=self.messages.verify_password,
                  requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
                                    self.messages.mismatched_password)]),
            submit_button=self.messages.password_reset_button,
            hidden = dict(_next=next),
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
        )
        if form.accepts(request,session,hideerror=self.settings.hideerror):
            # also clear registration/reset keys so the account is active
            # and the key cannot be replayed
            user.update_record(**{passfield:form.vars.new_password,
                                  'registration_key':'',
                                  'reset_password_key':''})
            session.flash = self.messages.password_changed
            redirect(next)
        return form
    def request_reset_password(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form to reset the user password
        method: Auth.reset_password([next=DEFAULT
        [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]])

        The user enters an email address and is mailed a reset key of
        the form '<unix-timestamp>-<uuid>' (consumed and age-checked by
        reset_password()). Requires a configured mailer.
        """
        table_user = self.settings.table_user
        request = current.request
        response = current.response
        session = current.session
        captcha = self.settings.retrieve_password_captcha or \
                (self.settings.retrieve_password_captcha!=False and self.settings.captcha)
        if next is DEFAULT:
            next = self.next or self.settings.request_reset_password_next
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if onvalidation is DEFAULT:
            onvalidation = self.settings.reset_password_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.reset_password_onaccept
        if log is DEFAULT:
            log = self.messages.reset_password_log
        # require a well-formed email that exists in the db
        table_user.email.requires = [
            IS_EMAIL(error_message=self.messages.invalid_email),
            IS_IN_DB(self.db, table_user.email,
                     error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden = dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.password_reset_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if captcha:
            addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle,'captcha__row')
        if form.accepts(request, session,
                        formname='reset_password', dbio=False,
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            user = self.db(table_user.email == form.vars.email).select().first()
            if not user:
                session.flash = self.messages.invalid_email
                redirect(self.url(args=request.args))
            elif user.registration_key in ('pending','disabled','blocked'):
                session.flash = self.messages.registration_pending
                redirect(self.url(args=request.args))
            # the timestamp prefix lets reset_password() expire old keys
            reset_password_key = str(int(time.time()))+'-' + web2py_uuid()
            if self.settings.mailer.send(to=form.vars.email,
                    subject=self.messages.reset_password_subject,
                    message=self.messages.reset_password % \
                        dict(key=reset_password_key)):
                session.flash = self.messages.email_sent
                # only persist the key once the email actually went out
                user.update_record(reset_password_key=reset_password_key)
            else:
                session.flash = self.messages.unable_to_send_email
            self.log_event(log, user)
            callback(onaccept,form)
            if not next:
                next = self.url(args = request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        # old_requires = table_user.email.requires
        return form
def retrieve_password(
self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
if self.settings.reset_password_requires_verification:
return self.request_reset_password(next,onvalidation,onaccept,log)
else:
return self.reset_password_deprecated(next,onvalidation,onaccept,log)
    def change_password(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form that lets the user change password
        method: Auth.change_password([next=DEFAULT[, onvalidation=DEFAULT[,
        onaccept=DEFAULT[, log=DEFAULT]]]])

        The form asks for the current password (validated against the
        user's db record) and the new password entered twice. Requires
        a logged-in user.
        """
        if not self.is_logged_in():
            redirect(self.settings.login_url)
        db = self.db
        table_user = self.settings.table_user
        usern = self.settings.table_user_name
        # query set restricted to the logged-in user's own record
        s = db(table_user.id == self.user.id)
        request = current.request
        session = current.session
        if next is DEFAULT:
            next = self.next or self.settings.change_password_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.change_password_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.change_password_onaccept
        if log is DEFAULT:
            log = self.messages.change_password_log
        passfield = self.settings.password_field
        form = SQLFORM.factory(
            Field('old_password', 'password',
                label=self.messages.old_password,
                requires=validators(
                     table_user[passfield].requires,
                     IS_IN_DB(s, '%s.%s' % (usern, passfield),
                              error_message=self.messages.invalid_password))),
            Field('new_password', 'password',
                label=self.messages.new_password,
                requires=table_user[passfield].requires),
            Field('new_password2', 'password',
                label=self.messages.verify_password,
                requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
                                  self.messages.mismatched_password)]),
            submit_button=self.messages.password_change_button,
            hidden = dict(_next=next),
            formstyle = self.settings.formstyle,
            separator=self.settings.label_separator
        )
        if form.accepts(request, session,
                        formname='change_password',
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            d = {passfield: form.vars.new_password}
            s.update(**d)
            session.flash = self.messages.password_changed
            self.log_event(log, self.user)
            callback(onaccept,form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        return form
    def profile(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form that lets the user change his/her profile
        method: Auth.profile([next=DEFAULT [, onvalidation=DEFAULT
        [, onaccept=DEFAULT [, log=DEFAULT]]]])

        On acceptance the db record and the in-session copy of the user
        are both updated. Requires a logged-in user.
        """
        table_user = self.settings.table_user
        if not self.is_logged_in():
            redirect(self.settings.login_url)
        passfield = self.settings.password_field
        # the password is changed via change_password(), not here
        self.settings.table_user[passfield].writable = False
        request = current.request
        session = current.session
        if next is DEFAULT:
            next = self.next or self.settings.profile_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.profile_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.profile_onaccept
        if log is DEFAULT:
            log = self.messages.profile_log
        form = SQLFORM(
            table_user,
            self.user.id,
            fields = self.settings.profile_fields,
            hidden = dict(_next=next),
            showid = self.settings.showid,
            submit_button = self.messages.profile_save_button,
            delete_label = self.messages.delete_label,
            upload = self.settings.download_url,
            formstyle = self.settings.formstyle,
            separator=self.settings.label_separator
            )
        if form.accepts(request, session,
                        formname='profile',
                        onvalidation=onvalidation, hideerror=self.settings.hideerror):
            # keep the cached session user in sync with the db record
            self.user.update(table_user._filter_fields(form.vars))
            session.flash = self.messages.profile_updated
            self.log_event(log,self.user)
            callback(onaccept,form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        return form
def is_impersonating(self):
if not current.session.auth: return None
return current.session.auth.get('impersonator',None)
    def impersonate(self, user_id=DEFAULT):
        """
        usage: POST TO http://..../impersonate request.post_vars.user_id=<id>
        set request.post_vars.user_id to 0 to restore original user.

        requires impersonator is logged in and
        has_permission('impersonate', 'auth_user', user_id)

        Returns the (possibly switched) user record, or a minimal
        user_id picker form on a plain GET without post vars.
        """
        request = current.request
        session = current.session
        auth = session.auth
        if not self.is_logged_in():
            raise HTTP(401, "Not Authorized")
        current_id = auth.user.id
        requested_id = user_id
        if user_id is DEFAULT:
            user_id = current.request.post_vars.user_id
        if user_id and user_id != self.user.id and user_id != '0':
            if not self.has_permission('impersonate',
                                       self.settings.table_user_name,
                                       user_id):
                raise HTTP(403, "Forbidden")
            user = self.settings.table_user(user_id)
            if not user:
                raise HTTP(401, "Not Authorized")
            # snapshot the impersonator's whole session (pickled) so it
            # can be restored when impersonation ends
            auth.impersonator = cPickle.dumps(session)
            auth.user.update(
                self.settings.table_user._filter_fields(user, True))
            self.user = auth.user
            if self.settings.login_onaccept:
                form = Storage(dict(vars=self.user))
                self.settings.login_onaccept(form)
            log = self.messages.impersonate_log
            self.log_event(log,dict(id=current_id, other_id=auth.user.id))
        elif user_id in (0, '0') and self.is_impersonating():
            # user_id 0: restore the original (impersonator) session
            session.clear()
            session.update(cPickle.loads(auth.impersonator))
            self.user = session.auth.user
        if requested_id is DEFAULT and not request.post_vars:
            return SQLFORM.factory(Field('user_id', 'integer'))
        return self.user
def update_groups(self):
if not self.user:
return
user_groups = self.user_groups = {}
if current.session.auth:
current.session.auth.user_groups = self.user_groups
memberships = self.db(self.settings.table_membership.user_id
== self.user.id).select()
for membership in memberships:
group = self.settings.table_group(membership.group_id)
if group:
user_groups[membership.group_id] = group.role
def groups(self):
"""
displays the groups and their roles for the logged in user
"""
if not self.is_logged_in():
redirect(self.settings.login_url)
memberships = self.db(self.settings.table_membership.user_id
== self.user.id).select()
table = TABLE()
for membership in memberships:
groups = self.db(self.settings.table_group.id
== membership.group_id).select()
if groups:
group = groups[0]
table.append(TR(H3(group.role, '(%s)' % group.id)))
table.append(TR(P(group.description)))
if not memberships:
return None
return table
def not_authorized(self):
"""
you can change the view for this page to make it look as you like
"""
if current.request.ajax:
raise HTTP(403,'ACCESS DENIED')
return 'ACCESS DENIED'
    def requires(self, condition, requires_login=True):
        """
        decorator that prevents access to action if not logged in

        ``condition`` may be a boolean or a callable evaluated on each
        request. When not logged in the reaction depends on the request
        type: HTTP 403 for basic-auth/restful requests, a login link
        for ajax, otherwise a redirect to the login page with the
        current URL preserved in _next. A failed condition triggers the
        on_failed_authorization handler.
        """
        def decorator(action):
            def f(*a, **b):
                # try HTTP basic authentication before the session user
                basic_allowed,basic_accepted,user = self.basic()
                user = user or self.user
                if requires_login:
                    if not user:
                        if self.settings.allow_basic_login_only or \
                                basic_accepted or current.request.is_restful:
                            raise HTTP(403,"Not authorized")
                        elif current.request.ajax:
                            return A('login',_href=self.settings.login_url)
                        else:
                            next = self.here()
                            current.session.flash = current.response.flash
                            return call_or_redirect(
                                self.settings.on_failed_authentication,
                                self.settings.login_url+\
                                    '?_next='+urllib.quote(next))
                if callable(condition):
                    flag = condition()
                else:
                    flag = condition
                if not flag:
                    current.session.flash = self.messages.access_denied
                    return call_or_redirect(
                        self.settings.on_failed_authorization)
                return action(*a, **b)
            # preserve the wrapped action's metadata by hand
            # (pre-functools.wraps style)
            f.__doc__ = action.__doc__
            f.__name__ = action.__name__
            f.__dict__.update(action.__dict__)
            return f
        return decorator
def requires_login(self):
"""
decorator that prevents access to action if not logged in
"""
return self.requires(True)
def requires_membership(self, role=None, group_id=None):
"""
decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
"""
return self.requires(lambda: self.has_membership(group_id=group_id, role=role))
def requires_permission(self, name, table_name='', record_id=0):
"""
decorator that prevents access to action if not logged in or
if user logged in is not a member of any group (role) that
has 'name' access to 'table_name', 'record_id'.
"""
return self.requires(lambda: self.has_permission(name, table_name, record_id))
def requires_signature(self):
"""
decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
"""
return self.requires(lambda: URL.verify(current.request,user_signature=True))
def add_group(self, role, description=''):
"""
creates a group associated to a role
"""
group_id = self.settings.table_group.insert(
role=role, description=description)
self.log_event(self.messages.add_group_log,
dict(group_id=group_id, role=role))
return group_id
def del_group(self, group_id):
"""
deletes a group
"""
self.db(self.settings.table_group.id == group_id).delete()
self.db(self.settings.table_membership.group_id == group_id).delete()
self.db(self.settings.table_permission.group_id == group_id).delete()
self.update_groups()
self.log_event(self.messages.del_group_log,dict(group_id=group_id))
def id_group(self, role):
"""
returns the group_id of the group specified by the role
"""
rows = self.db(self.settings.table_group.role == role).select()
if not rows:
return None
return rows[0].id
def user_group(self, user_id = None):
"""
returns the group_id of the group uniquely associated to this user
i.e. role=user:[user_id]
"""
if not user_id and self.user:
user_id = self.user.id
role = 'user_%s' % user_id
return self.id_group(role)
def has_membership(self, group_id=None, user_id=None, role=None):
"""
checks if user is member of group_id or role
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id) # interpret group_id as a role
if not user_id and self.user:
user_id = self.user.id
membership = self.settings.table_membership
if self.db((membership.user_id == user_id)
& (membership.group_id == group_id)).select():
r = True
else:
r = False
self.log_event(self.messages.has_membership_log,
dict(user_id=user_id,group_id=group_id, check=r))
return r
def add_membership(self, group_id=None, user_id=None, role=None):
"""
gives user_id membership of group_id or role
if user is None than user_id is that of current logged in user
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id) # interpret group_id as a role
if not user_id and self.user:
user_id = self.user.id
membership = self.settings.table_membership
record = membership(user_id = user_id,group_id = group_id)
if record:
return record.id
else:
id = membership.insert(group_id=group_id, user_id=user_id)
self.update_groups()
self.log_event(self.messages.add_membership_log,
dict(user_id=user_id, group_id=group_id))
return id
def del_membership(self, group_id=None, user_id=None, role=None):
"""
revokes membership from group_id to user_id
if user_id is None than user_id is that of current logged in user
"""
group_id = group_id or self.id_group(role)
if not user_id and self.user:
user_id = self.user.id
membership = self.settings.table_membership
self.log_event(self.messages.del_membership_log,
dict(user_id=user_id,group_id=group_id))
ret = self.db(membership.user_id
== user_id)(membership.group_id
== group_id).delete()
self.update_groups()
return ret
def has_permission(
self,
name='any',
table_name='',
record_id=0,
user_id=None,
group_id=None,
):
"""
checks if user_id or current logged in user is member of a group
that has 'name' permission on 'table_name' and 'record_id'
if group_id is passed, it checks whether the group has the permission
"""
if not user_id and not group_id and self.user:
user_id = self.user.id
if user_id:
membership = self.settings.table_membership
rows = self.db(membership.user_id
== user_id).select(membership.group_id)
groups = set([row.group_id for row in rows])
if group_id and not group_id in groups:
return False
else:
groups = set([group_id])
permission = self.settings.table_permission
rows = self.db(permission.name == name)(permission.table_name
== str(table_name))(permission.record_id
== record_id).select(permission.group_id)
groups_required = set([row.group_id for row in rows])
if record_id:
rows = self.db(permission.name
== name)(permission.table_name
== str(table_name))(permission.record_id
== 0).select(permission.group_id)
groups_required = groups_required.union(set([row.group_id
for row in rows]))
if groups.intersection(groups_required):
r = True
else:
r = False
if user_id:
self.log_event(self.messages.has_permission_log,
dict(user_id=user_id, name=name,
table_name=table_name, record_id=record_id))
return r
def add_permission(
self,
group_id,
name='any',
table_name='',
record_id=0,
):
"""
gives group_id 'name' access to 'table_name' and 'record_id'
"""
permission = self.settings.table_permission
if group_id == 0:
group_id = self.user_group()
id = permission.insert(group_id=group_id, name=name,
table_name=str(table_name),
record_id=long(record_id))
self.log_event(self.messages.add_permission_log,
dict(permission_id=id, group_id=group_id,
name=name, table_name=table_name,
record_id=record_id))
return id
def del_permission(
self,
group_id,
name='any',
table_name='',
record_id=0,
):
"""
revokes group_id 'name' access to 'table_name' and 'record_id'
"""
permission = self.settings.table_permission
self.log_event(self.messages.del_permission_log,
dict(group_id=group_id, name=name,
table_name=table_name, record_id=record_id))
return self.db(permission.group_id == group_id)(permission.name
== name)(permission.table_name
== str(table_name))(permission.record_id
== long(record_id)).delete()
def accessible_query(self, name, table, user_id=None):
"""
returns a query with all accessible records for user_id or
the current logged in user
this method does not work on GAE because uses JOIN and IN
example:
db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL)
"""
if not user_id:
user_id = self.user_id
if self.has_permission(name, table, 0, user_id):
return table.id > 0
db = self.db
membership = self.settings.table_membership
permission = self.settings.table_permission
return table.id.belongs(db(membership.user_id == user_id)\
(membership.group_id == permission.group_id)\
(permission.name == name)\
(permission.table_name == table)\
._select(permission.record_id))
@staticmethod
def archive(form,
archive_table=None,
current_record='current_record',
archive_current=False,
fields=None):
"""
If you have a table (db.mytable) that needs full revision history you can just do:
form=crud.update(db.mytable,myrecord,onaccept=auth.archive)
or
form=SQLFORM(db.mytable,myrecord).process(onaccept=auth.archive)
crud.archive will define a new table "mytable_archive" and store
a copy of the current record (if archive_current=True)
or a copy of the previous record (if archive_current=False)
in the newly created table including a reference
to the current record.
fields allows to specify extra fields that need to be archived.
If you want to access such table you need to define it yourself
in a model:
db.define_table('mytable_archive',
Field('current_record',db.mytable),
db.mytable)
Notice such table includes all fields of db.mytable plus one: current_record.
crud.archive does not timestamp the stored record unless your original table
has a fields like:
db.define_table(...,
Field('saved_on','datetime',
default=request.now,update=request.now,writable=False),
Field('saved_by',auth.user,
default=auth.user_id,update=auth.user_id,writable=False),
there is nothing special about these fields since they are filled before
the record is archived.
If you want to change the archive table name and the name of the reference field
you can do, for example:
db.define_table('myhistory',
Field('parent_record',db.mytable),
db.mytable)
and use it as:
form=crud.update(db.mytable,myrecord,
onaccept=lambda form:crud.archive(form,
archive_table=db.myhistory,
current_record='parent_record'))
"""
if not archive_current and not form.record:
return None
table = form.table
if not archive_table:
archive_table_name = '%s_archive' % table
if archive_table_name in table._db:
archive_table = table._db[archive_table_name]
else:
archive_table = table._db.define_table(archive_table_name,
Field(current_record,table),
table)
new_record = {current_record:form.vars.id}
for fieldname in archive_table.fields:
if not fieldname in ['id',current_record]:
if archive_current and fieldname in form.vars:
new_record[fieldname]=form.vars[fieldname]
elif form.record and fieldname in form.record:
new_record[fieldname]=form.record[fieldname]
if fields:
for key,value in fields.items():
new_record[key] = value
id = archive_table.insert(**new_record)
return id
class Crud(object):
    """
    Generic CRUD helper: exposes create/read/update/delete/select/search
    actions for the tables of a DAL instance, with optional integration
    with an Auth instance (settings.auth) for permission checks and
    event logging.
    """
    def url(self, f=None, args=None, vars=None):
        """
        this should point to the controller that exposes
        download and crud
        """
        if args is None: args=[]
        if vars is None: vars={}
        return URL(c=self.settings.controller, f=f, args=args, vars=vars)
    def __init__(self, environment, db=None, controller='default'):
        """
        Wrap `db` (a DAL; accepted as first or second argument for
        backward compatibility) and populate the default settings and
        messages containers.
        """
        self.db = db
        if not db and environment and isinstance(environment,DAL):
            self.db = environment
        elif not db:
            raise SyntaxError, "must pass db as first or second argument"
        self.environment = current
        settings = self.settings = Settings()
        settings.auth = None
        settings.logger = None
        settings.create_next = None
        settings.update_next = None
        settings.controller = controller
        settings.delete_next = self.url()
        settings.download_url = self.url('download')
        settings.create_onvalidation = StorageList()
        settings.update_onvalidation = StorageList()
        settings.delete_onvalidation = StorageList()
        settings.create_onaccept = StorageList()
        settings.update_onaccept = StorageList()
        settings.update_ondelete = StorageList()
        settings.delete_onaccept = StorageList()
        settings.update_deletable = True
        settings.showid = False
        settings.keepvalues = False
        settings.create_captcha = None
        settings.update_captcha = None
        settings.captcha = None
        settings.formstyle = 'table3cols'
        settings.label_separator = ': '
        settings.hideerror = False
        settings.detect_record_change = True
        settings.hmac_key = None
        settings.lock_keys = True
        messages = self.messages = Messages(current.T)
        messages.submit_button = 'Submit'
        messages.delete_label = 'Check to delete:'
        messages.record_created = 'Record Created'
        messages.record_updated = 'Record Updated'
        messages.record_deleted = 'Record Deleted'
        messages.update_log = 'Record %(id)s updated'
        messages.create_log = 'Record %(id)s created'
        messages.read_log = 'Record %(id)s read'
        messages.delete_log = 'Record %(id)s deleted'
        messages.lock_keys = True
    def __call__(self):
        """
        Dispatch on request.args: args[0] selects the action ('tables',
        'create', 'select', 'search', 'read', 'update', 'delete') and
        args[1] names the target table.
        """
        args = current.request.args
        if len(args) < 1:
            raise HTTP(404)
        elif args[0] == 'tables':
            return self.tables()
        elif len(args) > 1 and not args(1) in self.db.tables:
            raise HTTP(404)
        table = self.db[args(1)]
        if args[0] == 'create':
            return self.create(table)
        elif args[0] == 'select':
            return self.select(table,linkto=self.url(args='read'))
        elif args[0] == 'search':
            form, rows = self.search(table,linkto=self.url(args='read'))
            return DIV(form,SQLTABLE(rows))
        elif args[0] == 'read':
            return self.read(table, args(2))
        elif args[0] == 'update':
            return self.update(table, args(2))
        elif args[0] == 'delete':
            return self.delete(table, args(2))
        else:
            raise HTTP(404)
    def log_event(self, message, vars):
        """Forward `message` to the configured logger, if any, tagged origin 'crud'."""
        if self.settings.logger:
            self.settings.logger.log_event(message, vars, origin = 'crud')
    def has_permission(self, name, table, record=0):
        """
        Return True when no auth is configured; otherwise delegate to
        auth.has_permission for `name` on `table`/`record`.
        """
        if not self.settings.auth:
            return True
        try:
            record_id = record.id
        except:
            record_id = record
        return self.settings.auth.has_permission(name, str(table), record_id)
    def tables(self):
        """Return an HTML TABLE of links to the 'select' page of every table."""
        return TABLE(*[TR(A(name,
                            _href=self.url(args=('select',name)))) \
                           for name in self.db.tables])
    @staticmethod
    def archive(form,archive_table=None,current_record='current_record'):
        """Delegate to Auth.archive (see its docstring for details)."""
        return Auth.archive(form,archive_table=archive_table,
                            current_record=current_record)
    def update(
        self,
        table,
        record,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        ondelete=DEFAULT,
        log=DEFAULT,
        message=DEFAULT,
        deletable=DEFAULT,
        formname=DEFAULT,
        ):
        """
        method: Crud.update(table, record, [next=DEFAULT
            [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT
            [, message=DEFAULT[, deletable=DEFAULT]]]]]])
        """
        if not (isinstance(table, self.db.Table) or table in self.db.tables) \
                or (isinstance(record, str) and not str(record).isdigit()):
            raise HTTP(404)
        if not isinstance(table, self.db.Table):
            table = self.db[table]
        try:
            record_id = record.id
        except:
            record_id = record or 0
        # updating needs 'update' permission; a null record means create
        if record_id and not self.has_permission('update', table, record_id):
            redirect(self.settings.auth.settings.on_failed_authorization)
        if not record_id and not self.has_permission('create', table, record_id):
            redirect(self.settings.auth.settings.on_failed_authorization)
        request = current.request
        response = current.response
        session = current.session
        # a JSON payload posted in request.vars.json is merged into vars
        if request.extension == 'json' and request.vars.json:
            request.vars.update(json_parser.loads(request.vars.json))
        if next is DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or self.settings.update_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.update_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.update_onaccept
        if ondelete is DEFAULT:
            ondelete = self.settings.update_ondelete
        if log is DEFAULT:
            log = self.messages.update_log
        if deletable is DEFAULT:
            deletable = self.settings.update_deletable
        if message is DEFAULT:
            message = self.messages.record_updated
        form = SQLFORM(
            table,
            record,
            hidden=dict(_next=next),
            showid=self.settings.showid,
            submit_button=self.messages.submit_button,
            delete_label=self.messages.delete_label,
            deletable=deletable,
            upload=self.settings.download_url,
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
            )
        self.accepted = False
        self.deleted = False
        # update forms and create forms may use different captchas
        captcha = self.settings.update_captcha or self.settings.captcha
        if record and captcha:
            addrow(form, captcha.label, captcha, captcha.comment,
                         self.settings.formstyle,'captcha__row')
        captcha = self.settings.create_captcha or self.settings.captcha
        if not record and captcha:
            addrow(form, captcha.label, captcha, captcha.comment,
                         self.settings.formstyle,'captcha__row')
        # non-HTML requests skip session-based CSRF form tracking
        if not request.extension in ('html','load'):
            (_session, _formname) = (None, None)
        else:
            (_session, _formname) = (session, '%s/%s' % (table._tablename, form.record_id))
        if not formname is DEFAULT:
            _formname = formname
        keepvalues = self.settings.keepvalues
        if request.vars.delete_this_record:
            keepvalues = False
        if isinstance(onvalidation,StorageList):
            onvalidation=onvalidation.get(table._tablename, [])
        if form.accepts(request, _session, formname=_formname,
                        onvalidation=onvalidation, keepvalues=keepvalues,
                        hideerror=self.settings.hideerror,
                        detect_record_change = self.settings.detect_record_change):
            self.accepted = True
            response.flash = message
            if log:
                self.log_event(log, form.vars)
            if request.vars.delete_this_record:
                self.deleted = True
                message = self.messages.record_deleted
                callback(ondelete,form,table._tablename)
            response.flash = message
            callback(onaccept,form,table._tablename)
            if not request.extension in ('html','load'):
                raise HTTP(200, 'RECORD CREATED/UPDATED')
            if isinstance(next, (list, tuple)): ### fix issue with 2.6
                next = next[0]
            if next: # Only redirect when explicit
                next = replace_id(next, form)
                session.flash = response.flash
                redirect(next)
        elif not request.extension in ('html','load'):
            raise HTTP(401,serializers.json(dict(errors=form.errors)))
        return form
    def create(
        self,
        table,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        message=DEFAULT,
        formname=DEFAULT,
        ):
        """
        method: Crud.create(table, [next=DEFAULT [, onvalidation=DEFAULT
            [, onaccept=DEFAULT [, log=DEFAULT[, message=DEFAULT]]]]])
        """
        if next is DEFAULT:
            next = self.settings.create_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.create_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.create_onaccept
        if log is DEFAULT:
            log = self.messages.create_log
        if message is DEFAULT:
            message = self.messages.record_created
        # creation is an update of a null record with deletion disabled
        return self.update(
            table,
            None,
            next=next,
            onvalidation=onvalidation,
            onaccept=onaccept,
            log=log,
            message=message,
            deletable=False,
            formname=formname,
            )
    def read(self, table, record):
        """
        Return a read-only SQLFORM for `record`; for non-HTML requests
        return the record's readable fields instead.
        """
        if not (isinstance(table, self.db.Table) or table in self.db.tables) \
                or (isinstance(record, str) and not str(record).isdigit()):
            raise HTTP(404)
        if not isinstance(table, self.db.Table):
            table = self.db[table]
        if not self.has_permission('read', table, record):
            redirect(self.settings.auth.settings.on_failed_authorization)
        form = SQLFORM(
            table,
            record,
            readonly=True,
            comments=False,
            upload=self.settings.download_url,
            showid=self.settings.showid,
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
            )
        if not current.request.extension in ('html','load'):
            return table._filter_fields(form.record, id=True)
        return form
    def delete(
        self,
        table,
        record_id,
        next=DEFAULT,
        message=DEFAULT,
        ):
        """
        method: Crud.delete(table, record_id, [next=DEFAULT
            [, message=DEFAULT]])
        """
        if not (isinstance(table, self.db.Table) or table in self.db.tables):
            raise HTTP(404)
        if not isinstance(table, self.db.Table):
            table = self.db[table]
        if not self.has_permission('delete', table, record_id):
            redirect(self.settings.auth.settings.on_failed_authorization)
        request = current.request
        session = current.session
        if next is DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or self.settings.delete_next
        if message is DEFAULT:
            message = self.messages.record_deleted
        record = table[record_id]
        if record:
            # onvalidation runs before, onaccept after the deletion
            callback(self.settings.delete_onvalidation,record)
            del table[record_id]
            callback(self.settings.delete_onaccept,record,table._tablename)
            session.flash = message
        redirect(next)
    def rows(
        self,
        table,
        query=None,
        fields=None,
        orderby=None,
        limitby=None,
        ):
        """
        Return the rows of `table` matching `query` (all records when
        omitted), restricted to readable fields when `fields` is None.
        """
        if not (isinstance(table, self.db.Table) or table in self.db.tables):
            raise HTTP(404)
        if not self.has_permission('select', table):
            redirect(self.settings.auth.settings.on_failed_authorization)
        #if record_id and not self.has_permission('select', table):
        #    redirect(self.settings.auth.settings.on_failed_authorization)
        if not isinstance(table, self.db.Table):
            table = self.db[table]
        if not query:
            query = table.id > 0
        if not fields:
            fields = [field for field in table if field.readable]
        rows = self.db(query).select(*fields,**dict(orderby=orderby,
                                                    limitby=limitby))
        return rows
    def select(
        self,
        table,
        query=None,
        fields=None,
        orderby=None,
        limitby=None,
        headers=None,
        **attr
        ):
        """
        Render the matching rows as a SQLTABLE (extra **attr is passed
        through); non-HTML requests get a plain list of dicts instead.
        Returns None when no row matches.
        """
        headers = headers or {}
        rows = self.rows(table,query,fields,orderby,limitby)
        if not rows:
            return None # Nicer than an empty table.
        if not 'upload' in attr:
            attr['upload'] = self.url('download')
        if not current.request.extension in ('html','load'):
            return rows.as_list()
        if not headers:
            if isinstance(table,str):
                table = self.db[table]
            headers = dict((str(k),k.label) for k in table)
        return SQLTABLE(rows,headers=headers,**attr)
    def get_format(self, field):
        """
        For a reference field, return the referenced table's string
        `_format` stripped of its '%(' and ')s' wrapping, falling back
        to the field name.
        """
        rtable = field._db[field.type[10:]]
        format = rtable.get('_format', None)
        if format and isinstance(format, str):
            return format[2:-2]
        return field.name
    def get_query(self, field, op, value, refsearch=False):
        """
        Build a DAL query implementing operator `op` on `field`; for
        reference fields queried with refsearch=True a row-filter
        function is returned instead.  Returns None for an unknown
        operator or any lookup failure.
        """
        try:
            if refsearch: format = self.get_format(field)
            if op == 'equals':
                if not refsearch:
                    return field == value
                else:
                    return lambda row: row[field.name][format] == value
            elif op == 'not equal':
                if not refsearch:
                    return field != value
                else:
                    return lambda row: row[field.name][format] != value
            elif op == 'greater than':
                if not refsearch:
                    return field > value
                else:
                    return lambda row: row[field.name][format] > value
            elif op == 'less than':
                if not refsearch:
                    return field < value
                else:
                    return lambda row: row[field.name][format] < value
            elif op == 'starts with':
                if not refsearch:
                    return field.like(value+'%')
                else:
                    return lambda row: str(row[field.name][format]).startswith(value)
            elif op == 'ends with':
                if not refsearch:
                    return field.like('%'+value)
                else:
                    return lambda row: str(row[field.name][format]).endswith(value)
            elif op == 'contains':
                if not refsearch:
                    return field.like('%'+value+'%')
                else:
                    return lambda row: value in row[field.name][format]
        except:
            return None
    def search(self, *tables, **args):
        """
        Creates a search form and its results for a table
        Example usage:
        form, results = crud.search(db.test,
           queries = ['equals', 'not equal', 'contains'],
           query_labels={'equals':'Equals',
                         'not equal':'Not equal'},
           fields = ['id','children'],
           field_labels = {'id':'ID','children':'Children'},
           zero='Please choose',
           query = (db.test.id > 0)&(db.test.id != 3) )
        """
        table = tables[0]
        fields = args.get('fields', table.fields)
        request = current.request
        db = self.db
        if not (isinstance(table, db.Table) or table in db.tables):
            raise HTTP(404)
        attributes = {}
        for key in ('orderby','groupby','left','distinct','limitby','cache'):
            if key in args: attributes[key]=args[key]
        tbl = TABLE()
        selected = []; refsearch = []; results = []
        showall = args.get('showall', False)
        if showall:
            selected = fields
        chkall = args.get('chkall', False)
        if chkall:
            for f in fields:
                request.vars['chk%s'%f] = 'on'
        ops = args.get('queries', [])
        zero = args.get('zero', '')
        if not ops:
            ops = ['equals', 'not equal', 'greater than',
                   'less than', 'starts with',
                   'ends with', 'contains']
        ops.insert(0,zero)
        query_labels = args.get('query_labels', {})
        query = args.get('query',table.id > 0)
        field_labels = args.get('field_labels',{})
        # one row per searchable field: checkbox, label, operator, value
        for field in fields:
            field = table[field]
            if not field.readable: continue
            fieldname = field.name
            chkval = request.vars.get('chk' + fieldname, None)
            txtval = request.vars.get('txt' + fieldname, None)
            opval = request.vars.get('op' + fieldname, None)
            row = TR(TD(INPUT(_type = "checkbox", _name = "chk" + fieldname,
                              _disabled = (field.type == 'id'),
                              value = (field.type == 'id' or chkval == 'on'))),
                     TD(field_labels.get(fieldname,field.label)),
                     TD(SELECT([OPTION(query_labels.get(op,op),
                                       _value=op) for op in ops],
                               _name = "op" + fieldname,
                               value = opval)),
                     TD(INPUT(_type = "text", _name = "txt" + fieldname,
                              _value = txtval, _id='txt' + fieldname,
                              _class = str(field.type))))
            tbl.append(row)
            if request.post_vars and (chkval or field.type=='id'):
                if txtval and opval != '':
                    if field.type[0:10] == 'reference ':
                        # reference fields are filtered in Python after the select
                        refsearch.append(self.get_query(field,
                                    opval, txtval, refsearch=True))
                    else:
                        value, error = field.validate(txtval)
                        if not error:
                            ### TODO deal with 'starts with', 'ends with', 'contains' on GAE
                            query &= self.get_query(field, opval, value)
                        else:
                            row[3].append(DIV(error,_class='error'))
                selected.append(field)
        form = FORM(tbl,INPUT(_type="submit"))
        if selected:
            try:
                results = db(query).select(*selected,**attributes)
                for r in refsearch:
                    results = results.find(r)
            except: # hmmm, we should do better here
                results = None
        return form, results
# Install a process-wide urllib2 opener with cookie support; used by
# fetch() below when Google App Engine's urlfetch is not available.
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
def fetch(url, data=None, headers=None,
          cookie=None,
          user_agent='Mozilla/5.0'):
    """
    Fetch `url` and return the response body as a string.

    data: dict of POST variables (the request is a GET when None)
    headers: optional dict of extra HTTP headers
    cookie: optional Cookie.SimpleCookie jar; a fresh jar is created per
        call when omitted (the previous mutable-default jar leaked
        cookies between unrelated calls)
    user_agent: value for the User-agent header (set when truthy)

    On Google App Engine the urlfetch API is used and redirects are
    followed manually so cookies survive across hops; elsewhere the
    request goes through urllib2.
    """
    headers = headers or {}
    if cookie is None:
        cookie = Cookie.SimpleCookie()
    if data is not None:
        data = urllib.urlencode(data)
    if user_agent: headers['User-agent'] = user_agent
    headers['Cookie'] = ' '.join(['%s=%s;'%(c.key,c.value) for c in cookie.values()])
    try:
        from google.appengine.api import urlfetch
    except ImportError:
        req = urllib2.Request(url, data, headers)
        html = urllib2.urlopen(req).read()
    else:
        method = ((data is None) and urlfetch.GET) or urlfetch.POST
        while url is not None:
            response = urlfetch.fetch(url=url, payload=data,
                                      method=method, headers=headers,
                                      allow_truncated=False,follow_redirects=False,
                                      deadline=10)
            # next request will be a get, so no need to send the data again
            data = None
            method = urlfetch.GET
            # load cookies from the response
            cookie.load(response.headers.get('set-cookie', ''))
            url = response.headers.get('location')
            html = response.content
    return html
# Pattern extracting latitude/longitude from the Google Maps geocoder
# XML output; raw string fixes the invalid '\<' / '\>' escape sequences
# (the regex itself is unchanged: '\<' matches a literal '<').
regex_geocode = \
    re.compile(r'\<coordinates\>(?P<la>[^,]*),(?P<lo>[^,]*).*?\</coordinates\>')
def geocode(address):
    """
    Resolve a street address to a (latitude, longitude) float tuple via
    the Google Maps geocoding service; returns (0.0, 0.0) on any failure
    (network error, unparsable response, ...).
    """
    try:
        a = urllib.quote(address)
        txt = fetch('http://maps.google.com/maps/geo?q=%s&output=xml'
                     % a)
        item = regex_geocode.search(txt)
        (la, lo) = (float(item.group('la')), float(item.group('lo')))
        return (la, lo)
    except Exception:
        # best effort: never raise (but no longer swallows SystemExit /
        # KeyboardInterrupt as the previous bare except did)
        return (0.0, 0.0)
def universal_caller(f, *a, **b):
    """
    Call f using the positional values in a and the named values in b,
    passing through only arguments that f actually declares.

    Raises HTTP(404) when the declared positional (default-less)
    arguments cannot all be satisfied from a and b.
    """
    # number of declared arguments and their names, in declaration order
    c = f.func_code.co_argcount
    n = f.func_code.co_varnames[:c]
    defaults = f.func_defaults or []
    # NOTE(review): when f has no defaults, n[0:-0] == () so pos_args is
    # empty and every declared name lands in named_args -- confirm this
    # is the intended behavior
    pos_args = n[0:-len(defaults)]
    named_args = n[-len(defaults):]
    arg_dict = {}
    # Fill the arg_dict with name and value for the submitted, positional values
    for pos_index, pos_val in enumerate(a[:c]):
        arg_dict[n[pos_index]] = pos_val # n[pos_index] is the name of the argument
    # There might be pos_args left, that are sent as named_values. Gather them as well.
    # If a argument already is populated with values we simply replaces them.
    for arg_name in pos_args[len(arg_dict):]:
        if b.has_key(arg_name):
            arg_dict[arg_name] = b[arg_name]
    if len(arg_dict) >= len(pos_args):
        # All the positional arguments is found. The function may now be called.
        # However, we need to update the arg_dict with the values from the named arguments as well.
        for arg_name in named_args:
            if b.has_key(arg_name):
                arg_dict[arg_name] = b[arg_name]
        return f(**arg_dict)
    # Raise an error, the function cannot be called.
    raise HTTP(404, "Object does not exist")
class Service(object):
def __init__(self, environment=None):
self.run_procedures = {}
self.csv_procedures = {}
self.xml_procedures = {}
self.rss_procedures = {}
self.json_procedures = {}
self.jsonrpc_procedures = {}
self.xmlrpc_procedures = {}
self.amfrpc_procedures = {}
self.amfrpc3_procedures = {}
self.soap_procedures = {}
def run(self, f):
"""
example:
service = Service()
@service.run
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with:
wget http://..../app/default/call/run/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def csv(self, f):
"""
example:
service = Service()
@service.csv
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with:
wget http://..../app/default/call/csv/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def xml(self, f):
"""
example:
service = Service()
@service.xml
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with:
wget http://..../app/default/call/xml/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def rss(self, f):
"""
example:
service = Service()
@service.rss
def myfunction():
return dict(title=..., link=..., description=...,
created_on=..., entries=[dict(title=..., link=...,
description=..., created_on=...])
def call():
return service()
Then call it with:
wget http://..../app/default/call/rss/myfunction
"""
self.rss_procedures[f.__name__] = f
return f
def json(self, f):
"""
example:
service = Service()
@service.json
def myfunction(a, b):
return [{a: b}]
def call():
return service()
Then call it with:
wget http://..../app/default/call/json/myfunction?a=hello&b=world
"""
self.json_procedures[f.__name__] = f
return f
def jsonrpc(self, f):
"""
example:
service = Service()
@service.jsonrpc
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with:
wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world
"""
self.jsonrpc_procedures[f.__name__] = f
return f
def xmlrpc(self, f):
"""
example:
service = Service()
@service.xmlrpc
def myfunction(a, b):
return a + b
def call():
return service()
The call it with:
wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world
"""
self.xmlrpc_procedures[f.__name__] = f
return f
def amfrpc(self, f):
"""
example:
service = Service()
@service.amfrpc
def myfunction(a, b):
return a + b
def call():
return service()
The call it with:
wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world
"""
self.amfrpc_procedures[f.__name__] = f
return f
def amfrpc3(self, domain='default'):
"""
example:
service = Service()
@service.amfrpc3('domain')
def myfunction(a, b):
return a + b
def call():
return service()
The call it with:
wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world
"""
if not isinstance(domain, str):
raise SyntaxError, "AMF3 requires a domain for function"
def _amfrpc3(f):
if domain:
self.amfrpc3_procedures[domain+'.'+f.__name__] = f
else:
self.amfrpc3_procedures[f.__name__] = f
return f
return _amfrpc3
def soap(self, name=None, returns=None, args=None,doc=None):
"""
example:
service = Service()
@service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,})
def myfunction(a, b):
return a + b
def call():
return service()
The call it with:
from gluon.contrib.pysimplesoap.client import SoapClient
client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL")
response = client.MyFunction(a=1,b=2)
return response['result']
Exposes online generated documentation and xml example messages at:
- http://..../app/default/call/soap
"""
def _soap(f):
self.soap_procedures[name or f.__name__] = f, returns, args, doc
return f
return _soap
def serve_run(self, args=None):
request = current.request
if not args:
args = request.args
if args and args[0] in self.run_procedures:
return str(universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars)))
self.error()
def serve_csv(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/x-csv'
if not args:
args = request.args
def none_exception(value):
if isinstance(value, unicode):
return value.encode('utf8')
if hasattr(value, 'isoformat'):
return value.isoformat()[:19].replace('T', ' ')
if value is None:
return '<NULL>'
return value
if args and args[0] in self.run_procedures:
r = universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars))
s = cStringIO.StringIO()
if hasattr(r, 'export_to_csv_file'):
r.export_to_csv_file(s)
elif r and isinstance(r[0], (dict, Storage)):
import csv
writer = csv.writer(s)
writer.writerow(r[0].keys())
for line in r:
writer.writerow([none_exception(v) \
for v in line.values()])
else:
import csv
writer = csv.writer(s)
for line in r:
writer.writerow(line)
return s.getvalue()
self.error()
def serve_xml(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/xml'
if not args:
args = request.args
if args and args[0] in self.run_procedures:
s = universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars))
if hasattr(s, 'as_list'):
s = s.as_list()
return serializers.xml(s,quote=False)
self.error()
def serve_rss(self, args=None):
request = current.request
response = current.response
if not args:
args = request.args
if args and args[0] in self.rss_procedures:
feed = universal_caller(self.rss_procedures[args[0]],
*args[1:], **dict(request.vars))
else:
self.error()
response.headers['Content-Type'] = 'application/rss+xml'
return serializers.rss(feed)
def serve_json(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
if not args:
args = request.args
d = dict(request.vars)
if args and args[0] in self.json_procedures:
s = universal_caller(self.json_procedures[args[0]],*args[1:],**d)
if hasattr(s, 'as_list'):
s = s.as_list()
return response.json(s)
self.error()
class JsonRpcException(Exception):
def __init__(self,code,info):
self.code,self.info = code,info
def serve_jsonrpc(self):
def return_response(id, result):
return serializers.json({'version': '1.1',
'id': id, 'result': result, 'error': None})
def return_error(id, code, message, data=None):
error = {'name': 'JSONRPCError',
'code': code, 'message': message}
if data is not None:
error['data'] = data
return serializers.json({'id': id,
'version': '1.1',
'error': error,
})
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
methods = self.jsonrpc_procedures
data = json_parser.loads(request.body.read())
id, method, params = data['id'], data['method'], data.get('params','')
if not method in methods:
return return_error(id, 100, 'method "%s" does not exist' % method)
try:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
return return_response(id, s)
except Service.JsonRpcException, e:
return return_error(id, e.code, e.info)
except BaseException:
etype, eval, etb = sys.exc_info()
code = 100
message = '%s: %s' % (etype.__name__, eval)
data = request.is_local and traceback.format_tb(etb)
return return_error(id, code, message, data)
except:
etype, eval, etb = sys.exc_info()
return return_error(id, 100, 'Exception %s: %s' % (etype, eval))
def serve_xmlrpc(self):
request = current.request
response = current.response
services = self.xmlrpc_procedures.values()
return response.xmlrpc(request, services)
def serve_amfrpc(self, version=0):
try:
import pyamf
import pyamf.remoting.gateway
except:
return "pyamf not installed or not in Python sys.path"
request = current.request
response = current.response
if version == 3:
services = self.amfrpc3_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
pyamf_request = pyamf.remoting.decode(request.body)
else:
services = self.amfrpc_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
context = pyamf.get_context(pyamf.AMF0)
pyamf_request = pyamf.remoting.decode(request.body, context)
pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion)
for name, message in pyamf_request:
pyamf_response[name] = base_gateway.getProcessor(message)(message)
response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE
if version==3:
return pyamf.remoting.encode(pyamf_response).getvalue()
else:
return pyamf.remoting.encode(pyamf_response, context).getvalue()
def serve_soap(self, version="1.1"):
try:
from contrib.pysimplesoap.server import SoapDispatcher
except:
return "pysimplesoap not installed in contrib"
request = current.request
response = current.response
procedures = self.soap_procedures
location = "%s://%s%s" % (
request.env.wsgi_url_scheme,
request.env.http_host,
URL(r=request,f="call/soap",vars={}))
namespace = 'namespace' in response and response.namespace or location
documentation = response.description or ''
dispatcher = SoapDispatcher(
name = response.title,
location = location,
action = location, # SOAPAction
namespace = namespace,
prefix='pys',
documentation = documentation,
ns = True)
for method, (function, returns, args, doc) in procedures.items():
dispatcher.register_function(method, function, returns, args, doc)
if request.env.request_method == 'POST':
# Process normal Soap Operation
response.headers['Content-Type'] = 'text/xml'
return dispatcher.dispatch(request.body.read())
elif 'WSDL' in request.vars:
# Return Web Service Description
response.headers['Content-Type'] = 'text/xml'
return dispatcher.wsdl()
elif 'op' in request.vars:
# Return method help webpage
response.headers['Content-Type'] = 'text/html'
method = request.vars['op']
sample_req_xml, sample_res_xml, doc = dispatcher.help(method)
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
A("See all webservice operations",
_href=URL(r=request,f="call/soap",vars={})),
H2(method),
P(doc),
UL(LI("Location: %s" % dispatcher.location),
LI("Namespace: %s" % dispatcher.namespace),
LI("SoapAction: %s" % dispatcher.action),
),
H3("Sample SOAP XML Request Message:"),
CODE(sample_req_xml,language="xml"),
H3("Sample SOAP XML Response Message:"),
CODE(sample_res_xml,language="xml"),
]
return {'body': body}
else:
# Return general help and method list webpage
response.headers['Content-Type'] = 'text/html'
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
P(response.description),
P("The following operations are available"),
A("See WSDL for webservice description",
_href=URL(r=request,f="call/soap",vars={"WSDL":None})),
UL([LI(A("%s: %s" % (method, doc or ''),
_href=URL(r=request,f="call/soap",vars={'op': method})))
for method, doc in dispatcher.list_methods()]),
]
return {'body': body}
def __call__(self):
"""
register services with:
service = Service()
@service.run
@service.rss
@service.json
@service.jsonrpc
@service.xmlrpc
@service.amfrpc
@service.amfrpc3('domain')
@service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,})
expose services with
def call(): return service()
call services with
http://..../app/default/call/run?[parameters]
http://..../app/default/call/rss?[parameters]
http://..../app/default/call/json?[parameters]
http://..../app/default/call/jsonrpc
http://..../app/default/call/xmlrpc
http://..../app/default/call/amfrpc
http://..../app/default/call/amfrpc3
http://..../app/default/call/soap
"""
request = current.request
if len(request.args) < 1:
raise HTTP(404, "Not Found")
arg0 = request.args(0)
if arg0 == 'run':
return self.serve_run(request.args[1:])
elif arg0 == 'rss':
return self.serve_rss(request.args[1:])
elif arg0 == 'csv':
return self.serve_csv(request.args[1:])
elif arg0 == 'xml':
return self.serve_xml(request.args[1:])
elif arg0 == 'json':
return self.serve_json(request.args[1:])
elif arg0 == 'jsonrpc':
return self.serve_jsonrpc()
elif arg0 == 'xmlrpc':
return self.serve_xmlrpc()
elif arg0 == 'amfrpc':
return self.serve_amfrpc()
elif arg0 == 'amfrpc3':
return self.serve_amfrpc(3)
elif arg0 == 'soap':
return self.serve_soap()
else:
self.error()
    def error(self):
        # Uniform failure response used by __call__ when the first URL
        # argument does not name a known service protocol.
        raise HTTP(404, "Object does not exist")
def completion(callback):
    """
    Decorator factory: execute `callback` in a new thread once the
    decorated action has finished. For example:

        from gluon.tools import completion
        @completion(lambda d: logging.info(repr(d)))
        def index():
            return dict(message='hello')

    `callback` receives the decorated function's return value (None if the
    function raised) and runs in a separate thread on every invocation.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            result = None
            try:
                result = func(*args, **kwargs)
                return result
            finally:
                # fire-and-forget: runs even when func raises, in which
                # case the callback sees None
                thread.start_new_thread(callback, (result,))
        return wrapper
    return decorator
def prettydate(d, T=lambda x: x):
    """
    Return a human-readable, translatable "how long ago" phrase for the
    datetime `d`, e.g. '3 hours ago', '1 week ago', 'now'.

    :param d: a (naive) datetime.datetime in the past
    :param T: optional translator callable (e.g. gluon's T); defaults to
        the identity function
    :returns: the translated phrase, or '' when `d` cannot be subtracted
        from datetime.datetime.now()
    """
    try:
        dt = datetime.datetime.now() - d
    except TypeError:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only a bad `d` (wrong type or tz-aware
        # datetime) raises here
        return ''
    # NOTE(review): a future `d` yields a negative timedelta and falls
    # through to the seconds branches -- behavior preserved as-is
    if dt.days >= 2 * 365:
        return T('%d years ago') % int(dt.days / 365)
    elif dt.days >= 365:
        return T('1 year ago')
    elif dt.days >= 60:
        return T('%d months ago') % int(dt.days / 30)
    elif dt.days > 21:
        return T('1 month ago')
    elif dt.days >= 14:
        return T('%d weeks ago') % int(dt.days / 7)
    elif dt.days >= 7:
        return T('1 week ago')
    elif dt.days > 1:
        return T('%d days ago') % dt.days
    elif dt.days == 1:
        return T('1 day ago')
    elif dt.seconds >= 2 * 60 * 60:
        return T('%d hours ago') % int(dt.seconds / 3600)
    elif dt.seconds >= 60 * 60:
        return T('1 hour ago')
    elif dt.seconds >= 2 * 60:
        return T('%d minutes ago') % int(dt.seconds / 60)
    elif dt.seconds >= 60:
        return T('1 minute ago')
    elif dt.seconds > 1:
        return T('%d seconds ago') % dt.seconds
    elif dt.seconds == 1:
        return T('1 second ago')
    else:
        return T('now')
def test_thread_separation():
    # Doctest helper for PluginManager: demonstrates that PluginManager
    # state is per-thread. The main thread sets a.x = 5 while a second
    # thread sets x = 7 on its own PluginManager; the main thread must
    # still observe 5.
    def f():
        c=PluginManager()
        # wait until the main thread has set a.x and released lock1
        lock1.acquire()
        lock2.acquire()
        c.x=7
        lock1.release()
        lock2.release()
    lock1=thread.allocate_lock()
    lock2=thread.allocate_lock()
    lock1.acquire()
    thread.start_new_thread(f,())
    a=PluginManager()
    a.x=5
    lock1.release()
    # NOTE(review): lock2 starts unlocked, so the main thread may acquire
    # it before f() does; the return value is still 5 either way, but f()
    # can then block forever on lock2 -- verify this is acceptable for a
    # doctest helper.
    lock2.acquire()
    return a.x
class PluginManager(object):
    """
    Plugin Manager is similar to a storage object but it is a single level singleton
    this means that multiple instances within the same thread share the same attributes
    Its constructor is also special. The first argument is the name of the plugin you are defining.
    The named arguments are parameters needed by the plugin with default values.
    If the parameters were previous defined, the old values are used.
    For example:
    ### in some general configuration file:
    >>> plugins = PluginManager()
    >>> plugins.me.param1=3
    ### within the plugin model
    >>> _ = PluginManager('me',param1=5,param2=6,param3=7)
    ### where the plugin is used
    >>> print plugins.me.param1
    3
    >>> print plugins.me.param2
    6
    >>> plugins.me.param3 = 8
    >>> print plugins.me.param3
    8
    Here are some tests:
    >>> a=PluginManager()
    >>> a.x=6
    >>> b=PluginManager('check')
    >>> print b.x
    6
    >>> b=PluginManager() # reset settings
    >>> print b.x
    <Storage {}>
    >>> b.x=7
    >>> print a.x
    7
    >>> a.y.z=8
    >>> print b.y.z
    8
    >>> test_thread_separation()
    5
    >>> plugins=PluginManager('me',db='mydb')
    >>> print plugins.me.db
    mydb
    >>> print 'me' in plugins
    True
    >>> print plugins.me.installed
    True
    """
    # one singleton instance per thread, keyed by thread id
    instances = {}
    # one lock shared by all calls; the previous code allocated a *new*
    # lock on every __new__ call, which provided no mutual exclusion at all
    _lock = thread.allocate_lock()
    def __new__(cls, *a, **b):
        id = thread.get_ident()
        cls._lock.acquire()
        try:
            try:
                return cls.instances[id]
            except KeyError:
                # object.__new__ must not receive the extra arguments
                # (a TypeError in modern Python); they are consumed by
                # __init__ instead
                instance = object.__new__(cls)
                cls.instances[id] = instance
                return instance
        finally:
            cls._lock.release()
    def __init__(self, plugin=None, **defaults):
        if not plugin:
            # PluginManager() with no name resets all per-thread settings
            self.__dict__.clear()
        settings = self.__getattr__(plugin)
        settings.installed = True
        # defaults only fill keys the user has not already set
        for key, value in defaults.items():
            if key not in settings:
                settings.update({key: value})
    def __getattr__(self, key):
        # auto-vivify a Storage for any plugin name on first access
        if key not in self.__dict__:
            self.__dict__[key] = Storage()
        return self.__dict__[key]
    def keys(self):
        return self.__dict__.keys()
    def __contains__(self, key):
        return key in self.__dict__
class Expose(object):
    """
    Expose a filesystem folder (default: the application's static/ folder)
    through the current request.

    Instantiation either serves a file directly (raising HTTP 200 with the
    proper Content-Type) when the request path names a file, or collects
    folder/file listings when it names a directory. Raises HTTP 401 when
    the requested path would escape `base`.
    """
    def __init__(self, base=None, basename='base'):
        current.session.forget()
        base = os.path.normpath(
            base or os.path.join(current.request.folder, 'static'))
        self.basename = basename
        args = self.args = current.request.raw_args and \
            current.request.raw_args.split('/') or []
        filename = os.path.join(base, *args)
        # Security: the old check `normpath(filename).startswith(base)` also
        # accepted *sibling* directories sharing the prefix (e.g. with
        # base='/x/static', '/x/static-private/f' passed). Require an exact
        # match or a path-component boundary instead.
        normalized = os.path.normpath(filename)
        if normalized != base and not normalized.startswith(base + os.sep):
            raise HTTP(401, "NOT AUTHORIZED")
        if not os.path.isdir(filename):
            # serve the file directly with its detected content type
            current.response.headers['Content-Type'] = contenttype(filename)
            raise HTTP(200, open(filename, 'rb'), **current.response.headers)
        self.path = path = os.path.join(filename, '*')
        # split the directory entries into visible folders and files;
        # f[len(path)-1:] strips everything up to (and including) the '*'
        self.folders = [f[len(path) - 1:] for f in sorted(glob.glob(path))
                        if os.path.isdir(f) and not self.isprivate(f)]
        self.filenames = [f[len(path) - 1:] for f in sorted(glob.glob(path))
                          if not os.path.isdir(f) and not self.isprivate(f)]
    def breadcrumbs(self, basename):
        """Return a SPAN of links from `basename` down to the current folder."""
        path = []
        span = SPAN()
        span.append(A(basename, _href=URL()))
        span.append('/')
        args = current.request.raw_args and \
            current.request.raw_args.split('/') or []
        for arg in args:
            path.append(arg)
            span.append(A(arg, _href=URL(args='/'.join(path))))
            span.append('/')
        return span
    def table_folders(self):
        """Return a TABLE with one link row per sub-folder."""
        return TABLE(*[TR(TD(A(folder, _href=URL(args=self.args + [folder]))))
                       for folder in self.folders])
    @staticmethod
    def isprivate(f):
        # hide anything under a 'private' path, dotfiles, and editor backups
        return 'private' in f or f.startswith('.') or f.endswith('~')
    @staticmethod
    def isimage(f):
        # extension sniffing, only used to decide whether to inline a preview
        return f.rsplit('.')[-1].lower() in ('png', 'jpg', 'jpeg', 'gif', 'tiff')
    def table_files(self, width=160):
        """Return a TABLE of file links, with inline previews for images."""
        return TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))),
                          TD(IMG(_src=URL(args=self.args + [f]),
                                 _style='max-width:%spx' % width)
                             if width and self.isimage(f) else ''))
                       for f in self.filenames])
    def xml(self):
        """Render the full listing page (breadcrumbs, folders, files)."""
        return DIV(
            H2(self.breadcrumbs(self.basename)),
            H3('Folders'),
            self.table_folders(),
            H3('Files'),
            self.table_files()).xml()
# When executed directly, run the doctests embedded in this module
# (e.g. PluginManager's docstring).
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Functions required to execute app components
============================================
FOR INTERNAL USE ONLY
"""
import os
import stat
import thread
from fileutils import read_file
cfs = {}  # cache: key -> (mtime, filtered data); for speed-up
cfs_lock = thread.allocate_lock()  # guards cfs across threads; and thread safety
def getcfs(key, filename, filter=None):
    """
    Caches the *filtered* file `filename` with `key` until the file is
    modified.

    :param key: the cache key
    :param filename: the file to cache
    :param filter: is the function used for filtering. Normally `filename` is a
        .py file and `filter` is a function that bytecode compiles the file.
        In this way the bytecode compiled file is cached. (Default = None)
        This is used on Google App Engine since pyc files cannot be saved.
    """
    t = os.stat(filename)[stat.ST_MTIME]
    # `with` (lock objects support the with statement) guarantees release
    # even if an exception interrupts the critical section; the old
    # acquire/release pair could leave the lock held forever
    with cfs_lock:
        item = cfs.get(key, None)
    # serve the cached copy while the file is unmodified
    if item and item[0] == t:
        return item[1]
    # (re)build outside the lock: filter()/read_file may be slow
    if filter:
        data = filter()
    else:
        data = read_file(filename)
    with cfs_lock:
        cfs[key] = (t, data)
    return data
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
CONTENT_TYPE dictionary created against freedesktop.org's shared mime info
database version 0.70.
"""
# public interface of this module: only the contenttype() helper
__all__ = ['contenttype']
CONTENT_TYPE = {
'.load': 'text/html',
'.123': 'application/vnd.lotus-1-2-3',
'.3ds': 'image/x-3ds',
'.3g2': 'video/3gpp',
'.3ga': 'video/3gpp',
'.3gp': 'video/3gpp',
'.3gpp': 'video/3gpp',
'.602': 'application/x-t602',
'.669': 'audio/x-mod',
'.7z': 'application/x-7z-compressed',
'.a': 'application/x-archive',
'.aac': 'audio/mp4',
'.abw': 'application/x-abiword',
'.abw.crashed': 'application/x-abiword',
'.abw.gz': 'application/x-abiword',
'.ac3': 'audio/ac3',
'.ace': 'application/x-ace',
'.adb': 'text/x-adasrc',
'.ads': 'text/x-adasrc',
'.afm': 'application/x-font-afm',
'.ag': 'image/x-applix-graphics',
'.ai': 'application/illustrator',
'.aif': 'audio/x-aiff',
'.aifc': 'audio/x-aiff',
'.aiff': 'audio/x-aiff',
'.al': 'application/x-perl',
'.alz': 'application/x-alz',
'.amr': 'audio/amr',
'.ani': 'application/x-navi-animation',
'.anim[1-9j]': 'video/x-anim',
'.anx': 'application/annodex',
'.ape': 'audio/x-ape',
'.arj': 'application/x-arj',
'.arw': 'image/x-sony-arw',
'.as': 'application/x-applix-spreadsheet',
'.asc': 'text/plain',
'.asf': 'video/x-ms-asf',
'.asp': 'application/x-asp',
'.ass': 'text/x-ssa',
'.asx': 'audio/x-ms-asx',
'.atom': 'application/atom+xml',
'.au': 'audio/basic',
'.avi': 'video/x-msvideo',
'.aw': 'application/x-applix-word',
'.awb': 'audio/amr-wb',
'.awk': 'application/x-awk',
'.axa': 'audio/annodex',
'.axv': 'video/annodex',
'.bak': 'application/x-trash',
'.bcpio': 'application/x-bcpio',
'.bdf': 'application/x-font-bdf',
'.bib': 'text/x-bibtex',
'.bin': 'application/octet-stream',
'.blend': 'application/x-blender',
'.blender': 'application/x-blender',
'.bmp': 'image/bmp',
'.bz': 'application/x-bzip',
'.bz2': 'application/x-bzip',
'.c': 'text/x-csrc',
'.c++': 'text/x-c++src',
'.cab': 'application/vnd.ms-cab-compressed',
'.cb7': 'application/x-cb7',
'.cbr': 'application/x-cbr',
'.cbt': 'application/x-cbt',
'.cbz': 'application/x-cbz',
'.cc': 'text/x-c++src',
'.cdf': 'application/x-netcdf',
'.cdr': 'application/vnd.corel-draw',
'.cer': 'application/x-x509-ca-cert',
'.cert': 'application/x-x509-ca-cert',
'.cgm': 'image/cgm',
'.chm': 'application/x-chm',
'.chrt': 'application/x-kchart',
'.class': 'application/x-java',
'.cls': 'text/x-tex',
'.cmake': 'text/x-cmake',
'.cpio': 'application/x-cpio',
'.cpio.gz': 'application/x-cpio-compressed',
'.cpp': 'text/x-c++src',
'.cr2': 'image/x-canon-cr2',
'.crt': 'application/x-x509-ca-cert',
'.crw': 'image/x-canon-crw',
'.cs': 'text/x-csharp',
'.csh': 'application/x-csh',
'.css': 'text/css',
'.cssl': 'text/css',
'.csv': 'text/csv',
'.cue': 'application/x-cue',
'.cur': 'image/x-win-bitmap',
'.cxx': 'text/x-c++src',
'.d': 'text/x-dsrc',
'.dar': 'application/x-dar',
'.dbf': 'application/x-dbf',
'.dc': 'application/x-dc-rom',
'.dcl': 'text/x-dcl',
'.dcm': 'application/dicom',
'.dcr': 'image/x-kodak-dcr',
'.dds': 'image/x-dds',
'.deb': 'application/x-deb',
'.der': 'application/x-x509-ca-cert',
'.desktop': 'application/x-desktop',
'.dia': 'application/x-dia-diagram',
'.diff': 'text/x-patch',
'.divx': 'video/x-msvideo',
'.djv': 'image/vnd.djvu',
'.djvu': 'image/vnd.djvu',
'.dng': 'image/x-adobe-dng',
'.doc': 'application/msword',
'.docbook': 'application/docbook+xml',
'.docm': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'.dot': 'text/vnd.graphviz',
'.dsl': 'text/x-dsl',
'.dtd': 'application/xml-dtd',
'.dtx': 'text/x-tex',
'.dv': 'video/dv',
'.dvi': 'application/x-dvi',
'.dvi.bz2': 'application/x-bzdvi',
'.dvi.gz': 'application/x-gzdvi',
'.dwg': 'image/vnd.dwg',
'.dxf': 'image/vnd.dxf',
'.e': 'text/x-eiffel',
'.egon': 'application/x-egon',
'.eif': 'text/x-eiffel',
'.el': 'text/x-emacs-lisp',
'.emf': 'image/x-emf',
'.emp': 'application/vnd.emusic-emusic_package',
'.ent': 'application/xml-external-parsed-entity',
'.eps': 'image/x-eps',
'.eps.bz2': 'image/x-bzeps',
'.eps.gz': 'image/x-gzeps',
'.epsf': 'image/x-eps',
'.epsf.bz2': 'image/x-bzeps',
'.epsf.gz': 'image/x-gzeps',
'.epsi': 'image/x-eps',
'.epsi.bz2': 'image/x-bzeps',
'.epsi.gz': 'image/x-gzeps',
'.epub': 'application/epub+zip',
'.erl': 'text/x-erlang',
'.es': 'application/ecmascript',
'.etheme': 'application/x-e-theme',
'.etx': 'text/x-setext',
'.exe': 'application/x-ms-dos-executable',
'.exr': 'image/x-exr',
'.ez': 'application/andrew-inset',
'.f': 'text/x-fortran',
'.f90': 'text/x-fortran',
'.f95': 'text/x-fortran',
'.fb2': 'application/x-fictionbook+xml',
'.fig': 'image/x-xfig',
'.fits': 'image/fits',
'.fl': 'application/x-fluid',
'.flac': 'audio/x-flac',
'.flc': 'video/x-flic',
'.fli': 'video/x-flic',
'.flv': 'video/x-flv',
'.flw': 'application/x-kivio',
'.fo': 'text/x-xslfo',
'.for': 'text/x-fortran',
'.g3': 'image/fax-g3',
'.gb': 'application/x-gameboy-rom',
'.gba': 'application/x-gba-rom',
'.gcrd': 'text/directory',
'.ged': 'application/x-gedcom',
'.gedcom': 'application/x-gedcom',
'.gen': 'application/x-genesis-rom',
'.gf': 'application/x-tex-gf',
'.gg': 'application/x-sms-rom',
'.gif': 'image/gif',
'.glade': 'application/x-glade',
'.gmo': 'application/x-gettext-translation',
'.gnc': 'application/x-gnucash',
'.gnd': 'application/gnunet-directory',
'.gnucash': 'application/x-gnucash',
'.gnumeric': 'application/x-gnumeric',
'.gnuplot': 'application/x-gnuplot',
'.gp': 'application/x-gnuplot',
'.gpg': 'application/pgp-encrypted',
'.gplt': 'application/x-gnuplot',
'.gra': 'application/x-graphite',
'.gsf': 'application/x-font-type1',
'.gsm': 'audio/x-gsm',
'.gtar': 'application/x-tar',
'.gv': 'text/vnd.graphviz',
'.gvp': 'text/x-google-video-pointer',
'.gz': 'application/x-gzip',
'.h': 'text/x-chdr',
'.h++': 'text/x-c++hdr',
'.hdf': 'application/x-hdf',
'.hh': 'text/x-c++hdr',
'.hp': 'text/x-c++hdr',
'.hpgl': 'application/vnd.hp-hpgl',
'.hpp': 'text/x-c++hdr',
'.hs': 'text/x-haskell',
'.htm': 'text/html',
'.html': 'text/html',
'.hwp': 'application/x-hwp',
'.hwt': 'application/x-hwt',
'.hxx': 'text/x-c++hdr',
'.ica': 'application/x-ica',
'.icb': 'image/x-tga',
'.icns': 'image/x-icns',
'.ico': 'image/vnd.microsoft.icon',
'.ics': 'text/calendar',
'.idl': 'text/x-idl',
'.ief': 'image/ief',
'.iff': 'image/x-iff',
'.ilbm': 'image/x-ilbm',
'.ime': 'text/x-imelody',
'.imy': 'text/x-imelody',
'.ins': 'text/x-tex',
'.iptables': 'text/x-iptables',
'.iso': 'application/x-cd-image',
'.iso9660': 'application/x-cd-image',
'.it': 'audio/x-it',
'.j2k': 'image/jp2',
'.jad': 'text/vnd.sun.j2me.app-descriptor',
'.jar': 'application/x-java-archive',
'.java': 'text/x-java',
'.jng': 'image/x-jng',
'.jnlp': 'application/x-java-jnlp-file',
'.jp2': 'image/jp2',
'.jpc': 'image/jp2',
'.jpe': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.jpf': 'image/jp2',
'.jpg': 'image/jpeg',
'.jpr': 'application/x-jbuilder-project',
'.jpx': 'image/jp2',
'.js': 'application/javascript',
'.json': 'application/json',
'.jsonp': 'application/jsonp',
'.k25': 'image/x-kodak-k25',
'.kar': 'audio/midi',
'.karbon': 'application/x-karbon',
'.kdc': 'image/x-kodak-kdc',
'.kdelnk': 'application/x-desktop',
'.kexi': 'application/x-kexiproject-sqlite3',
'.kexic': 'application/x-kexi-connectiondata',
'.kexis': 'application/x-kexiproject-shortcut',
'.kfo': 'application/x-kformula',
'.kil': 'application/x-killustrator',
'.kino': 'application/smil',
'.kml': 'application/vnd.google-earth.kml+xml',
'.kmz': 'application/vnd.google-earth.kmz',
'.kon': 'application/x-kontour',
'.kpm': 'application/x-kpovmodeler',
'.kpr': 'application/x-kpresenter',
'.kpt': 'application/x-kpresenter',
'.kra': 'application/x-krita',
'.ksp': 'application/x-kspread',
'.kud': 'application/x-kugar',
'.kwd': 'application/x-kword',
'.kwt': 'application/x-kword',
'.la': 'application/x-shared-library-la',
'.latex': 'text/x-tex',
'.ldif': 'text/x-ldif',
'.lha': 'application/x-lha',
'.lhs': 'text/x-literate-haskell',
'.lhz': 'application/x-lhz',
'.log': 'text/x-log',
'.ltx': 'text/x-tex',
'.lua': 'text/x-lua',
'.lwo': 'image/x-lwo',
'.lwob': 'image/x-lwo',
'.lws': 'image/x-lws',
'.ly': 'text/x-lilypond',
'.lyx': 'application/x-lyx',
'.lz': 'application/x-lzip',
'.lzh': 'application/x-lha',
'.lzma': 'application/x-lzma',
'.lzo': 'application/x-lzop',
'.m': 'text/x-matlab',
'.m15': 'audio/x-mod',
'.m2t': 'video/mpeg',
'.m3u': 'audio/x-mpegurl',
'.m3u8': 'audio/x-mpegurl',
'.m4': 'application/x-m4',
'.m4a': 'audio/mp4',
'.m4b': 'audio/x-m4b',
'.m4v': 'video/mp4',
'.mab': 'application/x-markaby',
'.man': 'application/x-troff-man',
'.mbox': 'application/mbox',
'.md': 'application/x-genesis-rom',
'.mdb': 'application/vnd.ms-access',
'.mdi': 'image/vnd.ms-modi',
'.me': 'text/x-troff-me',
'.med': 'audio/x-mod',
'.metalink': 'application/metalink+xml',
'.mgp': 'application/x-magicpoint',
'.mid': 'audio/midi',
'.midi': 'audio/midi',
'.mif': 'application/x-mif',
'.minipsf': 'audio/x-minipsf',
'.mka': 'audio/x-matroska',
'.mkv': 'video/x-matroska',
'.ml': 'text/x-ocaml',
'.mli': 'text/x-ocaml',
'.mm': 'text/x-troff-mm',
'.mmf': 'application/x-smaf',
'.mml': 'text/mathml',
'.mng': 'video/x-mng',
'.mo': 'application/x-gettext-translation',
'.mo3': 'audio/x-mo3',
'.moc': 'text/x-moc',
'.mod': 'audio/x-mod',
'.mof': 'text/x-mof',
'.moov': 'video/quicktime',
'.mov': 'video/quicktime',
'.movie': 'video/x-sgi-movie',
'.mp+': 'audio/x-musepack',
'.mp2': 'video/mpeg',
'.mp3': 'audio/mpeg',
'.mp4': 'video/mp4',
'.mpc': 'audio/x-musepack',
'.mpe': 'video/mpeg',
'.mpeg': 'video/mpeg',
'.mpg': 'video/mpeg',
'.mpga': 'audio/mpeg',
'.mpp': 'audio/x-musepack',
'.mrl': 'text/x-mrml',
'.mrml': 'text/x-mrml',
'.mrw': 'image/x-minolta-mrw',
'.ms': 'text/x-troff-ms',
'.msi': 'application/x-msi',
'.msod': 'image/x-msod',
'.msx': 'application/x-msx-rom',
'.mtm': 'audio/x-mod',
'.mup': 'text/x-mup',
'.mxf': 'application/mxf',
'.n64': 'application/x-n64-rom',
'.nb': 'application/mathematica',
'.nc': 'application/x-netcdf',
'.nds': 'application/x-nintendo-ds-rom',
'.nef': 'image/x-nikon-nef',
'.nes': 'application/x-nes-rom',
'.nfo': 'text/x-nfo',
'.not': 'text/x-mup',
'.nsc': 'application/x-netshow-channel',
'.nsv': 'video/x-nsv',
'.o': 'application/x-object',
'.obj': 'application/x-tgif',
'.ocl': 'text/x-ocl',
'.oda': 'application/oda',
'.odb': 'application/vnd.oasis.opendocument.database',
'.odc': 'application/vnd.oasis.opendocument.chart',
'.odf': 'application/vnd.oasis.opendocument.formula',
'.odg': 'application/vnd.oasis.opendocument.graphics',
'.odi': 'application/vnd.oasis.opendocument.image',
'.odm': 'application/vnd.oasis.opendocument.text-master',
'.odp': 'application/vnd.oasis.opendocument.presentation',
'.ods': 'application/vnd.oasis.opendocument.spreadsheet',
'.odt': 'application/vnd.oasis.opendocument.text',
'.oga': 'audio/ogg',
'.ogg': 'video/x-theora+ogg',
'.ogm': 'video/x-ogm+ogg',
'.ogv': 'video/ogg',
'.ogx': 'application/ogg',
'.old': 'application/x-trash',
'.oleo': 'application/x-oleo',
'.opml': 'text/x-opml+xml',
'.ora': 'image/openraster',
'.orf': 'image/x-olympus-orf',
'.otc': 'application/vnd.oasis.opendocument.chart-template',
'.otf': 'application/x-font-otf',
'.otg': 'application/vnd.oasis.opendocument.graphics-template',
'.oth': 'application/vnd.oasis.opendocument.text-web',
'.otp': 'application/vnd.oasis.opendocument.presentation-template',
'.ots': 'application/vnd.oasis.opendocument.spreadsheet-template',
'.ott': 'application/vnd.oasis.opendocument.text-template',
'.owl': 'application/rdf+xml',
'.oxt': 'application/vnd.openofficeorg.extension',
'.p': 'text/x-pascal',
'.p10': 'application/pkcs10',
'.p12': 'application/x-pkcs12',
'.p7b': 'application/x-pkcs7-certificates',
'.p7s': 'application/pkcs7-signature',
'.pack': 'application/x-java-pack200',
'.pak': 'application/x-pak',
'.par2': 'application/x-par2',
'.pas': 'text/x-pascal',
'.patch': 'text/x-patch',
'.pbm': 'image/x-portable-bitmap',
'.pcd': 'image/x-photo-cd',
'.pcf': 'application/x-cisco-vpn-settings',
'.pcf.gz': 'application/x-font-pcf',
'.pcf.z': 'application/x-font-pcf',
'.pcl': 'application/vnd.hp-pcl',
'.pcx': 'image/x-pcx',
'.pdb': 'chemical/x-pdb',
'.pdc': 'application/x-aportisdoc',
'.pdf': 'application/pdf',
'.pdf.bz2': 'application/x-bzpdf',
'.pdf.gz': 'application/x-gzpdf',
'.pef': 'image/x-pentax-pef',
'.pem': 'application/x-x509-ca-cert',
'.perl': 'application/x-perl',
'.pfa': 'application/x-font-type1',
'.pfb': 'application/x-font-type1',
'.pfx': 'application/x-pkcs12',
'.pgm': 'image/x-portable-graymap',
'.pgn': 'application/x-chess-pgn',
'.pgp': 'application/pgp-encrypted',
'.php': 'application/x-php',
'.php3': 'application/x-php',
'.php4': 'application/x-php',
'.pict': 'image/x-pict',
'.pict1': 'image/x-pict',
'.pict2': 'image/x-pict',
'.pickle': 'application/python-pickle',
'.pk': 'application/x-tex-pk',
'.pkipath': 'application/pkix-pkipath',
'.pkr': 'application/pgp-keys',
'.pl': 'application/x-perl',
'.pla': 'audio/x-iriver-pla',
'.pln': 'application/x-planperfect',
'.pls': 'audio/x-scpls',
'.pm': 'application/x-perl',
'.png': 'image/png',
'.pnm': 'image/x-portable-anymap',
'.pntg': 'image/x-macpaint',
'.po': 'text/x-gettext-translation',
'.por': 'application/x-spss-por',
'.pot': 'text/x-gettext-translation-template',
'.ppm': 'image/x-portable-pixmap',
'.pps': 'application/vnd.ms-powerpoint',
'.ppt': 'application/vnd.ms-powerpoint',
'.pptm': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'.ppz': 'application/vnd.ms-powerpoint',
'.prc': 'application/x-palm-database',
'.ps': 'application/postscript',
'.ps.bz2': 'application/x-bzpostscript',
'.ps.gz': 'application/x-gzpostscript',
'.psd': 'image/vnd.adobe.photoshop',
'.psf': 'audio/x-psf',
'.psf.gz': 'application/x-gz-font-linux-psf',
'.psflib': 'audio/x-psflib',
'.psid': 'audio/prs.sid',
'.psw': 'application/x-pocket-word',
'.pw': 'application/x-pw',
'.py': 'text/x-python',
'.pyc': 'application/x-python-bytecode',
'.pyo': 'application/x-python-bytecode',
'.qif': 'image/x-quicktime',
'.qt': 'video/quicktime',
'.qtif': 'image/x-quicktime',
'.qtl': 'application/x-quicktime-media-link',
'.qtvr': 'video/quicktime',
'.ra': 'audio/vnd.rn-realaudio',
'.raf': 'image/x-fuji-raf',
'.ram': 'application/ram',
'.rar': 'application/x-rar',
'.ras': 'image/x-cmu-raster',
'.raw': 'image/x-panasonic-raw',
'.rax': 'audio/vnd.rn-realaudio',
'.rb': 'application/x-ruby',
'.rdf': 'application/rdf+xml',
'.rdfs': 'application/rdf+xml',
'.reg': 'text/x-ms-regedit',
'.rej': 'application/x-reject',
'.rgb': 'image/x-rgb',
'.rle': 'image/rle',
'.rm': 'application/vnd.rn-realmedia',
'.rmj': 'application/vnd.rn-realmedia',
'.rmm': 'application/vnd.rn-realmedia',
'.rms': 'application/vnd.rn-realmedia',
'.rmvb': 'application/vnd.rn-realmedia',
'.rmx': 'application/vnd.rn-realmedia',
'.roff': 'text/troff',
'.rp': 'image/vnd.rn-realpix',
'.rpm': 'application/x-rpm',
'.rss': 'application/rss+xml',
'.rt': 'text/vnd.rn-realtext',
'.rtf': 'application/rtf',
'.rtx': 'text/richtext',
'.rv': 'video/vnd.rn-realvideo',
'.rvx': 'video/vnd.rn-realvideo',
'.s3m': 'audio/x-s3m',
'.sam': 'application/x-amipro',
'.sami': 'application/x-sami',
'.sav': 'application/x-spss-sav',
'.scm': 'text/x-scheme',
'.sda': 'application/vnd.stardivision.draw',
'.sdc': 'application/vnd.stardivision.calc',
'.sdd': 'application/vnd.stardivision.impress',
'.sdp': 'application/sdp',
'.sds': 'application/vnd.stardivision.chart',
'.sdw': 'application/vnd.stardivision.writer',
'.sgf': 'application/x-go-sgf',
'.sgi': 'image/x-sgi',
'.sgl': 'application/vnd.stardivision.writer',
'.sgm': 'text/sgml',
'.sgml': 'text/sgml',
'.sh': 'application/x-shellscript',
'.shar': 'application/x-shar',
'.shn': 'application/x-shorten',
'.siag': 'application/x-siag',
'.sid': 'audio/prs.sid',
'.sik': 'application/x-trash',
'.sis': 'application/vnd.symbian.install',
'.sisx': 'x-epoc/x-sisx-app',
'.sit': 'application/x-stuffit',
'.siv': 'application/sieve',
'.sk': 'image/x-skencil',
'.sk1': 'image/x-skencil',
'.skr': 'application/pgp-keys',
'.slk': 'text/spreadsheet',
'.smaf': 'application/x-smaf',
'.smc': 'application/x-snes-rom',
'.smd': 'application/vnd.stardivision.mail',
'.smf': 'application/vnd.stardivision.math',
'.smi': 'application/x-sami',
'.smil': 'application/smil',
'.sml': 'application/smil',
'.sms': 'application/x-sms-rom',
'.snd': 'audio/basic',
'.so': 'application/x-sharedlib',
'.spc': 'application/x-pkcs7-certificates',
'.spd': 'application/x-font-speedo',
'.spec': 'text/x-rpm-spec',
'.spl': 'application/x-shockwave-flash',
'.spx': 'audio/x-speex',
'.sql': 'text/x-sql',
'.sr2': 'image/x-sony-sr2',
'.src': 'application/x-wais-source',
'.srf': 'image/x-sony-srf',
'.srt': 'application/x-subrip',
'.ssa': 'text/x-ssa',
'.stc': 'application/vnd.sun.xml.calc.template',
'.std': 'application/vnd.sun.xml.draw.template',
'.sti': 'application/vnd.sun.xml.impress.template',
'.stm': 'audio/x-stm',
'.stw': 'application/vnd.sun.xml.writer.template',
'.sty': 'text/x-tex',
'.sub': 'text/x-subviewer',
'.sun': 'image/x-sun-raster',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc': 'application/x-sv4crc',
'.svg': 'image/svg+xml',
'.svgz': 'image/svg+xml-compressed',
'.swf': 'application/x-shockwave-flash',
'.sxc': 'application/vnd.sun.xml.calc',
'.sxd': 'application/vnd.sun.xml.draw',
'.sxg': 'application/vnd.sun.xml.writer.global',
'.sxi': 'application/vnd.sun.xml.impress',
'.sxm': 'application/vnd.sun.xml.math',
'.sxw': 'application/vnd.sun.xml.writer',
'.sylk': 'text/spreadsheet',
'.t': 'text/troff',
'.t2t': 'text/x-txt2tags',
'.tar': 'application/x-tar',
'.tar.bz': 'application/x-bzip-compressed-tar',
'.tar.bz2': 'application/x-bzip-compressed-tar',
'.tar.gz': 'application/x-compressed-tar',
'.tar.lzma': 'application/x-lzma-compressed-tar',
'.tar.lzo': 'application/x-tzo',
'.tar.xz': 'application/x-xz-compressed-tar',
'.tar.z': 'application/x-tarz',
'.tbz': 'application/x-bzip-compressed-tar',
'.tbz2': 'application/x-bzip-compressed-tar',
'.tcl': 'text/x-tcl',
'.tex': 'text/x-tex',
'.texi': 'text/x-texinfo',
'.texinfo': 'text/x-texinfo',
'.tga': 'image/x-tga',
'.tgz': 'application/x-compressed-tar',
'.theme': 'application/x-theme',
'.themepack': 'application/x-windows-themepack',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.tk': 'text/x-tcl',
'.tlz': 'application/x-lzma-compressed-tar',
'.tnef': 'application/vnd.ms-tnef',
'.tnf': 'application/vnd.ms-tnef',
'.toc': 'application/x-cdrdao-toc',
'.torrent': 'application/x-bittorrent',
'.tpic': 'image/x-tga',
'.tr': 'text/troff',
'.ts': 'application/x-linguist',
'.tsv': 'text/tab-separated-values',
'.tta': 'audio/x-tta',
'.ttc': 'application/x-font-ttf',
'.ttf': 'application/x-font-ttf',
'.ttx': 'application/x-font-ttx',
'.txt': 'text/plain',
'.txz': 'application/x-xz-compressed-tar',
'.tzo': 'application/x-tzo',
'.ufraw': 'application/x-ufraw',
'.ui': 'application/x-designer',
'.uil': 'text/x-uil',
'.ult': 'audio/x-mod',
'.uni': 'audio/x-mod',
'.uri': 'text/x-uri',
'.url': 'text/x-uri',
'.ustar': 'application/x-ustar',
'.vala': 'text/x-vala',
'.vapi': 'text/x-vala',
'.vcf': 'text/directory',
'.vcs': 'text/calendar',
'.vct': 'text/directory',
'.vda': 'image/x-tga',
'.vhd': 'text/x-vhdl',
'.vhdl': 'text/x-vhdl',
'.viv': 'video/vivo',
'.vivo': 'video/vivo',
'.vlc': 'audio/x-mpegurl',
'.vob': 'video/mpeg',
'.voc': 'audio/x-voc',
'.vor': 'application/vnd.stardivision.writer',
'.vst': 'image/x-tga',
'.wav': 'audio/x-wav',
'.wax': 'audio/x-ms-asx',
'.wb1': 'application/x-quattropro',
'.wb2': 'application/x-quattropro',
'.wb3': 'application/x-quattropro',
'.wbmp': 'image/vnd.wap.wbmp',
'.wcm': 'application/vnd.ms-works',
'.wdb': 'application/vnd.ms-works',
'.webm': 'video/webm',
'.wk1': 'application/vnd.lotus-1-2-3',
'.wk3': 'application/vnd.lotus-1-2-3',
'.wk4': 'application/vnd.lotus-1-2-3',
'.wks': 'application/vnd.ms-works',
'.wma': 'audio/x-ms-wma',
'.wmf': 'image/x-wmf',
'.wml': 'text/vnd.wap.wml',
'.wmls': 'text/vnd.wap.wmlscript',
'.wmv': 'video/x-ms-wmv',
'.wmx': 'audio/x-ms-asx',
'.wp': 'application/vnd.wordperfect',
'.wp4': 'application/vnd.wordperfect',
'.wp5': 'application/vnd.wordperfect',
'.wp6': 'application/vnd.wordperfect',
'.wpd': 'application/vnd.wordperfect',
'.wpg': 'application/x-wpg',
'.wpl': 'application/vnd.ms-wpl',
'.wpp': 'application/vnd.wordperfect',
'.wps': 'application/vnd.ms-works',
'.wri': 'application/x-mswrite',
'.wrl': 'model/vrml',
'.wv': 'audio/x-wavpack',
'.wvc': 'audio/x-wavpack-correction',
'.wvp': 'audio/x-wavpack',
'.wvx': 'audio/x-ms-asx',
'.x3f': 'image/x-sigma-x3f',
'.xac': 'application/x-gnucash',
'.xbel': 'application/x-xbel',
'.xbl': 'application/xml',
'.xbm': 'image/x-xbitmap',
'.xcf': 'image/x-xcf',
'.xcf.bz2': 'image/x-compressed-xcf',
'.xcf.gz': 'image/x-compressed-xcf',
'.xhtml': 'application/xhtml+xml',
'.xi': 'audio/x-xi',
'.xla': 'application/vnd.ms-excel',
'.xlc': 'application/vnd.ms-excel',
'.xld': 'application/vnd.ms-excel',
'.xlf': 'application/x-xliff',
'.xliff': 'application/x-xliff',
'.xll': 'application/vnd.ms-excel',
'.xlm': 'application/vnd.ms-excel',
'.xls': 'application/vnd.ms-excel',
'.xlsm': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'.xlt': 'application/vnd.ms-excel',
'.xlw': 'application/vnd.ms-excel',
'.xm': 'audio/x-xm',
'.xmf': 'audio/x-xmf',
'.xmi': 'text/x-xmi',
'.xml': 'application/xml',
'.xpm': 'image/x-xpixmap',
'.xps': 'application/vnd.ms-xpsdocument',
'.xsl': 'application/xml',
'.xslfo': 'text/x-xslfo',
'.xslt': 'application/xml',
'.xspf': 'application/xspf+xml',
'.xul': 'application/vnd.mozilla.xul+xml',
'.xwd': 'image/x-xwindowdump',
'.xyz': 'chemical/x-pdb',
'.xz': 'application/x-xz',
'.w2p': 'application/w2p',
'.z': 'application/x-compress',
'.zabw': 'application/x-abiword',
'.zip': 'application/zip',
'.zoo': 'application/x-zoo',
}
def contenttype(filename, default='text/plain'):
    """
    Return the Content-Type string matching the extension of `filename`,
    falling back to `default` when the extension is unknown. A compound
    extension (e.g. '.tar.gz') takes precedence over the final one, and
    every text/* result gets '; charset=utf-8' appended.
    """
    last_dot = filename.rfind('.')
    if last_dot >= 0:
        default = CONTENT_TYPE.get(filename[last_dot:].lower(), default)
        # look one dot further back so '.tar.gz' beats plain '.gz'
        prev_dot = filename.rfind('.', 0, last_dot)
        if prev_dot >= 0:
            default = CONTENT_TYPE.get(filename[prev_dot:].lower(), default)
    if default.startswith('text/'):
        return default + '; charset=utf-8'
    return default
| Python |
#!/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
gluon.rewrite parses incoming URLs and formats outgoing URLs for gluon.html.URL.
In addition, it rewrites both incoming and outgoing URLs based on the (optional) user-supplied routes.py,
which also allows for rewriting of certain error messages.
routes.py supports two styles of URL rewriting, depending on whether 'routers' is defined.
Refer to router.example.py and routes.example.py for additional documentation.
"""
import os
import re
import logging
import traceback
import threading
import urllib
from storage import Storage, List
from http import HTTP
from fileutils import abspath, read_file
from settings import global_settings
# module logger; verbosity is controlled at runtime by routes.py (see log_rewrite)
logger = logging.getLogger('web2py.rewrite')
# NOTE: despite its name this is a threading.local holding per-thread
# routing parameters (thread.routes), not the py2 'thread' module
thread = threading.local() # thread-local storage for routing parameters
def _router_default():
    "return new copy of default base router"
    return Storage(
        default_application='init',
        applications='ALL',
        default_controller='default',
        controllers='DEFAULT',
        default_function='index',
        functions=dict(),
        default_language=None,
        languages=None,
        root_static=['favicon.ico', 'robots.txt'],
        domains=None,
        exclusive_domain=False,
        map_hyphen=False,
        acfe_match=r'\w+$',               # legal app/ctlr/fcn/ext
        file_match=r'(\w+[-=./]?)+$',     # legal file (path) name
        args_match=r'([\w@ -]+[=.]?)*$',  # legal arg in args
    )
def _params_default(app=None):
    "Return a fresh copy of the default regex-rewrite parameters."
    p = Storage()
    p.name = app or "BASE"
    p.default_application = app or "init"
    p.default_controller = "default"
    p.default_function = "index"
    # all route lists start out empty
    for route_list in ('routes_app', 'routes_in', 'routes_out',
                       'routes_onerror', 'routes_apps_raw'):
        p[route_list] = []
    p.error_handler = None
    p.error_message = '<html><body><h1>%s</h1></body></html>'
    p.error_message_ticket = (
        '<html><body><h1>Internal error</h1>Ticket issued: '
        '<a href="/admin/default/ticket/%(ticket)s" target="_blank">'
        '%(ticket)s</a></body><!-- this is junk text else IE does not display the page: '
        + ('x' * 512) + ' //--></html>')
    p.routers = None
    p.logging = 'off'
    return p
# per-application regex rewrite parameters, keyed by app name (filled by load())
params_apps = dict()
params = _params_default(app=None) # regex rewrite parameters
thread.routes = params # default to base regex rewrite parameters
# router-based rewrite configuration; set by load() when routes.py defines 'routers'
routers = None
def log_rewrite(string):
    "Log rewrite activity under control of routes.py"
    level = params.logging
    if level == 'debug':  # most common setting: test it first
        logger.debug(string)
    elif level == 'off' or not level:
        pass  # logging disabled
    elif level == 'print':
        print(string)
    elif level in ('info', 'warning', 'error', 'critical'):
        getattr(logger, level)(string)
    else:
        logger.debug(string)  # unrecognized setting: fall back to debug
# complete set of keys permitted in any router entry (BASE or app-specific)
ROUTER_KEYS = set(('default_application', 'applications', 'default_controller', 'controllers',
                   'default_function', 'functions', 'default_language', 'languages',
                   'domain', 'domains', 'root_static', 'path_prefix',
                   'exclusive_domain', 'map_hyphen', 'map_static',
                   'acfe_match', 'file_match', 'args_match'))
# keys that may appear only in the BASE router, never in app-specific routers
ROUTER_BASE_KEYS = set(('applications', 'default_application', 'domains', 'path_prefix'))
# The external interface to rewrite consists of:
#
# load: load routing configuration file(s)
# url_in: parse and rewrite incoming URL
# url_out: assemble and rewrite outgoing URL
#
# thread.routes.default_application
# thread.routes.error_message
# thread.routes.error_message_ticket
# thread.routes.try_redirect_on_error
# thread.routes.error_handler
#
# filter_url: helper for doctest & unittest
# filter_err: helper for doctest & unittest
# regex_filter_out: doctest
def url_in(request, environ):
    """
    Parse and rewrite an incoming URL.

    Dispatches to the parameter-based router when one is configured,
    otherwise to the regex-based rewriter.
    """
    if not routers:
        return regex_url_in(request, environ)
    return map_url_in(request, environ)
def url_out(request, env, application, controller, function, args, other, scheme, host, port):
    """
    Assemble and rewrite an outgoing URL.

    Uses the parameter-based router when configured, otherwise builds
    '/a/c/f...' and passes it through the regex outbound filter. When
    scheme/host/port are supplied, an absolute URL is produced.
    """
    if routers:
        acf = map_url_out(request, env, application, controller,
                          function, args, other, scheme, host, port)
        url = '%s%s' % (acf, other)
    else:
        url = regex_filter_out(
            '/%s/%s/%s%s' % (application, controller, function, other), env)
    #
    # fill in scheme and host if absolute URL is requested
    # scheme can be a string, eg 'http', 'https', 'ws', 'wss'
    #
    if scheme or port is not None:
        if host is None:
            host = True  # a scheme or port implies a host
    if not scheme or scheme is True:
        if request and request.env:
            scheme = request.env.get('wsgi_url_scheme', 'http').lower()
        else:
            scheme = 'http'  # some reasonable default in case we need it
    if host is not None:
        if host is True:
            host = request.env.http_host
    if host:
        port = '' if port is None else ':%s' % port
        url = '%s://%s%s%s' % (scheme, host, port, url)
    return url
def try_rewrite_on_error(http_response, request, environ, ticket=None):
    """
    called from main.wsgibase to rewrite the http response.

    If the response status is an error and routes_onerror matches, either
    return a 303 redirect HTTP object (external URL), or rewrite environ
    in place and return (None, environ) so wsgibase re-dispatches to the
    error-handling path. Otherwise return (http_response, environ)
    unchanged.
    """
    status = int(str(http_response.status).split()[0])
    # NOTE(review): this uses status>=399 while try_redirect_on_error uses
    # status>399 — confirm whether 399 itself is meant to be handled here
    if status>=399 and thread.routes.routes_onerror:
        # candidate match keys, from most to least specific
        keys=set(('%s/%s' % (request.application, status),
                  '%s/*' % (request.application),
                  '*/%s' % (status),
                  '*/*'))
        for (key,uri) in thread.routes.routes_onerror:
            if key in keys:
                if uri == '!':
                    # do nothing!
                    return http_response, environ
                elif '?' in uri:
                    path_info, query_string = uri.split('?',1)
                    query_string += '&'
                else:
                    path_info, query_string = uri, ''
                # append diagnostic parameters for the error handler
                query_string += \
                    'code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \
                    (status,ticket,request.env.request_uri,request.url)
                if uri.startswith('http://') or uri.startswith('https://'):
                    # make up a response
                    url = path_info+'?'+query_string
                    message = 'You are being redirected <a href="%s">here</a>'
                    return HTTP(303, message % url, Location=url), environ
                else:
                    error_raising_path = environ['PATH_INFO']
                    # Rewrite routes_onerror path.
                    path_info = '/' + path_info.lstrip('/') # add leading '/' if missing
                    environ['PATH_INFO'] = path_info
                    error_handling_path = url_in(request, environ)[1]['PATH_INFO']
                    # Avoid infinite loop.
                    if error_handling_path != error_raising_path:
                        # wsgibase will be called recursively with the routes_onerror path.
                        environ['PATH_INFO'] = path_info
                        environ['QUERY_STRING'] = query_string
                        return None, environ
    # do nothing!
    return http_response, environ
def try_redirect_on_error(http_object, request, ticket=None):
    """
    Called from main.wsgibase to rewrite the http response.

    On an error status with a matching routes_onerror entry, return a
    303 redirect carrying diagnostic query parameters; otherwise return
    the original http_object.
    """
    status = int(str(http_object.status).split()[0])
    if status > 399 and thread.routes.routes_onerror:
        candidates = set(('%s/%s' % (request.application, status),
                          '%s/*' % (request.application),
                          '*/%s' % (status),
                          '*/*'))
        for (key, redir) in thread.routes.routes_onerror:
            if key not in candidates:
                continue
            if redir == '!':
                break  # explicit "do nothing" marker
            separator = '&' if '?' in redir else '?'
            url = '%s%scode=%s&ticket=%s&requested_uri=%s&request_url=%s' % \
                (redir, separator, status, ticket,
                 request.env.request_uri, request.url)
            return HTTP(303,
                        'You are being redirected <a href="%s">here</a>' % url,
                        Location=url)
    return http_object
def load(routes='routes.py', app=None, data=None, rdict=None):
    """
    load: read (if file) and parse routes
    store results in params
    (called from main.py at web2py initialization time)
    If data is present, it's used instead of the routes.py contents.
    If rdict is present, it must be a dict to be used for routers (unit test)
    """
    global params
    global routers
    if app is None:
        # reinitialize
        global params_apps
        params_apps = dict()
        params = _params_default(app=None) # regex rewrite parameters
        thread.routes = params # default to base regex rewrite parameters
        routers = None
    if isinstance(rdict, dict):
        # unit-test path: routers supplied directly as a dict
        symbols = dict(routers=rdict)
        path = 'rdict'
    else:
        if data is not None:
            path = 'routes'
        else:
            if app is None:
                path = abspath(routes)
            else:
                path = abspath('applications', app, routes)
            if not os.path.exists(path):
                return
            data = read_file(path).replace('\r\n','\n')
        symbols = {}
        try:
            # execute routes.py; its top-level names land in symbols
            exec (data + '\n') in symbols
        except SyntaxError, e:
            logger.error(
                '%s has a syntax error and will not be loaded\n' % path
                + traceback.format_exc())
            raise e
    p = _params_default(app)
    # compile the regex route lists
    for sym in ('routes_app', 'routes_in', 'routes_out'):
        if sym in symbols:
            for (k, v) in symbols[sym]:
                p[sym].append(compile_regex(k, v))
    # copy simple scalar/list settings verbatim
    for sym in ('routes_onerror', 'routes_apps_raw',
                'error_handler','error_message', 'error_message_ticket',
                'default_application','default_controller', 'default_function',
                'logging'):
        if sym in symbols:
            p[sym] = symbols[sym]
    if 'routers' in symbols:
        p.routers = Storage(symbols['routers'])
        for key in p.routers:
            if isinstance(p.routers[key], dict):
                p.routers[key] = Storage(p.routers[key])
    if app is None:
        params = p # install base rewrite parameters
        thread.routes = params # install default as current routes
        #
        # create the BASE router if routers in use
        #
        routers = params.routers # establish routers if present
        if isinstance(routers, dict):
            routers = Storage(routers)
        if routers is not None:
            router = _router_default()
            if routers.BASE:
                router.update(routers.BASE)
            routers.BASE = router
        # scan each app in applications/
        #     create a router, if routers are in use
        #     parse the app-specific routes.py if present
        #
        all_apps = []
        for appname in [app for app in os.listdir(abspath('applications')) if not app.startswith('.')]:
            if os.path.isdir(abspath('applications', appname)) and \
               os.path.isdir(abspath('applications', appname, 'controllers')):
                all_apps.append(appname)
                if routers:
                    router = Storage(routers.BASE)   # new copy
                    if appname in routers:
                        for key in routers[appname].keys():
                            if key in ROUTER_BASE_KEYS:
                                raise SyntaxError, "BASE-only key '%s' in router '%s'" % (key, appname)
                        router.update(routers[appname])
                    routers[appname] = router
                if os.path.exists(abspath('applications', appname, routes)):
                    # recurse to load the app-specific routes.py
                    load(routes, appname)
        if routers:
            load_routers(all_apps)
    else: # app
        params_apps[app] = p
        if routers and p.routers:
            if app in p.routers:
                routers[app].update(p.routers[app])
    log_rewrite('URL rewrite is on. configuration in %s' % path)
# $name placeholders in route patterns: $anything is greedy,
# any other $x matches a single \w+ path component
regex_at = re.compile(r'(?<!\\)\$[a-zA-Z]\w*')
regex_anything = re.compile(r'(?<!\\)\$anything')

def compile_regex(k, v):
    """
    Preprocess and compile the regular expressions in routes_app/in/out

    The resulting regex will match a pattern of the form:

        [remote address]:[protocol]://[host]:[method] [path]

    We allow abbreviated regexes on input; here we try to complete them.
    """
    pattern = k  # working copy; keep k intact for error reporting
    # bracket regex in ^...$ if not already done
    if pattern[0] != '^':
        pattern = '^' + pattern
    if pattern[-1] != '$':
        pattern += '$'
    # if there are no :-separated parts, prepend a catch-all for the IP address
    if ':' not in pattern:
        pattern = '^.*?:https?://[^:/]+:[a-z]+ %s' % pattern[1:]
    # if there's no ://, provide a catch-all for the protocol, host & method
    if '://' not in pattern:
        i = pattern.find(':/')
        if i < 0:
            raise SyntaxError("routes pattern syntax error: path needs leading '/' [%s]" % k)
        pattern = r'%s:https?://[^:/]+:[a-z]+ %s' % (pattern[:i], pattern[i+1:])
    # $anything -> (?P<anything>.*)
    for placeholder in regex_anything.findall(pattern):
        pattern = pattern.replace(placeholder, '(?P<anything>.*)')
    # $a (etc) -> (?P<a>\w+)
    for placeholder in regex_at.findall(pattern):
        pattern = pattern.replace(placeholder, r'(?P<%s>\w+)' % placeholder[1:])
    # same for replacement pattern, but with \g
    for placeholder in regex_at.findall(v):
        v = v.replace(placeholder, r'\g<%s>' % placeholder[1:])
    return (re.compile(pattern, re.DOTALL), v)
def load_routers(all_apps):
    """
    load-time post-processing of routers

    Normalizes every entry of the global `routers` Storage: merges each
    app router over a copy of BASE, validates keys, converts
    controllers/languages/functions to sets/dicts, compiles validation
    patterns, and rewrites BASE.domains into tuple keys/values.
    """
    for app in routers.keys():
        # initialize apps with routers that aren't present, on behalf of unit tests
        if app not in all_apps:
            all_apps.append(app)
            router = Storage(routers.BASE)   # new copy
            if app != 'BASE':
                for key in routers[app].keys():
                    if key in ROUTER_BASE_KEYS:
                        raise SyntaxError, "BASE-only key '%s' in router '%s'" % (key, app)
            router.update(routers[app])
            routers[app] = router
        router = routers[app]
        # reject unknown router keys outright
        for key in router.keys():
            if key not in ROUTER_KEYS:
                raise SyntaxError, "unknown key '%s' in router '%s'" % (key, app)
        if not router.controllers:
            router.controllers = set()
        elif not isinstance(router.controllers, str):
            router.controllers = set(router.controllers)
        if router.languages:
            router.languages = set(router.languages)
        else:
            router.languages = set()
        if app != 'BASE':
            # BASE-only keys never apply to app routers
            for base_only in ROUTER_BASE_KEYS:
                router.pop(base_only, None)
            if 'domain' in router:
                routers.BASE.domains[router.domain] = app
            if isinstance(router.controllers, str) and router.controllers == 'DEFAULT':
                # 'DEFAULT': discover controllers from the filesystem
                router.controllers = set()
                if os.path.isdir(abspath('applications', app)):
                    cpath = abspath('applications', app, 'controllers')
                    for cname in os.listdir(cpath):
                        if os.path.isfile(abspath(cpath, cname)) and cname.endswith('.py'):
                            router.controllers.add(cname[:-3])
            if router.controllers:
                router.controllers.add('static')
                router.controllers.add(router.default_controller)
            if router.functions:
                if isinstance(router.functions, (set, tuple, list)):
                    functions = set(router.functions)
                    if isinstance(router.default_function, str):
                        functions.add(router.default_function)  # legacy compatibility
                    router.functions = { router.default_controller: functions }
                for controller in router.functions:
                    router.functions[controller] = set(router.functions[controller])
            else:
                router.functions = dict()
    if isinstance(routers.BASE.applications, str) and routers.BASE.applications == 'ALL':
        routers.BASE.applications = list(all_apps)
    if routers.BASE.applications:
        routers.BASE.applications = set(routers.BASE.applications)
    else:
        routers.BASE.applications = set()
    for app in routers.keys():
        # set router name
        router = routers[app]
        router.name = app
        # compile URL validation patterns
        router._acfe_match = re.compile(router.acfe_match)
        router._file_match = re.compile(router.file_match)
        if router.args_match:
            router._args_match = re.compile(router.args_match)
        # convert path_prefix to a list of path elements
        if router.path_prefix:
            if isinstance(router.path_prefix, str):
                router.path_prefix = router.path_prefix.strip('/').split('/')
    # rewrite BASE.domains as tuples
    #
    #   key:   'domain[:port]' -> (domain, port)
    #   value: 'application[/controller] -> (application, controller)
    #   (port and controller may be None)
    #
    domains = dict()
    if routers.BASE.domains:
        for (domain, app) in [(d.strip(':'), a.strip('/')) for (d, a) in routers.BASE.domains.items()]:
            port = None
            if ':' in domain:
                (domain, port) = domain.split(':')
            ctlr = None
            fcn = None
            if '/' in app:
                (app, ctlr) = app.split('/', 1)
            if ctlr and '/' in ctlr:
                (ctlr, fcn) = ctlr.split('/')
            if app not in all_apps and app not in routers:
                raise SyntaxError, "unknown app '%s' in domains" % app
            domains[(domain, port)] = (app, ctlr, fcn)
    routers.BASE.domains = domains
def regex_uri(e, regexes, tag, default=None):
    """
    Filter an incoming URI against a list of (regex, replacement) pairs.

    Builds the canonical match key
    'remote:scheme://host:method path' and returns the first rewrite
    that matches, or *default* when none does. *tag* labels log output.
    """
    host = e.get('HTTP_HOST', 'localhost').lower()
    colon = host.find(':')
    if colon > 0:
        host = host[:colon]  # drop any port from the host
    key = '%s:%s://%s:%s %s' % \
        (e.get('REMOTE_ADDR', 'localhost'),
         e.get('wsgi.url_scheme', 'http').lower(), host,
         e.get('REQUEST_METHOD', 'get').lower(), e['PATH_INFO'])
    for (regex, value) in regexes:
        if regex.match(key):
            rewritten = regex.sub(value, key)
            log_rewrite('%s: [%s] [%s] -> %s' % (tag, key, value, rewritten))
            return rewritten
    log_rewrite('%s: [%s] -> %s (not rewritten)' % (tag, key, default))
    return default
def regex_select(env=None, app=None, request=None):
    """
    Select the set of regex rewrite params for the current request and
    install it in thread.routes.
    """
    if app:
        # explicit app: use its parameters, falling back to BASE
        thread.routes = params_apps.get(app, params)
    elif env and params.routes_app:
        if routers:
            map_url_in(request, env, app=True)
        else:
            # determine the app from routes_app, then select its params
            app = regex_uri(env, params.routes_app, "routes_app")
            thread.routes = params_apps.get(app, params)
    else:
        thread.routes = params  # default to base rewrite parameters
    log_rewrite("select routing parameters: %s" % thread.routes.name)
    return app  # for doctest
def regex_filter_in(e):
    """
    Regex-rewrite the incoming URL in environ *e* (in place) and return it.

    Records the original URI in WEB2PY_ORIGINAL_URI and keeps
    REQUEST_URI/QUERY_STRING consistent with any rewrite.
    """
    query = e.get('QUERY_STRING', None)
    e['WEB2PY_ORIGINAL_URI'] = e['PATH_INFO'] + ('?' + query if query else '')
    if thread.routes.routes_in:
        path = regex_uri(e, thread.routes.routes_in, "routes_in", e['PATH_INFO'])
        items = path.split('?', 1)
        e['PATH_INFO'] = items[0]
        if len(items) > 1:
            # the rewrite supplied its own query string; merge it in front
            query = (items[1] + '&' + query) if query else items[1]
            e['QUERY_STRING'] = query
    e['REQUEST_URI'] = e['PATH_INFO'] + ('?' + query if query else '')
    return e
# pattern to replace spaces with underscore in URL
# also the html escaped variants '+' and '%20' are covered
regex_space = re.compile('(\+|\s|%20)+')
# pattern to find valid paths in url /application/controller/...
# this could be:
# for static pages:
# /<b:application>/static/<x:file>
# for dynamic pages:
# /<a:application>[/<c:controller>[/<f:function>[.<e:ext>][/<s:args>]]]
# application, controller, function and ext may only contain [a-zA-Z0-9_]
# file and args may also contain '-', '=', '.' and '/'
# apps in routes_apps_raw must parse raw_args into args
regex_static = re.compile(r'''
(^ # static pages
/(?P<b> \w+) # b=app
/static # /b/static
/(?P<x> (\w[\-\=\./]?)* ) # x=file
$)
''', re.X)
regex_url = re.compile(r'''
(^( # (/a/c/f.e/s)
/(?P<a> [\w\s+]+ ) # /a=app
( # (/c.f.e/s)
/(?P<c> [\w\s+]+ ) # /a/c=controller
( # (/f.e/s)
/(?P<f> [\w\s+]+ ) # /a/c/f=function
( # (.e)
\.(?P<e> [\w\s+]+ ) # /a/c/f.e=extension
)?
( # (/s)
/(?P<r> # /a/c/f.e/r=raw_args
.*
)
)?
)?
)?
)?
/?$)
''', re.X)
regex_args = re.compile(r'''
(^
(?P<s>
( [\w@/-][=.]? )* # s=args
)?
/?$) # trailing slash
''', re.X)
def regex_url_in(request, environ):
    """
    rewrite and parse incoming URL

    Returns (static_file, environ) when the URL names a static file,
    else (None, environ) after filling request.application/controller/
    function/extension/args. Raises HTTP(400) on an invalid path.
    """
    # ##################################################
    # select application
    # rewrite URL if routes_in is defined
    # update request.env
    # ##################################################
    regex_select(env=environ, request=request)
    if thread.routes.routes_in:
        environ = regex_filter_in(environ)
    # mirror environ into request.env with lower-case, dot-free keys
    for (key, value) in environ.items():
        request.env[key.lower().replace('.', '_')] = value
    path = request.env.path_info.replace('\\', '/')
    # ##################################################
    # serve if a static file
    # ##################################################
    match = regex_static.match(regex_space.sub('_', path))
    if match and match.group('x'):
        static_file = os.path.join(request.env.applications_parent,
                                   'applications', match.group('b'),
                                   'static', match.group('x'))
        return (static_file, environ)
    # ##################################################
    # parse application, controller and function
    # ##################################################
    path = re.sub('%20', ' ', path)
    match = regex_url.match(path)
    # 'static' may not be addressed as a regular controller
    if not match or match.group('c') == 'static':
        raise HTTP(400,
                   thread.routes.error_message % 'invalid request',
                   web2py_error='invalid path')
    request.application = \
        regex_space.sub('_', match.group('a') or thread.routes.default_application)
    request.controller = \
        regex_space.sub('_', match.group('c') or thread.routes.default_controller)
    request.function = \
        regex_space.sub('_', match.group('f') or thread.routes.default_function)
    group_e = match.group('e')
    request.raw_extension = group_e and regex_space.sub('_', group_e) or None
    request.extension = request.raw_extension or 'html'
    request.raw_args = match.group('r')
    request.args = List([])
    if request.application in thread.routes.routes_apps_raw:
        # application is responsible for parsing args
        request.args = None
    elif request.raw_args:
        match = regex_args.match(request.raw_args.replace(' ', '_'))
        if match:
            group_s = match.group('s')
            request.args = \
                List((group_s and group_s.split('/')) or [])
            if request.args and request.args[-1] == '':
                request.args.pop() # adjust for trailing empty arg
        else:
            raise HTTP(400,
                       thread.routes.error_message % 'invalid request',
                       web2py_error='invalid path (args)')
    return (None, environ)
def regex_filter_out(url, e=None):
    """
    Regex-rewrite an outgoing URL against routes_out.

    Returns the first matching rewrite, or *url* unchanged. No-op when
    parameter-based routers are active (they filter elsewhere).
    """
    if not hasattr(thread, 'routes'):
        regex_select()  # ensure thread.routes is set (for application threads)
    if routers:
        return url  # already filtered by the router logic
    if thread.routes.routes_out:
        items = url.split('?', 1)
        if e:
            host = e.get('http_host', 'localhost').lower()
            colon = host.find(':')
            if colon > 0:
                host = host[:colon]  # drop any port
            items[0] = '%s:%s://%s:%s %s' % \
                (e.get('remote_addr', ''),
                 e.get('wsgi_url_scheme', 'http').lower(), host,
                 e.get('request_method', 'get').lower(), items[0])
        else:
            # no environ available: use a generic match key
            items[0] = ':http://localhost:get %s' % items[0]
        for (regex, value) in thread.routes.routes_out:
            if regex.match(items[0]):
                rewritten = '?'.join([regex.sub(value, items[0])] + items[1:])
                log_rewrite('routes_out: [%s] -> %s' % (url, rewritten))
                return rewritten
    log_rewrite('routes_out: [%s] not rewritten' % url)
    return url
def filter_url(url, method='get', remote='0.0.0.0', out=False, app=False, lang=None,
               domain=(None,None), env=False, scheme=None, host=None, port=None):
    """
    doctest/unittest interface to regex_filter_in() and regex_filter_out()

    Simulates a full request environ from *url* and exercises either the
    app-selection logic (app=True), the outbound rewriter (out=True), or
    the inbound rewriter (default). Returns a compact string describing
    the routing result, or request.env when env=True.
    """
    regex_url = re.compile(r'^(?P<scheme>http|https|HTTP|HTTPS)\://(?P<host>[^/]*)(?P<uri>.*)')
    match = regex_url.match(url)
    urlscheme = match.group('scheme').lower()
    urlhost = match.group('host').lower()
    uri = match.group('uri')
    k = uri.find('?')
    if k < 0:
        k = len(uri)
    if isinstance(domain, str):
        domain = (domain, None)
    (path_info, query_string) = (uri[:k], uri[k+1:])
    path_info = urllib.unquote(path_info) # simulate server
    # minimal WSGI-like environ for the rewriter
    e = {
        'REMOTE_ADDR': remote,
        'REQUEST_METHOD': method,
        'wsgi.url_scheme': urlscheme,
        'HTTP_HOST': urlhost,
        'REQUEST_URI': uri,
        'PATH_INFO': path_info,
        'QUERY_STRING': query_string,
        #for filter_out request.env use lowercase
        'remote_addr': remote,
        'request_method': method,
        'wsgi_url_scheme': urlscheme,
        'http_host': urlhost
    }
    request = Storage()
    e["applications_parent"] = global_settings.applications_parent
    request.env = Storage(e)
    request.uri_language = lang
    # determine application only
    #
    if app:
        if routers:
            return map_url_in(request, e, app=True)
        return regex_select(e)
    # rewrite outbound URL
    #
    if out:
        (request.env.domain_application, request.env.domain_controller) = domain
        items = path_info.lstrip('/').split('/')
        if items[-1] == '':
            items.pop() # adjust trailing empty args
        assert len(items) >= 3, "at least /a/c/f is required"
        a = items.pop(0)
        c = items.pop(0)
        f = items.pop(0)
        if not routers:
            return regex_filter_out(uri, e)
        acf = map_url_out(request, None, a, c, f, items, None, scheme, host, port)
        if items:
            url = '%s/%s' % (acf, '/'.join(items))
            if items[-1] == '':
                url += '/'
        else:
            url = acf
        if query_string:
            url += '?' + query_string
        return url
    # rewrite inbound URL
    #
    (static, e) = url_in(request, e)
    if static:
        return static
    result = "/%s/%s/%s" % (request.application, request.controller, request.function)
    if request.extension and request.extension != 'html':
        result += ".%s" % request.extension
    if request.args:
        result += " %s" % request.args
    if e['QUERY_STRING']:
        result += " ?%s" % e['QUERY_STRING']
    if request.uri_language:
        result += " (%s)" % request.uri_language
    if env:
        return request.env
    return result
def filter_err(status, application='app', ticket='tkt'):
    """
    doctest/unittest interface to routes_onerror

    Returns the redirection URL when a routes_onerror entry matches the
    error *status*, otherwise the status itself.
    """
    if status > 399 and thread.routes.routes_onerror:
        candidates = set(('%s/%s' % (application, status),
                          '%s/*' % (application),
                          '*/%s' % (status),
                          '*/*'))
        for (key, redir) in thread.routes.routes_onerror:
            if key not in candidates:
                continue
            if redir == '!':
                break  # explicit "do nothing" marker
            separator = '&' if '?' in redir else '?'
            return '%s%scode=%s&ticket=%s' % (redir, separator, status, ticket)  # redirection
    return status  # no action
# router support
#
class MapUrlIn(object):
    """
    logic for mapping incoming URLs (parameter-based router)

    One instance is built per request; map_* methods are called in
    sequence to peel app / language / controller / function / args off
    the incoming path, then update_request() copies the result into
    the request object.
    """
    def __init__(self, request=None, env=None):
        "initialize a map-in object"
        self.request = request
        self.env = env
        self.router = None
        self.application = None
        self.language = None
        self.controller = None
        self.function = None
        self.extension = 'html'
        self.controllers = set()
        self.functions = dict()
        self.languages = set()
        self.default_language = None
        self.map_hyphen = False
        self.exclusive_domain = False
        path = self.env['PATH_INFO']
        self.query = self.env.get('QUERY_STRING', None)
        path = path.lstrip('/')
        self.env['PATH_INFO'] = '/' + path
        self.env['WEB2PY_ORIGINAL_URI'] = self.env['PATH_INFO'] + (self.query and ('?' + self.query) or '')
        # to handle empty args, strip exactly one trailing slash, if present
        # .../arg1// represents one trailing empty arg
        #
        if path.endswith('/'):
            path = path[:-1]
        self.args = List(path and path.split('/') or [])
        # see http://www.python.org/dev/peps/pep-3333/#url-reconstruction for URL composition
        self.remote_addr = self.env.get('REMOTE_ADDR','localhost')
        self.scheme = self.env.get('wsgi.url_scheme', 'http').lower()
        self.method = self.env.get('REQUEST_METHOD', 'get').lower()
        self.host = self.env.get('HTTP_HOST')
        self.port = None
        if not self.host:
            # fall back to SERVER_NAME/SERVER_PORT per PEP 3333
            self.host = self.env.get('SERVER_NAME')
            self.port = self.env.get('SERVER_PORT')
        if not self.host:
            self.host = 'localhost'
            self.port = '80'
        if ':' in self.host:
            (self.host, self.port) = self.host.split(':')
        if not self.port:
            if self.scheme == 'https':
                self.port = '443'
            else:
                self.port = '80'
    def map_prefix(self):
        "strip path prefix, if present in its entirety"
        prefix = routers.BASE.path_prefix
        if prefix:
            prefixlen = len(prefix)
            if prefixlen > len(self.args):
                return
            for i in xrange(prefixlen):
                if prefix[i] != self.args[i]:
                    return # prefix didn't match
            self.args = List(self.args[prefixlen:]) # strip the prefix
    def map_app(self):
        "determine application name"
        base = routers.BASE # base router
        self.domain_application = None
        self.domain_controller = None
        self.domain_function = None
        arg0 = self.harg0
        # domain mapping takes precedence over the URL's first component
        if (self.host, self.port) in base.domains:
            (self.application, self.domain_controller, self.domain_function) = base.domains[(self.host, self.port)]
            self.env['domain_application'] = self.application
            self.env['domain_controller'] = self.domain_controller
            self.env['domain_function'] = self.domain_function
        elif (self.host, None) in base.domains:
            (self.application, self.domain_controller, self.domain_function) = base.domains[(self.host, None)]
            self.env['domain_application'] = self.application
            self.env['domain_controller'] = self.domain_controller
            self.env['domain_function'] = self.domain_function
        elif base.applications and arg0 in base.applications:
            self.application = arg0
        elif arg0 and not base.applications:
            self.application = arg0
        else:
            self.application = base.default_application or ''
        self.pop_arg_if(self.application == arg0)
        if not base._acfe_match.match(self.application):
            raise HTTP(400, thread.routes.error_message % 'invalid request',
                       web2py_error="invalid application: '%s'" % self.application)
        if self.application not in routers and \
           (self.application != thread.routes.default_application or self.application == 'welcome'):
            raise HTTP(400, thread.routes.error_message % 'invalid request',
                       web2py_error="unknown application: '%s'" % self.application)
        # set the application router
        #
        log_rewrite("select application=%s" % self.application)
        self.request.application = self.application
        if self.application not in routers:
            self.router = routers.BASE # support gluon.main.wsgibase init->welcome
        else:
            self.router = routers[self.application] # application router
        # cache the active router's settings on the instance
        self.controllers = self.router.controllers
        self.default_controller = self.domain_controller or self.router.default_controller
        self.functions = self.router.functions
        self.languages = self.router.languages
        self.default_language = self.router.default_language
        self.map_hyphen = self.router.map_hyphen
        self.exclusive_domain = self.router.exclusive_domain
        self._acfe_match = self.router._acfe_match
        self._file_match = self.router._file_match
        self._args_match = self.router._args_match
    def map_root_static(self):
        '''
        handle root-static files (no hyphen mapping)

        a root-static file is one whose incoming URL expects it to be at the root,
        typically robots.txt & favicon.ico
        '''
        if len(self.args) == 1 and self.arg0 in self.router.root_static:
            self.controller = self.request.controller = 'static'
            root_static_file = os.path.join(self.request.env.applications_parent,
                                            'applications', self.application,
                                            self.controller, self.arg0)
            log_rewrite("route: root static=%s" % root_static_file)
            return root_static_file
        return None
    def map_language(self):
        "handle language (no hyphen mapping)"
        arg0 = self.arg0 # no hyphen mapping
        if arg0 and self.languages and arg0 in self.languages:
            self.language = arg0
        else:
            self.language = self.default_language
        if self.language:
            log_rewrite("route: language=%s" % self.language)
            self.pop_arg_if(self.language == arg0)
            arg0 = self.arg0  # NOTE(review): this re-read is never used afterwards
    def map_controller(self):
        "identify controller"
        # handle controller
        #
        arg0 = self.harg0 # map hyphens
        if not arg0 or (self.controllers and arg0 not in self.controllers):
            self.controller = self.default_controller or ''
        else:
            self.controller = arg0
        self.pop_arg_if(arg0 == self.controller)
        log_rewrite("route: controller=%s" % self.controller)
        if not self.router._acfe_match.match(self.controller):
            raise HTTP(400, thread.routes.error_message % 'invalid request',
                       web2py_error='invalid controller')
    def map_static(self):
        '''
        handle static files
        file_match but no hyphen mapping
        '''
        if self.controller != 'static':
            return None
        file = '/'.join(self.args)
        if not self.router._file_match.match(file):
            raise HTTP(400, thread.routes.error_message % 'invalid request',
                       web2py_error='invalid static file')
        #
        # support language-specific static subdirectories,
        # eg /appname/en/static/filename => applications/appname/static/en/filename
        # if language-specific file doesn't exist, try same file in static
        #
        if self.language:
            static_file = os.path.join(self.request.env.applications_parent,
                                       'applications', self.application,
                                       'static', self.language, file)
        if not self.language or not os.path.isfile(static_file):
            static_file = os.path.join(self.request.env.applications_parent,
                                       'applications', self.application,
                                       'static', file)
        log_rewrite("route: static=%s" % static_file)
        return static_file
    def map_function(self):
        "handle function.extension"
        arg0 = self.harg0 # map hyphens
        functions = self.functions.get(self.controller, set())
        if isinstance(self.router.default_function, dict):
            default_function = self.router.default_function.get(self.controller, None)
        else:
            default_function = self.router.default_function # str or None
        default_function = self.domain_function or default_function
        if not arg0 or functions and arg0 not in functions:
            self.function = default_function or ""
            self.pop_arg_if(arg0 and self.function == arg0)
        else:
            # split 'function.extension' on the final dot
            func_ext = arg0.split('.')
            if len(func_ext) > 1:
                self.function = func_ext[0]
                self.extension = func_ext[-1]
            else:
                self.function = arg0
            self.pop_arg_if(True)
        log_rewrite("route: function.ext=%s.%s" % (self.function, self.extension))
        if not self.router._acfe_match.match(self.function):
            raise HTTP(400, thread.routes.error_message % 'invalid request',
                       web2py_error='invalid function')
        if self.extension and not self.router._acfe_match.match(self.extension):
            raise HTTP(400, thread.routes.error_message % 'invalid request',
                       web2py_error='invalid extension')
    def validate_args(self):
        '''
        check args against validation pattern
        '''
        for arg in self.args:
            if not self.router._args_match.match(arg):
                raise HTTP(400, thread.routes.error_message % 'invalid request',
                           web2py_error='invalid arg <%s>' % arg)
    def update_request(self):
        '''
        update request from self
        build env.request_uri
        make lower-case versions of http headers in env
        '''
        self.request.application = self.application
        self.request.controller = self.controller
        self.request.function = self.function
        self.request.extension = self.extension
        self.request.args = self.args
        if self.language:
            self.request.uri_language = self.language
        uri = '/%s/%s/%s' % (self.application, self.controller, self.function)
        if self.map_hyphen:
            uri = uri.replace('_', '-')
        if self.extension != 'html':
            uri += '.' + self.extension
        if self.language:
            uri = '/%s%s' % (self.language, uri)
        uri += self.args and urllib.quote('/' + '/'.join([str(x) for x in self.args])) or ''
        uri += (self.query and ('?' + self.query) or '')
        self.env['REQUEST_URI'] = uri
        # mirror environ into request.env with lower-case, dot-free keys
        for (key, value) in self.env.items():
            self.request.env[key.lower().replace('.', '_')] = value
    @property
    def arg0(self):
        "return first arg"
        return self.args(0)
    @property
    def harg0(self):
        "return first arg with optional hyphen mapping"
        if self.map_hyphen and self.args(0):
            return self.args(0).replace('-', '_')
        return self.args(0)
    def pop_arg_if(self, dopop):
        "conditionally remove first arg and return new first arg"
        if dopop:
            self.args.pop(0)
class MapUrlOut(object):
    """Logic for mapping outgoing URLs.

    Holds the (application, controller, function, args, ...) components of a
    URL under construction plus the router configuration that applies to it,
    then decides which leading components may safely be omitted so that
    map_url_in can still reverse the transformation.
    """

    def __init__(self, request, env, application, controller, function, args, other, scheme, host, port):
        """Initialize a map-out object.

        Copies the URL components and caches the relevant router settings
        (per-application router when one exists, else the BASE router).
        Raises SyntaxError for an exclusive-domain cross-application URL
        that does not specify a host.
        """
        self.default_application = routers.BASE.default_application
        # per-application router if configured, otherwise the base router
        if application in routers:
            self.router = routers[application]
        else:
            self.router = routers.BASE
        self.request = request
        self.env = env
        self.application = application
        self.controller = controller
        self.function = function
        self.args = args
        self.other = other
        self.scheme = scheme
        self.host = host
        self.port = port
        self.applications = routers.BASE.applications
        self.controllers = self.router.controllers
        # set of known functions for this controller (empty set if unknown)
        self.functions = self.router.functions.get(self.controller, set())
        self.languages = self.router.languages
        self.default_language = self.router.default_language
        self.exclusive_domain = self.router.exclusive_domain
        self.map_hyphen = self.router.map_hyphen
        self.map_static = self.router.map_static
        self.path_prefix = routers.BASE.path_prefix
        # domain-based defaults only apply when we have a request context
        self.domain_application = request and self.request.env.domain_application
        self.domain_controller = request and self.request.env.domain_controller
        # default_function may be a per-controller dict or a single name
        if isinstance(self.router.default_function, dict):
            self.default_function = self.router.default_function.get(self.controller, None)
        else:
            self.default_function = self.router.default_function
        if (self.router.exclusive_domain and self.domain_application and self.domain_application != self.application and not self.host):
            raise SyntaxError, 'cross-domain conflict: must specify host'
        # keep the incoming URI language only if this router supports it
        lang = request and request.uri_language
        if lang and self.languages and lang in self.languages:
            self.language = lang
        else:
            self.language = None
        # omission flags; set progressively by omit_lang/omit_acf
        self.omit_application = False
        self.omit_language = False
        self.omit_controller = False
        self.omit_function = False

    def omit_lang(self):
        "omit language if possible"
        if not self.language or self.language == self.default_language:
            self.omit_language = True

    def omit_acf(self):
        """Omit what we can of /a/c/f, then veto any omission that would
        make the resulting URL ambiguous for map_url_in."""
        router = self.router

        # Handle the easy no-args case of tail-defaults: /a/c /a /
        #
        if not self.args and self.function == self.default_function:
            self.omit_function = True
            if self.controller == router.default_controller:
                self.omit_controller = True
                if self.application == self.default_application:
                    self.omit_application = True

        # omit default application
        # (which might be the domain default application)
        #
        default_application = self.domain_application or self.default_application
        if self.application == default_application:
            self.omit_application = True

        # omit controller if default controller
        #
        default_controller = ((self.application == self.domain_application) and self.domain_controller) or router.default_controller or ''
        if self.controller == default_controller:
            self.omit_controller = True

        # omit function if possible
        #
        if self.functions and self.function in self.functions and self.function == self.default_function:
            self.omit_function = True

        # prohibit ambiguous cases
        #
        # because we presume the lang string to be unambiguous, its presence protects application omission
        #
        if self.exclusive_domain:
            applications = [self.domain_application]
        else:
            applications = self.applications
        if self.omit_language:
            if not applications or self.controller in applications:
                self.omit_application = False
            if self.omit_application:
                if not applications or self.function in applications:
                    self.omit_controller = False
        if not self.controllers or self.function in self.controllers:
            self.omit_controller = False
        if self.args:
            if self.args[0] in self.functions or self.args[0] in self.controllers or self.args[0] in applications:
                self.omit_function = False
        if self.omit_controller:
            if self.function in self.controllers or self.function in applications:
                self.omit_controller = False
        if self.omit_application:
            if self.controller in applications:
                self.omit_application = False

        # handle static as a special case
        # (easier for external static handling)
        #
        if self.controller == 'static' or self.controller.startswith('static/'):
            if not self.map_static:
                self.omit_application = False
                if self.language:
                    self.omit_language = False
            self.omit_controller = False
            self.omit_function = False

    def build_acf(self):
        "build acf from components"
        acf = ''
        # hyphen mapping: underscores become hyphens in the visible URL
        if self.map_hyphen:
            self.application = self.application.replace('_', '-')
            self.controller = self.controller.replace('_', '-')
            if self.controller != 'static' and not self.controller.startswith('static/'):
                self.function = self.function.replace('_', '-')
        if not self.omit_application:
            acf += '/' + self.application
        if not self.omit_language:
            acf += '/' + self.language
        if not self.omit_controller:
            acf += '/' + self.controller
        if not self.omit_function:
            acf += '/' + self.function
        if self.path_prefix:
            acf = '/' + '/'.join(self.path_prefix) + acf
        if self.args:
            return acf
        return acf or '/'

    def acf(self):
        "convert components to /app/lang/controller/function"
        if not routers:
            return None  # use regex filter
        self.omit_lang()        # try to omit language
        self.omit_acf()         # try to omit a/c/f
        return self.build_acf() # build and return the /a/lang/c/f string
def map_url_in(request, env, app=False):
    """Route an incoming URL.

    :param request: the current Request object (mutated in place)
    :param env: the WSGI environment
    :param app: when True, stop after application mapping and return the
        application name only
    :returns: (static_file_path, env) when the URL names a static file,
        otherwise (None, env)
    """
    # initialize router-url object
    #
    thread.routes = params  # default to base routes
    # named url_map (not 'map') so the builtin map() is not shadowed
    url_map = MapUrlIn(request=request, env=env)
    url_map.map_prefix()  # strip prefix if present
    url_map.map_app()     # determine application
    # configure thread.routes for error rewrite
    #
    if params.routes_app:
        thread.routes = params_apps.get(app, params)
    if app:
        return url_map.application
    root_static_file = url_map.map_root_static()  # handle root-static files
    if root_static_file:
        return (root_static_file, url_map.env)
    url_map.map_language()
    url_map.map_controller()
    static_file = url_map.map_static()
    if static_file:
        return (static_file, url_map.env)
    url_map.map_function()
    url_map.validate_args()
    url_map.update_request()
    return (None, url_map.env)
def map_url_out(request, env, application, controller, function, args, other, scheme, host, port):
    '''
    supply /a/c/f (or /a/lang/c/f) portion of outgoing url

    The basic rule is that we can only make transformations
    that map_url_in can reverse.

    Suppose that the incoming arguments are a,c,f,args,lang
    and that the router defaults are da, dc, df, dl.

    We can perform these transformations trivially if args=[] and lang=None or dl:

        /da/dc/df => /
        /a/dc/df => /a
        /a/c/df => /a/c

    We would also like to be able to strip the default application or application/controller
    from URLs with function/args present, thus:

        /da/c/f/args => /c/f/args
        /da/dc/f/args => /f/args

    We use [applications] and [controllers] and {functions} to suppress ambiguous omissions.

    We assume that language names do not collide with a/c/f names.
    '''
    # named url_map (not 'map') so the builtin map() is not shadowed
    url_map = MapUrlOut(request, env, application, controller, function,
                        args, other, scheme, host, port)
    return url_map.acf()
def get_effective_router(appname):
    "return a private copy of the effective router for the specified application"
    if routers and appname in routers:
        # wrap in Storage so the caller gets a copy, not the shared router
        return Storage(routers[appname])
    return None
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
def handler(request, response, methods):
    """Serve an XML-RPC request: expose *methods* through a dispatcher and
    return the marshaled response body."""
    response.session_id = None  # no sessions for xmlrpc
    disp = SimpleXMLRPCDispatcher(allow_none=True, encoding=None)
    for method in methods:
        disp.register_function(method)
    disp.register_introspection_functions()
    response.headers['Content-Type'] = 'text/xml'
    # pass the (possibly absent) _dispatch hook through to the marshaler
    return disp._marshaled_dispatch(request.body.read(),
                                    getattr(disp, '_dispatch', None))
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu> and
Limodou <limodou@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This makes uses of the pywin32 package
(http://sourceforge.net/projects/pywin32/).
You do not need to install this package to use web2py.
"""
import time
import os
import sys
import traceback
try:
import win32serviceutil
import win32service
import win32event
except:
if os.name == 'nt':
print "Warning, winservice is unable to install the Mark Hammond Win32 extensions"
import servicemanager
import _winreg
from fileutils import up
__all__ = ['web2py_windows_service_handler']
class Service(win32serviceutil.ServiceFramework):
    """Minimal pywin32 service skeleton.

    Subclasses override start()/stop() and set _svc_name_/_svc_display_name_.
    SvcDoRun blocks on an event until SvcStop signals it.
    """
    _svc_name_ = '_unNamed'
    _svc_display_name_ = '_Service Template'

    def __init__(self, *args):
        win32serviceutil.ServiceFramework.__init__(self, *args)
        # auto-reset event: SvcDoRun waits on it, SvcStop sets it
        self.stop_event = win32event.CreateEvent(None, 0, 0, None)

    def log(self, msg):
        "write msg to the Windows event log"
        servicemanager.LogInfoMsg(str(msg))

    def SvcDoRun(self):
        "service entry point: run start() then block until stopped"
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        try:
            self.ReportServiceStatus(win32service.SERVICE_RUNNING)
            self.start()
            win32event.WaitForSingleObject(self.stop_event,
                                           win32event.INFINITE)
        except:
            # BUG FIX: format_exc() takes an optional *limit* argument;
            # the original passed the sys.exc_info function object as that
            # limit. Call it with no arguments to log the real traceback.
            self.log(traceback.format_exc())
        self.SvcStop()
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)

    def SvcStop(self):
        "stop the service: run stop() and release SvcDoRun"
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        try:
            self.stop()
        except:
            # BUG FIX: same format_exc misuse as in SvcDoRun
            self.log(traceback.format_exc())
        win32event.SetEvent(self.stop_event)
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)

    # to be overridden
    def start(self):
        pass

    # to be overridden
    def stop(self):
        pass
class Web2pyService(Service):
    """Windows service wrapper around the web2py HTTP server.

    Reads server settings from an importable options module (name taken
    from argv or _exe_args_) and runs main.HttpServer until stopped.
    """
    _svc_name_ = 'web2py'
    _svc_display_name_ = 'web2py Service'
    _exe_args_ = 'options'   # default name of the options module to import
    server = None            # main.HttpServer instance while running

    def chdir(self):
        """Change CWD to the web2py install directory recorded in the
        Windows registry for this service; return True on success."""
        try:
            # registry stores the dotted PythonClass path; its dirname is
            # the directory the service was installed from
            h = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                r'SYSTEM\CurrentControlSet\Services\%s'
                                % self._svc_name_)
            try:
                cls = _winreg.QueryValue(h, 'PythonClass')
            finally:
                _winreg.CloseKey(h)
            dir = os.path.dirname(cls)
            os.chdir(dir)
            return True
        except:
            self.log("Can't change to web2py working path; server is stopped")
            return False

    def start(self):
        "import the options module and start the HTTP server"
        self.log('web2py server starting')
        if not self.chdir():
            return
        if len(sys.argv) == 2:
            opt_mod = sys.argv[1]
        else:
            opt_mod = self._exe_args_
        options = __import__(opt_mod, [], [], '')
        if True:  # legacy support for old options files, which have only (deprecated) numthreads
            if hasattr(options, 'numthreads') and not hasattr(options, 'minthreads'):
                options.minthreads = options.numthreads
            if not hasattr(options, 'minthreads'): options.minthreads = None
            if not hasattr(options, 'maxthreads'): options.maxthreads = None
        # imported here (after chdir) so web2py's main module is found
        import main
        self.server = main.HttpServer(
            ip=options.ip,
            port=options.port,
            password=options.password,
            pid_filename=options.pid_filename,
            log_filename=options.log_filename,
            profiler_filename=options.profiler_filename,
            ssl_certificate=options.ssl_certificate,
            ssl_private_key=options.ssl_private_key,
            min_threads=options.minthreads,
            max_threads=options.maxthreads,
            server_name=options.server_name,
            request_queue_size=options.request_queue_size,
            timeout=options.timeout,
            shutdown_timeout=options.shutdown_timeout,
            path=options.folder
            )
        try:
            self.server.start()
        except:
            # self.server.stop()
            self.server = None
            raise

    def stop(self):
        "shut the HTTP server down"
        self.log('web2py server stopping')
        if not self.chdir():
            return
        if self.server:
            self.server.stop()
        time.sleep(1)
def web2py_windows_service_handler(argv=None, opt_file='options'):
    """Install (when opt_file is given) and drive the web2py Windows
    service via pywin32's command-line handler."""
    here = os.path.dirname(__file__)
    class_string = os.path.normpath(
        os.path.join(up(here), 'gluon.winservice.Web2pyService'))
    if opt_file:
        Web2pyService._exe_args_ = opt_file
        # first run an implicit 'install' so the registry entry exists
        win32serviceutil.HandleCommandLine(
            Web2pyService, serviceClassString=class_string,
            argv=['', 'install'])
    win32serviceutil.HandleCommandLine(
        Web2pyService, serviceClassString=class_string, argv=argv)
if __name__ == '__main__':
    # allow running this module directly to install/control the service
    web2py_windows_service_handler()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import cgi
import os
import re
import copy
import types
import urllib
import base64
import sanitizer
import itertools
import decoder
import copy_reg
import cPickle
import marshal
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
from storage import Storage
from utils import web2py_uuid, hmac_hash
from highlight import highlight
# matches any carriage return or newline; used to reject CRLF injection in URL()
regex_crlf = re.compile('\r|\n')

# local alias: joining on the empty separator is the hot path in xml generation
join = ''.join
# public API of this module: HTML helper classes plus URL building/escaping
__all__ = [
    'A', 'B', 'BEAUTIFY', 'BODY', 'BR', 'BUTTON', 'CENTER', 'CAT', 'CODE',
    'COL', 'COLGROUP', 'DIV', 'EM', 'EMBED', 'FIELDSET', 'FORM',
    'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'HEAD', 'HR', 'HTML', 'I',
    'IFRAME', 'IMG', 'INPUT', 'LABEL', 'LEGEND', 'LI', 'LINK', 'OL', 'UL',
    'MARKMIN', 'MENU', 'META', 'OBJECT', 'ON', 'OPTION', 'P', 'PRE',
    'SCRIPT', 'OPTGROUP', 'SELECT', 'SPAN', 'STYLE', 'TABLE', 'TAG', 'TD',
    'TEXTAREA', 'TH', 'THEAD', 'TBODY', 'TFOOT', 'TITLE', 'TR', 'TT',
    'URL', 'XHTML', 'XML',
    'xmlescape', 'embed64',
    ]
def xmlescape(data, quote = True):
    """
    returns an escaped string of the provided data

    :param data: the data to be escaped
    :param quote: optional (default True); when True double quotes are
        escaped as well  [docstring fixed: it previously claimed False]
    """
    # first try the xml function: objects that know how to serialize
    # themselves (e.g. XML, DIV) are trusted as-is
    if hasattr(data,'xml') and callable(data.xml):
        return data.xml()

    # otherwise, make it a string
    if not isinstance(data, (str, unicode)):
        data = str(data)
    elif isinstance(data, unicode):
        data = data.encode('utf8', 'xmlcharrefreplace')

    # ... and do the escaping
    # BUG FIX: the replace() used to map "'" to itself (a no-op); escape
    # single quotes as &#x27; so output is safe inside single-quoted
    # attributes, matching the escaping intent stated above.
    data = cgi.escape(data, quote).replace("'", "&#x27;")
    return data
def truncate_string(text, length, dots='...'):
    """Shorten utf-8 encoded *text* to at most *length* characters.

    When truncation occurs, the result is re-encoded utf-8 with *dots*
    appended; otherwise the decoded text is returned unchanged.
    """
    decoded = text.decode('utf-8')
    if len(decoded) <= length:
        return decoded
    keep = length - len(dots)
    return decoded[:keep].encode('utf-8') + dots
def URL(
    a=None,
    c=None,
    f=None,
    r=None,
    args=None,
    vars=None,
    anchor='',
    extension=None,
    env=None,
    hmac_key=None,
    hash_vars=True,
    salt=None,
    user_signature=None,
    scheme=None,
    host=None,
    port=None,
    encode_embedded_slash=False,
    url_encode=True
    ):
    """
    generate a URL

    example::

        >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
        ...     vars={'p':1, 'q':2}, anchor='1'))
        '/a/c/f/x/y/z?p=1&q=2#1'

        >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
        ...     vars={'p':(1,3), 'q':2}, anchor='1'))
        '/a/c/f/x/y/z?p=1&p=3&q=2#1'

        >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
        ...     vars={'p':(3,1), 'q':2}, anchor='1'))
        '/a/c/f/x/y/z?p=3&p=1&q=2#1'

        >>> str(URL(a='a', c='c', f='f', anchor='1+2'))
        '/a/c/f#1%2B2'

        >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
        ...     vars={'p':(1,3), 'q':2}, anchor='1', hmac_key='key'))
        '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=a32530f0d0caa80964bb92aad2bedf8a4486a31f#1'

        >>> str(URL(a='a', c='c', f='f', args=['w/x', 'y/z']))
        '/a/c/f/w/x/y/z'

        >>> str(URL(a='a', c='c', f='f', args=['w/x', 'y/z'], encode_embedded_slash=True))
        '/a/c/f/w%2Fx/y%2Fz'

        >>> str(URL(a='a', c='c', f='f', args=['%(id)d'], url_encode=False))
        '/a/c/f/%(id)d'

        >>> str(URL(a='a', c='c', f='f', args=['%(id)d'], url_encode=True))
        '/a/c/f/%25%28id%29d'

        >>> str(URL(a='a', c='c', f='f', vars={'id' : '%(id)d' }, url_encode=False))
        '/a/c/f?id=%(id)d'

        >>> str(URL(a='a', c='c', f='f', vars={'id' : '%(id)d' }, url_encode=True))
        '/a/c/f?id=%25%28id%29d'

        >>> str(URL(a='a', c='c', f='f', anchor='%(id)d', url_encode=False))
        '/a/c/f#%(id)d'

        >>> str(URL(a='a', c='c', f='f', anchor='%(id)d', url_encode=True))
        '/a/c/f#%25%28id%29d'

    generates a url '/a/c/f' corresponding to application a, controller c
    and function f. If r=request is passed, a, c, f are set, respectively,
    to r.application, r.controller, r.function.

    The more typical usage is:

    URL(r=request, f='index') that generates a url for the index function
    within the present application and controller.

    :param a: application (default to current if r is given)
    :param c: controller (default to current if r is given)
    :param f: function (default to current if r is given)
    :param r: request (optional)
    :param args: any arguments (optional)
    :param vars: any variables (optional)
    :param anchor: anchorname, without # (optional)
    :param hmac_key: key to use when generating hmac signature (optional)
    :param hash_vars: which of the vars to include in our hmac signature
        True (default) - hash all vars, False - hash none of the vars,
        iterable - hash only the included vars ['key1','key2']
    :param scheme: URI scheme (True, 'http' or 'https', etc); forces absolute URL (optional)
    :param host: string to force absolute URL with host (True means http_host)
    :param port: optional port number (forces absolute URL)

    :raises SyntaxError: when no application, controller or function is
        available
    :raises SyntaxError: when a CRLF is found in the generated url
    """
    from rewrite import url_out # done here in case used not-in web2py

    if args in (None,[]): args = []
    vars = vars or {}
    application = None
    controller = None
    function = None

    if not isinstance(args, (list, tuple)):
        args = [args]

    # positional shorthand: URL('f') or URL('c','f')
    if not r:
        if a and not c and not f: (f,a,c)=(a,c,f)
        elif a and c and not f: (c,f,a)=(a,c,f)
        from globals import current
        if hasattr(current,'request'):
            r = current.request
    if r:
        application = r.application
        controller = r.controller
        function = r.function
        env = r.env
        if extension is None and r.extension != 'html':
            extension = r.extension
    if a:
        application = a
    if c:
        controller = c
    if f:
        if not isinstance(f, str):
            if hasattr(f,'__name__'):
                function = f.__name__
            else:
                raise SyntaxError('when calling URL, function or function name required')
        elif '/' in f:
            # allow f='func/arg1/arg2' shorthand
            items = f.split('/')
            function = f = items[0]
            args = items[1:] + args
        else:
            function = f

    # if the url gets a static resource, don't force extention
    if controller == 'static':
        extension = None

    if '.' in function:
        function, extension = function.split('.', 1)
    function2 = '%s.%s' % (function,extension or 'html')

    if not (application and controller and function):
        raise SyntaxError('not enough information to build the url')

    if args:
        if url_encode:
            if encode_embedded_slash:
                other = '/' + '/'.join([urllib.quote(str(x), '') for x in args])
            else:
                other = args and urllib.quote('/' + '/'.join([str(x) for x in args]))
        else:
            other = args and ('/' + '/'.join([str(x) for x in args]))
    else:
        other = ''

    if other.endswith('/'):
        other += '/'    # add trailing slash to make last trailing empty arg explicit

    # never sign a pre-existing signature; it is regenerated below
    if '_signature' in vars: vars.pop('_signature')

    list_vars = []
    for (key, vals) in sorted(vars.items()):
        if not isinstance(vals, (list, tuple)):
            vals = [vals]
        for val in vals:
            list_vars.append((key, val))

    if user_signature:
        from globals import current
        if current.session.auth:
            hmac_key = current.session.auth.hmac_key

    if hmac_key:
        # generate an hmac signature of the vars & args so can later
        # verify the user hasn't messed with anything
        h_args = '/%s/%s/%s%s' % (application, controller, function2, other)

        # how many of the vars should we include in our hash?
        if hash_vars is True:       # include them all
            h_vars = list_vars
        elif hash_vars is False:    # include none of them
            h_vars = ''
        else:                       # include just those specified
            if hash_vars and not isinstance(hash_vars, (list, tuple)):
                hash_vars = [hash_vars]
            h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars]

        # re-assembling the same way during hash authentication
        message = h_args + '?' + urllib.urlencode(sorted(h_vars))

        sig = hmac_hash(message, hmac_key, digest_alg='sha1', salt=salt)
        # add the signature into vars
        list_vars.append(('_signature', sig))

    if list_vars:
        if url_encode:
            other += '?%s' % urllib.urlencode(list_vars)
        else:
            other += '?%s' % '&'.join([var[0]+'='+var[1] for var in list_vars])
    if anchor:
        if url_encode:
            other += '#' + urllib.quote(str(anchor))
        else:
            other += '#' + (str(anchor))
    if extension:
        function += '.' + extension

    if regex_crlf.search(join([application, controller, function, other])):
        raise SyntaxError('CRLF Injection Detected')
    url = url_out(r, env, application, controller, function,
                  args, other, scheme, host, port)
    return url
def verifyURL(request, hmac_key=None, hash_vars=True, salt=None, user_signature=None):
    """
    Verifies that a request's args & vars have not been tampered with by the user

    :param request: web2py's request object
    :param hmac_key: the key to authenticate with, must be the same one previously
        used when calling URL()
    :param hash_vars: which vars to include in our hashing. (Optional)
        Only uses the 1st value currently
        True (or undefined) means all, False none,
        an iterable just the specified keys

    do not call directly. Use instead:

    URL.verify(hmac_key='...')

    the key has to match the one used to generate the URL.

        >>> r = Storage()
        >>> gv = Storage(p=(1,3),q=2,_signature='a32530f0d0caa80964bb92aad2bedf8a4486a31f')
        >>> r.update(dict(application='a', controller='c', function='f', extension='html'))
        >>> r['args'] = ['x', 'y', 'z']
        >>> r['get_vars'] = gv
        >>> verifyURL(r, 'key')
        True
        >>> verifyURL(r, 'kay')
        False
        >>> r.get_vars.p = (3, 1)
        >>> verifyURL(r, 'key')
        True
        >>> r.get_vars.p = (3, 2)
        >>> verifyURL(r, 'key')
        False

    """
    # idiom fix: membership test instead of deprecated dict.has_key()
    if '_signature' not in request.get_vars:
        return False # no signature in the request URL

    # check if user_signature requires
    if user_signature:
        from globals import current
        if not current.session or not current.session.auth:
            return False
        hmac_key = current.session.auth.hmac_key
    if not hmac_key:
        return False

    # get our sig from request.get_vars for later comparison
    original_sig = request.get_vars._signature

    # now generate a new hmac for the remaining args & vars
    vars, args = request.get_vars, request.args

    # remove the signature var since it was not part of our signed message
    request.get_vars.pop('_signature')

    # join all the args & vars into one long string

    # always include all of the args
    other = args and urllib.quote('/' + '/'.join([str(x) for x in args])) or ''
    h_args = '/%s/%s/%s.%s%s' % (request.application,
                                 request.controller,
                                 request.function,
                                 request.extension,
                                 other)

    # but only include those vars specified (allows more flexibility for use with
    # forms or ajax)

    list_vars = []
    for (key, vals) in sorted(vars.items()):
        if not isinstance(vals, (list, tuple)):
            vals = [vals]
        for val in vals:
            list_vars.append((key, val))

    # which of the vars are to be included?
    if hash_vars is True:       # include them all
        h_vars = list_vars
    elif hash_vars is False:    # include none of them
        h_vars = ''
    else:                       # include just those specified
        # wrap in a try - if the desired vars have been removed it'll fail
        try:
            if hash_vars and not isinstance(hash_vars, (list, tuple)):
                hash_vars = [hash_vars]
            h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars]
        except:
            # user has removed one of our vars! Immediate fail
            return False
    # build the full message string with both args & vars
    message = h_args + '?' + urllib.urlencode(sorted(h_vars))

    # hash with the hmac_key provided
    sig = hmac_hash(message, str(hmac_key), digest_alg='sha1', salt=salt)

    # put _signature back in get_vars just in case a second call to URL.verify is performed
    # (otherwise it'll immediately return false)
    request.get_vars['_signature'] = original_sig

    # return whether or not the signature in the request matched the one we just generated
    # (I.E. was the message the same as the one we originally signed)
    return original_sig == sig
# expose verification on the URL callable itself: URL.verify(request, ...)
URL.verify = verifyURL

# truthy marker for boolean HTML attributes; DIV._xml renders a True-valued
# attribute as the bare attribute name (presumably used as e.g. _checked=ON)
ON = True
class XmlComponent(object):
    """
    Abstract root for all Html components
    """

    # TODO: move some DIV methods to here

    def xml(self):
        # subclasses must return their XML/HTML serialization as a string
        raise NotImplementedError
class XML(XmlComponent):
    """
    use it to wrap a string that contains XML/HTML so that it will not be
    escaped by the template

    example:

    >>> XML('<h1>Hello</h1>').xml()
    '<h1>Hello</h1>'
    """

    # NOTE(review): the list/dict defaults below are shared mutable default
    # arguments; they are only read here, but callers must not mutate them.
    def __init__(
        self,
        text,
        sanitize = False,
        permitted_tags = [
            'a',
            'b',
            'blockquote',
            'br/',
            'i',
            'li',
            'ol',
            'ul',
            'p',
            'cite',
            'code',
            'pre',
            'img/',
            'h1','h2','h3','h4','h5','h6',
            'table','tr','td','div',
            ],
        allowed_attributes = {
            'a': ['href', 'title'],
            'img': ['src', 'alt'],
            'blockquote': ['type'],
            'td': ['colspan'],
            },
        ):
        """
        :param text: the XML text
        :param sanitize: sanitize text using the permitted tags and allowed
            attributes (default False)
        :param permitted_tags: list of permitted tags (default: simple list of
            tags)
        :param allowed_attributes: dictionary of allowed attributed (default
            for A, IMG and BlockQuote).
            The key is the tag; the value is a list of allowed attributes.
        """
        if sanitize:
            text = sanitizer.sanitize(text, permitted_tags,
                    allowed_attributes)
        # normalize to a utf8-encoded byte string
        if isinstance(text, unicode):
            text = text.encode('utf8', 'xmlcharrefreplace')
        elif not isinstance(text, str):
            text = str(text)
        self.text = text

    def xml(self):
        # the wrapped text is emitted verbatim: no escaping
        return self.text

    def __str__(self):
        return self.xml()

    def __add__(self,other):
        return '%s%s' % (self,other)

    def __radd__(self,other):
        return '%s%s' % (other,self)

    def __cmp__(self,other):
        # compare by rendered string (Python 2 rich-comparison fallback)
        return cmp(str(self),str(other))

    def __hash__(self):
        return hash(str(self))

    def __getattr__(self,name):
        # delegate unknown attributes to the rendered string
        return getattr(str(self),name)

    def __getitem__(self,i):
        return str(self)[i]

    def __getslice__(self,i,j):
        return str(self)[i:j]

    def __iter__(self):
        for c in str(self): yield c

    def __len__(self):
        return len(str(self))

    def flatten(self,render=None):
        """
        return the text stored by the XML object rendered by the render function
        """
        if render:
            return render(self.text,None,{})
        return self.text

    def elements(self, *args, **kargs):
        """
        to be considered experimental since the behavior of this method is questionable
        another options could be TAG(self.text).elements(*args,**kargs)
        """
        return []
### important to allow safe session.flash=T(....)
def XML_unpickle(data):
    # NOTE(review): marshal is Python-version-specific and not safe for
    # untrusted input; acceptable here only because data is produced by
    # XML_pickle below from a local string.
    return marshal.loads(data)

def XML_pickle(data):
    # pickle an XML instance as its flattened string form
    return XML_unpickle, (marshal.dumps(str(data)),)

# register the custom reduction so XML objects stored in sessions pickle cleanly
copy_reg.pickle(XML, XML_pickle, XML_unpickle)
class DIV(XmlComponent):
"""
HTML helper, for easy generating and manipulating a DOM structure.
Little or no validation is done.
Behaves like a dictionary regarding updating of attributes.
Behaves like a list regarding inserting/appending components.
example::
>>> DIV('hello', 'world', _style='color:red;').xml()
'<div style=\"color:red;\">helloworld</div>'
all other HTML helpers are derived from DIV.
_something=\"value\" attributes are transparently translated into
something=\"value\" HTML attributes
"""
# name of the tag, subclasses should update this
# tags ending with a '/' denote classes that cannot
# contain components
tag = 'div'
def __init__(self, *components, **attributes):
    """
    :param *components: any components that should be nested in this element
    :param **attributes: any attributes you want to give to this element

    :raises SyntaxError: when a stand alone tag receives components
    """
    # tags ending in '/' are void elements and may not contain children
    if self.tag[-1:] == '/' and components:
        raise SyntaxError, '<%s> tags cannot have components'\
            % self.tag
    # a single list/tuple argument is treated as the component list itself
    if len(components) == 1 and isinstance(components[0], (list,tuple)):
        self.components = list(components[0])
    else:
        self.components = list(components)
    self.attributes = attributes
    self._fixup()
    # converts special attributes in components attributes
    self._postprocessing()
    self.parent = None
    for c in self.components:
        self._setnode(c)
def update(self, **kargs):
    """
    dictionary-style update of the tag attributes; returns self for chaining
    """
    for key in kargs:
        self[key] = kargs[key]
    return self
def append(self, value):
    """
    list style appending of components

    >>> a=DIV()
    >>> a.append(SPAN('x'))
    >>> print a
    <div><span>x</span></div>
    """
    # record parentage before inserting, then let _fixup re-wrap if needed
    self._setnode(value)
    ret = self.components.append(value)
    self._fixup()
    return ret
def insert(self, i, value):
    """
    list style inserting of components

    >>> a=DIV()
    >>> a.insert(0,SPAN('x'))
    >>> print a
    <div><span>x</span></div>
    """
    # record parentage before inserting, then let _fixup re-wrap if needed
    self._setnode(value)
    ret = self.components.insert(i, value)
    self._fixup()
    return ret
def __getitem__(self, i):
    """
    gets attribute with name 'i' or component #i.
    If attribute 'i' is not found returns None

    :param i: index
       if i is a string: the name of the attribute
       otherwise references to number of the component
    """
    if isinstance(i, str):
        # dict.get gives None on a missing attribute, matching the
        # original try/except KeyError behavior
        return self.attributes.get(i)
    return self.components[i]
def __setitem__(self, i, value):
    """
    sets attribute with name 'i' or component #i.

    :param i: index
       if i is a string: the name of the attribute
       otherwise references to number of the component

    :param value: the new value
    """
    self._setnode(value)
    if isinstance(i, (str, unicode)):
        self.attributes[i] = value
    else:
        self.components[i] = value
def __delitem__(self, i):
    """
    deletes attribute named i (string key) or component #i (integer index)
    """
    container = self.attributes if isinstance(i, str) else self.components
    del container[i]
def __len__(self):
    "number of nested components"
    return len(self.components)
def __nonzero__(self):
    # a helper is always truthy, even when it has zero components
    return True
def _fixup(self):
    """
    Handling of provided components.

    Nothing to fixup yet. May be overridden by subclasses,
    eg for wrapping some components in another component or blocking them.
    """
    return
def _wrap_components(self, allowed_parents,
                     wrap_parent = None,
                     wrap_lambda = None):
    """
    helper for _fixup. Checks if a component is in allowed_parents,
    otherwise wraps it in wrap_parent

    :param allowed_parents: (tuple) classes that the component should be an
        instance of
    :param wrap_parent: the class to wrap the component in, if needed
    :param wrap_lambda: lambda to use for wrapping, if needed
    """
    wrapped = []
    for child in self.components:
        if not isinstance(child, allowed_parents):
            # wrap_lambda takes precedence over wrap_parent
            child = wrap_lambda(child) if wrap_lambda else wrap_parent(child)
        if isinstance(child, DIV):
            child.parent = self
        wrapped.append(child)
    self.components = wrapped
def _postprocessing(self):
    """
    Handling of attributes (normally the ones not prefixed with '_').

    Nothing to postprocess yet. May be overridden by subclasses
    """
    return
def _traverse(self, status, hideerror=False):
    """Propagate form state (vars, errors, session, formname) down the
    component tree and fold the children's validation results into
    *status*; returns the combined status."""
    newstatus = status
    for c in self.components:
        if hasattr(c, '_traverse') and callable(c._traverse):
            # push this node's form context onto the child before recursing
            c.vars = self.vars
            c.request_vars = self.request_vars
            c.errors = self.errors
            c.latest = self.latest
            c.session = self.session
            c.formname = self.formname
            c['hideerror']=hideerror
            newstatus = c._traverse(status,hideerror) and newstatus

    # for input, textarea, select, option
    # deal with 'value' and 'validation'
    name = self['_name']
    if newstatus:
        newstatus = self._validate()
        self._postprocessing()
    elif 'old_value' in self.attributes:
        # validation failed somewhere: restore the previous value
        self['value'] = self['old_value']
        self._postprocessing()
    elif name and name in self.vars:
        self['value'] = self.vars[name]
        self._postprocessing()
    if name:
        self.latest[name] = self['value']
    return newstatus
def _validate(self):
    """
    nothing to validate yet. May be overridden by subclasses
    """
    return True
def _setnode(self,value):
    # record this element as the parent of any DIV-derived child
    if isinstance(value,DIV):
        value.parent = self
def _xml(self):
    """
    helper for xml generation. Returns separately:
    - the component attributes
    - the generated xml of the inner components

    Component attributes start with an underscore ('_') and
    do not have a False or None value. The underscore is removed.
    A value of True is replaced with the attribute name.

    :returns: tuple: (attributes, components)
    """
    # get the attributes for this component
    # (they start with '_', others may have special meanings)
    fa = ''
    for key in sorted(self.attributes):
        value = self[key]
        if key[:1] != '_':
            continue
        name = key[1:]
        if value is True:
            # boolean attribute: render as name="name"
            value = name
        elif value is False or value is None:
            continue
        fa += ' %s="%s"' % (name, xmlescape(value, True))

    # get the xml for the inner components
    co = join([xmlescape(component) for component in
               self.components])

    return (fa, co)
def xml(self):
    """
    generates the xml for this component.
    """
    attrs, inner = self._xml()

    if not self.tag:
        # tagless container: just the children's xml
        return inner

    if self.tag.endswith('/'):
        # stand-alone (void) tag: <tag attrs />
        return '<%s%s />' % (self.tag[:-1], attrs)

    # normal tag: <tag attrs> inner components xml </tag>
    return '<%s%s>%s</%s>' % (self.tag, attrs, inner, self.tag)
def __str__(self):
    """
    str(COMPONENT) returns equals COMPONENT.xml()
    """
    return self.xml()
def flatten(self, render=None):
    """
    return the text stored by the DIV object rendered by the render function
    the render function must take text, tagname, and attributes
    render=None is equivalent to render=lambda text, tag, attr: text

    >>> markdown = lambda text,tag=None,attributes={}: \
              {None: re.sub('\s+',' ',text), \
               'h1':'#'+text+'\\n\\n', \
               'p':text+'\\n'}.get(tag,text)
    >>> a=TAG('<h1>Header</h1><p>this is a test</p>')
    >>> a.flatten(markdown)
    '#Header\\n\\nthis is a test\\n'
    """
    text = ''
    for c in self.components:
        if isinstance(c,XmlComponent):
            # nested helper: recurse with the same renderer
            s=c.flatten(render)
        elif render:
            s=render(str(c))
        else:
            s=str(c)
        text+=s
    if render:
        # finally render this node's own accumulated text
        text = render(text,self.tag,self.attributes)
    return text
# class-level selector patterns used by elements():
regex_tag=re.compile('^[\w\-\:]+')            # leading tag name
regex_id=re.compile('#([\w\-]+)')             # '#id' fragment
regex_class=re.compile('\.([\w\-]+)')         # '.class' fragment
regex_attr=re.compile('\[([\w\-\:]+)=(.*?)\]')  # '[attr=value]' fragment
def elements(self, *args, **kargs):
"""
find all component that match the supplied attribute dictionary,
or None if nothing could be found
All components of the components are searched.
>>> a = DIV(DIV(SPAN('x'),3,DIV(SPAN('y'))))
>>> for c in a.elements('span',first_only=True): c[0]='z'
>>> print a
<div><div><span>z</span>3<div><span>y</span></div></div></div>
>>> for c in a.elements('span'): c[0]='z'
>>> print a
<div><div><span>z</span>3<div><span>z</span></div></div></div>
It also supports a syntax compatible with jQuery
>>> a=TAG('<div><span><a id="1-1" u:v=$>hello</a></span><p class="this is a test">world</p></div>')
>>> for e in a.elements('div a#1-1, p.is'): print e.flatten()
hello
world
>>> for e in a.elements('#1-1'): print e.flatten()
hello
>>> a.elements('a[u:v=$]')[0].xml()
'<a id="1-1" u:v="$">hello</a>'
>>> a=FORM( INPUT(_type='text'), SELECT(range(1)), TEXTAREA() )
>>> for c in a.elements('input, select, textarea'): c['_disabled'] = 'disabled'
>>> a.xml()
'<form action="" enctype="multipart/form-data" method="post"><input disabled="disabled" type="text" /><select disabled="disabled"><option value="0">0</option></select><textarea cols="40" disabled="disabled" rows="10"></textarea></form>'
"""
if len(args)==1:
args = [a.strip() for a in args[0].split(',')]
if len(args)>1:
subset = [self.elements(a,**kargs) for a in args]
return reduce(lambda a,b:a+b,subset,[])
elif len(args)==1:
items = args[0].split()
if len(items)>1:
subset=[a.elements(' '.join(items[1:]),**kargs) for a in self.elements(items[0])]
return reduce(lambda a,b:a+b,subset,[])
else:
item=items[0]
if '#' in item or '.' in item or '[' in item:
match_tag = self.regex_tag.search(item)
match_id = self.regex_id.search(item)
match_class = self.regex_class.search(item)
match_attr = self.regex_attr.finditer(item)
args = []
if match_tag: args = [match_tag.group()]
if match_id: kargs['_id'] = match_id.group(1)
if match_class: kargs['_class'] = re.compile('(?<!\w)%s(?!\w)' % \
match_class.group(1).replace('-','\\-').replace(':','\\:'))
for item in match_attr:
kargs['_'+item.group(1)]=item.group(2)
return self.elements(*args,**kargs)
# make a copy of the components
matches = []
first_only = False
if kargs.has_key("first_only"):
first_only = kargs["first_only"]
del kargs["first_only"]
# check if the component has an attribute with the same
# value as provided
check = True
tag = getattr(self,'tag').replace("/","")
if args and tag not in args:
check = False
for (key, value) in kargs.items():
if isinstance(value,(str,int)):
if self[key] != str(value):
check = False
elif key in self.attributes:
if not value.search(str(self[key])):
check = False
else:
check = False
if 'find' in kargs:
find = kargs['find']
for c in self.components:
if isinstance(find,(str,int)):
if isinstance(c,str) and str(find) in c:
check = True
else:
if isinstance(c,str) and find.search(c):
check = True
# if found, return the component
if check:
matches.append(self)
if first_only:
return matches
# loop the copy
for c in self.components:
if isinstance(c, XmlComponent):
kargs['first_only'] = first_only
child_matches = c.elements( *args, **kargs )
if first_only and len(child_matches) != 0:
return child_matches
matches.extend( child_matches )
return matches
def element(self, *args, **kargs):
"""
find the first component that matches the supplied attribute dictionary,
or None if nothing could be found
Also the components of the components are searched.
"""
kargs['first_only'] = True
elements = self.elements(*args, **kargs)
if not elements:
# we found nothing
return None
return elements[0]
def siblings(self,*args,**kargs):
"""
find all sibling components that match the supplied argument list
and attribute dictionary, or None if nothing could be found
"""
sibs = [s for s in self.parent.components if not s == self]
matches = []
first_only = False
if kargs.has_key("first_only"):
first_only = kargs["first_only"]
del kargs["first_only"]
for c in sibs:
try:
check = True
tag = getattr(c,'tag').replace("/","")
if args and tag not in args:
check = False
for (key, value) in kargs.items():
if c[key] != value:
check = False
if check:
matches.append(c)
if first_only: break
except:
pass
return matches
def sibling(self,*args,**kargs):
"""
find the first sibling component that match the supplied argument list
and attribute dictionary, or None if nothing could be found
"""
kargs['first_only'] = True
sibs = self.siblings(*args, **kargs)
if not sibs:
return None
return sibs[0]
class CAT(DIV):
    # concatenation helper: an empty tag renders only the children,
    # with no wrapping element
    tag = ''
def TAG_unpickler(data):
    # rebuild the DIV that TAG_pickler serialized
    return cPickle.loads(data)

def TAG_pickler(data):
    # dynamically created TAG classes cannot be pickled directly, so the
    # instance state is copied onto a plain DIV and that is pickled instead
    d = DIV()
    d.__dict__ = data.__dict__
    marshal_dump = cPickle.dumps(d)
    return (TAG_unpickler, (marshal_dump,))
class __TAG__(XmlComponent):
    """
    TAG factory example::
        >>> print TAG.first(TAG.second('test'), _key = 3)
        <first key=\"3\"><second>test</second></first>
    """
    def __getitem__(self, name):
        # TAG['name'] behaves like TAG.name (useful when the tag name is
        # not a valid python identifier)
        return self.__getattr__(name)
    def __getattr__(self, name):
        # a trailing underscore marks a self-closing tag: TAG.br_()
        if name[-1:] == '_':
            name = name[:-1] + '/'
        if isinstance(name,unicode):
            name = name.encode('utf-8')
        # build a one-off DIV subclass for the requested tag
        class __tag__(DIV):
            tag = name
        # register pickle support for the dynamically created class
        copy_reg.pickle(__tag__, TAG_pickler, TAG_unpickler)
        return lambda *a, **b: __tag__(*a, **b)
    def __call__(self,html):
        # TAG(html_string) parses markup into a helper tree
        return web2pyHTMLParser(decoder.decoder(html)).tree

# singleton factory: use TAG.tagname(...) or TAG['tagname'](...)
TAG = __TAG__()
class HTML(DIV):
    """
    Full-page helper: renders an <html> element preceded by a doctype.
    The 'doctype' attribute selects one of the predefined doctypes:
    -'strict' enables strict doctype
    -'transitional' enables transitional doctype (default)
    -'frameset' enables frameset doctype
    -'html5' enables HTML 5 doctype
    -any other string will be treated as user's own doctype
    'lang' parameter specifies the language of the document.
    Defaults to 'en'.
    See also :class:`DIV`
    """
    tag = 'html'
    strict = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n'
    transitional = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n'
    frameset = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">\n'
    html5 = '<!DOCTYPE HTML>\n'
    def xml(self):
        # default the language and expose it as an html attribute
        self.attributes['_lang'] = self['lang'] or 'en'
        requested = self['doctype']
        if not requested:
            doctype = self.transitional
        else:
            # map the symbolic names onto the class-level doctype strings;
            # anything else is emitted verbatim as a custom doctype
            for known in ('strict', 'transitional', 'frameset', 'html5'):
                if requested == known:
                    doctype = getattr(self, known)
                    break
            else:
                doctype = '%s\n' % requested
        (fa, co) = self._xml()
        return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag)
class XHTML(DIV):
    """
    XHTML version of the HTML helper.
    The 'doctype' attribute selects one of the predefined doctypes:
    -'strict' enables strict doctype
    -'transitional' enables transitional doctype (default)
    -'frameset' enables frameset doctype
    -any other string will be treated as user's own doctype
    'lang' parameter specifies the language of the document and the xml document.
    Defaults to 'en'.
    'xmlns' parameter specifies the xml namespace.
    Defaults to 'http://www.w3.org/1999/xhtml'.
    See also :class:`DIV`
    """
    tag = 'html'
    strict = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
    transitional = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
    frameset = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">\n'
    xmlns = 'http://www.w3.org/1999/xhtml'
    def xml(self):
        # fall back to the class-level namespace when none is supplied
        self.attributes['_xmlns'] = self['xmlns'] or self.xmlns
        # language goes into both the html and the xml lang attributes
        language = self['lang'] or 'en'
        self.attributes['_lang'] = language
        self.attributes['_xml:lang'] = language
        requested = self['doctype']
        if not requested:
            doctype = self.transitional
        else:
            for known in ('strict', 'transitional', 'frameset'):
                if requested == known:
                    doctype = getattr(self, known)
                    break
            else:
                doctype = '%s\n' % requested
        (fa, co) = self._xml()
        return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag)
class HEAD(DIV):
    tag = 'head'

class TITLE(DIV):
    tag = 'title'

class META(DIV):
    # the trailing '/' makes the element render self-closing: <meta ... />
    tag = 'meta/'

class LINK(DIV):
    tag = 'link/'
class SCRIPT(DIV):
    # <script> helper; the body is emitted verbatim (no XML escaping)
    # inside an HTML comment so legacy browsers ignore it
    tag = 'script'
    def xml(self):
        (fa, co) = self._xml()
        # no escaping of subcomponents
        co = '\n'.join([str(component) for component in
                        self.components])
        if co:
            # <script [attributes]><!--//--><![CDATA[//><!--
            # script body
            # //--><!]]></script>
            # return '<%s%s><!--//--><![CDATA[//><!--\n%s\n//--><!]]></%s>' % (self.tag, fa, co, self.tag)
            return '<%s%s><!--\n%s\n//--></%s>' % (self.tag, fa, co, self.tag)
        else:
            # empty script: default tag rendering
            return DIV.xml(self)
class STYLE(DIV):
    # <style> helper; the body is emitted verbatim (no XML escaping)
    # wrapped in a CDATA section for XHTML compatibility
    tag = 'style'
    def xml(self):
        (fa, co) = self._xml()
        # no escaping of subcomponents
        co = '\n'.join([str(component) for component in
                        self.components])
        if co:
            # <style [attributes]><!--/*--><![CDATA[/*><!--*/
            # style body
            # /*]]>*/--></style>
            return '<%s%s><!--/*--><![CDATA[/*><!--*/\n%s\n/*]]>*/--></%s>' % (self.tag, fa, co, self.tag)
        else:
            # empty style: default tag rendering
            return DIV.xml(self)
# simple tag helpers: each subclass only overrides the tag name
class IMG(DIV):
    tag = 'img/'

class SPAN(DIV):
    tag = 'span'

class BODY(DIV):
    tag = 'body'

class H1(DIV):
    tag = 'h1'

class H2(DIV):
    tag = 'h2'

class H3(DIV):
    tag = 'h3'

class H4(DIV):
    tag = 'h4'

class H5(DIV):
    tag = 'h5'

class H6(DIV):
    tag = 'h6'
class P(DIV):
    """
    Paragraph helper (<p>).
    When the 'cr2br' attribute is set, newlines in the rendered content
    are converted to <br /> tags.
    see also :class:`DIV`
    """
    tag = 'p'
    def xml(self):
        rendered = DIV.xml(self)
        if not self['cr2br']:
            return rendered
        return rendered.replace('\n', '<br />')
class B(DIV):
    tag = 'b'

class BR(DIV):
    # self-closing line break
    tag = 'br/'

class HR(DIV):
    # self-closing horizontal rule
    tag = 'hr/'
class A(DIV):
    """
    Anchor helper (<a>). Besides regular DIV attributes it understands:
    - delete: jQuery selector of the closest ancestor removed on click
    - component: URL loaded via web2py_component (ajax component link)
    - callback: URL called via ajax() on click
    - target: id of the element updated by component/callback
    - cid: id of the component reloaded via web2py_component
    """
    tag = 'a'
    def xml(self):
        if self['delete']:
            d = "jQuery(this).closest('%s').remove();" % self['delete']
        else:
            d = ''
        if self['component']:
            self['_onclick']="web2py_component('%s','%s');%sreturn false;" % \
                (self['component'],self['target'] or '',d)
            self['_href'] = self['_href'] or '#null'
        elif self['callback']:
            if d:
                # BUGFIX: fallback confirmation message read
                # 'want o delete' -- corrected to 'want to delete'
                self['_onclick']="if(confirm(w2p_ajax_confirm_message||'Are you sure you want to delete this object?')){ajax('%s',[],'%s');%s};return false;" % (self['callback'],self['target'] or '',d)
            else:
                self['_onclick']="ajax('%s',[],'%s');%sreturn false;" % \
                    (self['callback'],self['target'] or '',d)
            self['_href'] = self['_href'] or '#null'
        elif self['cid']:
            self['_onclick']='web2py_component("%s","%s");%sreturn false;' % \
                (self['_href'],self['cid'],d)
        return DIV.xml(self)
# simple tag helpers: each subclass only overrides the tag name
class BUTTON(DIV):
    tag = 'button'

class EM(DIV):
    tag = 'em'

class EMBED(DIV):
    tag = 'embed/'

class TT(DIV):
    tag = 'tt'

class PRE(DIV):
    tag = 'pre'

class CENTER(DIV):
    tag = 'center'
class CODE(DIV):
    """
    displays code in HTML with syntax highlighting.
    :param attributes: optional attributes:
        - language: indicates the language, otherwise PYTHON is assumed
        - link: can provide a link
        - styles: for styles
    Example::
        {{=CODE(\"print 'hello world'\", language='python', link=None,
        counter=1, styles={}, highlight_line=None)}}
    supported languages are \"python\", \"html_plain\", \"c\", \"cpp\",
    \"web2py\", \"html\".
    The \"html\" language interprets {{ and }} tags as \"web2py\" code,
    \"html_plain\" doesn't.
    if a link='/examples/global/vars/' is provided web2py keywords are linked to
    the online docs.
    the counter is used for line numbering, counter can be None or a prompt
    string.
    """
    def xml(self):
        # gather the display options and delegate rendering to highlight()
        language = self['language'] or 'PYTHON'
        link = self['link']
        counter = self.attributes.get('counter', 1)
        highlight_line = self.attributes.get('highlight_line', None)
        context_lines = self.attributes.get('context_lines', None)
        styles = self['styles'] or {}
        return highlight(
            join(self.components),
            language=language,
            link=link,
            counter=counter,
            styles=styles,
            attributes=self.attributes,
            highlight_line=highlight_line,
            context_lines=context_lines,
        )
class LABEL(DIV):
    tag = 'label'

class LI(DIV):
    tag = 'li'
class UL(DIV):
    """
    UL Component.
    If subcomponents are not LI-components they will be wrapped in a LI
    see also :class:`DIV`
    """
    tag = 'ul'
    def _fixup(self):
        # auto-wrap bare children in LI elements
        self._wrap_components(LI, LI)

class OL(UL):
    # ordered list; inherits the LI auto-wrapping from UL
    tag = 'ol'
class TD(DIV):
    tag = 'td'

class TH(DIV):
    tag = 'th'

class TR(DIV):
    """
    TR Component.
    If subcomponents are not TD/TH-components they will be wrapped in a TD
    see also :class:`DIV`
    """
    tag = 'tr'
    def _fixup(self):
        # auto-wrap bare children in TD elements (TH passes through)
        self._wrap_components((TD, TH), TD)

class THEAD(DIV):
    tag = 'thead'
    def _fixup(self):
        # auto-wrap bare children in TR elements
        self._wrap_components(TR, TR)

class TBODY(DIV):
    tag = 'tbody'
    def _fixup(self):
        # auto-wrap bare children in TR elements
        self._wrap_components(TR, TR)

class TFOOT(DIV):
    tag = 'tfoot'
    def _fixup(self):
        # auto-wrap bare children in TR elements
        self._wrap_components(TR, TR)

class COL(DIV):
    tag = 'col'

class COLGROUP(DIV):
    tag = 'colgroup'
class TABLE(DIV):
    """
    TABLE Component.
    If subcomponents are not TR/TBODY/THEAD/TFOOT-components
    they will be wrapped in a TR
    see also :class:`DIV`
    """
    tag = 'table'
    def _fixup(self):
        # auto-wrap bare children in TR elements; structural children
        # (TBODY/THEAD/TFOOT/COL/COLGROUP) pass through unchanged
        self._wrap_components((TR, TBODY, THEAD, TFOOT, COL, COLGROUP), TR)

class I(DIV):
    tag = 'i'

class IFRAME(DIV):
    tag = 'iframe'
class INPUT(DIV):
    """
    INPUT Component
    examples::
        >>> INPUT(_type='text', _name='name', value='Max').xml()
        '<input name=\"name\" type=\"text\" value=\"Max\" />'
        >>> INPUT(_type='checkbox', _name='checkbox', value='on').xml()
        '<input checked=\"checked\" name=\"checkbox\" type=\"checkbox\" value=\"on\" />'
        >>> INPUT(_type='radio', _name='radio', _value='yes', value='yes').xml()
        '<input checked=\"checked\" name=\"radio\" type=\"radio\" value=\"yes\" />'
        >>> INPUT(_type='radio', _name='radio', _value='no', value='yes').xml()
        '<input name=\"radio\" type=\"radio\" value=\"no\" />'
    the input helper takes two special attributes value= and requires=.
    :param value: used to pass the initial value for the input field.
        value differs from _value because it works for checkboxes, radio,
        textarea and select/option too.
        - for a checkbox value should be '' or 'on'.
        - for a radio or select/option value should be the _value
            of the checked/selected item.
    :param requires: should be None, or a validator or a list of validators
        for the value of the field.
    """
    tag = 'input/'
    def _validate(self):
        # # this only changes value, not _value
        name = self['_name']
        if name is None or name == '':
            # anonymous inputs cannot be validated or submitted
            return True
        name = str(name)
        if self['_type'] != 'checkbox':
            self['old_value'] = self['value'] or self['_value'] or ''
            value = self.request_vars.get(name, '')
            self['value'] = value
        else:
            # checkboxes: 'value' becomes True/False depending on whether
            # this box's _value was submitted
            self['old_value'] = self['value'] or False
            value = self.request_vars.get(name)
            if isinstance(value, (tuple, list)):
                self['value'] = self['_value'] in value
            else:
                self['value'] = self['_value'] == value
        requires = self['requires']
        if requires:
            if not isinstance(requires, (list, tuple)):
                requires = [requires]
            # run validators in order; the first failure stops the chain
            for validator in requires:
                (value, errors) = validator(value)
                if not errors is None:
                    self.vars[name] = value
                    self.errors[name] = errors
                    break
        if not name in self.errors:
            self.vars[name] = value
            return True
        return False
    def _postprocessing(self):
        # normalize _checked/_value/value depending on the input type
        t = self['_type']
        if not t:
            t = self['_type'] = 'text'
        t = t.lower()
        value = self['value']
        if self['_value'] is None:
            _value = None
        else:
            _value = str(self['_value'])
        if '_checked' in self.attributes and not 'value' in self.attributes:
            # caller managed _checked explicitly; leave it alone
            pass
        elif t == 'checkbox':
            if not _value:
                _value = self['_value'] = 'on'
            if not value:
                value = []
            elif value is True:
                value = [_value]
            elif not isinstance(value,(list,tuple)):
                # multiple selected values may be encoded as 'a|b|c'
                value = str(value).split('|')
            self['_checked'] = _value in value and 'checked' or None
        elif t == 'radio':
            if str(value) == str(_value):
                self['_checked'] = 'checked'
            else:
                self['_checked'] = None
        elif t == 'text' or t == 'hidden':
            # keep value and _value in sync for plain fields
            if value is None:
                self['value'] = _value
            else:
                self['_value'] = value
    def xml(self):
        name = self.attributes.get('_name', None)
        if name and hasattr(self, 'errors') \
                and self.errors.get(name, None) \
                and self['hideerror'] != True:
            # render the input flagged invalid plus an error DIV after it
            self['_class'] = (self['_class'] and self['_class']+' ' or '')+'invalidinput'
            return DIV.xml(self) + DIV(self.errors[name], _class='error',
                errors=None, _id='%s__error' % name).xml()
        else:
            # strip a previously added 'invalidinput' marker (12 chars)
            if self['_class'] and self['_class'].endswith('invalidinput'):
                self['_class'] = self['_class'][:-12]
                if self['_class'] == '':
                    self['_class'] = None
            return DIV.xml(self)
class TEXTAREA(INPUT):
    """
    example::
        TEXTAREA(_name='sometext', value='blah '*100, requires=IS_NOT_EMPTY())
    'blah blah blah ...' will be the content of the textarea field.
    """
    tag = 'textarea'
    def _postprocessing(self):
        # default the visible size if the caller did not specify it
        if not '_rows' in self.attributes:
            self['_rows'] = 10
        if not '_cols' in self.attributes:
            self['_cols'] = 40
        # a textarea stores its value as its (single) child component
        if not self['value'] is None:
            self.components = [self['value']]
        elif self.components:
            self['value'] = self.components[0]
class OPTION(DIV):
    tag = 'option'
    def _fixup(self):
        # default the option's value to its text content
        if not '_value' in self.attributes:
            self.attributes['_value'] = str(self.components[0])
class OBJECT(DIV):
    tag = 'object'
class OPTGROUP(DIV):
    """An <optgroup> helper; bare children are wrapped in OPTION elements."""
    tag = 'optgroup'
    def _fixup(self):
        # wrap every non-OPTION child in an OPTION whose value is its text
        self.components = [
            c if isinstance(c, OPTION) else OPTION(c, _value=str(c))
            for c in self.components]
class SELECT(INPUT):
    """
    example::
        >>> from validators import IS_IN_SET
        >>> SELECT('yes', 'no', _name='selector', value='yes',
        ...    requires=IS_IN_SET(['yes', 'no'])).xml()
        '<select name=\"selector\"><option selected=\"selected\" value=\"yes\">yes</option><option value=\"no\">no</option></select>'
    """
    tag = 'select'
    def _fixup(self):
        # wrap bare children in OPTION elements (OPTGROUP passes through)
        components = []
        for c in self.components:
            if isinstance(c, (OPTION, OPTGROUP)):
                components.append(c)
            else:
                components.append(OPTION(c, _value=str(c)))
        self.components = components
    def _postprocessing(self):
        # flatten OPTGROUPs so every OPTION is visited when marking
        # the selected item(s)
        component_list = []
        for c in self.components:
            if isinstance(c, OPTGROUP):
                component_list.append(c.components)
            else:
                component_list.append([c])
        options = itertools.chain(*component_list)
        value = self['value']
        if not value is None:
            if not self['_multiple']:
                # single select: at most one option gets _selected
                for c in options: # my patch
                    if value and str(c['_value'])==str(value):
                        c['_selected'] = 'selected'
                    else:
                        c['_selected'] = None
            else:
                # multiple select: value may be a scalar or a list
                if isinstance(value,(list,tuple)):
                    values = [str(item) for item in value]
                else:
                    values = [str(value)]
                for c in options: # my patch
                    if value and str(c['_value']) in values:
                        c['_selected'] = 'selected'
                    else:
                        c['_selected'] = None
class FIELDSET(DIV):
    tag = 'fieldset'

class LEGEND(DIV):
    tag = 'legend'
class FORM(DIV):
    """
    example::
        >>> from validators import IS_NOT_EMPTY
        >>> form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY()))
        >>> form.xml()
        '<form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"test\" type=\"text\" /></form>'
    a FORM is container for INPUT, TEXTAREA, SELECT and other helpers
    form has one important method::
        form.accepts(request.vars, session)
    if form is accepted (and all validators pass) form.vars contains the
    accepted vars, otherwise form.errors contains the errors.
    in case of errors the form is modified to present the errors to the user.
    """
    tag = 'form'
    def __init__(self, *components, **attributes):
        DIV.__init__(self, *components, **attributes)
        self.vars = Storage()    # validated field values after accepts()
        self.errors = Storage()  # field name -> validation error message
        self.latest = Storage()
        self.accepted = None # none for not submitted
    def accepts(
        self,
        request_vars,
        session=None,
        formname='default',
        keepvalues=False,
        onvalidation=None,
        hideerror=False,
        **kwargs
    ):
        """
        Validate the submitted request_vars against this form.
        Returns True when the submission is genuine (formname/formkey
        match) and every validator passes; fills self.vars/self.errors.
        kwargs is not used but allows to specify the same interface for
        FORM and SQLFORM
        """
        if request_vars.__class__.__name__ == 'Request':
            # accept a whole Request object for convenience
            request_vars=request_vars.post_vars
        self.errors.clear()
        self.request_vars = Storage()
        self.request_vars.update(request_vars)
        self.session = session
        self.formname = formname
        self.keepvalues = keepvalues
        # if this tag is a form and we are in accepting mode (status=True)
        # check formname and formkey
        status = True
        if self.session:
            formkey = self.session.get('_formkey[%s]' % self.formname, None)
            # check if user tampering with form and void CSRF
            if formkey != self.request_vars._formkey:
                status = False
        if self.formname != self.request_vars._formname:
            status = False
        if status and self.session:
            # check if editing a record that has been modified by the server
            if hasattr(self,'record_hash') and self.record_hash != formkey:
                status = False
                self.record_changed = True
        # run the field validators over the whole helper tree
        status = self._traverse(status,hideerror)
        if onvalidation:
            if isinstance(onvalidation, dict):
                onsuccess = onvalidation.get('onsuccess', None)
                onfailure = onvalidation.get('onfailure', None)
                if onsuccess and status:
                    onsuccess(self)
                if onfailure and request_vars and not status:
                    onfailure(self)
                status = len(self.errors) == 0
            elif status:
                if isinstance(onvalidation, (list, tuple)):
                    [f(self) for f in onvalidation]
                else:
                    onvalidation(self)
        if self.errors:
            status = False
        if not session is None:
            # issue a fresh formkey (one-time CSRF token) for the next render
            if hasattr(self,'record_hash'):
                formkey = self.record_hash
            else:
                formkey = web2py_uuid()
            self.formkey = session['_formkey[%s]' % formname] = formkey
        if status and not keepvalues:
            # clear the submitted values from the widgets
            self._traverse(False,hideerror)
        self.accepted = status
        return status
    def _postprocessing(self):
        # supply the standard form attributes when the caller omitted them
        if not '_action' in self.attributes:
            self['_action'] = ''
        if not '_method' in self.attributes:
            self['_method'] = 'post'
        if not '_enctype' in self.attributes:
            self['_enctype'] = 'multipart/form-data'
    def hidden_fields(self):
        # build the hidden inputs (user-specified plus CSRF bookkeeping)
        c = []
        if 'hidden' in self.attributes:
            for (key, value) in self.attributes.get('hidden',{}).items():
                c.append(INPUT(_type='hidden', _name=key, _value=value))
        if hasattr(self, 'formkey') and self.formkey:
            c.append(INPUT(_type='hidden', _name='_formkey',
                     _value=self.formkey))
        if hasattr(self, 'formname') and self.formname:
            c.append(INPUT(_type='hidden', _name='_formname',
                     _value=self.formname))
        return DIV(c, _class="hidden")
    def xml(self):
        # render a copy so appending the hidden fields does not mutate self
        newform = FORM(*self.components, **self.attributes)
        hidden_fields = self.hidden_fields()
        if hidden_fields.components:
            newform.append(hidden_fields)
        return DIV.xml(newform)
    def validate(self,**kwargs):
        """
        This function validates the form,
        you can use it instead of directly form.accepts.
        Usage:
        In controller
        def action():
            form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY()))
            form.validate() #you can pass some args here - see below
            return dict(form=form)
        This can receive a bunch of arguments
        onsuccess = 'flash' - will show message_onsuccess in response.flash
                    None - will do nothing
                    can be a function (lambda form: pass)
        onfailure = 'flash' - will show message_onfailure in response.flash
                    None - will do nothing
                    can be a function (lambda form: pass)
        message_onsuccess
        message_onfailure
        next = where to redirect in case of success
        any other kwargs will be passed for form.accepts(...)
        """
        from gluon import current, redirect
        kwargs['request_vars'] = kwargs.get('request_vars',current.request.post_vars)
        kwargs['session'] = kwargs.get('session',current.session)
        kwargs['dbio'] = kwargs.get('dbio',False) # necessary for SQLHTML forms
        onsuccess = kwargs.get('onsuccess','flash')
        onfailure = kwargs.get('onfailure','flash')
        message_onsuccess = kwargs.get('message_onsuccess',
                                       current.T("Success!"))
        message_onfailure = kwargs.get('message_onfailure',
                                       current.T("Errors in form, please check it out."))
        next = kwargs.get('next',None)
        # remove the local options before delegating to accepts()
        for key in ('message_onsuccess','message_onfailure','onsuccess',
                    'onfailure','next'):
            if key in kwargs:
                del kwargs[key]
        if self.accepts(**kwargs):
            if onsuccess == 'flash':
                if next:
                    current.session.flash = message_onsuccess
                else:
                    current.response.flash = message_onsuccess
            elif callable(onsuccess):
                onsuccess(self)
            if next:
                # substitute [varname] placeholders in the redirect URL
                if self.vars:
                    for key,value in self.vars.items():
                        next = next.replace('[%s]' % key,
                                            urllib.quote(str(value)))
                if not next.startswith('/'):
                    next = URL(next)
                redirect(next)
            return True
        elif self.errors:
            if onfailure == 'flash':
                current.response.flash = message_onfailure
            elif callable(onfailure):
                onfailure(self)
            return False
    def process(self, **kwargs):
        """
        Perform the .validate() method but returns the form
        Usage in controllers:
        # directly on return
        def action():
            #some code here
            return dict(form=FORM(...).process(...))
        You can use it with FORM, SQLFORM or FORM based plugins
        Examples:
        #response.flash messages
        def action():
            form = SQLFORM(db.table).process(message_onsuccess='Success!')
            return dict(form=form)
        # callback function
        # callback receives True or False as first arg, and a list of args.
        def my_callback(status, msg):
            response.flash = "Success! "+msg if status else "Errors occurred"
        # after argument can be 'flash' to response.flash messages
        # or a function name to use as callback or None to do nothing.
        def action():
            return dict(form=SQLFORM(db.table).process(onsuccess=my_callback))
        """
        kwargs['dbio'] = kwargs.get('dbio',True) # necessary for SQLHTML forms
        self.validate(**kwargs)
        return self
class BEAUTIFY(DIV):
    """
    example::
        >>> BEAUTIFY(['a', 'b', {'hello': 'world'}]).xml()
        '<div><table><tr><td><div>a</div></td></tr><tr><td><div>b</div></td></tr><tr><td><div><table><tr><td style="font-weight:bold;vertical-align:top">hello</td><td valign="top">:</td><td><div>world</div></td></tr></table></div></td></tr></table></div>'
    turns any list, dictionary, etc into decent looking html.
    Two special attributes are
    :sorted: a function that takes the dict and returned sorted keys
    :keyfilter: a function that takes a key and returns its representation
                or None if the key is to be skipped. By default key[:1]=='_' is skipped.
    """
    tag = 'div'
    @staticmethod
    def no_underscore(key):
        # default keyfilter: hide keys starting with an underscore
        if key[:1]=='_':
            return None
        return key
    def __init__(self, component, **attributes):
        self.components = [component]
        self.attributes = attributes
        sorter = attributes.get('sorted',sorted)
        keyfilter = attributes.get('keyfilter',BEAUTIFY.no_underscore)
        components = []
        attributes = copy.copy(self.attributes)
        # 'level' bounds the recursion depth (default 6 nested levels)
        level = attributes['level'] = attributes.get('level',6) - 1
        if '_class' in attributes:
            attributes['_class'] += 'i'
        if level == 0:
            # recursion budget exhausted: render nothing
            return
        for c in self.components:
            if hasattr(c,'xml') and callable(c.xml):
                # already a helper: keep it as-is
                components.append(c)
                continue
            elif hasattr(c,'keys') and callable(c.keys):
                # dict-like: render as a two-column key/value table
                rows = []
                try:
                    keys = (sorter and sorter(c)) or c
                    for key in keys:
                        if isinstance(key,(str,unicode)) and keyfilter:
                            filtered_key = keyfilter(key)
                        else:
                            filtered_key = str(key)
                        if filtered_key is None:
                            continue
                        value = c[key]
                        if type(value) == types.LambdaType:
                            # skip callables stored in the mapping
                            continue
                        rows.append(TR(TD(filtered_key, _style='font-weight:bold;vertical-align:top'),
                                       TD(':',_valign='top'),
                                       TD(BEAUTIFY(value, **attributes))))
                    components.append(TABLE(*rows, **attributes))
                    continue
                except:
                    # not actually dict-like: fall through to other cases
                    pass
            if isinstance(c, str):
                components.append(str(c))
            elif isinstance(c, unicode):
                components.append(c.encode('utf8'))
            elif isinstance(c, (list, tuple)):
                # sequences become one-column tables, one row per item
                items = [TR(TD(BEAUTIFY(item, **attributes)))
                         for item in c]
                components.append(TABLE(*items, **attributes))
            elif isinstance(c, cgi.FieldStorage):
                components.append('FieldStorage object')
            else:
                components.append(repr(c))
        self.components = components
class MENU(DIV):
    """
    Used to build menus
    Optional arguments
      _class: defaults to 'web2py-menu web2py-menu-vertical'
      ul_class: defaults to 'web2py-menu-vertical'
      li_class: defaults to 'web2py-menu-expand'
    Example:
        menu = MENU([['name', False, URL(...), [submenu]], ...])
        {{=menu}}
    """
    tag = 'ul'
    def __init__(self, data, **args):
        # data is a list of [name, active, link, optional submenu] items
        self.data = data
        self.attributes = args
        if not '_class' in self.attributes:
            self['_class'] = 'web2py-menu web2py-menu-vertical'
        if not 'ul_class' in self.attributes:
            self['ul_class'] = 'web2py-menu-vertical'
        if not 'li_class' in self.attributes:
            self['li_class'] = 'web2py-menu-expand'
        if not 'li_active' in self.attributes:
            self['li_active'] = 'web2py-menu-active'
        if not 'mobile' in self.attributes:
            self['mobile'] = False
    def serialize(self, data, level=0):
        # recursively render the menu tree into nested UL/LI helpers
        if level == 0:
            ul = UL(**self.attributes)
        else:
            ul = UL(_class=self['ul_class'])
        for item in data:
            (name, active, link) = item[:3]
            if isinstance(link,DIV):
                # a helper was supplied directly in place of a url
                li = LI(link)
            elif 'no_link_url' in self.attributes and self['no_link_url']==link:
                li = LI(DIV(name))
            elif link:
                li = LI(A(name, _href=link))
            else:
                # no link: dead anchor so styling stays consistent
                li = LI(A(name, _href='#',
                          _onclick='javascript:void(0);return false;'))
            if len(item) > 3 and item[3]:
                # item has a submenu: mark expandable and recurse
                li['_class'] = self['li_class']
                li.append(self.serialize(item[3], level+1))
            if active or ('active_url' in self.attributes and self['active_url']==link):
                if li['_class']:
                    li['_class'] = li['_class']+' '+self['li_active']
                else:
                    li['_class'] = self['li_active']
            if len(item) <= 4 or item[4] == True:
                # optional 5th element toggles item visibility
                ul.append(li)
        return ul
    def serialize_mobile(self, data, select=None, prefix=''):
        # mobile rendering: flatten the tree into a SELECT that navigates
        # via window.location on change
        if not select:
            select = SELECT()
        for item in data:
            if item[2]:
                select.append(OPTION(CAT(prefix, item[0]), _value=item[2], _selected=item[1]))
            if len(item)>3 and len(item[3]):
                self.serialize_mobile(item[3], select, prefix = CAT(prefix, item[0], '/'))
        select['_onchange'] = 'window.location=this.value'
        return select
    def xml(self):
        if self['mobile']:
            return self.serialize_mobile(self.data, 0).xml()
        else:
            return self.serialize(self.data, 0).xml()
def embed64(
    filename = None,
    file = None,
    data = None,
    extension = 'image/gif',
    ):
    """
    helper to encode the provided (binary) data into base64.
    :param filename: if provided, opens and reads this file in 'rb' mode
    :param file: if provided, reads this (open, file-like) object
    :param data: if provided, uses the provided data
    :param extension: mime type to advertise (default 'image/gif')
    :returns: a 'data:<mime>;base64,<payload>' URI string
    """
    if filename and os.path.exists(filename):
        # BUGFIX: previously tested os.path.exists(file), which raised a
        # TypeError (file is None) whenever only `filename` was supplied
        fp = open(filename, 'rb')
        data = fp.read()
        fp.close()
    elif file:
        # honor the documented `file` argument (it was silently ignored)
        data = file.read()
    payload = base64.b64encode(data)
    if not isinstance(payload, str):
        # Python 3 returns bytes; decode so %-interpolation stays clean
        payload = payload.decode('ascii')
    return 'data:%s;base64,%s' % (extension, payload)
def test():
    """
    Example:
    >>> from validators import *
    >>> print DIV(A('click me', _href=URL(a='a', c='b', f='c')), BR(), HR(), DIV(SPAN(\"World\"), _class='unknown')).xml()
    <div><a href=\"/a/b/c\">click me</a><br /><hr /><div class=\"unknown\"><span>World</span></div></div>
    >>> print DIV(UL(\"doc\",\"cat\",\"mouse\")).xml()
    <div><ul><li>doc</li><li>cat</li><li>mouse</li></ul></div>
    >>> print DIV(UL(\"doc\", LI(\"cat\", _class='feline'), 18)).xml()
    <div><ul><li>doc</li><li class=\"feline\">cat</li><li>18</li></ul></div>
    >>> print TABLE(['a', 'b', 'c'], TR('d', 'e', 'f'), TR(TD(1), TD(2), TD(3))).xml()
    <table><tr><td>a</td><td>b</td><td>c</td></tr><tr><td>d</td><td>e</td><td>f</td></tr><tr><td>1</td><td>2</td><td>3</td></tr></table>
    >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_EXPR('int(value)<10')))
    >>> print form.xml()
    <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" /></form>
    >>> print form.accepts({'myvar':'34'}, formname=None)
    False
    >>> print form.xml()
    <form action="" enctype="multipart/form-data" method="post"><input class="invalidinput" name="myvar" type="text" value="34" /><div class="error" id="myvar__error">invalid expression</div></form>
    >>> print form.accepts({'myvar':'4'}, formname=None, keepvalues=True)
    True
    >>> print form.xml()
    <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" value=\"4\" /></form>
    >>> form=FORM(SELECT('cat', 'dog', _name='myvar'))
    >>> print form.accepts({'myvar':'dog'}, formname=None, keepvalues=True)
    True
    >>> print form.xml()
    <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><select name=\"myvar\"><option value=\"cat\">cat</option><option selected=\"selected\" value=\"dog\">dog</option></select></form>
    >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_MATCH('^\w+$', 'only alphanumeric!')))
    >>> print form.accepts({'myvar':'as df'}, formname=None)
    False
    >>> print form.xml()
    <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input class=\"invalidinput\" name=\"myvar\" type=\"text\" value=\"as df\" /><div class=\"error\" id=\"myvar__error\">only alphanumeric!</div></form>
    >>> session={}
    >>> form=FORM(INPUT(value=\"Hello World\", _name=\"var\", requires=IS_MATCH('^\w+$')))
    >>> if form.accepts({}, session,formname=None): print 'passed'
    >>> if form.accepts({'var':'test ', '_formkey': session['_formkey[None]']}, session, formname=None): print 'passed'
    """
    # doctest container: all behavior lives in the docstring above
    pass
class web2pyHTMLParser(HTMLParser):
    """
    obj = web2pyHTMLParser(text) parses and html/xml text into web2py helpers.
    obj.tree contains the root of the tree, and tree can be manipulated
    >>> str(web2pyHTMLParser('hello<div a="b" c=3>wor<ld<span>xxx</span>y<script/>yy</div>zzz').tree)
    'hello<div a="b" c="3">wor<ld<span>xxx</span>y<script></script>yy</div>zzz'
    >>> str(web2pyHTMLParser('<div>a<span>b</div>c').tree)
    '<div>a<span>b</span></div>c'
    >>> tree = web2pyHTMLParser('hello<div a="b">world</div>').tree
    >>> tree.element(_a='b')['_c']=5
    >>> str(tree)
    'hello<div a="b" c="5">world</div>'
    """
    def __init__(self,text,closed=('input','link')):
        # `closed` lists tag names that must be treated as self-closing
        HTMLParser.__init__(self)
        self.tree = self.parent = TAG['']()
        self.closed = closed
        # NOTE(review): isinstance() on a class object is always False, so
        # this list is likely always empty and every tag falls through to
        # the TAG factory branch -- possibly issubclass was intended; verify
        self.tags = [x for x in __all__ if isinstance(eval(x),DIV)]
        self.last = None
        self.feed(text)
    def handle_starttag(self, tagname, attrs):
        # prefer a concrete helper class when one exists for this tag
        if tagname.upper() in self.tags:
            tag=eval(tagname.upper())
        else:
            if tagname in self.closed: tagname+='/'
            tag = TAG[tagname]()
        for key,value in attrs: tag['_'+key]=value
        tag.parent = self.parent
        self.parent.append(tag)
        if not tag.tag.endswith('/'):
            # open tag: descend so children attach here
            self.parent=tag
        else:
            # self-closing: remember it so a stray end tag can be ignored
            self.last = tag.tag[:-1]
    def handle_data(self,data):
        # normalize all text content to utf8-encoded str
        if not isinstance(data,unicode):
            try:
                data = data.decode('utf8')
            except:
                data = data.decode('latin1')
        self.parent.append(data.encode('utf8','xmlcharref'))
    def handle_charref(self,name):
        # numeric character references: &#NNN; or hex &#xNN;
        if name[1].lower()=='x':
            self.parent.append(unichr(int(name[2:], 16)).encode('utf8'))
        else:
            self.parent.append(unichr(int(name[1:], 10)).encode('utf8'))
    def handle_entityref(self,name):
        # named entities, e.g. &amp;
        self.parent.append(unichr(name2codepoint[name]).encode('utf8'))
    def handle_endtag(self, tagname):
        # this deals with unbalanced tags
        if tagname==self.last:
            return
        while True:
            try:
                parent_tagname=self.parent.tag
                self.parent = self.parent.parent
            except:
                raise RuntimeError, "unable to balance tag %s" % tagname
            if parent_tagname[:len(tagname)]==tagname: break
def markdown_serializer(text,tag=None,attr=None):
attr = attr or {}
if tag is None: return re.sub('\s+',' ',text)
if tag=='br': return '\n\n'
if tag=='h1': return '#'+text+'\n\n'
if tag=='h2': return '#'*2+text+'\n\n'
if tag=='h3': return '#'*3+text+'\n\n'
if tag=='h4': return '#'*4+text+'\n\n'
if tag=='p': return text+'\n\n'
if tag=='b' or tag=='strong': return '**%s**' % text
if tag=='em' or tag=='i': return '*%s*' % text
if tag=='tt' or tag=='code': return '`%s`' % text
if tag=='a': return '[%s](%s)' % (text,attr.get('_href',''))
if tag=='img': return '' % (attr.get('_alt',''),attr.get('_src',''))
return text
def markmin_serializer(text, tag=None, attr=None):
    """Serialize one parsed HTML node to markmin wiki syntax.

    text is the already-serialized content, tag the lowercase HTML tag name
    (or None for a text node), attr the helper-style attribute dict.
    Unknown tags (and plain text) pass through unchanged.
    """
    attr = attr or {}
    # if tag is None: return re.sub('\s+',' ',text)
    if tag == 'br':
        return '\n\n'
    if tag in ('h1', 'h2', 'h3', 'h4'):
        # Heading level equals the digit in the tag name.
        return '#' * int(tag[1]) + ' ' + text + '\n\n'
    if tag == 'p':
        return text + '\n\n'
    if tag == 'li':
        return '\n- ' + text.replace('\n', ' ')
    if tag == 'tr':
        # Drop the leading ' | ' produced by the first cell.
        return text[3:].replace('\n', ' ') + '\n'
    if tag in ('table', 'blockquote'):
        return '\n-----\n' + text + '\n------\n'
    if tag in ('td', 'th'):
        return ' | ' + text
    if tag in ('b', 'strong', 'label'):
        return '**%s**' % text
    if tag in ('em', 'i'):
        return "''%s''" % text
    if tag == 'tt':
        return '``%s``' % text.strip()
    if tag == 'code':
        return '``\n%s``' % text
    if tag == 'a':
        return '[[%s %s]]' % (text, attr.get('_href', ''))
    if tag == 'img':
        return '[[%s %s left]]' % (attr.get('_alt', 'no title'), attr.get('_src', ''))
    return text
class MARKMIN(XmlComponent):
    """
    For documentation: http://web2py.com/examples/static/markmin.html
    """
    def __init__(self, text, extra=None, allowed=None, sep='p'):
        # text: markmin wiki source
        # extra/allowed: passed straight through to the markmin renderer
        # sep: paragraph separation mode used by the renderer
        self.text = text
        self.extra = extra or {}
        self.allowed = allowed or {}
        self.sep = sep
    def xml(self):
        """
        calls the gluon.contrib.markmin render function to convert the wiki syntax
        """
        from contrib.markmin.markmin2html import render
        return render(self.text,extra=self.extra,allowed=self.allowed,sep=self.sep)
    def __str__(self):
        return self.xml()
    def flatten(self,render=None):
        """
        return the text stored by the MARKMIN object rendered by the render function
        """
        # NOTE(review): the render argument is currently ignored; the raw
        # markmin source is returned unrendered.
        return self.text
    def elements(self, *args, **kargs):
        """
        to be considered experimental since the behavior of this method is questionable
        another options could be TAG(self.text).elements(*args,**kargs)
        """
        return [self.text]
if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above when executed directly.
    import doctest
    doctest.testmod()
| Python |
"""
Extract client information from http user agent
The module does not try to detect all capabilities of browser in current form (it can easily be extended though).
Aim is
* fast
* very easy to extend
* reliable enough for practical purposes
* and assist python web apps to detect clients.
Taken from http://pypi.python.org/pypi/httpagentparser (MIT license)
Modified by Ross Peoples for web2py to better support iPhone and iPad.
Modified by Angelo Compagnucci <angelo.compagnucci@gmail.com> to better support a wide range of mobile devices.
Now it supports: tablet device (is_tablet), BlackBerry, BlackBerry PlayBook, Android Tablets, Windows Mobile,
Symbian.
"""
import sys
class DetectorsHub(dict):
    # Registry mapping each info_type ('os', 'dist', 'flavor', 'browser') to
    # the list of detector instances able to fill that slot of a result dict.
    _known_types = ['os', 'dist', 'flavor', 'browser']
    def __init__(self, *args, **kw):
        dict.__init__(self, *args, **kw)
        # Ensure every known type has a (possibly empty) detector list.
        for typ in self._known_types:
            self.setdefault(typ, [])
        self.registerDetectors()
    def register(self, detector):
        # New info_types get their own slot.
        # NOTE(review): this inserts into the class-level _known_types list,
        # so the change is shared by every DetectorsHub instance.
        if detector.info_type not in self._known_types:
            self[detector.info_type] = [detector]
            self._known_types.insert(detector.order, detector.info_type)
        else:
            self[detector.info_type].append(detector)
    def reorderByPrefs(self, detectors, prefs):
        # prefs semantics: None -> skip this slot entirely; [] -> keep the
        # original order; otherwise preferred names sort first.
        if prefs is None:
            return []
        elif prefs == []:
            return detectors
        else:
            # NOTE(review): mutates the caller's prefs list in place.
            prefs.insert(0, '')
            def key_name(d):
                # Unpreferred detectors sort last (sys.maxint, Python 2).
                return d.name in prefs and prefs.index(d.name) or sys.maxint
            return sorted(detectors, key=key_name)
    def __iter__(self):
        # Iterating a hub yields the known info_types, not the dict keys.
        return iter(self._known_types)
    def registerDetectors(self):
        # Instantiate every DetectorBase subclass defined at module level and
        # register those that opted in via can_register.
        detectors = [v() for v in globals().values() \
            if DetectorBase in getattr(v, '__mro__', [])]
        for d in detectors:
            if d.can_register:
                self.register(d)
class DetectorBase(object):
    """Base class for user-agent detectors.

    A detector matches when every string in look_for appears in the agent
    and none of skip_if_found does; on a match it records its name (and,
    when available, a version) into the shared result dict.
    """
    name = "" # "to perform match in DetectorsHub object"
    info_type = '' #override me
    result_key = '' #override me
    order = 10 # 0 is highest
    look_for = [] # list of words to look for
    skip_if_found = [] # strings if present stop processin
    can_register = False
    is_mobile = False
    is_tablet = False
    prefs = dict() # dict(info_type = [name1, name2], ..)
    version_splitters = ["/", " "]
    _suggested_detectors = None

    def __init__(self):
        # Default the display name to the class name.
        if not self.name:
            self.name = type(self).__name__
        # Only the class's *own* can_register counts; inherited values are
        # ignored so that abstract bases stay unregistered.
        self.can_register = self.__class__.__dict__.get('can_register', True)

    def detect(self, agent, result):
        """Record this detector's info into result; True when it matched."""
        if not (agent and self.checkWords(agent)):
            return False
        info = dict(name=self.name)
        result[self.info_type] = info
        if self.is_mobile:
            result['is_mobile'] = self.is_mobile
        if self.is_tablet:
            result['is_tablet'] = self.is_tablet
        version = self.getVersion(agent)
        if version:
            info['version'] = version
        return True

    def checkWords(self, agent):
        """True when agent contains all look_for words and no skip words."""
        if any(word in agent for word in self.skip_if_found):
            return False
        return all(word in agent for word in self.look_for)

    # This works only for the first element of look_for
    # If you want a different behaviour, you have to
    # override this method
    def getVersion(self, agent):
        # -> version string /None
        head, tail = self.version_splitters[0], self.version_splitters[1]
        marker = self.look_for[0] + head
        return agent.partition(marker)[2].partition(tail)[0].strip()
class OS(DetectorBase):
    # Abstract base for operating-system detectors (can_register=False keeps
    # the base itself out of the hub); OS versions in UAs end with ';'.
    info_type = "os"
    can_register = False
    version_splitters = [";", " "]
class Dist(DetectorBase):
    # Abstract base for distribution detectors (e.g. Ubuntu, Android, iPhone).
    info_type = "dist"
    can_register = False
class Flavor(DetectorBase):
    # Abstract base for OS-flavor detectors (e.g. MacOS, PlayBook, Series).
    info_type = "flavor"
    can_register = False
class Browser(DetectorBase):
    # Abstract base for browser detectors.
    info_type = "browser"
    can_register = False
class Macintosh(OS):
    # NOTE(review): this class is redefined later in this module (with a
    # flavor pref), so this earlier definition is shadowed at import time.
    look_for = ['Macintosh']
    prefs = dict(dist=None)
    def getVersion(self, agent):
        # The Macintosh token itself carries no version information.
        pass
class Firefox(Browser):
look_for = ["Firefox"]
class Konqueror(Browser):
look_for = ["Konqueror"]
version_splitters = ["/", ";"]
class Opera(Browser):
look_for = ["Opera"]
def getVersion(self, agent):
return agent.partition(self.look_for[0])[2][1:].partition(' ')[0]
class Netscape(Browser):
look_for = ["Netscape"]
class MSIE(Browser):
look_for = ["MSIE"]
skip_if_found = ["Opera"]
name = "Microsoft Internet Explorer"
version_splitters = [" ", ";"]
class Galeon(Browser):
look_for = ["Galeon"]
class Safari(Browser):
look_for = ["Safari"]
skip_if_found = ["Chrome", "OmniWeb", "Mobile", "iPad", 'Android']
def getVersion(self, agent):
if "Version/" in agent:
return agent.partition('Version/')[2].partition(' ')[0].strip()
class SafariTablet(Browser):
name = "Safari"
look_for = ['Safari', 'Android']
skip_if_found = ["Chrome", "OmniWeb", "Mobile", "iPad"]
is_mobile = True
is_tablet = True
def getVersion(self, agent):
if "Version/" in agent:
return agent.partition('Version/')[2].partition(' ')[0].strip()
class SafariMobile(Browser):
name = "Safari"
look_for = ["Safari", "Mobile"]
is_mobile = True
def getVersion(self, agent):
if "Version/" in agent:
return agent.partition('Version/')[2].partition(' ')[0].strip()
class SafariNokia(Browser):
name = "Safari"
look_for = ["Safari", "SymbianOS"]
is_mobile = True
def getVersion(self, agent):
pass
class SafariiPad(Browser):
name = "Safari"
look_for = ["Safari", "iPad"]
skip_if_found = ["Chrome", "OmniWeb"]
is_mobile = True
is_tablet = True
def getVersion(self, agent):
if "Version/" in agent:
return agent.partition('Version/')[2].partition(' ')[0].strip()
class Linux(OS):
look_for = ["Linux"]
prefs = dict(dist=["Ubuntu", "Android", "Debian"], flavor=None)
def getVersion(self, agent):
pass
class BlackBerry(OS):
look_for = ['BlackBerry']
prefs = dict( flavor=['PlayBook'])
is_mobile = True
# Manual check for tablet
def checkWords(self, agent):
if 'BlackBerry' in agent or 'PlayBook' in agent:
return True
return False
def getVersion(self, agent):
pass
class PlayBook(Flavor):
look_for = ['PlayBook']
is_mobile=True
is_tablet=True
def getVersion(self, agent):
return agent.partition('Tablet OS')[2].partition(';')[0].strip()
class Macintosh(OS):
look_for = ['Macintosh']
prefs = dict(dist=None, flavor=['MacOS'])
def getVersion(self, agent):
pass
class MacOS(Flavor):
    look_for = ['Mac OS']
    prefs = dict(browser=['Safari','SafariMobile', 'SafariIpad', 'Firefox', 'Opera', "Microsoft Internet Explorer"])
    def getVersion(self, agent):
        """Return the Mac OS version (e.g. 'X 10.6.6') from the agent string."""
        version_end_chars = [';', ')']
        part = agent.partition('Mac OS')[2].strip()
        # BUG FIX: previously `version` was referenced before assignment when
        # neither terminator appeared; fall back to the whole remainder.
        version = part
        for c in version_end_chars:
            if c in part:
                version = part.partition(c)[0]
                break
        return version.replace('_', '.')
class Windows(OS):
look_for = ['Windows', 'NT']
prefs = dict(browser=["Microsoft Internet Explorer", 'Firefox'],
dist=['WindowsMobile'], flavor=None)
def getVersion(self, agent):
v = agent.partition('NT')
return v[1]+' '+v[2].replace(')',';').partition(';')[0].strip()
class WindowsMobile(Dist):
name = 'Phone'
look_for = ['Windows', 'Phone']
is_mobile = True
def getVersion(self, agent):
return agent.partition('Windows Phone')[2].replace(')','').partition(';')[0].strip()
class Ubuntu(Dist):
look_for = ['Ubuntu']
version_splitters = ["/", " "]
prefs = dict(browser=['Firefox'])
class Debian(Dist):
look_for = ['Debian']
version_splitters = ["/", " "]
prefs = dict(browser=['Firefox'])
class Chrome(Browser):
look_for = ['Chrome']
version_splitters = ["/", " "]
class ChromeOS(OS):
look_for = ['CrOS']
version_splitters = [" ", ")"]
prefs = dict(browser=['Chrome'])
def getVersion(self, agent):
vs = self.version_splitters
return agent.partition(self.look_for[0]+vs[0])[2].partition(vs[1])[0].partition(" ")[2].strip()
class Android(Dist):
look_for = ['Android']
prefs = dict(browser=['SafariTablet', 'SafariMobile'])
is_mobile = True
def getVersion(self, agent):
return agent.partition('Android')[2].partition(';')[0].strip()
class SymbianOS(OS):
look_for = ['SymbianOS']
prefs = dict(dist = ['Series'], browser = ['Safari', 'Opera'])
is_mobile = True
version_splitters = ['/', '; ']
class Series(Flavor):
look_for = ['SymbianOS', 'Series']
version_splitters = ['/', ';']
def getVersion(self, agent):
return agent.partition('Series')[2].partition(' ')[0].replace('/',' ')
class BrowserNG(Browser):
look_for = ['BrowserNG']
version_splitters = ['/', ';']
class iPhone(Dist):
    look_for = ['iPhone']
    is_mobile = True
    prefs = dict(browser=['SafariMobile'])
    def getVersion(self, agent):
        """Return 'iOS x.y.z' parsed from an iPhone UA, or 'X' when absent."""
        version_end_chars = ['like', ';', ')']
        if ('CPU iPhone OS' not in agent) and ('CPU OS' not in agent):
            # Old-style iPhone agents carry no OS version at all.
            return 'X'
        part = agent.partition('OS')[2].strip()
        # BUG FIX: previously `version` was referenced before assignment when
        # no terminator matched; fall back to the whole remainder.
        version = 'iOS ' + part
        for c in version_end_chars:
            if c in part:
                version = 'iOS ' + part.partition(c)[0].strip()
                break
        return version.replace('_', '.')
class iPad(Dist):
    look_for = ['iPad']
    is_mobile = True
    is_tablet = True
    def getVersion(self, agent):
        """Return 'iOS x.y.z' parsed from an iPad UA, or '' when absent."""
        version_end_chars = ['like', ';', ')']
        if 'OS' not in agent:
            return ''
        part = agent.partition('OS')[2].strip()
        # BUG FIX: previously `version` was referenced before assignment when
        # no terminator matched; fall back to the whole remainder.
        version = 'iOS ' + part
        for c in version_end_chars:
            if c in part:
                version = 'iOS ' + part.partition(c)[0].strip()
                break
        return version.replace('_', '.')
detectorshub = DetectorsHub()
def detect(agent):
    """Parse a User-Agent string into a dict of os/dist/flavor/browser info.

    Always contains 'is_mobile' and 'is_tablet' booleans; each detector that
    matches adds a {'name': ..., 'version': ...} entry under its info_type.
    """
    result = dict()
    prefs = dict()
    result['is_mobile'] = False
    result['is_tablet'] = False
    for info_type in detectorshub:
        detectors = detectorshub[info_type]
        _d_prefs = prefs.get(info_type, [])
        detectors = detectorshub.reorderByPrefs(detectors, _d_prefs)
        try:
            # First detector that matches wins; its prefs steer later slots.
            for detector in detectors:
                if detector.detect(agent, result):
                    prefs = detector.prefs
                    break
        except Exception, ex:
            # Detection must never blow up on odd agents; report instead.
            result['exception'] = ex
    return result
class Result(dict):
    """dict that yields '' (instead of raising KeyError) for missing keys."""
    def __missing__(self, key):
        # Absent entries read as the empty string; nothing is stored.
        return ""
def simple_detect(agent):
    """
    -> (os, browser, is_mobile) # tuple of strings
    """
    result = detect(agent)
    os_list = []
    # Assemble a human-readable OS string: flavor, then dist, then os name.
    if 'flavor' in result: os_list.append(result['flavor']['name'])
    if 'dist' in result: os_list.append(result['dist']['name'])
    if 'os' in result: os_list.append(result['os']['name'])
    os = os_list and " ".join(os_list) or "Unknown OS"
    # First available version wins: flavor over dist over os.
    os_version = os_list and ('flavor' in result and result['flavor'] and result['flavor'].get(
        'version')) or ('dist' in result and result['dist'] and result['dist'].get('version')) \
        or ('os' in result and result['os'] and result['os'].get('version')) or ""
    browser = 'browser' in result and result['browser']['name'] or 'Unknown Browser'
    browser_version = 'browser' in result \
        and result['browser'].get('version') or ""
    if browser_version:
        browser = " ".join((browser, browser_version))
    if os_version:
        os = " ".join((os, os_version))
    #is_mobile = ('dist' in result and result.dist.is_mobile) or ('os' in result and result.os.is_mobile) or False
    return os, browser, result['is_mobile']
if __name__ == '__main__':
import time
import unittest
data = (
('Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia6120c/3.83; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
('Series SymbianOS 60 3.1', 'Safari', True),
{'is_mobile': True, 'is_tablet': False, 'flavor': {'name': 'Series', 'version': '60 3.1'}, 'os': {'name': 'SymbianOS', 'version': '9.2'}, 'browser': {'name': 'Safari'}},),
('Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124',
('Series SymbianOS 60 5.0', 'BrowserNG 7.1.18124', True),
{'is_mobile': True, 'is_tablet': False, 'flavor': {'name': 'Series', 'version': '60 5.0'}, 'os': {'name': 'SymbianOS', 'version': '9.4'}, 'browser': {'name': 'BrowserNG', 'version': '7.1.18124'}},),
('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Windows Phone 6.5.3.5)',
('Phone Windows 6.5.3.5', 'Microsoft Internet Explorer 6.0', True),
{'is_mobile': True, 'is_tablet': False, 'dist': {'name': 'Phone', 'version': '6.5.3.5'}, 'os': {'name': 'Windows', 'version': 'NT 5.1'}, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '6.0'}},),
('Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/0.0.1 Safari/534.8+',
('PlayBook BlackBerry 1.0.0', 'Safari 0.0.1', True),
{'is_mobile': True, 'is_tablet': True, 'flavor': {'name': 'PlayBook', 'version': '1.0.0'}, 'os': {'name': 'BlackBerry'}, 'browser': {'name': 'Safari', 'version': '0.0.1'}},),
('Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en-US) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.246 Mobile Safari/534.1+',
('BlackBerry', 'Safari 6.0.0.246', True),
{'is_mobile': True, 'is_tablet': False, 'os': {'name': 'BlackBerry'}, 'browser': {'name': 'Safari', 'version': '6.0.0.246'}},),
('Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/6.0.0.600 Mobile Safari/534.8+',
('BlackBerry', 'Safari 6.0.0.600', True),
{'is_mobile': True, 'is_tablet': False, 'os': {'name': 'BlackBerry'}, 'browser': {'name': 'Safari', 'version': '6.0.0.600'}},),
('Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5',
('MacOS iPad X', 'Safari 5.0.2', True),
{'is_mobile': True, 'is_tablet': True, 'flavor': {'version': 'X', 'name': 'MacOS'}, 'dist': {'version': 'iOS 4.2.1', 'name': 'iPad'}, 'browser': {'name': 'Safari', 'version': '5.0.2'}},),
('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20060127 Netscape/8.1',
('Windows NT 5.1', 'Netscape 8.1', False),
{'is_mobile': False, 'is_tablet': False, 'os': {'name': 'Windows', 'version': 'NT 5.1'}, 'browser': {'name': 'Netscape', 'version': '8.1'}},),
('Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13',
('Android Linux 3.0.1', 'Safari 4.0', True),
{'is_mobile': True, 'is_tablet': True, 'dist': {'version': '3.0.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},),
('Mozilla/5.0 (Linux; U; Android 2.3.7; it-it; Dream/Sapphire Build/FRG83) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
('Android Linux 2.3.7', 'Safari 4.0', True),
{'is_mobile': True, 'is_tablet': False, 'dist': {'version': '2.3.7', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},),
('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-GB; rv:1.9.0.10) Gecko/2009042315 Firefox/3.0.10',
('MacOS Macintosh X 10.5', 'Firefox 3.0.10', False),
{ 'is_mobile': False, 'is_tablet': False, 'flavor': {'version': 'X 10.5', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '3.0.10', 'name': 'Firefox'}},),
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_6) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.3 Safari/534.24,gzip(gfe)',
('MacOS Macintosh X 10.6.6', 'Chrome 11.0.696.3', False),
{'is_mobile': False, 'is_tablet': False, 'flavor': {'version': 'X 10.6.6', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '11.0.696.3', 'name': 'Chrome'}},),
('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2) Gecko/20100308 Ubuntu/10.04 (lucid) Firefox/3.6 GTB7.1',
('Ubuntu Linux 10.04', 'Firefox 3.6', False),
{'is_mobile': False, 'is_tablet': False, 'dist': {'version': '10.04', 'name': 'Ubuntu'}, 'os': {'name': 'Linux'}, 'browser': {'version': '3.6', 'name': 'Firefox'}},),
('Mozilla/5.0 (Linux; U; Android 2.2.1; fr-ch; A43 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
('Android Linux 2.2.1', 'Safari 4.0', True),
{'is_mobile': True, 'is_tablet': False, 'dist': {'version': '2.2.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},),
('Mozilla/5.0 (Linux; U; Android 2.3.4; it-it; LG-P990 Build/GRJ22) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 MMS/LG-Android-MMS-V1.0/1.2',
('Android Linux 2.3.4', 'Safari 4.0', True),
{'is_mobile': True, 'is_tablet': False, 'dist': {'version': '2.3.4', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},),
('Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',
('MacOS iPhone X', 'Safari 3.0', True),
{'is_mobile': True, 'is_tablet': False, 'flavor': {'version': 'X', 'name': 'MacOS'}, 'dist': {'version': 'X', 'name': 'iPhone'}, 'browser': {'version': '3.0', 'name': 'Safari'}},),
('Mozilla/5.0 (X11; CrOS i686 0.0.0) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.27 Safari/534.24,gzip(gfe)',
('ChromeOS 0.0.0', 'Chrome 11.0.696.27', False),
{'is_mobile': False, 'is_tablet': False, 'os': {'name': 'ChromeOS', 'version': '0.0.0'}, 'browser': {'name': 'Chrome', 'version': '11.0.696.27'}},),
('Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.1) Opera 7.02 [en]',
('Windows NT 5.1', 'Opera 7.02', False),
{'is_mobile': False, 'is_tablet': False, 'os': {'name': 'Windows', 'version': 'NT 5.1'}, 'browser': {'name': 'Opera', 'version': '7.02'}},),
('Opera/9.80 (X11; Linux i686; U; en) Presto/2.9.168 Version/11.50',
('Linux', 'Opera 9.80', False),
{'is_mobile': False, 'is_tablet': False, 'os': {'name': 'Linux'}, 'browser': {'name': 'Opera', 'version': '9.80'}},),
)
    class TestHAP(unittest.TestCase):
        # Regression tests over the canned (agent, simple_result, full_result)
        # triples in `data` above.
        def setUp(self):
            self.harass_repeat = 100
            self.data = data
        def test_simple_detect(self):
            for agent, simple_res, res in data:
                self.assertEqual(simple_detect(agent), simple_res)
        def test_detect(self):
            for agent, simple_res, res in data:
                self.assertEqual(detect(agent), res)
        def test_harass(self):
            # Rough throughput benchmark, not a correctness test.
            then = time.time()
            for agent, simple_res, res in data * self.harass_repeat:
                detect(agent)
            time_taken = time.time() - then
            no_of_tests = len(self.data) * self.harass_repeat
            print "\nTime taken for %s detecttions: %s" \
                % (no_of_tests, time_taken)
            print "Time taken for single detecttion: ", \
                time_taken / (len(self.data) * self.harass_repeat)
    unittest.main()
class mobilize(object):
    """Decorator for web2py actions: when the client is a mobile device,
    switch the response view to its '<name>.mobile.<ext>' variant.

    Usage::

        @mobilize
        def index(): ...
    """
    def __init__(self, func):
        # func: the wrapped controller action
        self.func = func
    def __call__(self, *args, **kwargs):
        from gluon import current
        user_agent = current.request.user_agent()
        if user_agent.is_mobile:
            # e.g. 'default/index.html' -> 'default/index.mobile.html'
            items = current.response.view.split('.')
            items.insert(-1, 'mobile')
            current.response.view = '.'.join(items)
        # Pass arguments through so decorated callables keep their signature
        # (previously only zero-argument actions could be wrapped).
        return self.func(*args, **kwargs)
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Pythonic simple JSON RPC Client implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "0.05"
import urllib
from xmlrpclib import Transport, SafeTransport
from cStringIO import StringIO
import random
import sys
try:
import gluon.contrib.simplejson as json # try web2py json serializer
except ImportError:
try:
import json # try stdlib (py2.6)
except:
import simplejson as json # try external module
class JSONRPCError(RuntimeError):
    """Error raised when a remote procedure call fails.

    code/message/data mirror the JSON-RPC error object returned by the server.
    """
    def __init__(self, code, message, data=None):
        # BUG FIX: data defaults to None, and '\n'.join(None) raised a
        # TypeError — treat a missing data field as an empty detail list.
        value = "%s: %s\n%s" % (code, message, '\n'.join(data or []))
        RuntimeError.__init__(self, value)
        self.code = code
        self.message = message
        self.data = data
class JSONDummyParser:
    "pass-through parser satisfying the xmlrpclib parser interface for JSON"
    def __init__(self):
        self.buf = StringIO()
    def feed(self, data):
        # Accumulate raw response chunks verbatim — no actual parsing.
        self.buf.write(data)
    def close(self):
        # Hand back everything fed so far as a single string.
        return self.buf.getvalue()
class JSONTransportMixin:
    "json wrapper for xmlrpclib transport interface"
    def send_content(self, connection, request_body):
        # Replace the XML-RPC content type with JSON and send the payload.
        connection.putheader("Content-Type", "application/json")
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders()
        if request_body:
            connection.send(request_body)
        # todo: add gzip compression
    def getparser(self):
        # get parser and unmarshaller
        # JSONDummyParser plays both roles: feed() collects, close() returns.
        parser = JSONDummyParser()
        return parser, parser
class JSONTransport(JSONTransportMixin, Transport):
    # Plain HTTP transport with JSON content handling (see the mixin).
    pass
class JSONSafeTransport(JSONTransportMixin, SafeTransport):
    # HTTPS transport with JSON content handling (see the mixin).
    pass
class ServerProxy(object):
    "JSON RPC Simple Client Service Proxy"
    def __init__(self, uri, transport=None, encoding=None, verbose=0):
        self.location = uri # server location (url)
        self.trace = verbose # show debug messages
        self.exceptions = True # raise errors? (JSONRPCError)
        self.timeout = None
        self.json_request = self.json_response = ''
        # NOTE(review): 'type' shadows the builtin; it holds the URL scheme.
        type, uri = urllib.splittype(uri)
        if type not in ("http", "https"):
            raise IOError, "unsupported JSON-RPC protocol"
        self.__host, self.__handler = urllib.splithost(uri)
        if transport is None:
            # Pick the transport matching the scheme.
            if type == "https":
                transport = JSONSafeTransport()
            else:
                transport = JSONTransport()
        self.__transport = transport
        self.__encoding = encoding
        self.__verbose = verbose
    def __getattr__(self, attr):
        "pseudo method that can be called"
        # Any attribute access becomes a remote method invocation.
        return lambda *args: self.call(attr, *args)
    def call(self, method, *args):
        "JSON RPC communication (method invocation)"
        # build data sent to the service
        # Random id lets us verify the response belongs to this request.
        request_id = random.randint(0, sys.maxint)
        data = {'id': request_id, 'method': method, 'params': args, }
        request = json.dumps(data)
        # make HTTP request (retry if connection is lost)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
        )
        # store plain request and response for further debugging
        self.json_request = request
        self.json_response = response
        # parse json data coming from service
        # {'version': '1.1', 'id': id, 'result': result, 'error': None}
        response = json.loads(response)
        if response['id'] != request_id:
            raise JSONRPCError(0, "JSON Request ID != Response ID")
        self.error = response.get('error', {})
        if self.error and self.exceptions:
            raise JSONRPCError(self.error.get('code', 0),
                               self.error.get('message', ''),
                               self.error.get('data', None))
        return response.get('result')
ServiceProxy = ServerProxy  # backwards-compatible alias
if __name__ == "__main__":
    # basic tests:
    # Smoke test against the public web2py sample JSON-RPC service.
    location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc"
    client = ServerProxy(location, verbose='--verbose' in sys.argv,)
    print client.add(1, 2)
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.4 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.0.1"
__license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>"]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
def _s2bytes(s):
# Convert a UTF-8 str to bytes if the interpreter is Python 3
try:
return bytes(s, 'utf8')
except (NameError, TypeError):
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same (TypeError)
return s
def _l2bytes(l):
# Convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is not str:
# In Python 2.6 and above, this call won't raise an exception
# but it will return bytes([65]) as '[65]' instead of 'A'
return bytes(l)
raise NameError
except NameError:
return ''.join(map(chr, l))
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'mailto',
'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp',
'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import copy
import datetime
import re
import struct
import sys
import time
import types
import urllib
import urllib2
import urlparse
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing, content santizing, and
# microformat support (at least while feedparser depends on BeautifulSoup).
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
# Unfortunately, these must be copied over to prevent NameError exceptions
attrfind = sgmllib.attrfind
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
# resolves a thread-safety.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/
# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the
# older 2.x series. If it doesn't, and you can figure out why, I'll accept a
# patch and modify the compatibility statement accordingly.
try:
import BeautifulSoup
except ImportError:
BeautifulSoup = None
# ---------- don't touch these ----------
# Internal exception hierarchy.  NOTE(review): these look like non-fatal
# parser conditions caught elsewhere in this file -- confirm against callers.
class ThingsNobodyCaresAboutButMe(Exception): pass
# The declared character encoding was overridden by other evidence.
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
# No usable character encoding could be determined.
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
# The served Content-Type was not an XML media type.
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
# A namespace prefix was used without a declaration.
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
    """A dict that also allows attribute-style access to its keys and
    transparently maps legacy element names (e.g. 'channel', 'items',
    'modified') onto their modern equivalents, for reading and writing.
    It also synthesizes the virtual keys 'category', 'categories',
    'enclosures' and 'license' from stored 'tags'/'links' data.
    """
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        """Look up `key`, resolving the synthetic keys and keymap aliases."""
        if key == 'category':
            try:
                return dict.__getitem__(self, 'tags')[0]['term']
            except IndexError:
                # modernized from `raise KeyError, "..."` -- the call form
                # is valid Python 2 and forward-compatible with Python 3
                raise KeyError("object doesn't have key 'category'")
        elif key == 'enclosures':
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
        elif key == 'license':
            for link in dict.__getitem__(self, 'links'):
                # `in` instead of the Python-2-only dict.has_key()
                if link['rel']==u'license' and 'href' in link:
                    return link['href']
        elif key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in dict.__getitem__(self, 'tags')]
        else:
            realkey = self.keymap.get(key, key)
            if isinstance(realkey, list):
                for k in realkey:
                    if dict.__contains__(self, k):
                        return dict.__getitem__(self, k)
            elif dict.__contains__(self, realkey):
                return dict.__getitem__(self, realkey)
        return dict.__getitem__(self, key)
    def __contains__(self, key):
        # membership must go through __getitem__ so aliases/virtual keys count
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        else:
            return True
    has_key = __contains__
    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default
    def __setitem__(self, key, value):
        # writes to a legacy name are stored under the modern name
        key = self.keymap.get(key, key)
        if isinstance(key, list):
            key = key[0]
        return dict.__setitem__(self, key, value)
    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
            return value
        return self[key]
    def __getattr__(self, key):
        # __getattribute__() is called first; this will be called
        # only if an attribute was not already found
        try:
            return self.__getitem__(key)
        except KeyError:
            raise AttributeError("object has no attribute '%s'" % key)
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
_ebcdic_to_ascii_map = _maketrans( \
_l2bytes(range(256)), _l2bytes(emap))
return s.translate(_ebcdic_to_ascii_map)
_cp1252 = {
unichr(128): unichr(8364), # euro sign
unichr(130): unichr(8218), # single low-9 quotation mark
unichr(131): unichr( 402), # latin small letter f with hook
unichr(132): unichr(8222), # double low-9 quotation mark
unichr(133): unichr(8230), # horizontal ellipsis
unichr(134): unichr(8224), # dagger
unichr(135): unichr(8225), # double dagger
unichr(136): unichr( 710), # modifier letter circumflex accent
unichr(137): unichr(8240), # per mille sign
unichr(138): unichr( 352), # latin capital letter s with caron
unichr(139): unichr(8249), # single left-pointing angle quotation mark
unichr(140): unichr( 338), # latin capital ligature oe
unichr(142): unichr( 381), # latin capital letter z with caron
unichr(145): unichr(8216), # left single quotation mark
unichr(146): unichr(8217), # right single quotation mark
unichr(147): unichr(8220), # left double quotation mark
unichr(148): unichr(8221), # right double quotation mark
unichr(149): unichr(8226), # bullet
unichr(150): unichr(8211), # en dash
unichr(151): unichr(8212), # em dash
unichr(152): unichr( 732), # small tilde
unichr(153): unichr(8482), # trade mark sign
unichr(154): unichr( 353), # latin small letter s with caron
unichr(155): unichr(8250), # single right-pointing angle quotation mark
unichr(156): unichr( 339), # latin small ligature oe
unichr(158): unichr( 382), # latin small letter z with caron
unichr(159): unichr( 376)} # latin capital letter y with diaeresis
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
#try:
uri = urlparse.urljoin(base, uri)
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
#except:
# uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
# return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
#Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.hasTitle = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
v = v.replace('&', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
# uses a whitelisted URI scheme (e.g. not `javscript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ['xhtml:div', 'div']:
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
# No attributes so merge it into the encosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ['xhtml:div', 'div']:
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack:
            return
        ref = ref.lower()
        # references to the XML special characters (quote, amp, apos, lt, gt,
        # in decimal and hex form) stay escaped so collected text remains
        # well-formed markup; everything else is decoded to the character
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        # append to the text pieces of the innermost open element
        self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities.keys():
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack:
            return
        # inside inline XHTML content the text must stay well-formed markup,
        # so XML metacharacters are re-escaped before being collected
        if escape and self.contentparams.get('type') == u'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # comments carry no feed data and are discarded
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # processing instructions are ignored
        pass
    def handle_decl(self, text):
        # declarations are ignored here
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish
                k = len(self.rawdata)
                return k
            # re-escape the CDATA payload so it survives as text content
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # no closing '>' yet: k is -1 here; propagate it so the
                # caller can wait for more data (declaration incomplete)
                return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = u'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = u'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = u'atom10'
if loweruri.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
    def push(self, element, expectingText):
        # open a new element: [name, whether text is expected, text pieces]
        self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and self.hasTitle:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        # enter a content-bearing element: record its type/language/base
        # in contentparams so pop() knows how to post-process the text
        self.incontent += 1
        if self.lang:
            self.lang=self.lang.replace('_','-')
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        # leave a content-bearing element and clear the content state
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or a entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
    def _getAttribute(self, attrsD, name):
        # fetch an attribute value after normalizing its namespace prefix
        return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
    def _save(self, key, value, overwrite=False):
        # store value in the current context; by default the first value
        # seen for a key wins (setdefault), unless overwrite is requested
        context = self._getContext()
        if overwrite:
            context[key] = value
        else:
            context.setdefault(key, value)
    def _start_rss(self, attrsD):
        # map the version attribute of <rss> to our internal version tokens
        versionmap = {'0.91': u'rss091u',
                      '0.92': u'rss092',
                      '0.93': u'rss093',
                      '0.94': u'rss094'}
        #If we're here then this is an RSS feed.
        #If we don't have a version or have a version that starts with something
        #other than RSS then there's been a mistake. Correct it.
        if not self.version or not self.version.startswith(u'rss'):
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                # any 2.x declaration is treated as RSS 2.0
                self.version = u'rss20'
            else:
                self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.hasTitle = 0
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.hasTitle = 0
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # Handles <author> and its RSS/DC/iTunes equivalents (see aliases).
        self.inauthor = 1
        self.push('author', 1)
        # Append a new FeedParserDict when expecting an author
        context = self._getContext()
        context.setdefault('authors', [])
        context['authors'].append(FeedParserDict())
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # Reconcile flat 'author' string with structured 'author_detail'.
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # <itunes:owner> is treated as the feed's publisher.
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # <dc:contributor> carries a bare name rather than substructure,
        # so the element content is pushed as 'name'.
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # Route the <name> text to whichever construct we are inside.
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        self.push('width', 0)
    def _end_width(self):
        # Coerce to int; any malformed value silently becomes 0.
        value = self.pop('width')
        try:
            value = int(value)
        except ValueError:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['width'] = value
    def _start_height(self, attrsD):
        self.push('height', 0)
    def _end_height(self):
        # Same int coercion rule as _end_width.
        value = self.pop('height')
        try:
            value = int(value)
        except ValueError:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['height'] = value
    def _start_url(self, attrsD):
        # <url>/<homepage>/<uri> all map to an 'href' value.
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # Attach the email to the publisher, author, or contributor,
        # depending on the enclosing construct.
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
    def _getContext(self):
        # Return the dict that parsed values should be stored into, based
        # on which element the parser is currently inside (source, image,
        # textinput, the current entry, or the feed itself).
        if self.insource:
            context = self.sourcedata
        elif self.inimage and self.feeddata.has_key('image'):
            context = self.feeddata['image']
        elif self.intextinput:
            context = self.feeddata['textinput']
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context
    def _save_author(self, key, value, prefix='author'):
        # Store one field of the (prefix)_detail dict and mirror it into
        # the last element of the 'authors' list.
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value
    def _save_contributor(self, key, value):
        # Store one field on the most recent contributor.
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
author = author.replace(u'<>', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # <subtitle>/<tagline>/<itunes:subtitle> as plain-text content.
        self.pushContent('subtitle', attrsD, u'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # <rights>/<dc:rights>/<copyright> as plain-text content.
        self.pushContent('rights', attrsD, u'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # Begin a new entry; reset the per-entry flags.
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        self.hasTitle = 0
        # RDF feeds identify the item by its rdf:about attribute.
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # Remember the declared language for subsequent content.
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        # Publication date (Atom <published>, Dublin Core issued).
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        value = self.pop('published')
        # overwrite=True: the last date seen wins.
        self._save('published_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        # Modification date; RSS pubDate and dc:date map here too.
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    _start_lastbuilddate = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value, overwrite=True)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    _end_lastbuilddate = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
    def _start_cc_license(self, attrsD):
        # Creative Commons license given as an rdf:resource attribute;
        # recorded as a rel="license" link.
        context = self._getContext()
        value = self._getAttribute(attrsD, 'rdf:resource')
        attrsD = FeedParserDict()
        attrsD['rel'] = u'license'
        if value:
            attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    _start_creativeCommons_license = _start_creativecommons_license
    def _end_creativecommons_license(self):
        # License given as element text; converted to a rel="license" link
        # and removed from the context as a flat key.
        value = self.pop('license')
        context = self._getContext()
        attrsD = FeedParserDict()
        attrsD['rel'] = u'license'
        if value:
            attrsD['href'] = value
        context.setdefault('links', []).append(attrsD)
        del context['license']
    _end_creativeCommons_license = _end_creativecommons_license
    def _addXFN(self, relationships, href, name):
        # Record one XFN relationship, de-duplicated.
        context = self._getContext()
        xfn = context.setdefault('xfn', [])
        value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
        if value not in xfn:
            xfn.append(value)
    def _addTag(self, term, scheme, label):
        # Record one category/tag triple, skipping empty ones and dupes.
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label):
            return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(value)
    def _start_category(self, attrsD):
        # Atom <category> / RSS <category> / <dc:subject> / <keywords>.
        term = attrsD.get('term')
        # RSS uses 'domain' where Atom uses 'scheme'.
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _start_media_category(self, attrsD):
        # Media RSS categories default to Yahoo's category schema.
        attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
        self._start_category(attrsD)
    def _end_itunes_keywords(self):
        # Whitespace-separated keywords become individual iTunes tags.
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, u'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        value = self.pop('category')
        if not value:
            return
        context = self._getContext()
        tags = context['tags']
        # Element text fills in the term of the last attribute-only tag;
        # otherwise it becomes a new tag of its own.
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    _end_media_category = _end_category
    def _start_cloud(self, attrsD):
        # RSS <cloud> is attribute-only; store it verbatim.
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        attrsD.setdefault('rel', u'alternate')
        # Default the MIME type by rel: self-links are the feed itself.
        if attrsD['rel'] == u'self':
            attrsD.setdefault('type', u'application/atom+xml')
        else:
            attrsD.setdefault('type', u'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        if not (self.inentry and self.inimage):
            context['links'].append(FeedParserDict(attrsD))
        if attrsD.has_key('href'):
            # An href attribute means the link has no text content; an
            # alternate HTML link also becomes the flat 'link' value.
            expectingText = 0
            if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
    def _start_guid(self, attrsD):
        # guid doubles as a permalink unless isPermaLink="false".
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        # Inside inline SVG a <title> is ordinary markup, not metadata.
        if self.svgOK:
            return self.unknown_starttag('title', attrsD.items())
        self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        if self.svgOK:
            return
        value = self.popContent('title')
        if not value:
            return
        context = self._getContext()
        self.hasTitle = 1
    _end_dc_title = _end_title
    def _end_media_title(self):
        # A media:title must not mark the entry as having a real title.
        hasTitle = self.hasTitle
        self._end_title()
        self.hasTitle = hasTitle
    def _start_description(self, attrsD):
        context = self._getContext()
        # A second summary-like element is treated as full content.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    def _start_abstract(self, attrsD):
        self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, u'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        # Attributes (uri/version) become generator_detail; the element
        # text later supplies the name in _end_generator.
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # Generator given as an rdf:resource URI on an empty element.
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        context = self._getContext()
        # A second summary element is treated as full content instead.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        # Enclosures are stored as rel="enclosure" links.
        attrsD = self._itsAnHrefDamnIt(attrsD)
        context = self._getContext()
        attrsD['rel'] = u'enclosure'
        context.setdefault('links', []).append(FeedParserDict(attrsD))
    def _start_source(self, attrsD):
        if 'url' in attrsD:
            # This means that we're processing a source element from an RSS 2.0 feed
            self.sourcedata['href'] = attrsD[u'url']
        self.push('source', 1)
        self.insource = 1
        self.hasTitle = 0
    def _end_source(self):
        # Snapshot the accumulated source data onto the entry, then reset.
        self.insource = 0
        value = self.pop('source')
        if value:
            self.sourcedata['title'] = value
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, u'text/plain', 1)
        # Atom out-of-line content: remember the src URI.
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, u'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Plain-text/HTML content is mirrored into 'summary' as well.
        copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToSummary:
            self._save('summary', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        if attrsD.get('href'):
            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' to False, and any other value to None
        # False and None both evaluate as False, so the difference can be ignored
        # by applications that only need to know if the content is explicit.
        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
    def _start_media_content(self, attrsD):
        # media:content elements accumulate in a list, attributes verbatim.
        context = self._getContext()
        context.setdefault('media_content', [])
        context['media_content'].append(attrsD)
    def _start_media_thumbnail(self, attrsD):
        context = self._getContext()
        context.setdefault('media_thumbnail', [])
        self.push('url', 1) # new
        context['media_thumbnail'].append(attrsD)
    def _end_media_thumbnail(self):
        # Element text supplies the URL only when no url attribute did.
        url = self.pop('url')
        context = self._getContext()
        if url != None and len(url.strip()) != 0:
            if not context['media_thumbnail'][-1].has_key('url'):
                context['media_thumbnail'][-1]['url'] = url
    def _start_media_player(self, attrsD):
        self.push('media_player', 0)
        self._getContext()['media_player'] = FeedParserDict(attrsD)
    def _end_media_player(self):
        value = self.pop('media_player')
        context = self._getContext()
        context['media_player']['content'] = value
    def _start_newlocation(self, attrsD):
        self.push('newlocation', 1)
    def _end_newlocation(self):
        url = self.pop('newlocation')
        context = self._getContext()
        # don't set newlocation if the context isn't right
        if context is not self.feeddata:
            return
        context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX ContentHandler that drives _FeedParserMixin for well-formed
        feeds.  Any parse error sets `bozo` and stores the exception in
        `exc` so the caller can fall back to the loose (sgmllib) parser.
        """
        def __init__(self, baseuri, baselang, encoding):
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None
            # xlink namespace declarations to replay on the next element.
            self.decls = {}
        def startPrefixMapping(self, prefix, uri):
            if not uri:
                return
            # Jython uses '' instead of None; standardize on None
            prefix = prefix or None
            self.trackNamespace(prefix, uri)
            if prefix and uri == 'http://www.w3.org/1999/xlink':
                self.decls['xmlns:' + prefix] = uri
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            # COMPAT: was the deprecated `<>` operator (removed in Python 3)
            if lowernamespace.find(u'backend.userland.com/rss') != -1:
                # match any backend.userland.com namespace
                namespace = u'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                # COMPAT: was the Py2-only comma-style raise statement
                raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
            localname = str(localname).lower()
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD, self.decls = self.decls, {}
            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrsD['xmlns']=namespace
            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrsD['xmlns']=namespace
            if prefix:
                localname = prefix.lower() + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            for (namespace, attrlocalname), attrvalue in attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # Recoverable error: mark the result as suspect but continue.
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            # Record the error, then abort strict parsing.
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML parser that reconstructs the HTML it parses.

    Each handler re-emits its token into self.pieces; output() joins the
    pieces.  Subclasses override handlers to sanitize or rewrite markup
    on the way through.
    """
    special = re.compile('''[<>'"]''')
    # Matches an '&' that does not already start a character/entity reference.
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # HTML void elements: emitted self-closed, never given an end tag.
    elements_no_end_tag = [
      'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
      'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
      'source', 'track', 'wbr'
    ]
    def __init__(self, encoding, _type):
        self.encoding = encoding
        self._type = _type
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        # Accumulates reconstructed output fragments.
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        # '<tag/>': void elements keep the self-closing form, everything
        # else becomes an explicit open/close pair.
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    # By declaring these methods and overriding their compiled code
    # with the code from sgmllib, the original code will execute in
    # feedparser's scope instead of sgmllib's. This means that the
    # `tagfind` and `charref` regular expressions will be found as
    # they're declared above, not as they're declared in sgmllib.
    def goahead(self, i):
        pass
    goahead.func_code = sgmllib.SGMLParser.goahead.func_code
    def __parse_starttag(self, i):
        pass
    __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
    def parse_starttag(self,i):
        j = self.__parse_starttag(i)
        if self._type == 'application/xhtml+xml':
            if j>2 and self.rawdata[j-2:j]=='/>':
                # XHTML: a self-closed tag is also its own end tag.
                self.unknown_endtag(self.lasttag)
        return j
    def feed(self, data):
        # Escape '<!' unless it begins a construct sgmllib understands.
        # BUGFIX: the replacement must be '&lt;!\1'; replacing '<!' with
        # itself was a no-op that let bogus declarations through.
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        # BUGFIX: decode the numeric quote entities; these two replace
        # calls had degenerated into identity no-ops.
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        try:
            bytes
            if bytes is str:
                raise NameError
            self.encoding = self.encoding + u'_INVALID_PYTHON_3'
        except NameError:
            if self.encoding and isinstance(data, unicode):
                data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)
    def normalize_attrs(self, attrs):
        if not attrs:
            return attrs
        # utility method to be called by descendants
        # Lowercase names (keeping the last duplicate), lowercase the
        # values of rel/type, and sort for a stable ordering.
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        uattrs = []
        strattrs=''
        if attrs:
            for key, value in attrs:
                # BUGFIX: decode the named entities in attribute values and
                # re-escape bare ampersands as '&amp;'; both operations had
                # degenerated into identity no-ops.
                value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if not isinstance(value, unicode):
                    value = value.decode(self.encoding, 'ignore')
                try:
                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
                    uattrs.append((unicode(key, self.encoding), value))
                except TypeError:
                    uattrs.append((key, value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs = strattrs.encode(self.encoding)
                except (UnicodeEncodeError, LookupError):
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        if ref.startswith('x'):
            value = unichr(int(ref[1:],16))
        else:
            value = unichr(int(ref))
        # Normalize cp1252 "smart" characters to their hex references.
        if value in _cp1252.keys():
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%(ref)s;' % locals())
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if name2codepoint.has_key(ref):
            self.pieces.append('&%(ref)s;' % locals())
        else:
            # BUGFIX: an unrecognized entity must have its ampersand
            # escaped ('&amp;ref'); emitting '&ref' re-created the same
            # bogus reference in the output.
            self.pieces.append('&amp;%(ref)s' % locals())
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # Scan a declaration name starting at i; returns (name, end) or
        # (None, -1) when the buffer ends mid-name.
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
            # self.updatepos(declstartpos, i)
            return None, -1
    def convert_charref(self, name):
        return '&#%s;' % name
    def convert_entityref(self, name):
        return '&%s;' % name
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
    def parse_declaration(self, i):
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            # escape the doctype declaration and continue parsing
            # BUGFIX: must emit the entity '&lt;', not a raw '<', or the
            # malformed declaration is reintroduced into the output.
            self.handle_data('&lt;')
            return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Fallback feed parser built on sgmllib for ill-formed feeds."""
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities=entities
    def decodeEntities(self, element, data):
        # BUGFIX: every replace below had been corrupted into an identity
        # no-op (e.g. "'<' -> '<'").  Restored: numeric character
        # references are normalized to the named entities, and for
        # non-XML content types the named entities are then decoded to
        # their literal characters.
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
    def strattrs(self, attrs):
        # BUGFIX: double quotes inside attribute values must be escaped
        # as '&quot;'; the corrupted replace was an identity no-op.
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
    # Property-type codes consumed by getPropertyValue().
    STRING = 1
    DATE = 2
    URI = 3
    NODE = 4
    EMAIL = 5
    # XFN rel values recognized when scanning links.
    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me')
    def __init__(self, data, baseuri, encoding):
        # Parse the markup once with BeautifulSoup; all property lookups
        # run against this tree.
        self.document = BeautifulSoup.BeautifulSoup(data)
        self.baseuri = baseuri
        self.encoding = encoding
        # NOTE(review): this re-encode rebinds only the local `data` after
        # the soup has already been built, so it has no observable effect.
        if isinstance(data, unicode):
            data = data.encode(encoding)
        self.tags = []
        self.enclosures = []
        self.xfn = []
        self.vcard = None
def vcardEscape(self, s):
if isinstance(s, basestring):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
        # Extract a microformat property named sProperty (matched against
        # the class attribute) from the parse tree rooted at elmRoot.
        # iPropertyType selects how a matched node is converted (STRING,
        # DATE, URI, NODE, EMAIL); bAllowMultiple returns a list of all
        # matches; bAutoEscape applies vCard escaping to string results.
        all = lambda x: 1
        sProperty = sProperty.lower()
        bFound = 0
        bNormalize = 1
        propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
        if bAllowMultiple and (iPropertyType != self.NODE):
            # Prefer <li> items inside a matching <ul>/<ol> container.
            snapResults = []
            containers = elmRoot(['ul', 'ol'], propertyMatch)
            for container in containers:
                snapResults.extend(container('li'))
            bFound = (len(snapResults) != 0)
        if not bFound:
            snapResults = elmRoot(all, propertyMatch)
            bFound = (len(snapResults) != 0)
        if (not bFound) and (sProperty == 'value'):
            # 'value' may fall back to a <pre> block (taken verbatim).
            snapResults = elmRoot('pre')
            bFound = (len(snapResults) != 0)
            bNormalize = not bFound
            if not bFound:
                snapResults = [elmRoot]
                bFound = (len(snapResults) != 0)
        arFilter = []
        if sProperty == 'vcard':
            # Exclude vcards nested inside other vcards.
            snapFilter = elmRoot(all, propertyMatch)
            for node in snapFilter:
                if node.findParent(all, propertyMatch):
                    arFilter.append(node)
        arResults = []
        for node in snapResults:
            if node not in arFilter:
                arResults.append(node)
        bFound = (len(arResults) != 0)
        if not bFound:
            # Type-appropriate empty result when nothing matched.
            if bAllowMultiple:
                return []
            elif iPropertyType == self.STRING:
                return ''
            elif iPropertyType == self.DATE:
                return None
            elif iPropertyType == self.URI:
                return ''
            elif iPropertyType == self.NODE:
                return None
            else:
                return None
        arValues = []
        for elmResult in arResults:
            sValue = None
            if iPropertyType == self.NODE:
                if bAllowMultiple:
                    arValues.append(elmResult)
                    continue
                else:
                    return elmResult
            sNodeName = elmResult.name.lower()
            # Value extraction tries, in order: mailto href (EMAIL on <a>),
            # <abbr title>, URI-bearing attribute, <img alt>, then the
            # node's rendered text with tags stripped.
            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'abbr'):
                sValue = elmResult.get('title')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (iPropertyType == self.URI):
                if sNodeName == 'a':
                    sValue = elmResult.get('href')
                elif sNodeName == 'img':
                    sValue = elmResult.get('src')
                elif sNodeName == 'object':
                    sValue = elmResult.get('data')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if (not sValue) and (sNodeName == 'img'):
                sValue = elmResult.get('alt')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue:
                sValue = elmResult.renderContents()
                sValue = re.sub(r'<\S[^>]*>', '', sValue)
                sValue = sValue.replace('\r\n', '\n')
                sValue = sValue.replace('\r', '\n')
            if sValue:
                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
            if not sValue:
                continue
            if iPropertyType == self.DATE:
                sValue = _parse_date_iso8601(sValue)
            if bAllowMultiple:
                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
            else:
                return bAutoEscape and self.vcardEscape(sValue) or sValue
        return arValues
    def findVCards(self, elmRoot, bAgentParsing=0):
        """Serialize every hCard microformat under elmRoot as vCard 3.0 text.

        When bAgentParsing is true, elmRoot itself is treated as a single
        vcard element (used for the recursive AGENT case); otherwise every
        class="vcard" element below elmRoot is processed.  Returns a single
        string of zero or more BEGIN:vCard/END:vCard blocks.
        """
        sVCards = ''

        if not bAgentParsing:
            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
        else:
            arCards = [elmRoot]

        for elmCard in arCards:
            arLines = []

            # Emit PROPERTY:value for a simple string-valued hCard property.
            def processSingleString(sProperty):
                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
                if sValue:
                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
                return sValue or u''

            # Emit a URI-valued property, handling inline data: URIs
            # (base64 ENCODING=b) and typed external references.
            def processSingleURI(sProperty):
                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
                if sValue:
                    sContentType = ''
                    sEncoding = ''
                    sValueKey = ''
                    if sValue.startswith('data:'):
                        sEncoding = ';ENCODING=b'
                        sContentType = sValue.split(';')[0].split('/').pop()
                        sValue = sValue.split(',', 1).pop()
                    else:
                        elmValue = self.getPropertyValue(elmCard, sProperty)
                        if elmValue:
                            if sProperty != 'url':
                                sValueKey = ';VALUE=uri'
                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
                    sContentType = sContentType.upper()
                    if sContentType == 'OCTET-STREAM':
                        sContentType = ''
                    if sContentType:
                        sContentType = ';TYPE=' + sContentType.upper()
                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))

            # Emit one line per occurrence of a property that carries a
            # TYPE parameter (tel, email, label), merging forced/default types.
            def processTypeValue(sProperty, arDefaultType, arForceType=None):
                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
                for elmResult in arResults:
                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
                    if arForceType:
                        arType = self.unique(arForceType + arType)
                    if not arType:
                        arType = arDefaultType
                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
                    if sValue:
                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))

            # AGENT
            # must do this before all other properties because it is destructive
            # (removes nested class="vcard" nodes so they don't interfere with
            # this vcard's other properties)
            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
            for elmAgent in arAgent:
                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
                    # nested vcard: recurse, then escape it for embedding
                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
                    sAgentValue = sAgentValue.replace('\n', '\\n')
                    sAgentValue = sAgentValue.replace(';', '\\;')
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
                    # Completely remove the agent element from the parse tree
                    elmAgent.extract()
                else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))

            # FN (full name)
            sFN = processSingleString('fn')

            # N (name)
            elmName = self.getPropertyValue(elmCard, 'n')
            if elmName:
                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
                                             sGivenName + ';' +
                                             ','.join(arAdditionalNames) + ';' +
                                             ','.join(arHonorificPrefixes) + ';' +
                                             ','.join(arHonorificSuffixes)))
            elif sFN:
                # implied "N" optimization
                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
                arNames = self.normalize(sFN).split()
                if len(arNames) == 2:
                    # "Doe, John", single-initial, or "Jr."-style second token
                    # means family name comes first
                    bFamilyNameFirst = (arNames[0].endswith(',') or
                                        len(arNames[1]) == 1 or
                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
                    if bFamilyNameFirst:
                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
                    else:
                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))

            # SORT-STRING
            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
            if sSortString:
                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))

            # NICKNAME
            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
            if arNickname:
                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))

            # PHOTO
            processSingleURI('photo')

            # BDAY
            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
            if dtBday:
                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))

            # ADR (address)
            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
            for elmAdr in arAdr:
                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
                if not arType:
                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
                                             sPostOfficeBox + ';' +
                                             sExtendedAddress + ';' +
                                             sStreetAddress + ';' +
                                             sLocality + ';' +
                                             sRegion + ';' +
                                             sPostalCode + ';' +
                                             sCountryName))

            # LABEL
            processTypeValue('label', ['intl','postal','parcel','work'])

            # TEL (phone number)
            processTypeValue('tel', ['voice'])

            # EMAIL
            processTypeValue('email', ['internet'], ['internet'])

            # MAILER
            processSingleString('mailer')

            # TZ (timezone)
            processSingleString('tz')

            # GEO (geographical information)
            elmGeo = self.getPropertyValue(elmCard, 'geo')
            if elmGeo:
                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))

            # TITLE
            processSingleString('title')

            # ROLE
            processSingleString('role')

            # LOGO
            processSingleURI('logo')

            # ORG (organization)
            elmOrg = self.getPropertyValue(elmCard, 'org')
            if elmOrg:
                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
                if not sOrganizationName:
                    # implied "organization-name" optimization
                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
                    if sOrganizationName:
                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
                else:
                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))

            # CATEGORY
            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
            if arCategory:
                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))

            # NOTE
            processSingleString('note')

            # REV
            processSingleString('rev')

            # SOUND
            processSingleURI('sound')

            # UID
            processSingleString('uid')

            # URL
            processSingleURI('url')

            # CLASS
            processSingleString('class')

            # KEY
            processSingleURI('key')

            if arLines:
                arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
                # XXX - this is super ugly; properly fix this with issue 148
                for i, s in enumerate(arLines):
                    if not isinstance(s, unicode):
                        arLines[i] = s.decode('utf-8', 'ignore')
                sVCards += u'\n'.join(arLines) + u'\n'

        return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if not attrsD.has_key('href'):
return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1:
return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href:
continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
if segments:
tag = segments.pop()
else:
# there are no tags
continue
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', u'').split()
xfn_rels = []
for rel in rels:
if rel in self.known_xfn_relationships:
xfn_rels.append(rel)
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
    """Parse hCard, rel-tag, enclosure and XFN microformats from HTML.

    Returns a dict with 'tags', 'enclosures', 'xfn' and 'vcard' keys, or
    None when BeautifulSoup is unavailable or the source is unparseable.
    """
    if not BeautifulSoup:
        return
    try:
        parser = _MicroformatsParser(htmlSource, baseURI, encoding)
    except UnicodeEncodeError:
        # sgmllib throws this exception when performing lookups of tags
        # with non-ASCII characters in them.
        return
    parser.vcard = parser.findVCards(parser.document)
    parser.findTags()
    parser.findEnclosures()
    parser.findXFN()
    return {"tags": parser.tags, "enclosures": parser.enclosures, "xfn": parser.xfn, "vcard": parser.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML processor that rewrites relative URIs as absolute ones.

    Every (tag, attribute) pair listed in relative_uris is resolved
    against the base URI supplied at construction time.
    """
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        # strip whitespace before joining; unacceptable schemes yield u''
        return _makeSafeAbsoluteURI(_urljoin(self.baseuri, uri.strip()))

    def unknown_starttag(self, tag, attrs):
        attrs = self.normalize_attrs(attrs)
        resolved = []
        for key, value in attrs:
            if (tag, key) in self.relative_uris:
                # when resolution yields an empty (rejected) URI, the
                # original value is kept, matching the historic behavior
                value = self.resolveURI(value) or value
            resolved.append((key, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, resolved)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Rewrite relative URIs in htmlSource as absolute, using baseURI."""
    if not _SGML_AVAILABLE:
        return htmlSource
    resolver = _RelativeURIResolver(baseURI, encoding, _type)
    resolver.feed(htmlSource)
    return resolver.output()
def _makeSafeAbsoluteURI(base, rel=None):
    """Join base and rel, returning u'' for unacceptable URI schemes.

    When ACCEPTABLE_URI_SCHEMES is empty, scheme filtering is disabled
    and the function degenerates to a plain urljoin.
    """
    if not ACCEPTABLE_URI_SCHEMES:
        # scheme filtering disabled: plain join
        return _urljoin(base, rel or u'')
    if not base:
        return rel or u''
    if rel:
        joined = _urljoin(base, rel)
        scheme = joined.strip().split(':', 1)[0]
        if scheme in ACCEPTABLE_URI_SCHEMES:
            return joined
        return u''
    # no relative part: vet the base URI's scheme on its own
    scheme = urlparse.urlparse(base)[0]
    if scheme and scheme not in ACCEPTABLE_URI_SCHEMES:
        return u''
    return base
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Whitelist-based HTML sanitizer.

    Any element, attribute, or CSS property not on one of the
    acceptable_* lists below is stripped.  MathML and SVG content is
    allowed through (with its own whitelists) only while inside an
    explicitly namespaced <math> or <svg> element.
    """
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
        'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
        'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
        'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
        'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
        'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
        'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
        'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
        'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
        'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
        'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
        'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
        'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
        'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
        'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
        'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
        'xml:lang']

    # elements whose entire content (not just the tags) must be suppressed
    unacceptable_elements_with_end_tag = ['script', 'applet', 'style']

    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']

    # survey of common keywords found in feeds
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']

    # matches hex colors, rgb() colors and simple numeric values with units
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
        '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')

    mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
        'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
        'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
        'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none', 'semantics']

    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
        'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
        'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
        'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
        'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
        'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']

    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
        'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
        'svg', 'switch', 'text', 'title', 'tspan', 'use']

    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
        'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
        'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
        'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
        'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
        'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
        'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
        'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
        'min', 'name', 'offset', 'opacity', 'orient', 'origin',
        'overline-position', 'overline-thickness', 'panose-1', 'path',
        'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
        'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
        'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
        'stop-color', 'stop-opacity', 'strikethrough-position',
        'strikethrough-thickness', 'stroke', 'stroke-dasharray',
        'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
        'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
        'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
        'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
        'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
        'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
        'y2', 'zoomAndPan']

    # lazily-built maps from lowercased SVG names back to camelCase originals
    svg_attr_map = None
    svg_elem_map = None

    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']

    def reset(self):
        # reset inherited state plus our own nesting counters
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0
        self.mathmlOK = 0
        self.svgOK = 0

    def unknown_starttag(self, tag, attrs):
        acceptable_attributes = self.acceptable_attributes
        keymap = {}
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1

            # add implicit namespaces to html5 inline svg/mathml
            if self._type.endswith('html'):
                if not dict(attrs).get('xmlns'):
                    if tag=='svg':
                        attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
                    if tag=='math':
                        attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )

            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK += 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK += 1

            # choose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])

                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            elif not tag in self.acceptable_elements:
                return

        # declare xlink namespace, if needed
        if self.mathmlOK or self.svgOK:
            # BUGFIX: filter() passes each (name, value) tuple as a single
            # argument, so the predicate must take one parameter; the old
            # two-argument lambda raised TypeError on every call.
            if filter(lambda attr: attr[0].startswith('xlink:'), attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))

        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                key=keymap.get(key,key)
                # make sure the uri uses an acceptable uri scheme
                if key == u'href':
                    value = _makeSafeAbsoluteURI(value)
                clean_attrs.append((key,value))
            elif key=='style':
                clean_value = self.sanitize_style(value)
                if clean_value:
                    clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            if self.mathmlOK and tag in self.mathml_elements:
                if tag == 'math' and self.mathmlOK:
                    self.mathmlOK -= 1
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                if tag == 'svg' and self.svgOK:
                    self.svgOK -= 1
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        # processing instructions are dropped entirely
        pass

    def handle_decl(self, text):
        # document type declarations are dropped entirely
        pass

    def handle_data(self, text):
        # suppress text while inside an unacceptable element (e.g. <script>)
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)

    def sanitize_style(self, style):
        """Return style with only whitelisted CSS properties/values kept."""
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)

        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        # This replaced a regexp that used re.match and was prone to pathological back-tracking.
        if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
            return ''

        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value:
                continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                       not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)

    def parse_comment(self, i, report=1):
        ret = _BaseHTMLProcessor.parse_comment(self, i, report)
        if ret >= 0:
            return ret
        # if ret == -1, this may be a malicious attempt to circumvent
        # sanitization, or a page-destroying unclosed comment
        match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
        if match:
            return match.end()
        # unclosed comment; deliberately fail to handle_data()
        return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
    """Strip unacceptable tags, attributes and CSS from htmlSource.

    If TIDY_MARKUP is set and one of the optional Tidy interfaces is
    installed, the result is additionally run through Tidy and reduced
    to the contents of the <body> element.
    """
    if not _SGML_AVAILABLE:
        return htmlSource
    p = _HTMLSanitizer(encoding, _type)
    # BUGFIX: escape CDATA section openers so their raw contents can't
    # slip past the sanitizer as markup (the replacement target had
    # degenerated into a no-op identical to the search string).
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except Exception:
                # this Tidy interface isn't importable/usable; try the next one
                pass
        if _tidy:
            utf8 = isinstance(data, unicode)
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only what is inside <body>...</body>
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler tuned for feed fetching.

    - never raises on HTTP errors; the response carries a .status attribute
    - records the status code and final URL of redirects
    - upgrades basic auth to digest auth when the server demands it
    """
    def http_error_default(self, req, fp, code, msg, headers):
        # The default implementation just raises HTTPError.
        # Forget that.
        fp.status = code
        return fp

    def http_error_301(self, req, fp, code, msg, hdrs):
        result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
                                                            code, msg, hdrs)
        result.status = code
        result.newurl = result.geturl()
        return result

    # The default implementations in urllib2.HTTPRedirectHandler
    # are identical, so hardcoding a http_error_301 call above
    # won't affect anything
    http_error_300 = http_error_301
    http_error_302 = http_error_301
    http_error_303 = http_error_301
    http_error_307 = http_error_301

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        if base64 is None or 'Authorization' not in req.headers \
                          or 'WWW-Authenticate' not in headers:
            return self.http_error_default(req, fp, code, msg, headers)
        auth = _base64decode(req.headers['Authorization'].split(' ')[1])
        # BUGFIX: split on the first colon only -- RFC 2617 forbids ':' in
        # the userid but allows it in the password, and a plain split()
        # raised ValueError for such passwords.
        user, passw = auth.split(':', 1)
        realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
        self.add_password(realm, host, user, passw)
        retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
        self.reset_retry_count()
        return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.

    if request_headers is supplied it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
    """

    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string

    if url_file_stream_or_string == '-':
        return sys.stdin

    if isinstance(url_file_stream_or_string, basestring) \
       and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
        # Deal with the feed URI scheme
        if url_file_stream_or_string.startswith('feed:http'):
            url_file_stream_or_string = url_file_stream_or_string[5:]
        elif url_file_stream_or_string.startswith('feed:'):
            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.standard_b64encode(user_passwd).strip()

        # iri support
        if isinstance(url_file_stream_or_string, unicode):
            url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)

        # try to open with urllib2 (to use optional headers)
        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
        # the deprecated builtin apply() was replaced with argument
        # unpacking; the call is otherwise identical
        opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string, 'rb')
    except IOError:
        pass

    # treat url_file_stream_or_string as string
    if isinstance(url_file_stream_or_string, unicode):
        return _StringIO(url_file_stream_or_string.encode('utf-8'))
    return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
    """Convert a URL to IDN (punycode) notation when its host is non-ASCII."""
    # this function should only be called with a unicode string
    try:
        # an all-ASCII host needs no conversion
        urlparse.urlsplit(url)[1].encode('ascii')
        return url
    except UnicodeEncodeError:
        pass
    # the url needs to be converted to idn notation
    parts = list(urlparse.urlsplit(url))
    pieces = parts[1].rsplit(':', 1)
    if len(pieces) == 2:
        hostname, port = pieces
    else:
        hostname, port = pieces[0], u''
    # encode each dot-separated label individually
    encoded = [label.encode('idna').decode('utf-8') for label in hostname.split('.')]
    parts[1] = '.'.join(encoded)
    if port:
        parts[1] += ':' + port
    return urlparse.urlunsplit(parts)
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
    """Build a urllib2.Request carrying conditional-GET and auth headers."""
    request = urllib2.Request(url)
    request.add_header('User-Agent', agent)
    if etag:
        request.add_header('If-None-Match', etag)
    if isinstance(modified, basestring):
        modified = _parse_date(modified)
    elif isinstance(modified, datetime.datetime):
        modified = modified.utctimetuple()
    if modified:
        # format into an RFC 1123-compliant timestamp. We can't use
        # time.strftime() since the %a and %b directives can be affected
        # by the current locale, but RFC 2616 states that dates must be
        # in English.
        short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        stamp = '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
            short_weekdays[modified[6]], modified[2], months[modified[1] - 1],
            modified[0], modified[3], modified[4], modified[5])
        request.add_header('If-Modified-Since', stamp)
    if referrer:
        request.add_header('Referer', referrer)
    # advertise only the compressions we can actually decode
    if gzip and zlib:
        accept_encoding = 'gzip, deflate'
    elif gzip:
        accept_encoding = 'gzip'
    elif zlib:
        accept_encoding = 'deflate'
    else:
        accept_encoding = ''
    request.add_header('Accept-encoding', accept_encoding)
    if auth:
        request.add_header('Authorization', 'Basic %s' % auth)
    if ACCEPT_HEADER:
        request.add_header('Accept', ACCEPT_HEADER)
    # use this for whatever -- cookies, special headers, etc
    # [('Cookie','Something'),('x-special-header','Another Value')]
    for header_name, header_value in request_headers.items():
        request.add_header(header_name, header_value)
    request.add_header('A-IM', 'feed') # RFC 3229 support
    return request
# registry of date-parsing callables; most recently registered is tried first
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    # insert at the front so newly registered handlers take priority
    _date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# Expand each template into a real regex: the date placeholders become
# named groups, followed by an optional time-of-day/timezone portion.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# remove the loop variable if it leaked into module scope; the NameError
# guard covers interpreters where list comprehensions don't leak it
try:
    del tmpl
except NameError:
    pass
# pre-compile the expanded patterns; only the bound match methods are kept
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105

    Returns a 9-tuple (time.struct_time) or None when no template matches.
    Missing fields default to the current UTC date.
    '''
    m = None
    # Try each pre-compiled template match in declaration order (greedy first).
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    # The final template is the empty string, which matches anything with a
    # zero-width span; reject that degenerate match.
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift hours/minutes toward UTC and let
    # mktime() normalize any resulting overflow/underflow.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Unicode fragments of Korean date strings (originally EUC-KR encoded);
# the regexes below expect already-decoded unicode input.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# OnBlog format: "YYYY<year> MM<month> DD<day> HH:MM:SS"
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
        (_korean_year, _korean_month, _korean_day))
# Nate format: "YYYY-MM-DD <am|pm> H:M:S" (fields may be 1 or 2 digits)
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
        (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    match = _korean_onblog_date_re.match(dateString)
    if match is None:
        return
    # Rebuild the date as W3DTF (fixed +09:00 = KST) and delegate.
    fields = {'year': match.group(1), 'month': match.group(2),
              'day': match.group(3), 'hour': match.group(4),
              'minute': match.group(5), 'second': match.group(6),
              'zonediff': '+09:00'}
    return _parse_date_w3dtf(
        '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    match = _korean_nate_date_re.match(dateString)
    if match is None:
        return
    # Convert 12-hour clock to 24-hour; group(4) is the am/pm marker.
    hour24 = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour24 += 12
    fields = {'year': match.group(1), 'month': match.group(2),
              'day': match.group(3), 'hour': '%02d' % hour24,
              'minute': match.group(6), 'second': match.group(7),
              'zonediff': '+09:00'}
    # Rebuild as W3DTF (fixed +09:00 = KST) and delegate.
    return _parse_date_w3dtf(
        '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields)
registerDateHandler(_parse_date_nate)
# "YYYY-MM-DD HH:MM:SS[.fraction]" as emitted by MS SQL Server.
_mssql_date_re = re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    match = _mssql_date_re.match(dateString)
    if match is None:
        return
    # The fractional-seconds group (7) is captured but deliberately dropped.
    year, month, day, hour, minute, second = match.groups()[:6]
    # NOTE(review): the fixed '+09:00' offset mirrors the Korean handlers and
    # looks copy-pasted -- confirm it is intended for MS SQL timestamps.
    return _parse_date_w3dtf(
        '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s'
        % {'year': year, 'month': month, 'day': day, 'hour': hour,
           'minute': minute, 'second': second, 'zonediff': '+09:00'})
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# Maps Greek month-name abbreviations (iso-8859-7 origin) to English
# RFC 822 month abbreviations; several spelling variants map to one month.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }
# Maps Greek weekday abbreviations to English RFC 822 weekday abbreviations.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }
# "<wday>, DD <month> YYYY HH:MM:SS <zone>" with Greek wday/month names.
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    match = _greek_date_format_re.match(dateString)
    if match is None:
        return
    # Unknown weekday/month names raise KeyError here; _parse_date() catches
    # KeyError and simply moves on to the next handler.
    fields = {'wday': _greek_wdays[match.group(1)],
              'day': match.group(2),
              'month': _greek_months[match.group(3)],
              'year': match.group(4),
              'hour': match.group(5),
              'minute': match.group(6),
              'second': match.group(7),
              'zonediff': match.group(8)}
    # Rebuild as an RFC 822 date and delegate to the RFC 822 parser.
    return _parse_date_rfc822(
        '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % fields)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    match = _hungarian_date_format_re.match(dateString)
    if match is None or match.group(2) not in _hungarian_months:
        return None
    day, hour = match.group(3), match.group(4)
    # Zero-pad one-digit day/hour so the rebuilt string is valid W3DTF.
    if len(day) == 1:
        day = '0' + day
    if len(hour) == 1:
        hour = '0' + hour
    # Translate the month name and delegate to the W3DTF parser.
    return _parse_date_w3dtf(
        '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s'
        % {'year': match.group(1),
           'month': _hungarian_months[match.group(2)],
           'day': day,
           'hour': hour,
           'minute': match.group(5),
           'zonediff': match.group(6)})
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    # Returns a 9-tuple in GMT, or None when the string is not entirely a
    # valid W3DTF date.
    def __extract_date(m):
        # Returns (year, month, day); (0, 0, 0) signals an unusable year.
        year = int(m.group('year'))
        if year < 100:
            # Two-digit year: assume the current century.
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # Ordinal (day-of-year) date: start from a rough month/day guess
            # and iterate until mktime's round-trip lands on the requested
            # day-of-year.
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # Returns (hours, minutes, seconds); all zero when no time part.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        # The sign is inverted: the return value is what must be ADDED to the
        # parsed local time to obtain UTC (so '+HH:MM' yields a negative offset).
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        if tzd[0] == '+':
            return -offset
        return offset
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
                 '|(?P<julian>\d\d\d)))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # The regex must consume the whole string, not just a prefix.
    if (m is None) or (m.group() != dateString):
        return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0:
        return
    # mktime() interprets the tuple as local time; subtracting time.timezone
    # and adding the TZD offset converts the result back to UTC.
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date

    Normalizes the string (strips weekday, splits "HH:MM:SS+ZZZZ", pads a
    missing time) and then defers to the rfc822 module. Returns a 9-tuple
    in GMT, or None.
    '''
    data = dateString.split()
    if not data:
        return None
    # Drop a leading weekday name ("Sun," / "Sun.") so the field count below
    # reflects only date/time/zone tokens.
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # "DD Mon YYYY HH:MM:SS+ZZZZ" -- split the glued-on zone offset.
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
        # Account for the Etc/GMT timezone by stripping 'Etc/'
    elif len(data) == 5 and data[4].lower().startswith('etc/'):
        data[4] = data[4][4:]
        dateString = " ".join(data)
    if len(data) < 5:
        # Date-only string: assume midnight GMT.
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        # Jython doesn't adjust for 2-digit years like CPython does,
        # so account for it by shifting the year so that it's in the
        # range 1970-2069 (1970 being the year of the Unix epoch).
        if tm[0] < 100:
            tm = (tm[0] + (1900, 2000)[tm[0] < 70],) + tm[1:]
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
# Values follow rfc822._timezones' convention: signed HHMM as an integer.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date_perforce(aDateString):
    """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
    # Example: Fri, 2006/09/15 08:19:53 EDT
    pattern = re.compile( \
        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
    match = pattern.search(aDateString)
    if match is None:
        return None
    dow, year, month, day, hour, minute, second, tz = match.groups()
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    # Rebuild as an RFC 822 date so the stdlib parser can normalize it.
    rfc822date = "%s, %s %s %s %s:%s:%s %s" % (
        dow, day, months[int(month) - 1], year, hour, minute, second, tz)
    parsed = rfc822.parsedate_tz(rfc822date)
    if parsed:
        return time.gmtime(rfc822.mktime_tz(parsed))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    if not dateString:
        return None
    for handler in _date_handlers:
        try:
            result = handler(dateString)
        except (KeyError, OverflowError, ValueError):
            # A handler that blows up on this format simply doesn't apply.
            continue
        # Accept only a real 9-tuple; anything falsy or malformed means the
        # handler could not parse the string.
        if result and len(result) == 9:
            return result
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)
    Returns a 5-tuple: (true_encoding, http_encoding, xml_encoding,
    sniffed_xml_encoding, acceptable_content_type).
    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified. But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.
    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii. (We now do this.) And also that it
    must always be flagged as non-well-formed. (We now do this too.)
    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).
    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible. Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.
    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not). CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        charset = params.get('charset', '').replace("'", "")
        if not isinstance(charset, unicode):
            charset = charset.decode('utf-8', 'ignore')
        return content_type, charset
    sniffed_xml_encoding = u''
    xml_encoding = u''
    true_encoding = u''
    # Header lookups try both lowercase and capitalized key spellings.
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type', http_headers.get('Content-type')))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration. This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Each branch re-encodes xml_data to UTF-8 so the declaration regex
    # below can run over ASCII-compatible bytes.
    try:
        if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]):
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]):
            # UTF-16BE
            sniffed_xml_encoding = u'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
            # UTF-16BE with BOM
            sniffed_xml_encoding = u'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]):
            # UTF-16LE
            sniffed_xml_encoding = u'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
            # UTF-16LE with BOM
            sniffed_xml_encoding = u'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]):
            # UTF-32BE
            sniffed_xml_encoding = u'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]):
            # UTF-32LE
            sniffed_xml_encoding = u'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
            # UTF-32BE with BOM
            sniffed_xml_encoding = u'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
            # UTF-32LE with BOM
            sniffed_xml_encoding = u'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
            # UTF-8 with BOM
            sniffed_xml_encoding = u'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data)
    except UnicodeDecodeError:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # A declared multi-byte/endian-ambiguous encoding is overridden by
        # what we actually sniffed from the BOM/first bytes.
        if sniffed_xml_encoding and (xml_encoding in (u'iso-10646-ucs-2', u'ucs-2', u'csunicode', u'iso-10646-ucs-4', u'ucs-4', u'csucs4', u'utf-16', u'utf-32', u'utf_16', u'utf_32', u'utf16', u'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = (u'application/xml', u'application/xml-dtd', u'application/xml-external-parsed-entity')
    text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
    # RFC 3023 precedence rules: application/* honors charset then XML
    # declaration; text/* honors only the HTTP charset.
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith(u'application/') and http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or u'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith(u'text/')) and http_content_type.endswith(u'+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or u'us-ascii'
    elif http_content_type.startswith(u'text/'):
        true_encoding = http_encoding or u'us-ascii'
    elif http_headers and (not (http_headers.has_key('content-type') or http_headers.has_key('Content-type'))):
        true_encoding = xml_encoding or u'iso-8859-1'
    else:
        true_encoding = xml_encoding or u'utf-8'
    # some feeds claim to be gb2312 but are actually gb18030.
    # apparently MSIE and Firefox both do the following switch:
    if true_encoding.lower() == u'gb2312':
        true_encoding = u'gb18030'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding
    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Returns the data re-encoded as UTF-8 bytes with the XML declaration
    rewritten (or prepended) to say encoding='utf-8'. May raise
    UnicodeDecodeError or LookupError; callers handle both.
    '''
    # strip Byte Order Mark (if present); a BOM also overrides the caller's
    # claimed encoding, since it is authoritative.
    if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])):
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])):
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    # Replace an existing XML declaration, or prepend one if absent.
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE

    Actually returns a 3-tuple: (version, data, safe_entities_dict) where the
    dict maps inline-declared entity names to their replacement text.
    '''
    # Split at the first element start ('<' followed by a word char):
    # everything before it is the prolog ("head") we may rewrite.
    start = re.search(_s2bytes('<\w'), data)
    start = start and start.start() or -1
    head,data = data[:start+1], data[start+1:]
    entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
    entity_results=entity_pattern.findall(head)
    head = entity_pattern.sub(_s2bytes(''), head)
    doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
    doctype_results = doctype_pattern.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    if doctype.lower().count(_s2bytes('netscape')):
        # Netscape DOCTYPE marks the RSS 0.91 (Netscape) dialect.
        version = u'rss091n'
    else:
        version = None
    # only allow in 'safe' inline entity definitions
    replacement=_s2bytes('')
    if len(doctype_results)==1 and entity_results:
        # 'safe' = simple name/value pairs with no external references.
        safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
        safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
        if safe_entities:
            replacement=_s2bytes('<!DOCTYPE feed [\n  <!ENTITY') + _s2bytes('>\n  <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
    data = doctype_pattern.sub(replacement, head) + data
    # NOTE: 'replacement and [...]' short-circuits when replacement is empty,
    # which also avoids referencing safe_pattern when it was never defined.
    return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)])
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
    '''Parse a feed from a URL, file, stream, or string.

    etag/modified enable conditional GETs; agent and referrer are passed to
    the HTTP layer; handlers is a urllib2-style handler (or list of them).
    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.
    response_headers, if given, overrides headers read from the response.
    Returns a FeedParserDict with (at least) 'feed', 'entries' and 'bozo'.
    Never raises: download/parse failures set bozo/bozo_exception instead.
    '''
    if handlers is None:
        handlers = []
    if request_headers is None:
        request_headers = {}
    if response_headers is None:
        response_headers = {}
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    if not isinstance(handlers, list):
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
        data = f.read()
    except Exception, e:
        # Any fetch failure is reported via bozo, not raised.
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None
    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)
    # if feed is gzip-compressed, decompress it
    if f and data and 'headers' in result:
        if gzip and 'gzip' in (result['headers'].get('content-encoding'), result['headers'].get('Content-Encoding')):
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except (IOError, struct.error), e:
                # IOError can occur if the gzip header is bad
                # struct.error can occur if the data is damaged
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = None
        elif zlib and 'deflate' in (result['headers'].get('content-encoding'), result['headers'].get('Content-Encoding')):
            try:
                data = zlib.decompress(data)
            except zlib.error, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = None
    # save HTTP headers
    if 'headers' in result:
        if 'etag' in result['headers'] or 'ETag' in result['headers']:
            etag = result['headers'].get('etag', result['headers'].get('ETag', u''))
            if not isinstance(etag, unicode):
                etag = etag.decode('utf-8', 'ignore')
            if etag:
                result['etag'] = etag
        if 'last-modified' in result['headers'] or 'Last-Modified' in result['headers']:
            modified = result['headers'].get('last-modified', result['headers'].get('Last-Modified'))
            if modified:
                result['modified'] = _parse_date(modified)
    if hasattr(f, 'url'):
        if not isinstance(f.url, unicode):
            result['href'] = f.url.decode('utf-8', 'ignore')
        else:
            result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()
    if data is None:
        return result
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type') or http_headers.has_key('Content-type'):
            bozo_message = '%s is not an XML media type' % http_headers.get('content-type', http_headers.get('Content-type'))
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    if data is not None:
        # Remove the DOCTYPE but remember safe inline entities for the
        # loose parser.
        result['version'], data, entities = _stripDoctype(data)
    # ensure that baseuri is an absolute uri using an acceptable URI scheme
    contentloc = http_headers.get('content-location', http_headers.get('Content-Location', u''))
    href = result.get('href', u'')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
    baselang = http_headers.get('content-language', http_headers.get('Content-Language', None))
    if not isinstance(baselang, unicode) and baselang is not None:
        baselang = baselang.decode('utf-8', 'ignore')
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = u''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data.  This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if data is None:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding:
            continue
        if proposed_encoding in tried_encodings:
            continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
        except (UnicodeDecodeError, LookupError):
            pass
        else:
            known_encoding = use_strict_parser = 1
            break
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        proposed_encoding = chardet.detect(data)['encoding']
        if proposed_encoding and (proposed_encoding not in tried_encodings):
            tried_encodings.append(proposed_encoding)
            try:
                data = _toUTF8(data, proposed_encoding)
            except (UnicodeDecodeError, LookupError):
                pass
            else:
                known_encoding = use_strict_parser = 1
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and (u'utf-8' not in tried_encodings):
        proposed_encoding = u'utf-8'
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
        except UnicodeDecodeError:
            pass
        else:
            known_encoding = use_strict_parser = 1
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and (u'windows-1252' not in tried_encodings):
        proposed_encoding = u'windows-1252'
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
        except UnicodeDecodeError:
            pass
        else:
            known_encoding = use_strict_parser = 1
    # if still no luck and we haven't tried iso-8859-2 yet, try that.
    if (not known_encoding) and (u'iso-8859-2' not in tried_encodings):
        proposed_encoding = u'iso-8859-2'
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
        except UnicodeDecodeError:
            pass
        else:
            known_encoding = use_strict_parser = 1
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = u''
    elif proposed_encoding != result['encoding']:
        # We parsed successfully but with a different encoding than the one
        # the document declared: flag it, and record what actually worked.
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except xml.sax.SAXParseException, e:
            # Strict parse failed: record why, then fall through to the
            # forgiving SGML-based parser below.
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser and _SGML_AVAILABLE:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
| Python |
# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
"""
fcgi - a FastCGI/WSGI gateway.
For more information about FastCGI, see <http://www.fastcgi.com/>.
For more information about the Web Server Gateway Interface, see
<http://www.python.org/peps/pep-0333.html>.
Example usage:
#!/usr/bin/env python
from myapplication import app # Assume app is your WSGI application object
from fcgi import WSGIServer
WSGIServer(app).run()
See the documentation for WSGIServer/Server for more information.
On most platforms, fcgi will fall back to regular CGI behavior if run in a
non-FastCGI context. If you want to force CGI behavior, set the environment
variable FCGI_FORCE_CGI to "Y" or "y".
"""
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision$'
import sys
import os
import signal
import struct
import cStringIO as StringIO
import select
import socket
import errno
import traceback
try:
import thread
import threading
thread_available = True
except ImportError:
import dummy_thread as thread
import dummy_threading as threading
thread_available = False
# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case.
if not hasattr(socket, 'SHUT_WR'):
socket.SHUT_WR = 1
__all__ = ['WSGIServer']
# Constants from the FastCGI spec.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
# Record types.
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
# Request id reserved for management records.
FCGI_NULL_REQUEST_ID = 0
# Flag in FCGI_BEGIN_REQUEST: keep connection open after the request ends.
FCGI_KEEP_CONN = 1
# Application roles.
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
# Protocol status codes carried in FCGI_END_REQUEST records.
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
# Variable names used with FCGI_GET_VALUES management records.
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
# struct format strings for the fixed-size record headers/bodies.
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
if __debug__:
    import time

    # Set non-zero to write debug output to a file.
    DEBUG = 0
    DEBUGLOG = '/tmp/fcgi.log'

    def _debug(level, msg):
        """Append msg to DEBUGLOG when level is at or below DEBUG."""
        if DEBUG < level:
            return
        try:
            f = open(DEBUGLOG, 'a')
            f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
            f.close()
        except:
            # Debug logging is strictly best-effort; never let it
            # interfere with request processing.
            pass
class InputStream(object):
    """
    File-like object representing FastCGI input streams (FCGI_STDIN and
    FCGI_DATA). Supports the minimum methods required by WSGI spec.

    The owning Connection feeds data in via add_data(); an add_data('')
    call marks end-of-stream. Data is accumulated in _bufList and merged
    into _buf lazily, on read.
    """
    def __init__(self, conn):
        self._conn = conn
        # See Server.
        self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
        self._buf = ''              # Merged, contiguous data.
        self._bufList = []          # Chunks not yet merged into _buf.
        self._pos = 0 # Current read position.
        self._avail = 0 # Number of bytes currently available.
        self._eof = False # True when server has sent EOF notification.

    def _shrinkBuffer(self):
        """Gets rid of already read data (since we can't rewind)."""
        if self._pos >= self._shrinkThreshold:
            self._buf = self._buf[self._pos:]
            self._avail -= self._pos
            self._pos = 0
            assert self._avail >= 0

    def _waitForData(self):
        """Waits for more data to become available."""
        # Single-threaded base class: pump the Connection directly so it
        # can deliver more Records to us via add_data().
        self._conn.process_input()

    def read(self, n=-1):
        """Read up to n bytes; n < 0 means read until end-of-stream."""
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            if n < 0 or (self._avail - self._pos) < n:
                # Not enough data available.
                if self._eof:
                    # And there's no more coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more data.
                    self._waitForData()
                    continue
            else:
                newPos = self._pos + n
                break
        # Merge buffer list, if necessary.
        if self._bufList:
            self._buf += ''.join(self._bufList)
            self._bufList = []
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r

    def readline(self, length=None):
        """Read one line; length, if given, caps the bytes returned."""
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            # Unfortunately, we need to merge the buffer list early
            # (str.find needs a contiguous string to search).
            if self._bufList:
                self._buf += ''.join(self._bufList)
                self._bufList = []
            # Find newline.
            i = self._buf.find('\n', self._pos)
            if i < 0:
                # Not found?
                if self._eof:
                    # No more data coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more to come.
                    self._waitForData()
                    continue
            else:
                newPos = i + 1
                break
        if length is not None:
            if self._pos + length < newPos:
                newPos = self._pos + length
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r

    def readlines(self, sizehint=0):
        """Read lines until EOF, or until at least sizehint bytes are read."""
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: yield one line per iteration.
        r = self.readline()
        if not r:
            raise StopIteration
        return r

    def add_data(self, data):
        """Called by the Connection; empty data signals end-of-stream."""
        if not data:
            self._eof = True
        else:
            self._bufList.append(data)
            self._avail += len(data)
class MultiplexedInputStream(InputStream):
    """
    A version of InputStream meant to be used with MultiplexedConnections.
    Assumes the MultiplexedConnection (the producer) and the Request
    (the consumer) are running in different threads.
    """
    def __init__(self, conn):
        super(MultiplexedInputStream, self).__init__(conn)

        # Arbitrates access to this InputStream (it's used simultaneously
        # by a Request and its owning Connection object).
        lock = threading.RLock()

        # Notifies Request thread that there is new data available.
        self._lock = threading.Condition(lock)

    def _waitForData(self):
        # Wait for notification from add_data().
        # NOTE(review): Condition.wait() requires the lock to be held;
        # the callers (read/readline below) acquire it first.
        self._lock.wait()

    def read(self, n=-1):
        # Serialize against add_data() running in the Connection thread.
        self._lock.acquire()
        try:
            return super(MultiplexedInputStream, self).read(n)
        finally:
            self._lock.release()

    def readline(self, length=None):
        self._lock.acquire()
        try:
            return super(MultiplexedInputStream, self).readline(length)
        finally:
            self._lock.release()

    def add_data(self, data):
        self._lock.acquire()
        try:
            super(MultiplexedInputStream, self).add_data(data)
            # Wake a consumer blocked in _waitForData().
            self._lock.notify()
        finally:
            self._lock.release()
class OutputStream(object):
    """
    FastCGI output stream (FCGI_STDOUT/FCGI_STDERR).

    Unless `buffered` is set, every write() immediately emits Records
    back to the web server; any higher-level buffering is the caller's
    responsibility.
    """
    def __init__(self, conn, req, type, buffered=False):
        self._conn = conn
        self._req = req
        self._type = type
        self._buffered = buffered
        self._bufList = []  # Pending chunks, used only when buffered.
        self.dataWritten = False
        self.closed = False

    def _write(self, data):
        # Split data into Records no larger than the server's maxwrite,
        # leaving room for the FastCGI header in each one.
        remaining = len(data)
        while remaining:
            chunk = min(remaining, self._req.server.maxwrite - FCGI_HEADER_LEN)
            rec = Record(self._type, self._req.requestId)
            rec.contentLength = chunk
            rec.contentData = data[:chunk]
            self._conn.writeRecord(rec)
            data = data[chunk:]
            remaining -= chunk

    def write(self, data):
        assert not self.closed
        if not data:
            return
        self.dataWritten = True
        if self._buffered:
            self._bufList.append(data)
        else:
            self._write(data)

    def writelines(self, lines):
        assert not self.closed
        for chunk in lines:
            self.write(chunk)

    def flush(self):
        # A flush only has an effect when this stream buffers its writes.
        if not self._buffered:
            return
        pending, self._bufList = self._bufList, []
        self._write(''.join(pending))

    def close(self):
        """Sends end-of-stream notification, if necessary.

        Though available, this should NOT be called by WSGI applications.
        """
        if self.closed or not self.dataWritten:
            return
        self.flush()
        # An empty Record of this stream's type marks end-of-stream.
        rec = Record(self._type, self._req.requestId)
        self._conn.writeRecord(rec)
        self.closed = True
class TeeOutputStream(object):
    """
    Simple wrapper around two or more output file-like objects that
    duplicates every write to all of them.
    """
    def __init__(self, streamList):
        self._streamList = streamList

    def write(self, data):
        """Write data to every underlying stream."""
        for stream in self._streamList:
            stream.write(data)

    def writelines(self, lines):
        """Write each chunk in lines to every underlying stream."""
        for chunk in lines:
            self.write(chunk)

    def flush(self):
        """Flush every underlying stream."""
        for stream in self._streamList:
            stream.flush()
class StdoutWrapper(object):
    """
    Proxy for sys.stdout that records whether any non-empty data has
    actually been written (see the dataWritten attribute).
    """
    def __init__(self, stdout):
        self._file = stdout
        self.dataWritten = False

    def write(self, data):
        # Empty writes do not count as having produced output.
        if not data:
            return
        self.dataWritten = True
        self._file.write(data)

    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)

    def __getattr__(self, name):
        # Delegate everything else to the wrapped file object.
        return getattr(self._file, name)
def decode_pair(s, pos=0):
    """
    Decodes a name/value pair from s starting at pos.

    Per the FastCGI spec, each length is one byte, unless its high bit
    is set, in which case it occupies four bytes (31 bits used).

    Returns (new_pos, (name, value)): the position just past the pair,
    and the decoded pair itself.
    """
    sizes = []
    for _ in range(2):
        size = ord(s[pos])
        if size & 128:
            # Long form: 4-byte big-endian length with the top bit masked.
            size = struct.unpack('!L', s[pos:pos + 4])[0] & 0x7fffffff
            pos += 4
        else:
            pos += 1
        sizes.append(size)
    nameLength, valueLength = sizes
    name = s[pos:pos + nameLength]
    pos += nameLength
    value = s[pos:pos + valueLength]
    pos += valueLength
    return (pos, (name, value))
def encode_pair(name, value):
    """
    Encodes a name/value pair.
    The encoded string is returned.

    Lengths below 128 are encoded in one byte; longer lengths use four
    bytes with the high bit set, per the FastCGI spec.
    """
    nameLength = len(name)
    if nameLength < 128:
        s = chr(nameLength)
    else:
        s = struct.pack('!L', nameLength | 0x80000000L)
    valueLength = len(value)
    if valueLength < 128:
        s += chr(valueLength)
    else:
        s += struct.pack('!L', valueLength | 0x80000000L)
    return s + name + value
class Record(object):
    """
    A FastCGI Record.
    Used for encoding/decoding records.
    """
    def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
        self.version = FCGI_VERSION_1
        self.type = type
        self.requestId = requestId
        self.contentLength = 0
        self.paddingLength = 0
        self.contentData = ''

    def _recvall(sock, length):
        """
        Attempts to receive length bytes from a socket, blocking if necessary.
        (Socket may be blocking or non-blocking.)

        Returns (data, bytes_received); bytes_received may be short if
        the peer closed the connection.
        """
        dataList = []
        recvLen = 0
        while length:
            try:
                data = sock.recv(length)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket not ready; wait until readable.
                    select.select([sock], [], [])
                    continue
                else:
                    raise
            if not data: # EOF
                break
            dataList.append(data)
            dataLen = len(data)
            recvLen += dataLen
            length -= dataLen
        return ''.join(dataList), recvLen
    _recvall = staticmethod(_recvall)

    def read(self, sock):
        """Read and decode a Record from a socket.

        Raises EOFError on any short read or socket failure.
        """
        try:
            header, length = self._recvall(sock, FCGI_HEADER_LEN)
        except:
            raise EOFError
        if length < FCGI_HEADER_LEN:
            raise EOFError

        self.version, self.type, self.requestId, self.contentLength, \
            self.paddingLength = struct.unpack(FCGI_Header, header)

        if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))

        if self.contentLength:
            try:
                self.contentData, length = self._recvall(sock,
                                                         self.contentLength)
            except:
                raise EOFError
            if length < self.contentLength:
                raise EOFError

        if self.paddingLength:
            try:
                # Padding is discarded.
                self._recvall(sock, self.paddingLength)
            except:
                raise EOFError

    def _sendall(sock, data):
        """
        Writes data to a socket and does not return until all the data is sent.
        """
        length = len(data)
        while length:
            try:
                sent = sock.send(data)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket not ready; wait until writable.
                    select.select([], [sock], [])
                    continue
                else:
                    raise
            data = data[sent:]
            length -= sent
    _sendall = staticmethod(_sendall)

    def write(self, sock):
        """Encode and write a Record to a socket."""
        # Pad content out to a multiple of 8 bytes, per the spec.
        self.paddingLength = -self.contentLength & 7

        if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))

        header = struct.pack(FCGI_Header, self.version, self.type,
                             self.requestId, self.contentLength,
                             self.paddingLength)
        self._sendall(sock, header)
        if self.contentLength:
            self._sendall(sock, self.contentData)
        if self.paddingLength:
            self._sendall(sock, '\x00'*self.paddingLength)
class Request(object):
    """
    Represents a single FastCGI request.

    These objects are passed to your handler and is the main interface
    between your handler and the fcgi module. The methods should not
    be called by your handler. However, server, params, stdin, stdout,
    stderr, and data are free for your handler's use.
    """
    def __init__(self, conn, inputStreamClass):
        self._conn = conn

        self.server = conn.server
        self.params = {}  # FCGI_PARAMS name/value pairs (the CGI environment).
        self.stdin = inputStreamClass(conn)
        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
        # stderr is buffered so tracebacks arrive in one piece.
        self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True)
        self.data = inputStreamClass(conn)

    def run(self):
        """Runs the handler, flushes the streams, and ends the request."""
        try:
            protocolStatus, appStatus = self.server.handler(self)
        except:
            # Report the traceback on FCGI_STDERR; if nothing was sent to
            # the client yet, let the server render an error page.
            traceback.print_exc(file=self.stderr)
            self.stderr.flush()
            if not self.stdout.dataWritten:
                self.server.error(self)
            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0

        if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' %
                             (protocolStatus, appStatus))

        self._flush()
        self._end(appStatus, protocolStatus)

    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        self._conn.end_request(self, appStatus, protocolStatus)

    def _flush(self):
        self.stdout.close()
        self.stderr.close()
class CGIRequest(Request):
    """A normal CGI request disguised as a FastCGI request."""
    def __init__(self, server):
        # These are normally filled in by Connection.
        self.requestId = 1
        self.role = FCGI_RESPONDER
        self.flags = 0
        self.aborted = False

        self.server = server
        self.params = dict(os.environ)
        self.stdin = sys.stdin
        self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity!
        self.stderr = sys.stderr
        # Plain CGI has no FCGI_DATA stream; provide an empty one.
        self.data = StringIO.StringIO()

    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        # A CGI process serves exactly one request, then exits.
        sys.exit(appStatus)

    def _flush(self):
        # Not buffered, do nothing.
        pass
class Connection(object):
"""
A Connection with the web server.
Each Connection is associated with a single socket (which is
connected to the web server) and is responsible for handling all
the FastCGI message processing for that socket.
"""
_multiplexed = False
_inputStreamClass = InputStream
def __init__(self, sock, addr, server):
self._sock = sock
self._addr = addr
self.server = server
# Active Requests for this Connection, mapped by request ID.
self._requests = {}
def _cleanupSocket(self):
"""Close the Connection's socket."""
try:
self._sock.shutdown(socket.SHUT_WR)
except:
return
try:
while True:
r, w, e = select.select([self._sock], [], [])
if not r or not self._sock.recv(1024):
break
except:
pass
self._sock.close()
def run(self):
"""Begin processing data from the socket."""
self._keepGoing = True
while self._keepGoing:
try:
self.process_input()
except EOFError:
break
except (select.error, socket.error), e:
if e[0] == errno.EBADF: # Socket was closed by Request.
break
raise
self._cleanupSocket()
def process_input(self):
"""Attempt to read a single Record from the socket and process it."""
# Currently, any children Request threads notify this Connection
# that it is no longer needed by closing the Connection's socket.
# We need to put a timeout on select, otherwise we might get
# stuck in it indefinitely... (I don't like this solution.)
while self._keepGoing:
try:
r, w, e = select.select([self._sock], [], [], 1.0)
except ValueError:
# Sigh. ValueError gets thrown sometimes when passing select
# a closed socket.
raise EOFError
if r: break
if not self._keepGoing:
return
rec = Record()
rec.read(self._sock)
if rec.type == FCGI_GET_VALUES:
self._do_get_values(rec)
elif rec.type == FCGI_BEGIN_REQUEST:
self._do_begin_request(rec)
elif rec.type == FCGI_ABORT_REQUEST:
self._do_abort_request(rec)
elif rec.type == FCGI_PARAMS:
self._do_params(rec)
elif rec.type == FCGI_STDIN:
self._do_stdin(rec)
elif rec.type == FCGI_DATA:
self._do_data(rec)
elif rec.requestId == FCGI_NULL_REQUEST_ID:
self._do_unknown_type(rec)
else:
# Need to complain about this.
pass
def writeRecord(self, rec):
"""
Write a Record to the socket.
"""
rec.write(self._sock)
def end_request(self, req, appStatus=0L,
protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
"""
End a Request.
Called by Request objects. An FCGI_END_REQUEST Record is
sent to the web server. If the web server no longer requires
the connection, the socket is closed, thereby ending this
Connection (run() returns).
"""
rec = Record(FCGI_END_REQUEST, req.requestId)
rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
protocolStatus)
rec.contentLength = FCGI_EndRequestBody_LEN
self.writeRecord(rec)
if remove:
del self._requests[req.requestId]
if __debug__: _debug(2, 'end_request: flags = %d' % req.flags)
if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
self._cleanupSocket()
self._keepGoing = False
def _do_get_values(self, inrec):
"""Handle an FCGI_GET_VALUES request from the web server."""
outrec = Record(FCGI_GET_VALUES_RESULT)
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
cap = self.server.capability.get(name)
if cap is not None:
outrec.contentData += encode_pair(name, str(cap))
outrec.contentLength = len(outrec.contentData)
self.writeRecord(outrec)
def _do_begin_request(self, inrec):
"""Handle an FCGI_BEGIN_REQUEST from the web server."""
role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)
req = self.server.request_class(self, self._inputStreamClass)
req.requestId, req.role, req.flags = inrec.requestId, role, flags
req.aborted = False
if not self._multiplexed and self._requests:
# Can't multiplex requests.
self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
else:
self._requests[inrec.requestId] = req
def _do_abort_request(self, inrec):
"""
Handle an FCGI_ABORT_REQUEST from the web server.
We just mark a flag in the associated Request.
"""
req = self._requests.get(inrec.requestId)
if req is not None:
req.aborted = True
def _start_request(self, req):
"""Run the request."""
# Not multiplexed, so run it inline.
req.run()
def _do_params(self, inrec):
"""
Handle an FCGI_PARAMS Record.
If the last FCGI_PARAMS Record is received, start the request.
"""
req = self._requests.get(inrec.requestId)
if req is not None:
if inrec.contentLength:
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
req.params[name] = value
else:
self._start_request(req)
def _do_stdin(self, inrec):
"""Handle the FCGI_STDIN stream."""
req = self._requests.get(inrec.requestId)
if req is not None:
req.stdin.add_data(inrec.contentData)
def _do_data(self, inrec):
"""Handle the FCGI_DATA stream."""
req = self._requests.get(inrec.requestId)
if req is not None:
req.data.add_data(inrec.contentData)
def _do_unknown_type(self, inrec):
"""Handle an unknown request type. Respond accordingly."""
outrec = Record(FCGI_UNKNOWN_TYPE)
outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
outrec.contentLength = FCGI_UnknownTypeBody_LEN
self.writeRecord(rec)
class MultiplexedConnection(Connection):
    """
    A version of Connection capable of handling multiple requests
    simultaneously.

    Each request runs in its own thread; all Record writes and all
    mutation of the request table are serialized through self._lock.
    """
    _multiplexed = True
    _inputStreamClass = MultiplexedInputStream

    def __init__(self, sock, addr, server):
        super(MultiplexedConnection, self).__init__(sock, addr, server)

        # Used to arbitrate access to self._requests.
        lock = threading.RLock()

        # Notification is posted everytime a request completes, allowing us
        # to quit cleanly.
        self._lock = threading.Condition(lock)

    def _cleanupSocket(self):
        # Wait for any outstanding requests before closing the socket.
        self._lock.acquire()
        while self._requests:
            self._lock.wait()
        self._lock.release()

        super(MultiplexedConnection, self)._cleanupSocket()

    def writeRecord(self, rec):
        # Must use locking to prevent intermingling of Records from different
        # threads.
        self._lock.acquire()
        try:
            # Probably faster than calling super. ;)
            rec.write(self._sock)
        finally:
            self._lock.release()

    def end_request(self, req, appStatus=0L,
                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self).end_request(req, appStatus,
                                                           protocolStatus,
                                                           remove)
            # Wake _cleanupSocket(), which may be waiting for all
            # outstanding requests to complete.
            self._lock.notify()
        finally:
            self._lock.release()

    def _do_begin_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_begin_request(inrec)
        finally:
            self._lock.release()

    def _do_abort_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_abort_request(inrec)
        finally:
            self._lock.release()

    def _start_request(self, req):
        # Multiplexed: each request runs in its own thread.
        thread.start_new_thread(req.run, ())

    def _do_params(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_params(inrec)
        finally:
            self._lock.release()

    def _do_stdin(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_stdin(inrec)
        finally:
            self._lock.release()

    def _do_data(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_data(inrec)
        finally:
            self._lock.release()
class Server(object):
    """
    The FastCGI server.

    Waits for connections from the web server, processing each
    request.

    If run in a normal CGI context, it will instead instantiate a
    CGIRequest and run the handler through there.
    """
    request_class = Request
    cgirequest_class = CGIRequest

    # Limits the size of the InputStream's string buffer to this size + the
    # server's maximum Record size. Since the InputStream is not seekable,
    # we throw away already-read data once this certain amount has been read.
    inputStreamShrinkThreshold = 102400 - 8192

    def __init__(self, handler=None, maxwrite=8192, bindAddress=None,
                 umask=None, multiplexed=False):
        """
        handler, if present, must reference a function or method that
        takes one argument: a Request object. If handler is not
        specified at creation time, Server *must* be subclassed.
        (The handler method below is abstract.)

        maxwrite is the maximum number of bytes (per Record) to write
        to the server. I've noticed mod_fastcgi has a relatively small
        receive buffer (8K or so).

        bindAddress, if present, must either be a string or a 2-tuple. If
        present, run() will open its own listening socket. You would use
        this if you wanted to run your application as an 'external' FastCGI
        app. (i.e. the webserver would no longer be responsible for starting
        your app) If a string, it will be interpreted as a filename and a UNIX
        socket will be opened. If a tuple, the first element, a string,
        is the interface name/IP to bind to, and the second element (an int)
        is the port number.

        Set multiplexed to True if you want to handle multiple requests
        per connection. Some FastCGI backends (namely mod_fastcgi) don't
        multiplex requests at all, so by default this is off (which saves
        on thread creation/locking overhead). If threads aren't available,
        this keyword is ignored; it's not possible to multiplex requests
        at all.
        """
        if handler is not None:
            self.handler = handler
        self.maxwrite = maxwrite
        if thread_available:
            try:
                import resource
                # Attempt to glean the maximum number of connections
                # from the OS.
                maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
            except ImportError:
                maxConns = 100 # Just some made up number.
            maxReqs = maxConns
            if multiplexed:
                self._connectionClass = MultiplexedConnection
                maxReqs *= 5 # Another made up number.
            else:
                self._connectionClass = Connection
            # Values advertised in response to FCGI_GET_VALUES.
            self.capability = {
                FCGI_MAX_CONNS: maxConns,
                FCGI_MAX_REQS: maxReqs,
                FCGI_MPXS_CONNS: multiplexed and 1 or 0
                }
        else:
            self._connectionClass = Connection
            self.capability = {
                # If threads aren't available, these are pretty much correct.
                FCGI_MAX_CONNS: 1,
                FCGI_MAX_REQS: 1,
                FCGI_MPXS_CONNS: 0
                }
        self._bindAddress = bindAddress
        self._umask = umask

    def _setupSocket(self):
        """Return the listening socket; or run as plain CGI and exit."""
        if self._bindAddress is None: # Run as a normal FastCGI?
            isFCGI = True

            sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
                                 socket.SOCK_STREAM)
            try:
                sock.getpeername()
            except socket.error, e:
                if e[0] == errno.ENOTSOCK:
                    # Not a socket, assume CGI context.
                    isFCGI = False
                elif e[0] != errno.ENOTCONN:
                    raise

            # FastCGI/CGI discrimination is broken on Mac OS X.
            # Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
            # if you want to run your app as a simple CGI. (You can do
            # this with Apache's mod_env [not loaded by default in OS X
            # client, ha ha] and the SetEnv directive.)
            if not isFCGI or \
               os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
                req = self.cgirequest_class(self)
                req.run()
                sys.exit(0)
        else:
            # Run as a server
            oldUmask = None
            if type(self._bindAddress) is str:
                # Unix socket
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                try:
                    os.unlink(self._bindAddress)
                except OSError:
                    pass
                if self._umask is not None:
                    oldUmask = os.umask(self._umask)
            else:
                # INET socket
                assert type(self._bindAddress) is tuple
                assert len(self._bindAddress) == 2
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(self._bindAddress)
            sock.listen(socket.SOMAXCONN)
            if oldUmask is not None:
                os.umask(oldUmask)

        return sock

    def _cleanupSocket(self, sock):
        """Closes the main socket."""
        sock.close()

    def _installSignalHandlers(self):
        # Remember the old handlers so they can be restored on exit.
        self._oldSIGs = [(x,signal.getsignal(x)) for x in
                         (signal.SIGHUP, signal.SIGINT, signal.SIGTERM)]
        signal.signal(signal.SIGHUP, self._hupHandler)
        signal.signal(signal.SIGINT, self._intHandler)
        signal.signal(signal.SIGTERM, self._intHandler)

    def _restoreSignalHandlers(self):
        for signum,handler in self._oldSIGs:
            signal.signal(signum, handler)

    def _hupHandler(self, signum, frame):
        # SIGHUP: stop the main loop and report a reload request.
        self._hupReceived = True
        self._keepGoing = False

    def _intHandler(self, signum, frame):
        # SIGINT/SIGTERM: stop the main loop.
        self._keepGoing = False

    def run(self, timeout=1.0):
        """
        The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if
        SIGHUP was received, False otherwise.
        """
        # Optional comma-separated allow-list of web server addresses.
        web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
        if web_server_addrs is not None:
            web_server_addrs = map(lambda x: x.strip(),
                                   web_server_addrs.split(','))

        sock = self._setupSocket()

        self._keepGoing = True
        self._hupReceived = False

        # Install signal handlers.
        self._installSignalHandlers()

        while self._keepGoing:
            try:
                r, w, e = select.select([sock], [], [], timeout)
            except select.error, e:
                if e[0] == errno.EINTR:
                    continue
                raise

            if r:
                try:
                    clientSock, addr = sock.accept()
                except socket.error, e:
                    if e[0] in (errno.EINTR, errno.EAGAIN):
                        continue
                    raise

                if web_server_addrs and \
                   (len(addr) != 2 or addr[0] not in web_server_addrs):
                    # Peer is not in the allow-list; reject the connection.
                    clientSock.close()
                    continue

                # Instantiate a new Connection and begin processing FastCGI
                # messages (either in a new thread or this thread).
                conn = self._connectionClass(clientSock, addr, self)
                thread.start_new_thread(conn.run, ())

            self._mainloopPeriodic()

        # Restore signal handlers.
        self._restoreSignalHandlers()

        self._cleanupSocket(sock)

        return self._hupReceived

    def _mainloopPeriodic(self):
        """
        Called with just about each iteration of the main loop. Meant to
        be overridden.
        """
        pass

    def _exit(self, reload=False):
        """
        Protected convenience method for subclasses to force an exit. Not
        really thread-safe, which is why it isn't public.
        """
        if self._keepGoing:
            self._keepGoing = False
            self._hupReceived = reload

    def handler(self, req):
        """
        Default handler, which just raises an exception. Unless a handler
        is passed at initialization time, this must be implemented by
        a subclass.
        """
        raise NotImplementedError, self.__class__.__name__ + '.handler'

    def error(self, req):
        """
        Called by Request if an exception occurs within the handler. May and
        should be overridden.
        """
        import cgitb
        req.stdout.write('Content-Type: text/html\r\n\r\n' +
                         cgitb.html(sys.exc_info()))
class WSGIServer(Server):
    """
    FastCGI server that supports the Web Server Gateway Interface. See
    <http://www.python.org/peps/pep-0333.html>.
    """
    def __init__(self, application, environ=None,
                 multithreaded=True, **kw):
        """
        environ, if present, must be a dictionary-like object. Its
        contents will be copied into application's environ. Useful
        for passing application-specific variables.

        Set multithreaded to False if your application is not MT-safe.
        """
        if kw.has_key('handler'):
            del kw['handler'] # Doesn't make sense to let this through
        super(WSGIServer, self).__init__(**kw)

        if environ is None:
            environ = {}

        self.application = application
        self.environ = environ
        self.multithreaded = multithreaded

        # Used to force single-threadedness
        self._app_lock = thread.allocate_lock()

    def handler(self, req):
        """Special handler for WSGI.

        Builds the WSGI environ from the request params, runs the
        application, and streams its output back through req.stdout.
        """
        if req.role != FCGI_RESPONDER:
            return FCGI_UNKNOWN_ROLE, 0

        # Mostly taken from example CGI gateway.
        environ = req.params
        environ.update(self.environ)

        environ['wsgi.version'] = (1,0)
        environ['wsgi.input'] = req.stdin
        if self._bindAddress is None:
            stderr = req.stderr
        else:
            # External server: copy application errors to our own stderr too.
            stderr = TeeOutputStream((sys.stderr, req.stderr))
        environ['wsgi.errors'] = stderr
        environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \
                                      thread_available and self.multithreaded
        # Rationale for the following: If started by the web server
        # (self._bindAddress is None) in either FastCGI or CGI mode, the
        # possibility of being spawned multiple times simultaneously is quite
        # real. And, if started as an external server, multiple copies may be
        # spawned for load-balancing/redundancy. (Though I don't think
        # mod_fastcgi supports this?)
        environ['wsgi.multiprocess'] = True
        environ['wsgi.run_once'] = isinstance(req, CGIRequest)

        if environ.get('HTTPS', 'off') in ('on', '1'):
            environ['wsgi.url_scheme'] = 'https'
        else:
            environ['wsgi.url_scheme'] = 'http'

        self._sanitizeEnv(environ)

        headers_set = []
        headers_sent = []
        result = None

        def write(data):
            assert type(data) is str, 'write() argument must be string'
            assert headers_set, 'write() before start_response()'

            if not headers_sent:
                status, responseHeaders = headers_sent[:] = headers_set
                found = False
                for header,value in responseHeaders:
                    if header.lower() == 'content-length':
                        found = True
                        break
                if not found and result is not None:
                    try:
                        # If the iterable yields exactly one chunk, its size
                        # is the entire Content-Length.
                        if len(result) == 1:
                            responseHeaders.append(('Content-Length',
                                                    str(len(data))))
                    except:
                        pass
                s = 'Status: %s\r\n' % status
                for header in responseHeaders:
                    s += '%s: %s\r\n' % header
                s += '\r\n'
                req.stdout.write(s)

            req.stdout.write(data)
            req.stdout.flush()

        def start_response(status, response_headers, exc_info=None):
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise if too late
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    exc_info = None # avoid dangling circular ref
            else:
                assert not headers_set, 'Headers already set!'

            assert type(status) is str, 'Status must be a string'
            assert len(status) >= 4, 'Status must be at least 4 characters'
            assert int(status[:3]), 'Status must begin with 3-digit code'
            assert status[3] == ' ', 'Status must have a space after code'
            assert type(response_headers) is list, 'Headers must be a list'
            if __debug__:
                for name,val in response_headers:
                    assert type(name) is str, 'Header names must be strings'
                    assert type(val) is str, 'Header values must be strings'

            headers_set[:] = [status, response_headers]
            return write

        if not self.multithreaded:
            self._app_lock.acquire()
        try:
            try:
                result = self.application(environ, start_response)
                try:
                    for data in result:
                        if data:
                            write(data)
                    if not headers_sent:
                        write('') # in case body was empty
                finally:
                    if hasattr(result, 'close'):
                        result.close()
            except socket.error, e:
                if e[0] != errno.EPIPE:
                    raise # Don't let EPIPE propagate beyond server
        finally:
            if not self.multithreaded:
                self._app_lock.release()

        return FCGI_REQUEST_COMPLETE, 0

    def _sanitizeEnv(self, environ):
        """Ensure certain values are present, if required by WSGI."""
        if not environ.has_key('SCRIPT_NAME'):
            environ['SCRIPT_NAME'] = ''
        if not environ.has_key('PATH_INFO'):
            environ['PATH_INFO'] = ''

        # If any of these are missing, it probably signifies a broken
        # server...
        for name,default in [('REQUEST_METHOD', 'GET'),
                             ('SERVER_NAME', 'localhost'),
                             ('SERVER_PORT', '80'),
                             ('SERVER_PROTOCOL', 'HTTP/1.0')]:
            if not environ.has_key(name):
                environ['wsgi.errors'].write('%s: missing FastCGI param %s '
                                             'required by WSGI!\n' %
                                             (self.__class__.__name__, name))
                environ[name] = default
if __name__ == '__main__':
    def test_app(environ, start_response):
        """Probably not the most efficient example."""
        import cgi
        start_response('200 OK', [('Content-Type', 'text/html')])
        yield '<html><head><title>Hello World!</title></head>\n' \
              '<body>\n' \
              '<p>Hello World!</p>\n' \
              '<table border="1">'
        # Dump the WSGI environ, sorted by key.
        names = environ.keys()
        names.sort()
        for name in names:
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                name, cgi.escape(`environ[name]`))

        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
                                keep_blank_values=1)
        if form.list:
            # Echo any submitted form fields.
            yield '<tr><th colspan="2">Form data</th></tr>'

        for field in form.list:
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                field.name, field.value)

        yield '</table>\n' \
              '</body></html>\n'

    WSGIServer(test_app).run()
| Python |
#!/usr/bin/env python
# coding: utf8
"""
RPX Authentication for web2py
Developed by Nathan Freeze (Copyright © 2009)
Email <nathan@freezable.com>
Modified by Massimo Di Pierro
This file contains code to allow using RPXNow.com (now Jainrain.com)
services with web2py
"""
import os
import re
import urllib
from gluon import *
from gluon.tools import fetch
from gluon.storage import Storage
import gluon.contrib.simplejson as json
class RPXAccount(object):
    """
    Janrain/RPXNow third-party login form for web2py's Auth.

    Usage (in a model file)::

        from gluon.contrib.login_methods.rpx_account import RPXAccount
        auth.settings.actions_disabled=['register','change_password','request_reset_password']
        auth.settings.login_form = RPXAccount(request,
                  api_key="...",
                  domain="...",
                  url = "http://localhost:8000/%s/default/user/login" % request.application)
    """
    def __init__(self,
                 request,
                 api_key = "",
                 domain = "",
                 url = "",
                 embed = True,
                 auth_url = "https://rpxnow.com/api/v2/auth_info",
                 language= "en",
                 prompt='rpx',
                 on_login_failure = None,
                 ):
        # api_key/domain come from the Janrain account; url is the token
        # callback that Janrain redirects back to after authentication.
        self.request=request
        self.api_key=api_key
        self.embed = embed
        self.auth_url = auth_url
        self.domain = domain
        self.token_url = url
        self.language = language
        self.profile = None
        self.prompt = prompt
        self.on_login_failure = on_login_failure
        # Per-provider profile-to-auth_user field mappings; keyed by the
        # (sanitized) providerName returned by Janrain.
        self.mappings = Storage()
        # dn: safe fallback for profiles that lack a structured "name" dict
        dn = {'givenName':'','familyName':''}
        self.mappings.Facebook = lambda profile, dn=dn:\
            dict(registration_id = profile.get("identifier",""),
                 username = profile.get("preferredUsername",""),
                 email = profile.get("email",""),
                 first_name = profile.get("name",dn).get("givenName",""),
                 last_name = profile.get("name",dn).get("familyName",""))
        self.mappings.Google = lambda profile, dn=dn:\
            dict(registration_id=profile.get("identifier",""),
                 username=profile.get("preferredUsername",""),
                 email=profile.get("email",""),
                 first_name=profile.get("name",dn).get("givenName",""),
                 last_name=profile.get("name",dn).get("familyName",""))
        # Fallback mapping for providers without a dedicated entry above.
        self.mappings.default = lambda profile:\
            dict(registration_id=profile.get("identifier",""),
                 username=profile.get("preferredUsername",""),
                 email=profile.get("email",""),
                 first_name=profile.get("preferredUsername",""),
                 last_name='')
    def get_user(self):
        """Exchange the Janrain token for an auth_info profile.

        Returns a dict of auth_user fields on success, None otherwise
        (or redirects to on_login_failure when configured).
        """
        request = self.request
        if request.vars.token:
            user = Storage()
            data = urllib.urlencode(dict(apiKey = self.api_key, token=request.vars.token))
            auth_info_json = fetch(self.auth_url+'?'+data)
            auth_info = json.loads(auth_info_json)
            if auth_info['stat'] == 'ok':
                self.profile = auth_info['profile']
                # strip anything but word chars and '-' so the provider
                # name can be used as a Storage attribute key
                provider = re.sub('[^\w\-]','',self.profile['providerName'])
                user = self.mappings.get(provider,self.mappings.default)(self.profile)
                return user
            elif self.on_login_failure:
                redirect(self.on_login_failure)
        return None
    def login_form(self):
        """Return the Janrain sign-in widget (IFRAME or overlay script)."""
        request = self.request
        args = request.args
        if self.embed:
            JANRAIN_URL = \
                "https://%s.rpxnow.com/openid/embed?token_url=%s&language_preference=%s"
            rpxform = IFRAME(_src=JANRAIN_URL % (self.domain,self.token_url,self.language),
                             _scrolling="no",
                             _frameborder="no",
                             _style="width:400px;height:240px;")
        else:
            JANRAIN_URL = \
                "https://%s.rpxnow.com/openid/v2/signin?token_url=%s"
            rpxform = DIV(SCRIPT(_src="https://rpxnow.com/openid/v2/widget",
                                 _type="text/javascript"),
                          SCRIPT("RPXNOW.overlay = true;",
                                 "RPXNOW.language_preference = '%s';" % self.language,
                                 "RPXNOW.realm = '%s';" % self.domain,
                                 "RPXNOW.token_url = '%s';" % self.token_url,
                                 "RPXNOW.show();",
                                 _type="text/javascript"))
        return rpxform
def use_janrain(auth, filename='private/janrain.key', **kwargs):
    """
    Configure ``auth`` to log in through Janrain/RPXNow.

    Reads ``<app>/private/janrain.key`` (one line, ``domain:api_key``)
    and, if the file exists, disables local registration/password
    actions and installs an :class:`RPXAccount` login form.  Extra
    keyword arguments are forwarded to :class:`RPXAccount`.  Does
    nothing when the key file is missing.
    """
    path = os.path.join(current.request.folder, filename)
    if os.path.exists(path):
        request = current.request
        # Close the key file deterministically; the original left the
        # open file handle to the garbage collector.
        with open(path, 'r') as keyfile:
            domain, key = keyfile.read().strip().split(':')
        host = request.env.http_host
        url = "http://%s/%s/default/user/login" % (host, request.application)
        auth.settings.actions_disabled = \
            ['register', 'change_password', 'request_reset_password']
        auth.settings.login_form = RPXAccount(
            request, api_key=key, domain=domain, url=url, **kwargs)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
License: GPL v2
Thanks to Hans Donner <hans.donner@pobox.com> for GaeGoogleAccount.
"""
from google.appengine.api import users
class GaeGoogleAccount(object):
    """
    Delegate web2py login to Google App Engine's Users API instead of
    web2py's own login form.

    Include in your model (eg db.py)::

        from gluon.contrib.login_methods.gae_google_account import \
            GaeGoogleAccount
        auth.settings.login_form=GaeGoogleAccount()
    """

    def login_url(self, next="/"):
        # GAE builds the Google sign-in URL; ``next`` is where the user
        # lands after signing in.
        return users.create_login_url(next)

    def logout_url(self, next="/"):
        # Symmetric sign-out URL.
        return users.create_logout_url(next)

    def get_user(self):
        # Return a user-info dict for the signed-in Google user, or
        # None (implicit in the original, explicit here) when nobody
        # is signed in.
        current_user = users.get_current_user()
        if not current_user:
            return None
        return dict(nickname=current_user.nickname(),
                    email=current_user.email(),
                    user_id=current_user.user_id(),
                    source="google account")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
License: GPL v2
Thanks to Hans Donner <hans.donner@pobox.com> for GaeGoogleAccount.
"""
from gluon.http import HTTP
try:
import linkedin
except ImportError:
raise HTTP(400,"linkedin module not found")
class LinkedInAccount(object):
    """
    Delegate web2py login to LinkedIn via the ``linkedin`` module.

    Include in your model (eg db.py)::

        from gluon.contrib.login_methods.linkedin_account import LinkedInAccount
        auth.settings.login_form=LinkedInAccount(request,KEY,SECRET,RETURN_URL)
    """

    def __init__(self, request, key, secret, return_url):
        self.request = request
        self.api = linkedin.LinkedIn(key, secret, return_url)
        # Request token obtained eagerly; used later to build the
        # authorize URL.  (Dropped the original's unused ``result``
        # alias in the chained assignment.)
        self.token = self.api.requestToken()

    def login_url(self, next="/"):
        return self.api.getAuthorizeURL(self.token)

    def logout_url(self, next="/"):
        # LinkedIn offers no logout endpoint to redirect to.
        return ''

    def get_user(self):
        """Return a user dict after the OAuth verifier round trip, else None."""
        # Exchange the OAuth verifier (if present) for an access token.
        result = self.request.vars.verifier and \
            self.api.accessToken(verifier=self.request.vars.verifier)
        if result:
            profile = self.api.GetProfile()
            # BUG FIX: the original then executed
            #   profile = self.api.GetProfile(profile).public_url = "http://www.linkedin.com/in/ozgurv"
            # a leftover debug line whose chained assignment rebound
            # ``profile`` to a hard-coded URL string, so the attribute
            # accesses below always failed.  Removed.
            return dict(first_name=profile.first_name,
                        last_name=profile.last_name,
                        username=profile.id)
| Python |
#!/usr/bin/env python
# coding: utf8
"""
Dropbox Authentication for web2py
Developed by Massimo Di Pierro (2011)
Same License as Web2py License
"""
# mind here session is dropbox session, not current.session
import os
import re
import urllib
from dropbox import client, rest, session
from gluon import *
from gluon.tools import fetch
from gluon.storage import Storage
import gluon.contrib.simplejson as json
class DropboxAccount(object):
    """
    Dropbox OAuth login form for web2py's Auth.

    Usage::

        from gluon.contrib.login_methods.dropbox_account import DropboxAccount
        auth.settings.actions_disabled=['register','change_password','request_reset_password']
        auth.settings.login_form = DropboxAccount(request,
                  key="...",
                  secret="...",
                  access_type="...",
                  url = "http://localhost:8000/%s/default/user/login" % request.application)

    when logged in::

        client = auth.settings.login_form.client
    """

    def __init__(self,
                 request,
                 key="",
                 secret="",
                 access_type="app_folder",
                 login_url="",
                 on_login_failure=None,
                 ):
        # NOTE: ``session`` below is the Dropbox SDK session module,
        # not web2py's current.session.
        self.request = request
        self.key = key
        self.secret = secret
        self.access_type = access_type
        self.login_url = login_url
        self.on_login_failure = on_login_failure
        self.sess = session.DropboxSession(
            self.key, self.secret, self.access_type)

    def get_user(self):
        """Return a user dict after a successful OAuth handshake, else None."""
        request = self.request
        token = current.session.dropbox_token
        try:
            access_token = self.sess.obtain_access_token(token)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt.  Any SDK error still just
            # means "not authenticated yet".
            access_token = None
        if access_token:
            self.client = client.DropboxClient(self.sess)
            data = self.client.account_info()
            display_name = data.get('display_name', '').split(' ', 1)
            user = dict(email=data.get('email', None),
                        first_name=display_name[0],
                        last_name=display_name[-1],
                        registration_id=data.get('uid', None))
            if not user['registration_id'] and self.on_login_failure:
                redirect(self.on_login_failure)
            return user
        return None

    def login_form(self):
        """Send the browser to Dropbox's authorize page."""
        token = self.sess.obtain_request_token()
        current.session.dropbox_token = token
        dropbox_url = self.sess.build_authorize_url(token, self.login_url)
        # redirect() raises HTTP, so nothing after it ever executes;
        # the original's unreachable IFRAME-building code was removed.
        redirect(dropbox_url)

    def logout_url(self, next="/"):
        current.session.dropbox_token = None
        current.session.auth = None
        redirect('https://www.dropbox.com/logout')
        return next

    def put(self, filename, file):
        """Upload ``file`` under ``filename``; return the stored byte count."""
        return json.loads(self.client.put_file(filename, file))['bytes']

    def get(self, filename, file):
        # NOTE(review): the ``file`` argument is unused; kept for
        # signature compatibility with existing callers.
        return self.client.get_file(filename)

    def dir(self, path):
        """Return parsed Dropbox metadata for ``path``."""
        return json.loads(self.client.metadata(path))
def use_dropbox(auth, filename='private/dropbox.key', **kwargs):
    """
    Configure ``auth`` to log in through Dropbox.

    Reads ``<app>/private/dropbox.key`` (one line,
    ``key:secret:access_type``) and, if the file exists, disables local
    registration/password actions and installs a
    :class:`DropboxAccount` login form.  Extra keyword arguments are
    forwarded to :class:`DropboxAccount`.  Does nothing when the key
    file is missing.
    """
    path = os.path.join(current.request.folder, filename)
    if os.path.exists(path):
        request = current.request
        # Close the key file deterministically; the original left the
        # open file handle to the garbage collector.
        with open(path, 'r') as keyfile:
            key, secret, access_type = keyfile.read().strip().split(':')
        host = current.request.env.http_host
        login_url = "http://%s/%s/default/user/login" % \
            (host, request.application)
        auth.settings.actions_disabled = \
            ['register', 'change_password', 'request_reset_password']
        auth.settings.login_form = DropboxAccount(
            request, key=key, secret=secret, access_type=access_type,
            login_url=login_url, **kwargs)
| Python |
# -*- coding: utf-8 -*-
#
# last tinkered with by szimszon at oregpreshaz.eu on 2012-03-19
#
import sys
import logging
try:
import ldap
import ldap.filter
ldap.set_option( ldap.OPT_REFERRALS, 0 )
except Exception, e:
logging.error( 'missing ldap, try "easy_install python-ldap"' )
raise e
def ldap_auth( server = 'ldap', port = None,
base_dn = 'ou=users,dc=domain,dc=com',
mode = 'uid', secure = False, cert_path = None, cert_file = None,
bind_dn = None, bind_pw = None, filterstr = 'objectClass=*',
username_attrib = 'uid',
custom_scope = 'subtree',
allowed_groups = None,
manage_user = False,
user_firstname_attrib = 'cn:1',
user_lastname_attrib = 'cn:2',
user_mail_attrib = 'mail',
manage_groups = False,
db = None,
group_dn = None,
group_name_attrib = 'cn',
group_member_attrib = 'memberUid',
group_filterstr = 'objectClass=*',
logging_level = 'error' ):
"""
to use ldap login with MS Active Directory:
from gluon.contrib.login_methods.ldap_auth import ldap_auth
auth.settings.login_methods.append(ldap_auth(
mode='ad', server='my.domain.controller',
base_dn='ou=Users,dc=domain,dc=com'))
to use ldap login with Notes Domino:
auth.settings.login_methods.append(ldap_auth(
mode='domino',server='my.domino.server'))
to use ldap login with OpenLDAP:
auth.settings.login_methods.append(ldap_auth(
server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com'))
to use ldap login with OpenLDAP and subtree search and (optionally) multiple DNs:
auth.settings.login_methods.append(ldap_auth(
mode='uid_r', server='my.ldap.server',
base_dn=['ou=Users,dc=domain,dc=com','ou=Staff,dc=domain,dc=com']))
or (if using CN):
auth.settings.login_methods.append(ldap_auth(
mode='cn', server='my.ldap.server',
base_dn='ou=Users,dc=domain,dc=com'))
or you can full customize the search for user:
auth.settings.login_methods.append(ldap_auth(
mode='custom', server='my.ldap.server',
base_dn='ou=Users,dc=domain,dc=com',
username_attrib='uid',
custom_scope='subtree'))
the custom_scope can be: base, onelevel, subtree.
If using secure ldaps:// pass secure=True and cert_path="..."
If ldap is using GnuTLS then you need cert_file="..." instead cert_path because
cert_path isn't implemented in GnuTLS :(
If you need to bind to the directory with an admin account in order to search it then specify bind_dn & bind_pw to use for this.
- currently only implemented for Active Directory
If you need to restrict the set of allowed users (e.g. to members of a department) then specify
a rfc4515 search filter string.
- currently only implemented for mode in ['ad', 'company', 'uid_r']
You can manage user attribute first name, last name, email from ldap:
auth.settings.login_methods.append(ldap_auth(...as usual...,
manage_user = True,
user_firstname_attrib = 'cn:1',
user_lastname_attrib = 'cn:2',
user_mail_attrib = 'mail'
))
Where:
manage_user - let web2py handle user data from ldap
user_firstname_attrib - the attribute containing the user's first name
optionally you can specify parts.
Example: cn: "John Smith" - 'cn:1' = 'John'
user_lastname_attrib - the attribute containing the user's last name
optionally you can specify parts.
Example: cn: "John Smith" - 'cn:2' = 'Smith'
user_mail_attrib - the attribure containing the user's email address
If you need group control from ldap to web2py app's database feel free to set:
auth.settings.login_methods.append(ldap_auth(...as usual...,
manage_groups = True,
db = db,
group_dn = 'ou=Groups,dc=domain,dc=com',
group_name_attrib = 'cn',
group_member_attrib = 'memberUid',
group_filterstr = 'objectClass=*'
))
Where:
manage_group - let web2py handle the groups from ldap
db - is the database object (need to have auth_user, auth_group, auth_membership)
group_dn - the ldap branch of the groups
group_name_attrib - the attribute where the group name is stored
group_member_attrib - the attribute containing the group members name
group_filterstr - as the filterstr but for group select
You can restrict login access to specific groups if you specify:
auth.settings.login_methods.append(ldap_auth(...as usual...,
allowed_groups = [...],
group_dn = 'ou=Groups,dc=domain,dc=com',
group_name_attrib = 'cn',
group_member_attrib = 'memberUid',
group_filterstr = 'objectClass=*'
))
Where:
allowed_groups - a list with allowed ldap group names
group_dn - the ldap branch of the groups
group_name_attrib - the attribute where the group name is stored
group_member_attrib - the attibute containing the group members name
group_filterstr - as the filterstr but for group select
You can set the logging level with the "logging_level" parameter, default
is "error" and can be set to error, warning, info, debug.
"""
logger = logging.getLogger( 'web2py.auth.ldap_auth' )
if logging_level == 'error':
logger.setLevel( logging.ERROR )
elif logging_level == 'warning':
logger.setLevel( logging.WARNING )
elif logging_level == 'info':
logger.setLevel( logging.INFO )
elif logging_level == 'debug':
logger.setLevel( logging.DEBUG )
def ldap_auth_aux( username,
                   password,
                   ldap_server = server,
                   ldap_port = port,
                   ldap_basedn = base_dn,
                   ldap_mode = mode,
                   ldap_binddn = bind_dn,
                   ldap_bindpw = bind_pw,
                   secure = secure,
                   cert_path = cert_path,
                   cert_file = cert_file,
                   filterstr = filterstr,
                   username_attrib = username_attrib,
                   custom_scope = custom_scope,
                   manage_user = manage_user,
                   user_firstname_attrib = user_firstname_attrib,
                   user_lastname_attrib = user_lastname_attrib,
                   user_mail_attrib = user_mail_attrib,
                   manage_groups = manage_groups,
                   allowed_groups = allowed_groups,
                   db = db ):
    """
    Authenticate ``username``/``password`` against LDAP.

    This is the closure returned by ldap_auth(); all configuration is
    captured via default-argument values.  Returns True on successful
    authentication (and optional user/group sync), False otherwise.
    The behavior per ``ldap_mode`` ('ad', 'domino', 'cn', 'uid',
    'company', 'uid_r', 'custom') is described inline below.
    """
    logger.debug( 'mode: [%s] manage_user: [%s] custom_scope: [%s] manage_groups: [%s]' % (
        str( mode ), str( manage_user ), str( custom_scope ), str( manage_groups ) ) )
    if manage_user:
        # "attrib:N" syntax picks the Nth space-separated token of the
        # attribute value (1-based in config, 0-based internally).
        if user_firstname_attrib.count( ':' ) > 0:
            ( user_firstname_attrib, user_firstname_part ) = user_firstname_attrib.split( ':', 1 )
            user_firstname_part = ( int( user_firstname_part ) - 1 )
        else:
            user_firstname_part = None
        if user_lastname_attrib.count( ':' ) > 0:
            ( user_lastname_attrib, user_lastname_part ) = user_lastname_attrib.split( ':', 1 )
            user_lastname_part = ( int( user_lastname_part ) - 1 )
        else:
            user_lastname_part = None
        user_firstname_attrib = ldap.filter.escape_filter_chars( user_firstname_attrib )
        user_lastname_attrib = ldap.filter.escape_filter_chars( user_lastname_attrib )
        user_mail_attrib = ldap.filter.escape_filter_chars( user_mail_attrib )
    try:
        con = init_ldap()
        if allowed_groups:
            if not is_user_in_allowed_groups( username ):
                return False
        if ldap_mode == 'ad':
            # Microsoft Active Directory
            if '@' not in username:
                domain = []
                for x in ldap_basedn.split( ',' ):
                    if "DC=" in x.upper():
                        domain.append( x.split( '=' )[-1] )
                username = "%s@%s" % ( username, '.'.join( domain ) )
            username_bare = username.split( "@" )[0]
            con.set_option( ldap.OPT_PROTOCOL_VERSION, 3 )
            # In cases where ForestDnsZones and DomainDnsZones are found,
            # result will look like the following:
            # ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones,DC=domain,DC=com']
            if ldap_binddn:
                # need to search directory with an admin account 1st
                con.simple_bind_s( ldap_binddn, ldap_bindpw )
            else:
                # credentials should be in the form of username@domain.tld
                con.simple_bind_s( username, password )
            # this will throw an index error if the account is not found
            # in the ldap_basedn
            requested_attrs = ['sAMAccountName']
            if manage_user:
                requested_attrs.extend( [user_firstname_attrib,
                                         user_lastname_attrib,
                                         user_mail_attrib] )
            result = con.search_ext_s(
                ldap_basedn, ldap.SCOPE_SUBTREE,
                "(&(sAMAccountName=%s)(%s))" % ( ldap.filter.escape_filter_chars( username_bare ),
                                                 filterstr ),
                requested_attrs )[0][1]
            if not isinstance( result, dict ):
                # result should be a dict in the form {'sAMAccountName': [username_bare]}
                logger.warning( 'User [%s] not found!' % username )
                return False
            if ldap_binddn:
                # We know the user exists & is in the correct OU
                # so now we just check the password
                con.simple_bind_s( username, password )
        if ldap_mode == 'domino':
            # Notes Domino
            if "@" in username:
                username = username.split( "@" )[0]
            con.simple_bind_s( username, password )
            if manage_user:
                # TODO: sorry I have no clue how to query attrs in domino
                result = {user_firstname_attrib: username,
                          user_lastname_attrib: None,
                          user_mail_attrib: None}
        if ldap_mode == 'cn':
            # OpenLDAP (CN)
            dn = "cn=" + username + "," + ldap_basedn
            con.simple_bind_s( dn, password )
            if manage_user:
                result = con.search_s(
                    dn, ldap.SCOPE_BASE,
                    "(objectClass=*)",
                    [user_firstname_attrib,
                     user_lastname_attrib,
                     user_mail_attrib]
                    )[0][1]
        if ldap_mode == 'uid':
            # OpenLDAP (UID)
            dn = "uid=" + username + "," + ldap_basedn
            con.simple_bind_s( dn, password )
            if manage_user:
                result = con.search_s(
                    dn, ldap.SCOPE_BASE,
                    "(objectClass=*)",
                    [user_firstname_attrib,
                     user_lastname_attrib,
                     user_mail_attrib]
                    )[0][1]
        if ldap_mode == 'company':
            # no DNs or password needed to search directory
            dn = ""
            pw = ""
            # bind anonymously
            con.simple_bind_s( dn, pw )
            # search by e-mail address
            filter = '(&(mail=' + ldap.filter.escape_filter_chars( username ) + \
                     ')(' + filterstr + '))'
            # find the uid
            attrs = ['uid']
            if manage_user:
                attrs.extend( [user_firstname_attrib,
                               user_lastname_attrib,
                               user_mail_attrib] )
            # perform the actual search
            company_search_result = con.search_s( ldap_basedn,
                                                  ldap.SCOPE_SUBTREE,
                                                  filter, attrs )
            dn = company_search_result[0][0]
            result = company_search_result[0][1]
            # perform the real authentication test
            con.simple_bind_s( dn, password )
        if ldap_mode == 'uid_r':
            # OpenLDAP (UID) with subtree search and multiple DNs
            if type( ldap_basedn ) == type( [] ):
                basedns = ldap_basedn
            else:
                basedns = [ldap_basedn]
            filter = '(&(uid=%s)(%s))' % ( ldap.filter.escape_filter_chars( username ), filterstr )
            finded = False
            for basedn in basedns:
                try:
                    result = con.search_s( basedn, ldap.SCOPE_SUBTREE, filter )
                    if result:
                        user_dn = result[0][0]
                        # Check the password
                        con.simple_bind_s( user_dn, password )
                        finded = True
                        break
                except ldap.LDAPError, detail:
                    ( exc_type, exc_value ) = sys.exc_info()[:2]
                    logger.warning( "ldap_auth: searching %s for %s resulted in %s: %s\n" %
                                    ( basedn, filter, exc_type, exc_value ) )
            if not finded:
                logger.warning( 'User [%s] not found!' % username )
                return False
            result = result[0][1]
        if ldap_mode == 'custom':
            # OpenLDAP (username_attrs) with subtree search and multiple DNs
            if type( ldap_basedn ) == type( [] ):
                basedns = ldap_basedn
            else:
                basedns = [ldap_basedn]
            filter = '(&(%s=%s)(%s))' % ( username_attrib, ldap.filter.escape_filter_chars( username ), filterstr )
            if custom_scope == 'subtree':
                ldap_scope = ldap.SCOPE_SUBTREE
            elif custom_scope == 'base':
                ldap_scope = ldap.SCOPE_BASE
            elif custom_scope == 'onelevel':
                ldap_scope = ldap.SCOPE_ONELEVEL
            finded = False
            for basedn in basedns:
                try:
                    result = con.search_s( basedn, ldap_scope, filter )
                    if result:
                        user_dn = result[0][0]
                        # Check the password
                        con.simple_bind_s( user_dn, password )
                        finded = True
                        break
                except ldap.LDAPError, detail:
                    ( exc_type, exc_value ) = sys.exc_info()[:2]
                    logger.warning( "ldap_auth: searching %s for %s resulted in %s: %s\n" %
                                    ( basedn, filter, exc_type, exc_value ) )
            if not finded:
                logger.warning( 'User [%s] not found!' % username )
                return False
            result = result[0][1]
        if manage_user:
            # Sync first/last name and mail from the LDAP entry into
            # db.auth_user (matched by username, falling back to email).
            logger.info( '[%s] Manage user data' % str( username ) )
            try:
                if not user_firstname_part == None:
                    store_user_firstname = result[user_firstname_attrib][0].split( ' ', 1 )[user_firstname_part]
                else:
                    store_user_firstname = result[user_firstname_attrib][0]
            except KeyError, e:
                store_user_firstname = None
            try:
                if not user_lastname_part == None:
                    store_user_lastname = result[user_lastname_attrib][0].split( ' ', 1 )[user_lastname_part]
                else:
                    store_user_lastname = result[user_lastname_attrib][0]
            except KeyError, e:
                store_user_lastname = None
            try:
                store_user_mail = result[user_mail_attrib][0]
            except KeyError, e:
                store_user_mail = None
            try:
                #
                # user as username
                # #################
                user_in_db = db( db.auth_user.username == username )
                if user_in_db.count() > 0:
                    user_in_db.update( first_name = store_user_firstname,
                                       last_name = store_user_lastname,
                                       email = store_user_mail )
                else:
                    db.auth_user.insert( first_name = store_user_firstname,
                                         last_name = store_user_lastname,
                                         email = store_user_mail,
                                         username = username )
            except:
                #
                # user as email
                # ##############
                user_in_db = db( db.auth_user.email == username )
                if user_in_db.count() > 0:
                    user_in_db.update( first_name = store_user_firstname,
                                       last_name = store_user_lastname,
                                       )
                else:
                    db.auth_user.insert( first_name = store_user_firstname,
                                         last_name = store_user_lastname,
                                         email = username
                                         )
        con.unbind()
        if manage_groups:
            if not do_manage_groups( username ):
                return False
        return True
    except ldap.LDAPError, e:
        import traceback
        logger.warning( '[%s] Error in ldap processing' % str( username ) )
        logger.debug( traceback.format_exc() )
        return False
    except IndexError, ex: # for AD membership test
        import traceback
        logger.warning( '[%s] Ldap result indexing error' % str( username ) )
        logger.debug( traceback.format_exc() )
        return False
def is_user_in_allowed_groups( username,
                               allowed_groups = allowed_groups
                               ):
    '''
    Return True when ``username`` belongs (according to LDAP) to at
    least one of the configured ``allowed_groups``, False otherwise.
    '''
    # Groups the user currently belongs to, as reported by LDAP.
    user_groups = get_user_groups_from_ldap( username )
    # Accept a single group name as well as a list of names.
    if type( allowed_groups ) != type( list() ):
        allowed_groups = [allowed_groups]
    # Any overlap between the two sets means access is allowed.
    return any( group in user_groups for group in allowed_groups )
def do_manage_groups( username,
                      db = db,
                      ):
    '''
    Manage user groups.

    Fetch all of the user's groups from LDAP and mirror them into
    web2py's auth tables: memberships the user lost in LDAP are
    deleted, missing groups are created (role tagged "Generated from
    LDAP") and missing memberships inserted.  Returns True on success,
    False on any error (logged, never raised).
    '''
    logger.info( '[%s] Manage user groups' % str( username ) )
    try:
        #
        # Get all group name where the user is in actually in ldap
        # #########################################################
        ldap_groups_of_the_user = get_user_groups_from_ldap( username )
        #
        # Get all group name where the user is in actually in local db
        # #############################################################
        try:
            db_user_id = db( db.auth_user.username == username ).select( db.auth_user.id ).first().id
        except:
            # no 'username' field or no row: fall back to matching email
            try:
                db_user_id = db( db.auth_user.email == username ).select( db.auth_user.id ).first().id
            except AttributeError, e:
                #
                # There is no user in local db
                # We create one
                # ##############################
                try:
                    db_user_id = db.auth_user.insert( username = username,
                                                      first_name = username )
                except AttributeError, e:
                    db_user_id = db.auth_user.insert( email = username,
                                                      first_name = username )
        if not db_user_id:
            logging.error( 'There is no username or email for %s!' % username )
            raise
        db_group_search = db( ( db.auth_membership.user_id == db_user_id ) & \
                              ( db.auth_user.id == db.auth_membership.user_id ) & \
                              ( db.auth_group.id == db.auth_membership.group_id ) )
        db_groups_of_the_user = list()
        db_group_id = dict()
        if db_group_search.count() > 0:
            # role -> group id map for the delete step below
            for group in db_group_search.select( db.auth_group.id, db.auth_group.role, distinct = True ):
                db_group_id[group.role] = group.id
                db_groups_of_the_user.append( group.role )
        logging.debug( 'db groups of user %s: %s' % ( username, str( db_groups_of_the_user ) ) )
        #
        # Delete user membership from groups where user is not anymore
        # #############################################################
        for group_to_del in db_groups_of_the_user:
            if ldap_groups_of_the_user.count( group_to_del ) == 0:
                db( ( db.auth_membership.user_id == db_user_id ) & \
                    ( db.auth_membership.group_id == db_group_id[group_to_del] ) ).delete()
        #
        # Create user membership in groups where user is not in already
        # ##############################################################
        for group_to_add in ldap_groups_of_the_user:
            if db_groups_of_the_user.count( group_to_add ) == 0:
                if db( db.auth_group.role == group_to_add ).count() == 0:
                    gid = db.auth_group.insert( role = group_to_add,
                                                description = 'Generated from LDAP' )
                else:
                    gid = db( db.auth_group.role == group_to_add ).select( db.auth_group.id ).first().id
                db.auth_membership.insert( user_id = db_user_id,
                                           group_id = gid )
    except:
        # best-effort: log and report failure instead of raising
        logger.warning( "[%s] Groups are not managed successully!" % str( username ) )
        import traceback
        logger.debug( traceback.format_exc() )
        return False
    return True
def init_ldap(
    ldap_server = server,
    ldap_port = port,
    ldap_basedn = base_dn,
    ldap_mode = mode,
    secure = secure,
    cert_path = cert_path,
    cert_file = cert_file
    ):
    '''
    Initialize and return the LDAP connection object, using ldaps://
    (default port 636, optional CA cert options) when ``secure`` is
    set, plain ldap:// (default port 389) otherwise.
    '''
    logger.info( '[%s] Inicialize ldap connection' % str( ldap_server ) )
    if secure:
        port_number = ldap_port or 636
        con = ldap.initialize(
            "ldaps://" + ldap_server + ":" + str( port_number ) )
        # CA certificate configuration (cert_file is the GnuTLS variant)
        if cert_path:
            con.set_option( ldap.OPT_X_TLS_CACERTDIR, cert_path )
        if cert_file:
            con.set_option( ldap.OPT_X_TLS_CACERTFILE, cert_file )
    else:
        port_number = ldap_port or 389
        con = ldap.initialize(
            "ldap://" + ldap_server + ":" + str( port_number ) )
    return con
def get_user_groups_from_ldap( username,
                               base_dn = base_dn,
                               ldap_binddn = bind_dn,
                               ldap_bindpw = bind_pw,
                               group_dn = group_dn,
                               group_name_attrib = group_name_attrib,
                               group_member_attrib = group_member_attrib,
                               group_filterstr = group_filterstr,
                               ):
    '''
    Return the list of LDAP group names the user belongs to.

    Searches ``group_dn`` (falling back to ``base_dn``) for entries
    whose ``group_member_attrib`` contains ``username`` and collects
    each match's ``group_name_attrib`` values.
    '''
    logger.info( '[%s] Get user groups from ldap' % str( username ) )
    #
    # Get all group name where the user is in actually in ldap
    # #########################################################
    # Inicialize ldap
    if not group_dn:
        group_dn = base_dn
    con = init_ldap()
    if ldap_binddn:
        # need to search directory with an bind_dn account 1st
        con.simple_bind_s( ldap_binddn, ldap_bindpw )
    else:
        # bind as anonymous
        con.simple_bind_s( '', '' )
    # search for groups where user is in
    # (both the configured attribute name and the username are escaped
    # before being interpolated into the rfc4515 filter)
    filter = '(&(%s=%s)(%s))' % ( ldap.filter.escape_filter_chars( group_member_attrib ),
                                  ldap.filter.escape_filter_chars( username ),
                                  group_filterstr )
    group_search_result = con.search_s( group_dn,
                                        ldap.SCOPE_SUBTREE,
                                        filter, [group_name_attrib] )
    ldap_groups_of_the_user = list()
    for group_row in group_search_result:
        group = group_row[1]
        # group is the attribute dict; the name attribute is multi-valued
        ldap_groups_of_the_user.extend( group[group_name_attrib] )
    con.unbind()
    return list( ldap_groups_of_the_user )
if filterstr[0] == '(' and filterstr[-1] == ')': # rfc4515 syntax
filterstr = filterstr[1:-1] # parens added again where used
return ldap_auth_aux
| Python |
#!/usr/bin/env python
# coding: utf8
"""
OpenID authentication for web2py
Allowed using OpenID login together with web2py built-in login.
By default, to support OpenID login, put this in your db.py
>>> from gluon.contrib.login_methods.openid_auth import OpenIDAuth
>>> auth.settings.login_form = OpenIDAuth(auth)
To show OpenID list in user profile, you can add the following code
before the end of function user() of your_app/controllers/default.py
+ if (request.args and request.args(0) == "profile"):
+ form = DIV(form, openid_login_form.list_user_openids())
return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration)
More detail in the description of the class OpenIDAuth.
Requirements:
python-openid version 2.2.5 or later
Reference:
* w2p openID
http://w2popenid.appspot.com/init/default/wiki/w2popenid
* RPX and web2py auth module
http://www.web2pyslices.com/main/slices/take_slice/28
* built-in file: gluon/contrib/login_methods/rpx_account.py
* built-in file: gluon/tools.py (Auth class)
"""
import time
from datetime import datetime, timedelta
from gluon import *
from gluon.storage import Storage, Messages
try:
import openid.consumer.consumer
from openid.association import Association
from openid.store.interface import OpenIDStore
from openid.extensions.sreg import SRegRequest, SRegResponse
from openid.store import nonce
from openid.consumer.discover import DiscoveryFailure
except ImportError, err:
raise ImportError("OpenIDAuth requires python-openid package")
DEFAULT = lambda: None
class OpenIDAuth(object):
"""
OpenIDAuth
It supports the logout_url, implementing the get_user and login_form
for cas usage of gluon.tools.Auth.
It also uses the ExtendedLoginForm to allow the OpenIDAuth login_methods
combined with the standard logon/register procedure.
It uses OpenID Consumer when render the form and begins the OpenID
authentication.
Example: (put these code after auth.define_tables() in your models.)
auth = Auth(globals(), db) # authentication/authorization
...
auth.define_tables() # creates all needed tables
...
#include in your model after auth has been defined
from gluon.contrib.login_methods.openid_auth import OpenIDAuth
openid_login_form = OpenIDAuth(request, auth, db)
from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm
extended_login_form = ExtendedLoginForm(request, auth, openid_login_form,
signals=['oid','janrain_nonce'])
auth.settings.login_form = extended_login_form
"""
def __init__(self, auth):
self.auth = auth
self.db = auth.db
request = current.request
self.nextvar = '_next'
self.realm = 'http://%s' % request.env.http_host
self.login_url = URL(r=request, f='user', args=['login'])
self.return_to_url = self.realm + self.login_url
self.table_alt_logins_name = "alt_logins"
if not auth.settings.table_user:
raise
self.table_user = self.auth.settings.table_user
self.openid_expiration = 15 #minutes
self.messages = self._define_messages()
if not self.table_alt_logins_name in self.db.tables:
self._define_alt_login_table()
    def _define_messages(self):
        """
        Build the Messages container holding every user-visible text
        used by the OpenID forms and flash notices (translated lazily
        through ``current.T``).
        """
        messages = Messages(current.T)
        messages.label_alt_login_username = 'Sign-in with OpenID: '
        messages.label_add_alt_login_username = 'Add a new OpenID: '
        messages.submit_button = 'Sign in'
        messages.submit_button_add = 'Add'
        messages.a_delete = 'Delete'
        messages.comment_openid_signin = 'What is OpenID?'
        messages.comment_openid_help_title = 'Start using your OpenID'
        messages.comment_openid_help_url = 'http://openid.net/get-an-openid/start-using-your-openid/'
        messages.openid_fail_discover = 'Failed to discover OpenID service. Check your OpenID or "More about OpenID"?'
        messages.flash_openid_expired = 'OpenID expired. Please login or authenticate OpenID again. Sorry for the inconvenient.'
        messages.flash_openid_associated = 'OpenID associated'
        messages.flash_associate_openid = 'Please login or register an account for this OpenID.'
        messages.p_openid_not_registered = "This Open ID haven't be registered. " \
            + "Please login to associate with it or register an account for it."
        messages.flash_openid_authenticated = 'OpenID authenticated successfully.'
        messages.flash_openid_fail_authentication = 'OpenID authentication failed. (Error message: %s)'
        messages.flash_openid_canceled = 'OpenID authentication canceled by user.'
        messages.flash_openid_need_setup = 'OpenID authentication needs to be setup by the user with the provider first.'
        messages.h_openid_login = 'OpenID Login'
        messages.h_openid_list = 'OpenID List'
        return messages
    def _define_alt_login_table(self):
        """
        Define the OpenID login table (``alt_logins``).

        Note: the ``type`` field is project-specific — it allows
        'facebook' and 'plurk' alternate login methods besides the
        default 'openid'.  If you don't need it, this is easy to
        change: remove the field and drop the
        "and db.alt_logins.type == type_" condition in the
        _find_matched_openid function.
        """
        db = self.db
        table = db.define_table(
            self.table_alt_logins_name,
            Field('username', length=512, default=''),
            Field('type', length=128, default='openid', readable=False),
            # reference to the auth_user row this OpenID belongs to
            Field('user', self.table_user, readable=False),
            )
        # each OpenID may be associated with at most one user
        table.username.requires = IS_NOT_IN_DB(db, table.username)
        self.table_alt_logins = table
def logout_url(self, next):
"""
Delete the w2popenid record in session as logout
"""
if current.session.w2popenid:
del(current.session.w2popenid)
return next
def login_form(self):
"""
Start to process the OpenID response if 'janrain_nonce' in request parameters
and not processed yet. Else return the OpenID form for login.
"""
request = current.request
if request.vars.has_key('janrain_nonce') and not self._processed():
self._process_response()
return self.auth()
return self._form()
    def get_user(self):
        """
        It supports the logout_url, implementing the get_user and login_form
        for cas usage of gluon.tools.Auth.

        Returns:
            True for the 'logout' action (so logout_url gets called),
            a {field: value} dict identifying the local user on success,
            or None when login/registration still has to happen.
        """
        request = current.request
        args = request.args
        # NOTE(review): assumes at least one URL arg is present — confirm
        # the auth controller guarantees this.
        if args[0] == 'logout':
            return True # Let logout_url got called
        if current.session.w2popenid:
            w2popenid = current.session.w2popenid
            db = self.db
            if (w2popenid.ok is True and w2popenid.oid): # OpenID authenticated
                # Expired sessions force a fresh authentication round-trip.
                if self._w2popenid_expired(w2popenid):
                    del(current.session.w2popenid)
                    flash = self.messages.flash_openid_expired
                    current.session.warning = flash
                    redirect(self.auth.settings.login_url)
                oid = self._remove_protocol(w2popenid.oid)
                alt_login = self._find_matched_openid(db, oid)
                nextvar = self.nextvar
                # This OpenID not in the database. If user logged in then add it
                # into database, else ask user to login or register.
                if not alt_login:
                    if self.auth.is_logged_in():
                        # TODO: ask first maybe
                        self._associate_user_openid(self.auth.user, oid)
                        if current.session.w2popenid:
                            del(current.session.w2popenid)
                        current.session.flash = self.messages.flash_openid_associated
                        if request.vars.has_key(nextvar):
                            redirect(request.vars[nextvar])
                        redirect(self.auth.settings.login_next)
                    if not request.vars.has_key(nextvar):
                        # no next var, add it and do login again
                        # so if user login or register can go back here to associate the OpenID
                        redirect(URL(r=request,
                                     args=['login'],
                                     vars={nextvar:self.login_url}))
                    # Swap in the "login/register to associate" form.
                    self.login_form = self._form_with_notification()
                    current.session.flash = self.messages.flash_associate_openid
                    return None # need to login or register to associate this openid
                # Get existed OpenID user
                user = db(self.table_user.id==alt_login.user).select().first()
                if user:
                    if current.session.w2popenid:
                        del(current.session.w2popenid)
                # Prefer 'username' as the identifying field, fall back to
                # 'email' for email-keyed auth tables.
                if 'username' in self.table_user.fields():
                    username = 'username'
                elif 'email' in self.table_user.fields():
                    username = 'email'
                return {username: user[username]} if user else None # login success (almost)
        return None # just start to login
def _find_matched_openid(self, db, oid, type_='openid'):
"""
Get the matched OpenID for given
"""
query = ((db.alt_logins.username == oid) & (db.alt_logins.type == type_))
alt_login = db(query).select().first() # Get the OpenID record
return alt_login
    def _associate_user_openid(self, user, oid):
        """
        Record that the logged-in ``user`` owns the OpenID ``oid`` by
        inserting a row into the alt_logins table.
        """
        self.db.alt_logins.insert(username=oid, user=user.id)
    def _form_with_notification(self):
        """
        Build the normal login form plus a notice telling the user their
        OpenID authenticated but is not yet linked to a local account.

        Returns a zero-argument callable producing the DIV, suitable for
        installing as ``self.login_form``.
        """
        form = DIV()
        # TODO: check when will happen
        if self.auth.settings.login_form in (self.auth, self):
            # Point login_form back at the plain auth form so self.auth()
            # renders it instead of recursing into this OpenID form.
            self.auth.settings.login_form = self.auth
            form = DIV(self.auth())
        register_note = DIV(P(self.messages.p_openid_not_registered))
        form.components.append(register_note)
        return lambda: form
def _remove_protocol(self, oid):
"""
Remove https:// or http:// from oid url
"""
protocol = 'https://'
if oid.startswith(protocol):
oid = oid[len(protocol):]
return oid
protocol = 'http://'
if oid.startswith(protocol):
oid = oid[len(protocol):]
return oid
return oid
    def _init_consumerhelper(self):
        """
        Lazily create (and cache on self) the ConsumerHelper wrapping the
        python-openid consumer API for this session and database.
        """
        if not hasattr(self, "consumerhelper"):
            self.consumerhelper = ConsumerHelper(current.session,
                                                 self.db)
        return self.consumerhelper
    def _form(self, style=None):
        # Heading plus the OpenID input form; ``style`` is forwarded as the
        # inline CSS of the OpenID text box.
        form = DIV(H3(self.messages.h_openid_login), self._login_form(style))
        return form
    def _login_form(self,
                    openid_field_label=None,
                    submit_button=None,
                    _next=None,
                    style=None):
        """
        Render the form for OpenID login and, once submitted and accepted,
        start the OpenID discovery/redirect dance via ConsumerHelper.begin().

        :param openid_field_label: label shown before the OpenID input box.
        :param submit_button: caption for the submit button.
        :param _next: when 'profile', a hidden _next input pointing at the
            user profile page is embedded so the flow returns there.
        :param style: inline CSS for the OpenID input box; defaults to the
            OpenID-logo styling below.
        """
        def warning_openid_fail(session):
            # Flash a discovery-failure warning on the next page render.
            session.warning = messages.openid_fail_discover
        style = style or """
background-attachment: scroll;
background-repeat: no-repeat;
background-image: url("http://wiki.openid.net/f/openid-16x16.gif");
background-position: 0% 50%;
background-color: transparent;
padding-left: 18px;
width: 400px;
"""
        style = style.replace("\n","")
        request = current.request
        session = current.session
        messages = self.messages
        hidden_next_input = ""
        if _next == 'profile':
            profile_url = URL(r=request, f='user', args=['profile'])
            hidden_next_input = INPUT(_type="hidden", _name="_next", _value=profile_url)
        form = FORM(openid_field_label or self.messages.label_alt_login_username,
                    INPUT(_type="input", _name="oid",
                          requires=IS_NOT_EMPTY(error_message=messages.openid_fail_discover),
                          _style=style),
                    hidden_next_input,
                    INPUT(_type="submit", _value=submit_button or messages.submit_button),
                    " ",
                    A(messages.comment_openid_signin,
                      _href=messages.comment_openid_help_url,
                      _title=messages.comment_openid_help_title,
                      _class='openid-identifier',
                      _target="_blank"),
                    _action=self.login_url
                    )
        if form.accepts(request.vars, session):
            oid = request.vars.oid
            consumerhelper = self._init_consumerhelper()
            url = self.login_url
            return_to_url = self.return_to_url
            if not oid:
                warning_openid_fail(session)
                redirect(url)
            try:
                # Carry _next through the provider round-trip so the user
                # lands back where they started.
                if request.vars.has_key('_next'):
                    return_to_url = self.return_to_url + '?_next=' + request.vars._next
                # begin() redirects the browser to the provider's URL.
                url = consumerhelper.begin(oid, self.realm, return_to_url)
            except DiscoveryFailure:
                warning_openid_fail(session)
            redirect(url)
        return form
def _processed(self):
"""
Check if w2popenid authentication is processed.
Return True if processed else False.
"""
processed = (hasattr(current.session, 'w2popenid') and
current.session.w2popenid.ok is True)
return processed
    def _set_w2popenid_expiration(self, w2popenid):
        """
        Stamp the session OpenID record with an expiration time
        ``self.openid_expiration`` minutes from now.
        """
        # NOTE(review): relies on ``datetime``/``timedelta`` being imported
        # at module level (from datetime import datetime, timedelta) — confirm.
        w2popenid.expiration = datetime.now() + timedelta(minutes=self.openid_expiration)
def _w2popenid_expired(self, w2popenid):
"""
Check if w2popenid authentication is expired.
Return True if expired else False.
"""
return (not w2popenid.expiration) or (datetime.now() > w2popenid.expiration)
    def _process_response(self):
        """
        Complete the OpenID handshake with the response parameters the
        provider sent back, and translate the outcome into session state
        plus a flash/warning message.
        """
        request = current.request
        request_vars = request.vars
        consumerhelper = self._init_consumerhelper()
        process_status = consumerhelper.process_response(request_vars, self.return_to_url)
        if process_status == "success":
            w2popenid = current.session.w2popenid
            # Simple Registration data (email/nickname) from the provider.
            user_data = self.consumerhelper.sreg()
            # Mark the session as authenticated and start its expiry clock.
            current.session.w2popenid.ok = True
            self._set_w2popenid_expiration(w2popenid)
            w2popenid.user_data = user_data
            current.session.flash = self.messages.flash_openid_authenticated
        elif process_status == "failure":
            flash = self.messages.flash_openid_fail_authentication % consumerhelper.error_message
            current.session.warning = flash
        elif process_status == "cancel":
            current.session.warning = self.messages.flash_openid_canceled
        elif process_status == "setup_needed":
            current.session.warning = self.messages.flash_openid_need_setup
    def list_user_openids(self):
        """
        Build the profile-page widget listing the current user's OpenIDs,
        each with a delete link, plus a small form to add another one.

        A pending ?delete_openid=... request is honored before rendering.
        """
        messages = self.messages
        request = current.request
        if request.vars.has_key('delete_openid'):
            self.remove_openid(request.vars.delete_openid)
        query = self.db.alt_logins.user == self.auth.user.id
        alt_logins = self.db(query).select()
        l = []
        for alt_login in alt_logins:
            username = alt_login.username
            delete_href = URL(r=request, f='user',
                              args=['profile'],
                              vars={'delete_openid': username})
            delete_link = A(messages.a_delete, _href=delete_href)
            l.append(LI(username, " ", delete_link))
        # NOTE(review): profile_url is unused since the line below was
        # commented out.
        profile_url = URL(r=request, f='user', args=['profile'])
        #return_to_url = self.return_to_url + '?' + self.nextvar + '=' + profile_url
        openid_list = DIV(H3(messages.h_openid_list), UL(l),
                          self._login_form(
                              _next='profile',
                              submit_button=messages.submit_button_add,
                              openid_field_label=messages.label_add_alt_login_username)
                          )
        return openid_list
def remove_openid(self, openid):
query = self.db.alt_logins.username == openid
self.db(query).delete()
class ConsumerHelper(object):
    """
    Thin wrapper around the python-openid consumer API: owns the
    openid.consumer.consumer.Consumer plus the session/store plumbing
    it needs.
    """
    def __init__(self, session, db):
        self.session = session
        store = self._init_store(db)
        self.consumer = openid.consumer.consumer.Consumer(session, store)
    def _init_store(self, db):
        """
        Lazily create (and cache) the Web2pyStore, and make sure the
        session has a w2popenid Storage slot for per-request OpenID state.
        """
        if not hasattr(self, "store"):
            store = Web2pyStore(db)
            session = self.session
            if not session.has_key('w2popenid'):
                session.w2popenid = Storage()
            self.store = store
        return self.store
    def begin(self, oid, realm, return_to_url):
        """
        Begin the OpenID authentication: remember the claimed identity in
        the session, request SReg fields, and return the URL to redirect
        the browser to at the provider.
        """
        w2popenid = self.session.w2popenid
        w2popenid.oid = oid
        auth_req = self.consumer.begin(oid)
        auth_req.addExtension(SRegRequest(required=['email','nickname']))
        url = auth_req.redirectURL(return_to=return_to_url, realm=realm)
        return url
    def process_response(self, request_vars, return_to_url):
        """
        Complete the handshake and reduce the python-openid response status
        to one of: "success", "failure", "cancel", "setup_needed", "no resp".
        """
        resp = self.consumer.complete(request_vars, return_to_url)
        if resp:
            if resp.status == openid.consumer.consumer.SUCCESS:
                self.resp = resp
                if hasattr(resp, "identity_url"):
                    # Normalize to the identity URL the provider confirmed.
                    self.session.w2popenid.oid = resp.identity_url
                return "success"
            if resp.status == openid.consumer.consumer.FAILURE:
                self.error_message = resp.message
                return "failure"
            if resp.status == openid.consumer.consumer.CANCEL:
                return "cancel"
            if resp.status == openid.consumer.consumer.SETUP_NEEDED:
                return "setup_needed"
        return "no resp"
    def sreg(self):
        """
        Try to get OpenID Simple Registration data from the last successful
        response; return the SReg data dict or None.
        http://openid.net/specs/openid-simple-registration-extension-1_0.html
        """
        if self.resp:
            resp = self.resp
            sreg_resp = SRegResponse.fromSuccessResponse(resp)
            return sreg_resp.data if sreg_resp else None
        else:
            return None
class Web2pyStore(OpenIDStore):
    """
    OpenIDStore implementation backed by the web2py database abstraction
    layer (DAL).

    OpenID stores take care of persisting nonces and associations. The
    Janrain python-openid library comes with implementations for file and
    memory storage; this one keeps both in db tables, so it also works in
    multi-process deployments. See the source code docs of OpenIDStore for
    a comprehensive description of this interface.
    """
    def __init__(self, database):
        self.database = database
        self.table_oid_associations_name = 'oid_associations'
        self.table_oid_nonces_name = 'oid_nonces'
        self._initDB()
    def _initDB(self):
        # Define the two backing tables on first use only.
        if self.table_oid_associations_name not in self.database:
            self.database.define_table(self.table_oid_associations_name,
                                       Field('server_url', 'string', length=2047, required=True),
                                       Field('handle', 'string', length=255, required=True),
                                       Field('secret', 'blob', required=True),
                                       Field('issued', 'integer', required=True),
                                       Field('lifetime', 'integer', required=True),
                                       Field('assoc_type', 'string', length=64, required=True)
                                       )
        if self.table_oid_nonces_name not in self.database:
            self.database.define_table(self.table_oid_nonces_name,
                                       Field('server_url', 'string', length=2047, required=True),
                                       Field('timestamp', 'integer', required=True),
                                       Field('salt', 'string', length=40, required=True)
                                       )
    def storeAssociation(self, server_url, association):
        """
        Store an association. If there already is one with the same
        server_url and handle in the table, replace it.
        """
        db = self.database
        query = ((db.oid_associations.server_url == server_url) &
                 (db.oid_associations.handle == association.handle))
        db(query).delete()
        # The stray ", 'insert '*10" debug residue that used to trail this
        # call has been removed; it turned the statement into a throwaway
        # tuple expression.
        db.oid_associations.insert(server_url = server_url,
                                   handle = association.handle,
                                   secret = association.secret,
                                   issued = association.issued,
                                   lifetime = association.lifetime,
                                   assoc_type = association.assoc_type)
    def getAssociation(self, server_url, handle=None):
        """
        Return the association for server_url and handle. If handle is
        None, return the latest association for that server_url.
        Return None if no (unexpired) association can be found.
        """
        db = self.database
        query = (db.oid_associations.server_url == server_url)
        if handle:
            query &= (db.oid_associations.handle == handle)
        rows = db(query).select(orderby=db.oid_associations.issued)
        # Prune expired associations as a side effect of the lookup.
        keep_assoc, _ = self._removeExpiredAssocations(rows)
        if len(keep_assoc) == 0:
            return None
        # Rows are ordered by issue time, so the last one is the newest.
        assoc = keep_assoc.pop()
        return Association(assoc['handle'],
                           assoc['secret'],
                           assoc['issued'],
                           assoc['lifetime'],
                           assoc['assoc_type'])
    def removeAssociation(self, server_url, handle):
        """Delete the matching association rows."""
        db = self.database
        query = ((db.oid_associations.server_url == server_url) &
                 (db.oid_associations.handle == handle))
        # NOTE(review): delete() returns a count, so this is always True;
        # the OpenIDStore contract wants "did the association exist". Kept
        # as-is to preserve the existing behavior.
        return db(query).delete() != None
    def useNonce(self, server_url, timestamp, salt):
        """
        Return False if this nonce has been used before or its timestamp
        is not current (outside nonce.SKEW); otherwise record it and
        return True.
        """
        db = self.database
        if abs(timestamp - time.time()) > nonce.SKEW:
            return False
        query = ((db.oid_nonces.server_url == server_url) &
                 (db.oid_nonces.timestamp == timestamp) &
                 (db.oid_nonces.salt == salt))
        if db(query).count() > 0:
            return False
        db.oid_nonces.insert(server_url = server_url,
                             timestamp = timestamp,
                             salt = salt)
        return True
    def _removeExpiredAssocations(self, rows):
        """
        Helper, not part of the OpenIDStore interface. Given a list of
        association rows, delete the expired ones from the db and return
        the tuple ([valid_assoc_rows], no_of_expired_assoc_deleted).
        """
        db = self.database
        keep_assoc = []
        remove_assoc = []
        t1970 = time.time()
        for r in rows:
            if r['issued'] + r['lifetime'] < t1970:  # expired?
                remove_assoc.append(r)
            else:
                keep_assoc.append(r)
        for r in remove_assoc:
            del db.oid_associations[r['id']]
        return (keep_assoc, len(remove_assoc))
    def cleanupNonces(self):
        """
        Remove expired nonce entries from the db and return the number of
        entries deleted.
        """
        db = self.database
        query = (db.oid_nonces.timestamp < time.time() - nonce.SKEW)
        return db(query).delete()
    def cleanupAssociations(self):
        """
        Remove expired associations from the db and return the number of
        entries deleted.
        """
        db = self.database
        query = (db.oid_associations.id > 0)
        return self._removeExpiredAssocations(db(query).select())[1]
    def cleanup(self):
        """
        Run periodically to free the db from expired nonce and association
        entries; returns (nonces_deleted, associations_deleted).
        """
        return self.cleanupNonces(), self.cleanupAssociations()
| Python |
#!/usr/bin/env python
# coding: utf8
"""
ExtendedLoginForm is used to extend normal login form in web2py with one more login method.
So user can choose the built-in login or extended login methods.
"""
from gluon import current, DIV
class ExtendedLoginForm(object):
    """
    Put extended_login_form under web2py/gluon/contrib/login_methods folder.
    Then inside your model where defines the auth:
        auth = Auth(globals(),db)  # authentication/authorization
        ...
        auth.define_tables()  # You might like to put the code after auth.define_tables
        ...                   # if the alt_login_form deals with tables of auth.
        alt_login_form = RPXAccount(request,
                                    api_key="...",
                                    domain="...",
                                    url = "http://localhost:8000/%s/default/user/login" % request.application)
        extended_login_form = ExtendedLoginForm(auth, alt_login_form, signals=['token'])
        auth.settings.login_form = extended_login_form
    Note:
        Since rpx_account doesn't create the password for the user, you
        might need to provide a way for user to create password to do
        normal login.
    """
    def __init__(self,
                 auth,
                 alt_login_form,
                 signals=None,
                 login_arg='login'
                 ):
        """
        :param auth: the gluon.tools.Auth instance.
        :param alt_login_form: the alternate login method to combine in.
        :param signals: request-var names whose presence hands the whole
            request over to alt_login_form.login_form().
        :param login_arg: URL arg identifying the login action.
        """
        self.auth = auth
        self.alt_login_form = alt_login_form
        # Avoid the shared mutable-default-argument pitfall: each instance
        # gets its own list.
        self.signals = signals if signals is not None else []
        self.login_arg = login_arg
    def get_user(self):
        """
        Delegate the get_user to alt_login_form.get_user.
        """
        if hasattr(self.alt_login_form, 'get_user'):
            return self.alt_login_form.get_user()
        return None  # let gluon.tools.Auth.get_or_create_user do the rest
    def login_url(self, next):
        """
        Optional implement for alt_login_form.
        In normal case, this should be replaced by get_user, and never get called.
        """
        if hasattr(self.alt_login_form, 'login_url'):
            return self.alt_login_form.login_url(next)
        return self.auth.settings.login_url
    def logout_url(self, next):
        """
        Optional implement for alt_login_form.
        Called if bool(alt_login_form.get_user) is True.
        If alt_login_form implemented logout_url function, it will return that function call.
        """
        if hasattr(self.alt_login_form, 'logout_url'):
            return self.alt_login_form.logout_url(next)
        return next
    def login_form(self):
        """
        Combine the auth() form with alt_login_form.
        If signals are set and a parameter in request matches any signals,
        it will return the call of alt_login_form.login_form instead.
        So alt_login_form can handle some particular situations, for example,
        multiple steps of OpenID login inside alt_login_form.login_form.
        Otherwise it will render the normal login form combined with
        alt_login_form.login_form.
        """
        request = current.request
        if self.signals and any(signal in request.vars
                                for signal in self.signals):
            return self.alt_login_form.login_form()
        # Temporarily point login_form at the plain auth form so auth()
        # renders it instead of recursing back into us.
        self.auth.settings.login_form = self.auth
        form = DIV(self.auth())
        self.auth.settings.login_form = self
        form.components.append(self.alt_login_form.login_form())
        return form
| Python |
import urllib
import urllib2
import base64
def basic_auth(server="http://127.0.0.1"):
    """
    Factory returning a web2py login method that validates credentials
    against a remote server via HTTP Basic authentication:

        from gluon.contrib.login_methods.basic_auth import basic_auth
        auth.settings.login_methods.append(basic_auth('http://server'))
    """
    def basic_login_aux(username,
                        password,
                        server=server):
        # Probe the server with an Authorization header; any 2xx/3xx
        # response counts as a successful login.
        credentials = base64.b64encode(username + ':' + password)
        request = urllib2.Request(
            server, None, {'Authorization': 'Basic ' + credentials})
        try:
            urllib2.urlopen(request)
            return True
        except (urllib2.URLError, urllib2.HTTPError):
            return False
    return basic_login_aux
| Python |
#!/usr/bin/env python
import time
from hashlib import md5
from gluon.dal import DAL
def motp_auth(db=None,
              time_offset=60):
    """
    motp allows you to login with a one time password (OTP) generated on a
    motp client; motp clients are available for practically all platforms.
    To know more about OTP visit http://en.wikipedia.org/wiki/One-time_password
    To know more visit http://motp.sourceforge.net
    Written by Madhukar R Pai (madspai@gmail.com)
    License : MIT or GPL v2
    Thanks and credits to the web2py community.

    :param db: DAL instance whose auth_user table carries motp_secret and
        motp_pin fields; when None, a local sqlite db is opened on first use.
    :param time_offset: allowed clock skew, in seconds, when checking OTPs.

    To use motp_auth:
    motp_auth.py has to be located in gluon/contrib/login_methods/ folder.
    First, auth_user has to have 2 extra fields - motp_secret and motp_pin.
    For that define auth like shown below:
        ## after auth = Auth(db)
        db.define_table(
            auth.settings.table_user_name,
            Field('first_name', length=128, default=''),
            Field('last_name', length=128, default=''),
            Field('email', length=128, default='', unique=True), # required
            Field('password', 'password', length=512,            # required
                  readable=False, label='Password'),
            Field('motp_secret', length=512, default='',
                  label='MOTP Seceret'),
            Field('motp_pin', length=128, default='',
                  label='MOTP PIN'),
            Field('registration_key', length=512,                # required
                  writable=False, readable=False, default=''),
            Field('reset_password_key', length=512,              # required
                  writable=False, readable=False, default=''),
            Field('registration_id', length=512,                 # required
                  writable=False, readable=False, default=''))
        ## validators, then:
        auth.settings.table_user = custom_auth_table # tell auth to use custom_auth_table
        ## before auth.define_tables()
        ## after that:
        from gluon.contrib.login_methods.motp_auth import motp_auth
        auth.settings.login_methods.append(motp_auth(db=db))

    Instructions for using MOTP:
    - install and initialize a MOTP client on your phone (to reset a motp
      secret type in #**#)
    - during user creation enter the generated secret into motp_secret and a
      pre-decided pin into motp_pin
    - to login, generate a fresh OTP by typing in the pin and use the OTP as
      password

    ### To Dos ###
    - both motp_secret and pin are stored in plain text! need to have some way of encrypting
    - web2py stores the password in db on successful login (should not happen)
    - maybe some utility or page to check the otp would be useful
    - as of now user field is hardcoded to email. Some way of selecting user table and user field.
    """
    if db is None:
        # Create the fallback database lazily, at call time. The old default
        # argument DAL('sqlite://storage.sqlite') was evaluated once at
        # import time and created/opened the sqlite file as a side effect
        # even when a real db was always passed in.
        db = DAL('sqlite://storage.sqlite')

    def verify_otp(otp, pin, secret, offset=60):
        # mOTP token = md5(epoch_time_without_last_digit + secret + pin)[:6],
        # so each token covers a ~10-second window; scan the skew window.
        epoch_time = int(time.time())
        time_start = int(str(epoch_time - offset)[:-1])
        time_end = int(str(epoch_time + offset)[:-1])
        for t in range(time_start - 1, time_end + 1):
            to_hash = str(t) + secret + pin
            hash = md5(to_hash).hexdigest()[:6]
            if otp == hash:
                return True
        return False

    def motp_auth_aux(email,
                      password,
                      db=db,
                      offset=time_offset):
        """Check ``password`` as a one-time password for ``email``."""
        if not db:
            return False
        user_data = db(db.auth_user.email == email).select().first()
        if not user_data:
            return False
        motp_secret = user_data['motp_secret']
        motp_pin = user_data['motp_pin']
        if not (motp_secret and motp_pin):
            return False
        return verify_otp(password, motp_pin, motp_secret, offset=offset)
    return motp_auth_aux
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Written by Michele Comitini <mcm@glisco.it>
License: GPL v3
Adds support for x509 authentication.
"""
from gluon.globals import current
from gluon.storage import Storage
from gluon.http import HTTP,redirect
#requires M2Crypto
from M2Crypto import X509
class X509Auth(object):
    """
    Login using x509 cert from client.
    from gluon.contrib.login_methods.x509_auth import X509Account
    auth.settings.actions_disabled=['register','change_password',
    'request_reset_password','profile']
    auth.settings.login_form = X509Account()
    """
    def __init__(self):
        self.request = current.request
        # PEM certificate as forwarded by the web server, if any.
        self.ssl_client_raw_cert = self.request.env.ssl_client_raw_cert
        # rebuild the certificate passed by the env
        # this is double work, but it is the only way
        # since we cannot access the web server ssl engine directly
        if self.ssl_client_raw_cert:
            x509=X509.load_cert_string(self.ssl_client_raw_cert, X509.FORMAT_PEM)
            # extract it from the cert
            self.serial = self.request.env.ssl_client_serial or ('%x' % x509.get_serial_number()).upper()
            subject = x509.get_subject()
            # Reordering the subject map to a usable Storage map
            # this allows us a cleaner syntax:
            # cn = self.subject.cn
            # Each entry becomes nid-name -> list of values.
            # NOTE(review): relies on Python 2 map/filter returning lists.
            self.subject = Storage(filter(None,
                                          map(lambda x:
                                              (x,map(lambda y:
                                                     y.get_data().as_text(),
                                                     subject.get_entries_by_nid(subject.nid[x]))),
                                              subject.nid.keys())))
    def login_form(self, **args):
        # Interactive login is impossible without a client certificate.
        raise HTTP(403,'Login not allowed. No valid x509 crentials')
    def login_url(self, next="/"):
        raise HTTP(403,'Login not allowed. No valid x509 crentials')
    def logout_url(self, next="/"):
        return next
    def get_user(self):
        '''Returns the user info contained in the certificate.
        '''
        # We did not get the client cert?
        if not self.ssl_client_raw_cert:
            return None
        # Try to reconstruct some useful info for web2py auth machinery.
        # Multiple subject values are joined with ' | '.
        # NOTE(review): ``reduce`` here is the Python 2 builtin.
        p = profile = dict()
        username = p['username'] = reduce(lambda a,b: '%s | %s' % (a,b), self.subject.CN or self.subject.commonName)
        p['first_name'] = reduce(lambda a,b: '%s | %s' % (a,b),self.subject.givenName or username)
        p['last_name'] = reduce(lambda a,b: '%s | %s' % (a,b),self.subject.surname)
        p['email'] = reduce(lambda a,b: '%s | %s' % (a,b),self.subject.Email or self.subject.emailAddress)
        # IMPORTANT WE USE THE CERT SERIAL AS UNIQUE KEY FOR THE USER
        p['registration_id'] = self.serial
        # If the auth table has a field certificate it will be used to
        # save a PEM encoded copy of the user certificate.
        p['certificate'] = self.ssl_client_raw_cert
        return profile
| Python |
from gluon.contrib.pam import authenticate
def pam_auth():
    """
    Factory for a web2py login method that defers to the local PAM stack:

        from gluon.contrib.login_methods.pam_auth import pam_auth
        auth.settings.login_methods.append(pam_auth())
    or
        auth.settings.actions_disabled=[
            'register','change_password','request_reset_password']
        auth.settings.login_methods=[pam_auth()]

    The latter method will not store the user password in auth_user.
    """
    def pam_auth_aux(username, password):
        # The actual check is done by gluon.contrib.pam.
        return authenticate(username, password)
    return pam_auth_aux
| Python |
import smtplib
import logging
def email_auth(server="smtp.gmail.com:587",
               domain="@gmail.com",
               tls_mode=None):
    """
    Login method that validates credentials by attempting an SMTP login:

        from gluon.contrib.login_methods.email_auth import email_auth
        auth.settings.login_methods.append(email_auth("smtp.gmail.com:587",
                                                      "@gmail.com"))

    :param server: "host:port" of the SMTP server.
    :param domain: required email suffix (a string, or list/tuple of
        strings); falsy disables the check.
    :param tls_mode: force STARTTLS on/off; None auto-enables it for
        port 587.
    """
    def email_auth_aux(email,
                       password,
                       server=server,
                       domain=domain,
                       tls_mode=tls_mode):
        # Cheap local check first: reject addresses outside the allowed
        # domains without touching the network.
        if domain:
            if not isinstance(domain, (list, tuple)):
                domain = [str(domain)]
            if not [d for d in domain if email[-len(d):] == d]:
                return False
        (host, port) = server.split(':')
        if tls_mode is None:  # auto detect: 587 is the STARTTLS submission port
            tls_mode = port == '587'
        try:
            server = None
            server = smtplib.SMTP(host, port)
            server.ehlo()
            if tls_mode:
                server.starttls()
                server.ehlo()
            server.login(email, password)
            server.quit()
            return True
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            logging.exception('email_auth() failed')
            if server:
                try:
                    server.quit()
                except Exception:  # server might already close connection after error
                    pass
            return False
    return email_auth_aux
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
License: GPL v2
Tinkered by Szabolcs Gyuris < szimszo n @ o regpreshaz dot eu>
"""
from gluon import current, redirect
class CasAuth( object ):
    """
    Login will be done via Web2py's CAS application, instead of web2py's
    login form.
    Include in your model (eg db.py)::
        from gluon.contrib.login_methods.cas_auth import CasAuth
        auth.define_tables(username=True)
        auth.settings.login_form=CasAuth(
            urlbase = "https://[your CAS provider]/app/default/user/cas",
            actions=['login','validate','logout'])
    where urlbase is the actual CAS server url without the login,logout...
    Enjoy.
    ###UPDATE###
    if you want to connect to a CAS version 2 JASIG Server use this:
        auth.settings.login_form=CasAuth(
            urlbase = "https://[Your CAS server]/cas",
            actions = ['login','serviceValidate','logout'],
            casversion = 2,
            casusername = "cas:user")
    where casusername is the xml node returned by CAS server which contains
    user's username.
    """
    def __init__(self, g=None, ### g for backward compatibility ###
                 urlbase = "https://web2py.com/cas/cas",
                 actions=['login','validate','logout'],
                 maps=dict(username=lambda v:v.get('username',v['user']),
                           email=lambda v:v.get('email',None),
                           user_id=lambda v:v['user']),
                 casversion = 1,
                 casusername = 'cas:user'
                 ):
        # Build the three CAS endpoint URLs from the base plus action names.
        self.urlbase=urlbase
        self.cas_login_url="%s/%s"%(self.urlbase,actions[0])
        self.cas_check_url="%s/%s"%(self.urlbase,actions[1])
        self.cas_logout_url="%s/%s"%(self.urlbase,actions[2])
        self.maps=maps
        self.casversion = casversion
        self.casusername = casusername
        # Reconstruct this request's own URL (the CAS "service" parameter),
        # honoring a reverse proxy via X-Forwarded-Host when present.
        http_host=current.request.env.http_x_forwarded_host
        if not http_host: http_host=current.request.env.http_host
        if current.request.env.wsgi_url_scheme in [ 'https', 'HTTPS' ]:
            scheme = 'https'
        else:
            scheme = 'http'
        self.cas_my_url='%s://%s%s'%( scheme, http_host, current.request.env.path_info )
    def login_url( self, next = "/" ):
        # Kick off (or complete) the CAS ticket exchange; the resulting
        # token is stored in the session for get_user() to translate.
        current.session.token=self._CAS_login()
        return next
    def logout_url( self, next = "/" ):
        # Drop local credentials, then bounce through the CAS logout page.
        current.session.token=None
        current.session.auth=None
        self._CAS_logout()
        return next
    def get_user( self ):
        # Translate the raw CAS token stored in the session into the dict
        # web2py's Auth expects, via the user-supplied ``maps`` lambdas.
        user=current.session.token
        if user:
            d = {'source':'web2py cas'}
            for key in self.maps:
                d[key]=self.maps[key](user)
            return d
        return None
    def _CAS_login( self ):
        """
        exposed as CAS.login(request)
        returns a token on success, None on failed authentication
        """
        import urllib
        self.ticket=current.request.vars.ticket
        if not current.request.vars.ticket:
            # No ticket yet: send the browser to the CAS login page, which
            # redirects back here with ?ticket=...
            redirect( "%s?service=%s"% (self.cas_login_url,
                                        self.cas_my_url))
        else:
            # Validate the ticket against the CAS check endpoint.
            url="%s?service=%s&ticket=%s" % (self.cas_check_url,
                                             self.cas_my_url,
                                             self.ticket )
            data=urllib.urlopen( url ).read()
            if data.startswith('yes') or data.startswith('no'):
                # CAS v1 plain-text protocol: line 1 is yes/no, line 2 the user.
                data = data.split('\n')
                if data[0]=='yes':
                    if ':' in data[1]: # for Compatibility with Custom CAS
                        items = data[1].split(':')
                        a = items[0]
                        b = len(items)>1 and items[1] or a
                        c = len(items)>2 and items[2] or b
                    else:
                        a = b = c = data[1]
                    return dict(user=a,email=b,username=c)
                return None
            # Otherwise assume a CAS v2 XML response.
            import xml.dom.minidom as dom
            import xml.parsers.expat as expat
            try:
                dxml=dom.parseString(data)
                envelop = dxml.getElementsByTagName("cas:authenticationSuccess")
                if len(envelop)>0:
                    res = dict()
                    for x in envelop[0].childNodes:
                        if x.nodeName.startswith('cas:') and len(x.childNodes):
                            # Strip the 'cas:' prefix; repeated keys are
                            # collected into lists.
                            key = x.nodeName[4:].encode('utf8')
                            value = x.childNodes[0].nodeValue.encode('utf8')
                            if not key in res:
                                res[key]=value
                            else:
                                if not isinstance(res[key],list):
                                    res[key]=[res[key]]
                                res[key].append(value)
                    return res
            except expat.ExpatError: pass
        return None # fallback
    def _CAS_logout( self ):
        """
        exposed CAS.logout()
        redirects to the CAS logout page
        """
        import urllib
        redirect("%s?service=%s" % (self.cas_logout_url,self.cas_my_url))
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Loginza.ru authentication for web2py
Developed by Vladimir Dronnikov (Copyright © 2011)
Email <dronnikov@gmail.com>
"""
import urllib
from gluon.html import *
from gluon.tools import fetch
from gluon.storage import Storage
import gluon.contrib.simplejson as json
class Loginza(object):
"""
from gluon.contrib.login_methods.loginza import Loginza
auth.settings.login_form = Loginza(request,
url = "http://localhost:8000/%s/default/user/login" % request.application)
"""
def __init__(self,
request,
url = "",
embed = True,
auth_url = "http://loginza.ru/api/authinfo",
language = "en",
prompt = "loginza",
on_login_failure = None,
):
self.request = request
self.token_url = url
self.embed = embed
self.auth_url = auth_url
self.language = language
self.prompt = prompt
self.profile = None
self.on_login_failure = on_login_failure
self.mappings = Storage()
# TODO: profile.photo is the URL to the picture
# Howto download and store it locally?
# FIXME: what if email is unique=True
self.mappings["http://twitter.com/"] = lambda profile:\
dict(registration_id = profile.get("identity",""),
username = profile.get("nickname",""),
email = profile.get("email",""),
last_name = profile.get("name","").get("full_name",""),
#avatar = profile.get("photo",""),
)
self.mappings["https://www.google.com/accounts/o8/ud"] = lambda profile:\
dict(registration_id = profile.get("identity",""),
username = profile.get("name","").get("full_name",""),
email = profile.get("email",""),
first_name = profile.get("name","").get("first_name",""),
last_name = profile.get("name","").get("last_name",""),
#avatar = profile.get("photo",""),
)
self.mappings["http://vkontakte.ru/"] = lambda profile:\
dict(registration_id=profile.get("identity",""),
username = profile.get("name","").get("full_name",""),
email = profile.get("email",""),
first_name = profile.get("name","").get("first_name",""),
last_name = profile.get("name","").get("last_name",""),
#avatar = profile.get("photo",""),
)
self.mappings.default = lambda profile:\
dict(registration_id = profile.get("identity",""),
username = profile.get("name","").get("full_name"),
email = profile.get("email",""),
first_name = profile.get("name","").get("first_name",""),
last_name = profile.get("name","").get("last_name",""),
#avatar = profile.get("photo",""),
)
def get_user(self):
request = self.request
if request.vars.token:
user = Storage()
data = urllib.urlencode(dict(token = request.vars.token))
auth_info_json = fetch(self.auth_url+'?'+data)
#print auth_info_json
auth_info = json.loads(auth_info_json)
if auth_info["identity"] != None:
self.profile = auth_info
provider = self.profile["provider"]
user = self.mappings.get(provider, self.mappings.default)(self.profile)
#user["password"] = ???
#user["avatar"] = ???
return user
elif self.on_login_failure:
redirect(self.on_login_failure)
return None
def login_form(self):
    """Return the Loginza login widget as a web2py helper.

    When ``self.embed`` is true the widget is rendered inline in an
    IFRAME; otherwise a styled link plus the Loginza javascript snippet
    is returned.  ``self.language`` and ``self.token_url`` are
    substituted into the widget URL.
    """
    # NOTE: the former local aliases `request`/`args` were dead code and
    # have been removed; the widget URL is now built once.
    LOGINZA_URL = "https://loginza.ru/api/widget?lang=%s&token_url=%s&overlay=loginza"
    widget_url = LOGINZA_URL % (self.language, self.token_url)
    if self.embed:
        form = IFRAME(_src=widget_url,
                      _scrolling="no",
                      _frameborder="no",
                      _style="width:359px;height:300px;")
    else:
        form = DIV(A(self.prompt, _href=widget_url, _class="loginza"),
                   SCRIPT(_src="https://s3-eu-west-1.amazonaws.com/s1.loginza.ru/js/widget.js",
                          _type="text/javascript"))
    return form
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Written by Michele Comitini <mcm@glisco.it>
License: LGPL v3
Adds support for OAuth 2.0 authentication to web2py.
OAuth 2.0 Draft: http://tools.ietf.org/html/draft-ietf-oauth-v2-10
"""
import time
import cgi
import urllib2
from urllib import urlencode
from gluon import current, redirect, HTTP
class OAuthAccount(object):
    """
    Login will be done via OAuth Framework, instead of web2py's
    login form.
    You need to override the get_user method to match your auth provider needs.
    Example for facebook in your model (eg db.py)::
        # define the auth_table before call to auth.define_tables()
        auth_table = db.define_table(
            auth.settings.table_user_name,
            Field('first_name', length=128, default=""),
            Field('last_name', length=128, default=""),
            Field('username', length=128, default="", unique=True),
            Field('password', 'password', length=256,
                  readable=False, label='Password'),
            Field('registration_key', length=128, default= "",
                  writable=False, readable=False))
        auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username)
        auth.define_tables()
        CLIENT_ID=\"<put your fb application id here>\"
        CLIENT_SECRET=\"<put your fb application secret here>\"
        AUTH_URL="http://..."
        TOKEN_URL="http://..."
        # remember to download and install facebook GraphAPI module in your app
        from facebook import GraphAPI, GraphAPIError
        from gluon.contrib.login_methods.oauth20_account import OAuthAccount
        class FaceBookAccount(OAuthAccount):
            '''OAuth impl for FaceBook'''
            AUTH_URL="https://graph.facebook.com/oauth/authorize"
            TOKEN_URL="https://graph.facebook.com/oauth/access_token"
            def __init__(self):
                OAuthAccount.__init__(self,
                                      client_id=CLIENT_ID,
                                      client_secret=CLIENT_SECRET,
                                      auth_url=self.AUTH_URL,
                                      token_url=self.TOKEN_URL,
                                      scope='user_photos,friends_photos')
                self.graph = None
            def get_user(self):
                '''
                Returns the user using the Graph API.
                '''
                if not self.accessToken():
                    return None
                if not self.graph:
                    self.graph = GraphAPI((self.accessToken()))
                user = None
                try:
                    user = self.graph.get_object("me")
                except GraphAPIError, e:
                    self.session.token = None
                    self.graph = None
                if user:
                    return dict(first_name = user['first_name'],
                                last_name = user['last_name'],
                                username = user['id'])
        auth.settings.actions_disabled=['register','change_password','request_reset_password','profile']
        auth.settings.login_form=FaceBookAccount()
    Any optional arg in the constructor will be passed asis to remote
    server for requests. It can be used for the optional"scope" parameters for Facebook.
    """

    def __redirect_uri(self, next=None):
        """
        Build the uri used by the authenticating server to redirect
        the client back to the page originating the auth request.
        Appends the _next action to the generated url so the flows continues.
        """
        r = current.request
        # NOTE(review): this reads http_x_forwarded_for (a client-IP
        # header); http_x_forwarded_host looks like what was intended
        # for proxied deployments — confirm before changing.
        http_host = r.env.http_x_forwarded_for
        if not http_host:
            http_host = r.env.http_host
        url_scheme = r.env.wsgi_url_scheme
        if next:
            path_info = next
        else:
            path_info = r.env.path_info
        uri = '%s://%s%s' % (url_scheme, http_host, path_info)
        if r.get_vars and not next:
            # keep the original query string so state survives the round trip
            uri += '?' + urlencode(r.get_vars)
        return uri

    def __build_url_opener(self, uri):
        """
        Build the url opener for managing HTTP Basic Authentication
        """
        # Create an OpenerDirector with support
        # for Basic HTTP Authentication...
        auth_handler = urllib2.HTTPBasicAuthHandler()
        auth_handler.add_password(None,
                                  uri,
                                  self.client_id,
                                  self.client_secret)
        opener = urllib2.build_opener(auth_handler)
        return opener

    def accessToken(self):
        """
        Return the access token generated by the authenticating server.
        If token is already in the session that one will be used.
        Otherwise the token is fetched from the auth server.
        """
        if current.session.token and current.session.token.has_key('expires'):
            expires = current.session.token['expires']
            # reuse token until expiration (0 means "never expires")
            if expires == 0 or expires > time.time():
                return current.session.token['access_token']
        if current.session.code:
            # exchange the authorization code for an access token
            data = dict(client_id=self.client_id,
                        client_secret=self.client_secret,
                        redirect_uri=current.session.redirect_uri,
                        response_type='token', code=current.session.code)
            if self.args:
                data.update(self.args)
            open_url = None
            opener = self.__build_url_opener(self.token_url)
            try:
                open_url = opener.open(self.token_url, urlencode(data))
            except urllib2.HTTPError, e:
                tmp = e.read()
                print tmp
                raise Exception(tmp)
            finally:
                del current.session.code # throw it away: codes are single-use
            if open_url:
                try:
                    # token endpoint answers urlencoded key/value pairs
                    data = open_url.read()
                    tokendata = cgi.parse_qs(data)
                    current.session.token = \
                        dict([(k,v[-1]) for k,v in tokendata.items()])
                    # set expiration absolute time try to avoid broken
                    # implementations where "expires_in" becomes "expires"
                    if current.session.token.has_key('expires_in'):
                        exps = 'expires_in'
                    elif current.session.token.has_key('expires'):
                        exps = 'expires'
                    else:
                        exps = None
                    current.session.token['expires'] = exps and \
                        int(current.session.token[exps]) + \
                        time.time()
                finally:
                    opener.close()
                return current.session.token['access_token']
        current.session.token = None
        return None

    def __init__(self, g=None,
                 client_id=None, client_secret=None,
                 auth_url=None, token_url=None, **args):
        """
        first argument is unused. Here only for legacy reasons.
        Extra keyword args are forwarded verbatim to the remote server
        (e.g. Facebook's optional 'scope').
        """
        if [client_id, client_secret, auth_url, token_url].count(None) > 0:
            raise RuntimeError("""Following args are mandatory:
                               client_id,
                               client_secret,
                               auth_url,
                               token_url.
                               """)
        self.client_id = client_id
        self.client_secret = client_secret
        self.auth_url = auth_url
        self.token_url = token_url
        self.args = args

    def login_url(self, next="/"):
        # called by web2py Auth; triggers the OAuth dance when needed
        self.__oauth_login(next)
        return next

    def logout_url(self, next="/"):
        # forget the token so the next login restarts the dance
        del current.session.token
        return next

    def get_user(self):
        """
        Override this method by subclassing the class.
        """
        if not current.session.token: return None
        # placeholder user returned by the default implementation
        return dict(first_name = 'Pinco',
                    last_name = 'Pallino',
                    username = 'pincopallino')
        raise NotImplementedError, "Must override get_user()"
        # Following code is never executed. It can be used as example
        # for overriding in subclasses.
        if not self.accessToken():
            return None
        if not self.graph:
            self.graph = GraphAPI((self.accessToken()))
        user = None
        try:
            user = self.graph.get_object("me")
        except GraphAPIError:
            current.session.token = None
            self.graph = None
        if user:
            return dict(first_name = user['first_name'],
                        last_name = user['last_name'],
                        username = user['id'])

    def __oauth_login(self, next):
        """
        This method redirects the user to the authenticating form
        on authentication server if the authentication code
        and the authentication token are not available to the
        application yet.
        Once the authentication code has been received this method is
        called to set the access token into the session by calling
        accessToken()
        """
        if not self.accessToken():
            if not current.request.vars.code:
                # first leg: send the browser to the authorization endpoint
                current.session.redirect_uri = self.__redirect_uri(next)
                data = dict(redirect_uri=current.session.redirect_uri,
                            response_type='code',
                            client_id=self.client_id)
                if self.args:
                    data.update(self.args)
                auth_request_url = self.auth_url + "?" + urlencode(data)
                raise HTTP(307,
                           "You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
                           Location=auth_request_url)
            else:
                # second leg: the server called us back with a code
                current.session.code = current.request.vars.code
                self.accessToken()
                return current.session.code
        return None
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Written by Michele Comitini <mcm@glisco.it>
License: GPL v3
Adds support for OAuth1.0a authentication to web2py.
Dependencies:
- python-oauth2 (http://github.com/simplegeo/python-oauth2)
"""
import oauth2 as oauth
import cgi
from urllib2 import urlopen
import urllib2
from urllib import urlencode
class OAuthAccount(object):
    """
    Login will be done via OAuth Framework, instead of web2py's
    login form.
    Include in your model (eg db.py)::
        # define the auth_table before call to auth.define_tables()
        auth_table = db.define_table(
            auth.settings.table_user_name,
            Field('first_name', length=128, default=""),
            Field('last_name', length=128, default=""),
            Field('username', length=128, default="", unique=True),
            Field('password', 'password', length=256,
                  readable=False, label='Password'),
            Field('registration_key', length=128, default= "",
                  writable=False, readable=False))
        auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username)
        .
        .
        .
        auth.define_tables()
        .
        .
        .
        CLIENT_ID=\"<put your fb application id here>\"
        CLIENT_SECRET=\"<put your fb application secret here>\"
        AUTH_URL="..."
        TOKEN_URL="..."
        ACCESS_TOKEN_URL="..."
        from gluon.contrib.login_methods.oauth10a_account import OAuthAccount
        auth.settings.login_form=OAuthAccount(globals(),CLIENT_ID,CLIENT_SECRET, AUTH_URL, TOKEN_URL, ACCESS_TOKEN_URL)
    """

    def __redirect_uri(self, next=None):
        """Build the uri used by the authenticating server to redirect
        the client back to the page originating the auth request.
        Appends the _next action to the generated url so the flows continues.
        """
        r = self.request
        # NOTE(review): this reads http_x_forwarded_for (a client-IP
        # header); http_x_forwarded_host looks like what was intended
        # for proxied deployments — confirm before changing.
        http_host = r.env.http_x_forwarded_for
        if not http_host:
            http_host = r.env.http_host
        url_scheme = r.env.wsgi_url_scheme
        if next:
            path_info = next
        else:
            path_info = r.env.path_info
        uri = '%s://%s%s' % (url_scheme, http_host, path_info)
        if r.get_vars and not next:
            # keep the original query string so state survives the round trip
            uri += '?' + urlencode(r.get_vars)
        return uri

    def accessToken(self):
        """Return the access token generated by the authenticating server.
        If token is already in the session that one will be used.
        Otherwise the token is fetched from the auth server.
        """
        if self.session.access_token:
            # return the token (TODO: does it expire?)
            return self.session.access_token
        if self.session.request_token:
            # Exchange the request token with an authorization token.
            token = self.session.request_token
            self.session.request_token = None
            # Build an authorized client
            # OAuth1.0a: the verifier must be attached to the token!
            token.set_verifier(self.request.vars.oauth_verifier)
            client = oauth.Client(self.consumer, token)
            resp, content = client.request(self.access_token_url, "POST")
            if str(resp['status']) != '200':
                # exchange failed: drop state and bounce through logout
                self.session.request_token = None
                self.globals['redirect'](self.globals['URL'](f='user',args='logout'))
            self.session.access_token = oauth.Token.from_string(content)
            return self.session.access_token
        self.session.access_token = None
        return None

    def __init__(self, g, client_id, client_secret, auth_url, token_url, access_token_url):
        # g is the controller's globals() dict; request/session/redirect/
        # URL/HTTP are looked up through it instead of being imported.
        self.globals = g
        self.client_id = client_id
        self.client_secret = client_secret
        self.code = None
        self.request = g['request']
        self.session = g['session']
        self.auth_url = auth_url
        self.token_url = token_url
        self.access_token_url = access_token_url
        # consumer init
        self.consumer = oauth.Consumer(self.client_id, self.client_secret)

    def login_url(self, next="/"):
        # called by web2py Auth; triggers the OAuth dance when needed
        self.__oauth_login(next)
        return next

    def logout_url(self, next="/"):
        # forget all OAuth state so the next login restarts the dance
        self.session.request_token = None
        self.session.access_token = None
        return next

    def get_user(self):
        '''Get user data.
        Since OAuth does not specify what a user
        is, this function must be implemented for the specific
        provider.
        '''
        raise NotImplementedError, "Must override get_user()"

    def __oauth_login(self, next):
        '''This method redirects the user to the authenticating form
        on authentication server if the authentication code
        and the authentication token are not available to the
        application yet.
        Once the authentication code has been received this method is
        called to set the access token into the session by calling
        accessToken()
        '''
        if not self.accessToken():
            # setup the client
            client = oauth.Client(self.consumer, None)
            # Get a request token.
            # oauth_callback *is REQUIRED* for OAuth1.0a
            # putting it in the body seems to work.
            callback_url = self.__redirect_uri(next)
            data = urlencode(dict(oauth_callback=callback_url))
            resp, content = client.request(self.token_url, "POST", body=data)
            if resp['status'] != '200':
                # request-token leg failed: reset and bounce through logout
                self.session.request_token = None
                self.globals['redirect'](self.globals['URL'](f='user',args='logout'))
            # Store the request token in session.
            request_token = self.session.request_token = oauth.Token.from_string(content)
            # Redirect the user to the authentication URL and pass the callback url.
            data = urlencode(dict(oauth_token=request_token.key,
                                  oauth_callback=callback_url))
            auth_request_url = self.auth_url + '?' + data
            HTTP = self.globals['HTTP']
            raise HTTP(307,
                       "You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
                       Location=auth_request_url)
        return None
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu> and
Robin B <robi123@gmail.com>.
License: GPL v2
"""
__all__ = ['MEMDB', 'Field']
import re
import sys
import os
import types
import datetime
import thread
import cStringIO
import csv
import copy
import gluon.validators as validators
from gluon.storage import Storage
from gluon import SQLTABLE
import random
# Field-type translation table for the 'memcache' dialect: each web2py
# field type maps to the Python callable used to cast stored values
# (None where no cast applies), plus a few SQL fragments kept for
# interface parity with the other DAL dialects.
SQL_DIALECTS = {'memcache': {
    'boolean': bool,
    'string': unicode,
    'text': unicode,
    'password': unicode,
    'blob': unicode,
    'upload': unicode,
    'integer': long,
    'double': float,
    'date': datetime.date,
    'time': datetime.time,
    'datetime': datetime.datetime,
    'id': int,
    'reference': int,
    'lower': None,
    'upper': None,
    'is null': 'IS NULL',
    'is not null': 'IS NOT NULL',
    'extract': None,
    'left join': None,
    }}
def cleanup(text):
    """Validate *text* as a table/field name and return it unchanged.

    Raises SyntaxError when *text* contains anything outside
    ``[0-9a-zA-Z_]``.  (Uses the call form of ``raise`` so the code is
    valid under both Python 2 and 3.)
    """
    if re.compile('[^0-9a-zA-Z_]').findall(text):
        raise SyntaxError(
            'Can\'t cleanup \'%s\': only [0-9a-zA-Z_] allowed in table and field names' % text)
    return text
def assert_filter_fields(*fields):
    """Reject filtering/sorting on 'text' or 'blob' fields.

    The AppEngine-style backend this module mimics cannot index those
    types, so any Field/Expression of such a type raises SyntaxError.
    """
    for field in fields:
        if isinstance(field, (Field, Expression)) and field.type \
                in ['text', 'blob']:
            raise SyntaxError('AppEngine does not index by: %s'
                              % field.type)
def dateobj_to_datetime(object):
    """Promote a date or time value to a full datetime (AppEngine style).

    A ``date`` becomes midnight of that day; a ``time`` is anchored to
    1970-01-01.  Anything else is returned untouched.  Note that a
    ``datetime`` is also an instance of ``date`` and is therefore
    truncated to midnight.
    """
    value = object
    if isinstance(value, datetime.date):
        value = datetime.datetime(value.year, value.month, value.day)
    if isinstance(value, datetime.time):
        value = datetime.datetime(1970, 1, 1,
                                  value.hour, value.minute,
                                  value.second, value.microsecond)
    return value
def sqlhtml_validators(field_type, length):
    """Default SQLFORM validator(s) for a given field type.

    Only the first nine characters of *field_type* are significant
    (so 'reference sometable' matches 'reference'); unknown types fall
    back to an empty validator list.
    """
    defaults = {
        'boolean': [],
        'string': validators.IS_LENGTH(length),
        'text': [],
        'password': validators.IS_LENGTH(length),
        'blob': [],
        'upload': [],
        'double': validators.IS_FLOAT_IN_RANGE(-1e100, 1e100),
        'integer': validators.IS_INT_IN_RANGE(-1e100, 1e100),
        'date': validators.IS_DATE(),
        'time': validators.IS_TIME(),
        'datetime': validators.IS_DATETIME(),
        'reference': validators.IS_INT_IN_RANGE(0, 1e100),
        }
    return defaults.get(field_type[:9], [])
class DALStorage(dict):
    """
    a dictionary that let you do d['a'] as well as d.a
    """

    def __getattr__(self, key):
        # missing keys raise KeyError (not AttributeError), mirroring
        # plain item access
        return self[key]

    def __setattr__(self, key, value):
        # attributes are write-once: redefining an existing key is an
        # error (uses the call form of raise, valid in py2 and py3)
        if key in self:
            raise SyntaxError(
                'Object \'%s\'exists and cannot be redefined' % key)
        self[key] = value

    def __repr__(self):
        return '<DALStorage ' + dict.__repr__(self) + '>'
class SQLCallableList(list):
    """A list that also acts as a zero-argument callable returning a
    shallow copy of itself (so ``db.tables`` and ``db.tables()`` both
    work)."""

    def __call__(self):
        return copy.copy(self)
class MEMDB(DALStorage):
    """
    an instance of this class represents a database connection
    Example::
        db=MEMDB(Client())
        db.define_table('tablename',Field('fieldname1'),
                        Field('fieldname2'))
    """

    def __init__(self, client):
        """Bind this connection to a memcache *client*."""
        self._dbname = 'memdb'
        self['_lastsql'] = ''
        self.tables = SQLCallableList()
        self._translator = SQL_DIALECTS['memcache']
        self.client = client

    def define_table(self, tablename, *fields, **args):
        """Register a new Table with the given *fields* and return it.

        Raises SyntaxError for invalid or duplicate table names.
        (Raises use the call form, valid in py2 and py3.)
        """
        tablename = cleanup(tablename)
        if tablename in dir(self) or tablename[0] == '_':
            raise SyntaxError('invalid table name: %s' % tablename)
        if not tablename in self.tables:
            self.tables.append(tablename)
        else:
            raise SyntaxError('table already defined: %s' % tablename)
        t = self[tablename] = Table(self, tablename, *fields)
        t._create()
        return t

    def __call__(self, where=''):
        """Return a Set of records matching *where* (a Query)."""
        return Set(self, where)
class SQLALL(object):
    """Marker object wrapping a table to mean 'every field of this
    table' in a select."""

    def __init__(self, table):
        self.table = table
class Table(DALStorage):
    """
    an instance of this class represents a database table
    Example::
        db=MEMDB(Client())
        db.define_table('users',Field('name'))
        db.users.insert(name='me')
    """

    def __init__(self, db, tablename, *fields):
        """Attach *fields* to this table (an implicit 'id' field is
        prepended) and back-link each field to the table and db."""
        self._db = db
        self._tablename = tablename
        self.fields = SQLCallableList()
        self._referenced_by = []
        fields = list(fields)
        fields.insert(0, Field('id', 'id'))
        for field in fields:
            self.fields.append(field.name)
            self[field.name] = field
            field._tablename = self._tablename
            field._table = self
            field._db = self._db
        self.ALL = SQLALL(self)

    def _create(self):
        """Validate field types and resolve 'reference' fields; the
        shared memcache client becomes this table's storage object.

        Dead locals of the original (``fields``, ``myfields``, the
        unused ``attr``) have been removed; behavior is unchanged.
        """
        for k in self.fields:
            field = self[k]
            if field.type[:2] == 'id':
                continue
            if field.type[:9] == 'reference':
                referenced = field.type[10:].strip()
                if not referenced:
                    raise SyntaxError(
                        'Table %s: reference \'%s\' to nothing!'
                        % (self._tablename, k))
                if not referenced in self._db:
                    raise SyntaxError(
                        'Table: table %s does not exist' % referenced)
                referee = self._db[referenced]
                # NOTE(review): result unused; kept because the
                # translator call may raise for malformed targets —
                # confirm before deleting.
                ftype = self._db._translator[field.type[:9]](
                    self._db[referenced]._tableobj)
                if self._tablename in referee.fields:  # ## THIS IS OK
                    raise SyntaxError(
                        'Field: table \'%s\' has same name as a field '
                        'in referenced table \'%s\''
                        % (self._tablename, referenced))
                self._db[referenced]._referenced_by.append(
                    (self._tablename, field.name))
            elif not field.type in self._db._translator \
                    or not self._db._translator[field.type]:
                raise SyntaxError(
                    'Field: unknown field type %s' % field.type)
        self._tableobj = self._db.client
        return None

    def create(self):
        # nothing to do, here for backward compatibility
        pass

    def drop(self):
        # "dropping" just deletes every record; kept for backward compatibility
        self._db(self.id > 0).delete()

    def insert(self, **fields):
        """Store a new record and return its id, or None on failure."""
        id = self._create_id()
        if self.update(id, **fields):
            return long(id)
        else:
            return None

    def get(self, id):
        """Fetch record *id* as a Storage, or None if absent."""
        val = self._tableobj.get(self._id_to_key(id))
        if val:
            return Storage(val)
        else:
            return None

    def update(self, id, **fields):
        """Write *fields* for record *id*, filling in column defaults
        for omitted columns and serializing values via obj_represent."""
        # BUGFIX: iterate the table's column list (self.fields), not the
        # keyword dict — with the old `for field in fields` loop, column
        # defaults were never applied to omitted fields.
        for field in self.fields:
            if not field in fields and self[field].default != None:
                fields[field] = self[field].default
            if field in fields:
                fields[field] = obj_represent(fields[field],
                                              self[field].type, self._db)
        return self._tableobj.set(self._id_to_key(id), fields)

    def delete(self, id):
        return self._tableobj.delete(self._id_to_key(id))

    def _shard_key(self, shard):
        return self._id_to_key('s/%s' % shard)

    def _id_to_key(self, id):
        # namespaced memcache key for one record of this table
        return '__memdb__/t/%s/k/%s' % (self._tablename, str(id))

    def _create_id(self):
        # ids are drawn from one of 90 random shard counters; the final
        # id is the concatenation <shard><counter>
        shard = random.randint(10, 99)
        shard_id = self._shard_key(shard)
        id = self._tableobj.incr(shard_id)
        if not id:
            # counter did not exist yet: initialize it to zero
            if self._tableobj.set(shard_id, '0'):
                id = 0
            else:
                raise Exception('cannot set memcache')
        return long(str(shard) + str(id))

    def __str__(self):
        return self._tablename
class Expression(object):
    """A symbolic reference to a column or derived value.

    Comparison operators build Query objects; ``|`` and ``~`` build
    sort specifications; arithmetic operators build new (float-typed)
    expressions.
    """

    def __init__(self, name, type='string', db=None):
        self.name = name
        self.type = type
        self._db = db

    def __str__(self):
        return self.name

    def __or__(self, other):
        # used in sortby: 'a|b' sorts by a, then b
        assert_filter_fields(self, other)
        return Expression(self.name + '|' + other.name, None, None)

    def __invert__(self):
        # used in sortby: '-a' means descending order
        assert_filter_fields(self)
        return Expression('-' + self.name, self.type, None)

    # comparison operators — each yields a Query for use in db(...)
    def __eq__(self, value):
        return Query(self, '=', value)

    def __ne__(self, value):
        return Query(self, '!=', value)

    def __lt__(self, value):
        return Query(self, '<', value)

    def __le__(self, value):
        return Query(self, '<=', value)

    def __gt__(self, value):
        return Query(self, '>', value)

    def __ge__(self, value):
        return Query(self, '>=', value)

    # def like(self,value): return Query(self,' LIKE ',value)
    # def belongs(self,value): return Query(self,' IN ',value)

    # arithmetic operators — usable in both Query and sortby
    def __add__(self, other):
        return Expression('%s+%s' % (self, other), 'float', None)

    def __sub__(self, other):
        return Expression('%s-%s' % (self, other), 'float', None)

    def __mul__(self, other):
        return Expression('%s*%s' % (self, other), 'float', None)

    def __div__(self, other):
        return Expression('%s/%s' % (self, other), 'float', None)
class Field(Expression):
    """
    an instance of this class represents a database field
    example::
        a = Field(name, 'string', length=32, required=False,
                  default=None, requires=IS_NOT_EMPTY(), notnull=False,
                  unique=False, uploadfield=True)
    to be used as argument of GQLDB.define_table
    allowed field types:
        string, boolean, integer, double, text, blob,
        date, time, datetime, upload, password
    strings must have a length or 512 by default.
    fields should have a default or they will be required in SQLFORMs
    the requires argument are used to validate the field input in SQLFORMs
    """

    def __init__(self, fieldname, type='string', length=None,
                 default=None, required=False,
                 requires=sqlhtml_validators, ondelete='CASCADE',
                 notnull=False, unique=False, uploadfield=True):
        self.name = cleanup(fieldname)
        # names clashing with Table attributes (or starting with '_')
        # would break attribute-style access on the table
        if fieldname in dir(Table) or fieldname[0] == '_':
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        if isinstance(type, Table):
            # a Table passed as type means a reference to that table
            type = 'reference ' + type._tablename
        if not length:
            length = 512
        self.type = type  # 'string', 'integer'
        self.length = length  # the length of the string
        self.default = default  # default value for field
        self.required = required  # is this field required
        self.ondelete = ondelete.upper()  # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        if requires == sqlhtml_validators:
            # sentinel default: derive validators from the field type
            requires = sqlhtml_validators(type, length)
        elif requires is None:
            requires = []
        self.requires = requires  # list of validators

    def formatter(self, value):
        """Run the validators' formatters on *value* in reverse order."""
        if value is None or not self.requires:
            return value
        if not isinstance(self.requires, (list, tuple)):
            requires = [self.requires]
        else:
            requires = copy.copy(self.requires)
        requires.reverse()
        for item in requires:
            if hasattr(item, 'formatter'):
                value = item.formatter(value)
        return value

    def __str__(self):
        return '%s.%s' % (self._tablename, self.name)
MEMDB.Field = Field  # ## required by gluon/globals.py session.connect
def obj_represent(object, fieldtype, db):
    """Deserialize a raw stored value into the Python object matching
    *fieldtype* ('date', 'time', 'datetime', 'integer'); other types
    and None pass through unchanged."""
    if object != None:
        if fieldtype == 'date' and not isinstance(object, datetime.date):
            year, month, day = [int(p) for p in
                                str(object).strip().split('-')]
            object = datetime.date(year, month, day)
        elif fieldtype == 'time' and not isinstance(object, datetime.time):
            parts = [int(p) for p in str(object).strip().split(':')[:3]]
            if len(parts) == 3:
                hour, minute, second = parts
            else:
                # pad a missing seconds component with zero
                hour, minute, second = parts + [0]
            object = datetime.time(hour, minute, second)
        elif fieldtype == 'datetime' and not isinstance(object,
                                                        datetime.datetime):
            year, month, day = [int(p) for p in
                                str(object)[:10].strip().split('-')]
            parts = [int(p) for p in
                     str(object)[11:].strip().split(':')[:3]]
            if len(parts) == 3:
                hour, minute, second = parts
            else:
                hour, minute, second = parts + [0]
            object = datetime.datetime(year, month, day,
                                       hour, minute, second)
        elif fieldtype == 'integer' and not isinstance(object, long):
            object = long(object)
    return object
class QueryException:
    """Lightweight record of a primary-key lookup; whatever keywords it
    is built with (e.g. tablename, id) become instance attributes."""

    def __init__(self, **attributes):
        self.__dict__ = attributes
class Query(object):
    """
    A query object necessary to define a set.
    It can be stored or can be passed to GQLDB.__call__() to obtain a Set
    Example:
        query=db.users.name=='Max'
        set=db(query)
        records=set.select()
    """

    def __init__(self, left, op=None, right=None):
        """Build a query from ``left <op> right``.

        Only equality against a table's ``id`` field is supported by
        this backend; anything else raises SyntaxError.
        """
        # BUGFIX: keep the left operand so __str__ no longer raises
        # AttributeError on a successfully constructed query.
        self.left = left
        if isinstance(right, (Field, Expression)):
            raise SyntaxError(
                'Query: right side of filter must be a value or entity')
        if isinstance(left, Field) and left.name == 'id':
            if op == '=':
                # record the primary-key lookup; Set recognizes .get_one
                self.get_one = QueryException(tablename=left._tablename,
                                              id=long(right))
                return
            else:
                raise SyntaxError('only equality by id is supported')
        raise SyntaxError('not supported')

    def __str__(self):
        return str(self.left)
class Set(object):
    """
    As Set represents a set of records in the database,
    the records are identified by the where=Query(...) object.
    normally the Set is generated by GQLDB.__call__(Query(...))
    given a set, for example
        set=db(db.users.name=='Max')
    you can:
        set.update(db.users.name='Massimo')
        set.delete() # all elements in the set
        set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10))
    and take subsets:
        subset=set(db.users.id<5)
    """

    def __init__(self, db, where=None):
        # Only primary-key lookups (where.get_one) are actually
        # supported by this memcache backend; the other branches mirror
        # the gql DAL interface.
        self._db = db
        self._tables = []
        self.filters = []
        if hasattr(where, 'get_all'):
            self.where = where
            self._tables.insert(0, where.get_all)
        elif hasattr(where, 'get_one') and isinstance(where.get_one,
                                                      QueryException):
            self.where = where.get_one
        else:
            # find out which tables are involved
            if isinstance(where, Query):
                self.filters = where.left
            self.where = where
            self._tables = [field._tablename for (field, op, val) in
                            self.filters]

    def __call__(self, where):
        # refine this set with an additional condition
        if isinstance(self.where, QueryException) or isinstance(where,
                QueryException):
            raise SyntaxError, \
                'neither self.where nor where can be a QueryException instance'
        if self.where:
            return Set(self._db, self.where & where)
        else:
            return Set(self._db, where)

    def _get_table_or_raise(self):
        # exactly one table must be involved (no joins on this backend)
        tablenames = list(set(self._tables))  # unique
        if len(tablenames) < 1:
            raise SyntaxError, 'Set: no tables selected'
        if len(tablenames) > 1:
            raise SyntaxError, 'Set: no join in appengine'
        return self._db[tablenames[0]]._tableobj

    def _getitem_exception(self):
        # resolve the primary-key lookup recorded in self.where
        (tablename, id) = (self.where.tablename, self.where.id)
        fields = self._db[tablename].fields
        self.colnames = ['%s.%s' % (tablename, t) for t in fields]
        item = self._db[tablename].get(id)
        return (item, fields, tablename, id)

    def _select_except(self):
        # build a one-row Rows object for the looked-up record
        (item, fields, tablename, id) = self._getitem_exception()
        if not item:
            # NOTE(review): returns a bare [] although select()'s
            # docstring promises a Rows object — confirm callers cope.
            return []
        new_item = []
        for t in fields:
            if t == 'id':
                new_item.append(long(id))
            else:
                new_item.append(getattr(item, t))
        r = [new_item]
        return Rows(self._db, r, *self.colnames)

    def select(self, *fields, **attributes):
        """
        Always returns a Rows object, even if it may be empty
        """
        if isinstance(self.where, QueryException):
            return self._select_except()
        else:
            raise SyntaxError, 'select arguments not supported'

    def count(self):
        return len(self.select())

    def delete(self):
        if isinstance(self.where, QueryException):
            (item, fields, tablename, id) = self._getitem_exception()
            if not item:
                return
            self._db[tablename].delete(id)
        else:
            raise Exception, 'deletion not implemented'

    def update(self, **update_fields):
        if isinstance(self.where, QueryException):
            (item, fields, tablename, id) = self._getitem_exception()
            if not item:
                return
            # merge the new values into the fetched record, then rewrite it
            for (key, value) in update_fields.items():
                setattr(item, key, value)
            self._db[tablename].update(id, **item)
        else:
            raise Exception, 'update not implemented'
def update_record(t, s, id, a):
    """Apply the new values *a* to both the in-memory row storage *t*
    and the backing table *s* for record *id*.

    Used by Rows.__getitem__ to implement each row's ``update_record``
    callable (the lambda there binds t=row storage, s=table, id=row id).
    """
    item = s.get(id)
    for (key, value) in a.items():
        t[key] = value
        setattr(item, key, value)
    s.update(id, **item)
class Rows(object):
    """
    A wrapper for the return value of a select. It basically represents a table.
    It has an iterator and each row is represented as a dictionary.
    """

    # ## this class still needs some work to care for ID/OID

    def __init__(self, db, response, *colnames):
        # db: owning MEMDB; response: list of value-lists, one per
        # record; colnames: 'table.field' labels, one per column.
        self._db = db
        self.colnames = colnames
        self.response = response

    def __len__(self):
        return len(self.response)

    def __getitem__(self, i):
        """Materialize record *i* as nested DALStorage objects keyed by
        table name, coercing stored values back to their field types
        and attaching an ``update_record`` callable plus back-reference
        Sets to each row."""
        if i >= len(self.response) or i < 0:
            raise SyntaxError, 'Rows: no such row: %i' % i
        if len(self.response[0]) != len(self.colnames):
            raise SyntaxError, 'Rows: internal error'
        row = DALStorage()
        for j in xrange(len(self.colnames)):
            value = self.response[i][j]
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            packed = self.colnames[j].split('.')
            try:
                (tablename, fieldname) = packed
            except:
                # column label is not 'table.field': stash it under _extra
                if not '_extra' in row:
                    row['_extra'] = DALStorage()
                row['_extra'][self.colnames[j]] = value
                continue
            table = self._db[tablename]
            field = table[fieldname]
            if not tablename in row:
                row[tablename] = DALStorage()
            if field.type[:9] == 'reference':
                # references are kept as raw ids
                referee = field.type[10:].strip()
                rid = value
                row[tablename][fieldname] = rid
            elif field.type == 'boolean' and value != None:
                # row[tablename][fieldname]=Set(self._db[referee].id==rid)
                if value == True or value == 'T':
                    row[tablename][fieldname] = True
                else:
                    row[tablename][fieldname] = False
            elif field.type == 'date' and value != None\
                    and not isinstance(value, datetime.date):
                (y, m, d) = [int(x) for x in
                             str(value).strip().split('-')]
                row[tablename][fieldname] = datetime.date(y, m, d)
            elif field.type == 'time' and value != None\
                    and not isinstance(value, datetime.time):
                time_items = [int(x) for x in
                              str(value).strip().split(':')[:3]]
                if len(time_items) == 3:
                    (h, mi, s) = time_items
                else:
                    (h, mi, s) = time_items + [0]
                row[tablename][fieldname] = datetime.time(h, mi, s)
            elif field.type == 'datetime' and value != None\
                    and not isinstance(value, datetime.datetime):
                (y, m, d) = [int(x) for x in
                             str(value)[:10].strip().split('-')]
                time_items = [int(x) for x in
                              str(value)[11:].strip().split(':')[:3]]
                if len(time_items) == 3:
                    (h, mi, s) = time_items
                else:
                    (h, mi, s) = time_items + [0]
                row[tablename][fieldname] = datetime.datetime(
                    y,
                    m,
                    d,
                    h,
                    mi,
                    s,
                )
            else:
                row[tablename][fieldname] = value
            if fieldname == 'id':
                # bind an update_record helper and back-reference Sets;
                # lambda defaults freeze the current row/table/id
                id = row[tablename].id
                row[tablename].update_record = lambda t = row[tablename], \
                    s = self._db[tablename], id = id, **a: update_record(t,
                        s, id, a)
                for (referee_table, referee_name) in \
                        table._referenced_by:
                    s = self._db[referee_table][referee_name]
                    row[tablename][referee_table] = Set(self._db, s
                                                        == id)
        if len(row.keys()) == 1:
            # single-table result: unwrap the table layer
            return row[row.keys()[0]]
        return row

    def __iter__(self):
        """
        iterator over records
        """
        for i in xrange(len(self)):
            yield self[i]

    def __str__(self):
        """
        serializes the table into a csv file
        """
        s = cStringIO.StringIO()
        writer = csv.writer(s)
        writer.writerow(self.colnames)
        c = len(self.colnames)
        for i in xrange(len(self)):
            row = [self.response[i][j] for j in xrange(c)]
            for k in xrange(c):
                if isinstance(row[k], unicode):
                    row[k] = row[k].encode('utf-8')
            writer.writerow(row)
        return s.getvalue()

    def xml(self):
        """
        serializes the table using SQLTABLE (if present)
        """
        return SQLTABLE(self).xml()
def test_all():
    """
    How to run from web2py dir:
    export PYTHONPATH=.:YOUR_PLATFORMS_APPENGINE_PATH
    python gluon/contrib/memdb.py
    Setup the UTC timezone and database stubs
    >>> import os
    >>> os.environ['TZ'] = 'UTC'
    >>> import time
    >>> if hasattr(time, 'tzset'):
    ...     time.tzset()
    >>>
    >>> from google.appengine.api import apiproxy_stub_map
    >>> from google.appengine.api.memcache import memcache_stub
    >>> apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
    >>> apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub())
    Create a table with all possible field types
    >>> from google.appengine.api.memcache import Client
    >>> db=MEMDB(Client())
    >>> tmp=db.define_table('users', Field('stringf','string',length=32,required=True), Field('booleanf','boolean',default=False), Field('passwordf','password',notnull=True), Field('blobf','blob'), Field('uploadf','upload'), Field('integerf','integer',unique=True), Field('doublef','double',unique=True,notnull=True), Field('datef','date',default=datetime.date.today()), Field('timef','time'), Field('datetimef','datetime'), migrate='test_user.table')
    Insert a field
    >>> user_id = db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A', uploadf=None, integerf=5,doublef=3.14, datef=datetime.date(2001,1,1), timef=datetime.time(12,30,15), datetimef=datetime.datetime(2002,2,2,12,30,15))
    >>> user_id != None
    True
    Select all
    # >>> all = db().select(db.users.ALL)
    Drop the table
    # >>> db.users.drop()
    Select many entities
    >>> tmp = db.define_table(\"posts\", Field('body','text'), Field('total','integer'), Field('created_at','datetime'))
    >>> many = 20 #2010 # more than 1000 single fetch limit (it can be slow)
    >>> few = 5
    >>> most = many - few
    >>> 0 < few < most < many
    True
    >>> for i in range(many):
    ...     f=db.posts.insert(body='', total=i,created_at=datetime.datetime(2008, 7, 6, 14, 15, 42, i))
    >>>
    # test timezones
    >>> class TZOffset(datetime.tzinfo):
    ...     def __init__(self,offset=0):
    ...         self.offset = offset
    ...     def utcoffset(self, dt): return datetime.timedelta(hours=self.offset)
    ...     def dst(self, dt): return datetime.timedelta(0)
    ...     def tzname(self, dt): return 'UTC' + str(self.offset)
    ...
    >>> SERVER_OFFSET = -8
    >>>
    >>> stamp = datetime.datetime(2008, 7, 6, 14, 15, 42, 828201)
    >>> post_id = db.posts.insert(created_at=stamp,body='body1')
    >>> naive_stamp = db(db.posts.id==post_id).select()[0].created_at
    >>> utc_stamp=naive_stamp.replace(tzinfo=TZOffset())
    >>> server_stamp = utc_stamp.astimezone(TZOffset(SERVER_OFFSET))
    >>> stamp == naive_stamp
    True
    >>> utc_stamp == server_stamp
    True
    >>> rows = db(db.posts.id==post_id).select()
    >>> len(rows) == 1
    True
    >>> rows[0].body == 'body1'
    True
    >>> db(db.posts.id==post_id).delete()
    >>> rows = db(db.posts.id==post_id).select()
    >>> len(rows) == 0
    True
    >>> id = db.posts.insert(total='0') # coerce str to integer
    >>> rows = db(db.posts.id==id).select()
    >>> len(rows) == 1
    True
    >>> rows[0].total == 0
    True
    Examples of insert, select, update, delete
    >>> tmp=db.define_table('person', Field('name'), Field('birth','date'), migrate='test_person.table')
    >>> marco_id=db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id=db.person.insert(name=\"Massimo\",birth='1971-12-21')
    >>> me=db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db(db.person.id==person_id).update(name='massimo') # test update
    >>> me = db(db.person.id==person_id).select()[0]
    >>> me.name
    'massimo'
    >>> str(me.birth)
    '1971-12-21'
    # resave date to ensure it comes back the same
    >>> me=db(db.person.id==person_id).update(birth=me.birth) # test update
    >>> me = db(db.person.id==person_id).select()[0]
    >>> me.birth
    datetime.date(1971, 12, 21)
    >>> db(db.person.id==marco_id).delete() # test delete
    >>> len(db(db.person.id==marco_id).select())
    0
    Update a single record
    >>> me.update_record(name=\"Max\")
    >>> me.name
    'Max'
    >>> me = db(db.person.id == person_id).select()[0]
    >>> me.name
    'Max'
    """
    # This function exists only for its doctests; doctest.testmod() at the
    # bottom of the module collects and executes the examples above.
    # NOTE(review): the examples require the Google App Engine SDK stubs --
    # they cannot run in a plain Python environment.
# Backwards-compatibility aliases: older web2py code imported these
# SQL*-prefixed names; they now point at the current class names.
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = DALStorage

if __name__ == '__main__':
    # Run the doctest suite embedded in test_all() when executed as a script.
    import doctest
    doctest.testmod()
| Python |
# Only Python 2.6 and up, because of NamedTuple.
import time
from collections import namedtuple

# A single timing sample: a human-readable tag plus a clock reading (seconds).
Score = namedtuple('Score', ['tag', 'stamp'])


class TimeCollector(object):
    """Collect named time stamps and report the cost of each interval.

    Typical use: create a collector, call addStamp() after each unit of
    work, then getReportText() / getReportItems() for a per-interval
    breakdown (milliseconds and percentage of total time).
    """

    def __init__(self):
        '''The first time stamp is created here'''
        self.scores = [Score(tag='start', stamp=self._now())]

    def _now(self):
        '''Return a high-resolution clock reading in seconds.

        Bug fix: time.clock() was removed in Python 3.8; use
        time.perf_counter() when available and fall back to time.clock()
        on old interpreters (Python 2.x) that lack it.
        '''
        if hasattr(time, 'perf_counter'):
            return time.perf_counter()
        return time.clock()

    def addStamp(self, description):
        '''Adds a new time stamp, with a description.'''
        self.scores.append(Score(tag=description, stamp=self._now()))

    def _stampDelta(self, index1, index2):
        '''Private utility: seconds elapsed between two stored stamps.'''
        return self.scores[index1].stamp - self.scores[index2].stamp

    def getReportItems(self, orderByCost=True):
        '''Returns a list of dicts. Each dict has
            start (ms),
            end (ms),
            delta (ms),
            perc (%),
            tag (str)

        Bug fix: this method previously appended a permanent 'finish'
        stamp to self.scores on every call, so calling it (or any report
        method) repeatedly accumulated bogus rows.  The finish stamp is
        now transient and self.scores is left untouched.
        '''
        stamps = self.scores + [Score(tag='finish', stamp=self._now())]
        total_time = stamps[-1].stamp - stamps[0].stamp
        data = []
        for i in range(1, len(stamps)):
            delta = stamps[i].stamp - stamps[i - 1].stamp
            # Guard against a zero total (everything ran faster than the
            # clock resolution) to avoid ZeroDivisionError.
            if abs(total_time) < 1e-6:
                perc = 0
            else:
                perc = delta / total_time * 100
            data.append(
                dict(
                    start=(stamps[i - 1].stamp - stamps[0].stamp) * 1000,
                    end=(stamps[i].stamp - stamps[0].stamp) * 1000,
                    delta=delta * 1000,
                    perc=perc,
                    tag=stamps[i].tag,
                )
            )
        if orderByCost:
            data.sort(key=lambda item: item['perc'], reverse=True)
        return data

    def getReportLines(self, orderByCost=True):
        '''Produces a report of logged time-stamps as a list of strings.
        if orderByCost is False, then the order of the stamps is
        chronological.'''
        data = self.getReportItems(orderByCost)
        headerTemplate = '%10s | %10s | %10s | %11s | %-30s'
        headerData = ('Start(ms)', 'End(ms)', 'Delta(ms)', 'Time Cost',
                      'Description')
        bodyTemplate = '%(start)10.0f | %(end)10.0f | %(delta)10.0f |' \
            + ' %(perc)10.0f%% | %(tag)-30s'
        return [headerTemplate % headerData] + [bodyTemplate % d for d in data]

    def getReportText(self, **kwargs):
        '''Joins the report lines into one printable string.'''
        return '\n'.join(self.getReportLines(**kwargs))

    def restart(self):
        '''Discard all stamps and start timing afresh.'''
        self.scores = [Score(tag='start', stamp=self._now())]
if __name__ == '__main__':
    # Self-demonstration: time a few throwaway list builds and print reports.
    print('')
    print('Testing:')
    print('')
    # First create the collector
    t = TimeCollector()
    x = [i for i in range(1000)]
    # Every time some work gets done, add a stamp
    t.addStamp('Initialization Section')
    x = [i for i in range(10000)]
    t.addStamp('A big loop')
    x = [i for i in range(100000)]
    t.addStamp('calling builder function')
    # Finally, obtain the results
    print('')
    print(t.getReportText())
    # If you want to measure something else in the same scope, you can
    # restart the collector.
    t.restart()
    x = [i for i in range(1000000)]
    t.addStamp('Part 2')
    x = [i for i in range(1000000)]
    t.addStamp('Cleanup')
    # And once again report results
    print('')
    print(t.getReportText())
    t.restart()
    for y in range(1, 200, 20):
        # Bug fix: `range(10000) * y` raises TypeError on Python 3 (range
        # objects do not support multiplication); build a list first.
        x = [i for i in list(range(10000)) * y]
        t.addStamp('Iteration when y = ' + str(y))
    print('')
    # You can turn off ordering of results
    print(t.getReportText(orderByCost=False))
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""`cssmin` - A Python port of the YUI CSS compressor."""
"""
Home page: https://github.com/zacharyvoase/cssmin
License: BSD: https://github.com/zacharyvoase/cssmin/blob/master/LICENSE
Original author: Zachary Voase
Modified for inclusion into web2py by: Ross Peoples <ross.peoples@gmail.com>
"""
from StringIO import StringIO # The pure-Python StringIO supports unicode.
import re
__version__ = '0.1.4'
def remove_comments(css):
    """Strip CSS comment blocks.

    Comments of the form `/*!...*/` are preserved verbatim, and the pair
    of comments that make up the IE5/Mac backslash hack (`/*\\*/ ... /**/`)
    is left untouched.
    """
    in_iemac_hack = False
    keep = False
    start = css.find("/*")
    while start >= 0:
        # Slice (not index) so a trailing "/*" cannot raise IndexError.
        keep = css[start + 2:start + 3] == "!"
        end = css.find("*/", start + 2)
        if end < 0:
            # Unterminated comment: drop the tail unless it must be kept.
            if not keep:
                css = css[:start]
                break
        elif end >= (start + 2):
            if css[end - 1] == "\\":
                # Opening half of an IE-Mac hack: keep it and its partner.
                start = end + 2
                in_iemac_hack = True
            elif in_iemac_hack:
                # Closing half of the hack: also kept.
                start = end + 2
                in_iemac_hack = False
            elif not keep:
                css = css[:start] + css[end + 2:]
            else:
                start = end + 2
        start = css.find("/*", start)
    return css
def remove_unnecessary_whitespace(css):
    """Remove whitespace that has no effect on how the CSS renders."""

    def protect_descendant_colons(source):
        """
        Prevents 'p :link' from becoming 'p:link'.

        Rewrites each ':' inside a selector such as 'p :link' to the
        marker ___PSEUDOCLASSCOLON___; it is translated back below,
        after the surrounding whitespace has been stripped.
        """
        selector_re = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)")
        hit = selector_re.search(source)
        while hit:
            protected = hit.group().replace(":", "___PSEUDOCLASSCOLON___")
            source = source[:hit.start()] + protected + source[hit.end():]
            hit = selector_re.search(source)
        return source

    css = protect_descendant_colons(css)
    # Drop whitespace that precedes these punctuation characters.
    css = re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css)
    # If there is a `@charset`, then only allow one, and move to the beginning.
    css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css)
    css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css)
    # Restore the space in constructs such as `@media screen and
    # (-webkit-min-device-pixel-ratio:0)`.
    css = re.sub(r"\band\(", "and (", css)
    # Undo the selector-colon protection applied above.
    css = css.replace('___PSEUDOCLASSCOLON___', ':')
    # Drop whitespace that follows these punctuation characters.
    css = re.sub(r"([!{}:;>+\(\[,])\s+", r"\1", css)
    return css
def remove_unnecessary_semicolons(css):
    """Drop the redundant semicolon(s) before a closing `}`."""
    trailing_semis = re.compile(r";+\}")
    return trailing_semis.sub("}", css)
def remove_empty_rules(css):
    """Delete rules whose declaration block is empty (`selector{}`)."""
    empty_rule = re.compile(r"[^\}\{]+\{\}")
    return empty_rule.sub("", css)
def normalize_rgb_colors_to_hex(css):
    """Convert `rgb(51,102,153)` to `#336699`."""
    rgb_re = re.compile(r"rgb\s*\(\s*([0-9,\s]+)\s*\)")
    hit = rgb_re.search(css)
    while hit:
        channels = [int(part.strip()) for part in hit.group(1).split(",")]
        css = css.replace(hit.group(), '#%.2x%.2x%.2x' % tuple(channels))
        hit = rgb_re.search(css)
    return css
def condense_zero_units(css):
    """A zero length needs no unit: turn e.g. `margin:0px` into `margin:0`."""
    zero_unit = re.compile(r"([\s:])(0)(px|em|%|in|cm|mm|pc|pt|ex)")
    return zero_unit.sub(r"\1\2", css)
def condense_multidimensional_zeros(css):
    """Collapse `:0 0 0 0;`, `:0 0 0;` and `:0 0;` down to `:0;`."""
    for longhand in (":0 0 0 0;", ":0 0 0;", ":0 0;"):
        css = css.replace(longhand, ":0;")
    # `background-position:0;` is invalid; restore its two-value form.
    return css.replace("background-position:0;", "background-position:0 0;")
def condense_floating_points(css):
    """Drop the leading zero(s) of a fraction: `0.6` becomes `.6`."""
    leading_zero = re.compile(r"(:|\s)0+\.(\d+)")
    return leading_zero.sub(r"\1.\2", css)
def condense_hex_colors(css):
    """Shorten #AABBCC to #ABC when each channel's two digits repeat."""
    hex_re = re.compile(
        r"([^\"'=\s])(\s*)#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])"
        r"([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])")
    hit = hex_re.search(css)
    while hit:
        odd_digits = hit.group(3) + hit.group(5) + hit.group(7)
        even_digits = hit.group(4) + hit.group(6) + hit.group(8)
        if odd_digits.lower() == even_digits.lower():
            shortened = hit.group(1) + hit.group(2) + '#' + odd_digits
            css = css.replace(hit.group(), shortened)
            # Resume three characters back: the color just got 3 shorter.
            hit = hex_re.search(css, hit.end() - 3)
        else:
            hit = hex_re.search(css, hit.end())
    return css
def condense_whitespace(css):
    """Squash every run of whitespace down to a single space."""
    whitespace_run = re.compile(r"\s+")
    return whitespace_run.sub(" ", css)
def condense_semicolons(css):
    """Collapse each run of semicolons to a single `;`."""
    semicolon_run = re.compile(r";;+")
    return semicolon_run.sub(";", css)
def wrap_css_lines(css, line_length):
    """Insert newlines so lines are roughly `line_length` characters.

    Breaks only after a closing `}`, which is always a safe split point
    in minified CSS.
    """
    chunks = []
    segment_start = 0
    for position, char in enumerate(css):
        if char == '}' and (position - segment_start >= line_length):
            chunks.append(css[segment_start:position + 1])
            segment_start = position + 1
    if segment_start < len(css):
        chunks.append(css[segment_start:])
    return '\n'.join(chunks)
def cssmin(css, wrap=None):
    """Run the full minification pipeline over *css*.

    The pass order matters: comments are removed first, whitespace is
    condensed before the token-level passes, and the Box Model Hack
    string is swapped for a marker so no other pass can mangle it.

    :param css: raw CSS source text.
    :param wrap: approximate maximum line length, or None for one line.
    :returns: minified CSS, stripped of surrounding whitespace.
    """
    css = remove_comments(css)
    css = condense_whitespace(css)
    # A pseudo class for the Box Model Hack
    # (see http://tantek.com/CSS/Examples/boxmodelhack.html)
    css = css.replace('"\\"}\\""', "___PSEUDOCLASSBMH___")
    css = remove_unnecessary_whitespace(css)
    css = remove_unnecessary_semicolons(css)
    css = condense_zero_units(css)
    css = condense_multidimensional_zeros(css)
    css = condense_floating_points(css)
    css = normalize_rgb_colors_to_hex(css)
    css = condense_hex_colors(css)
    if wrap is not None:
        css = wrap_css_lines(css, wrap)
    # Restore the Box Model Hack string that was protected above.
    css = css.replace("___PSEUDOCLASSBMH___", '"\\"}\\""')
    css = condense_semicolons(css)
    return css.strip()
def main():
    """Command-line entry point: minify CSS from stdin onto stdout."""
    import optparse
    import sys

    parser = optparse.OptionParser(
        prog="cssmin", version=__version__,
        usage="%prog [--wrap N]",
        description="""Reads raw CSS from stdin, and writes compressed CSS to stdout.""")
    parser.add_option(
        '-w', '--wrap', type='int', default=None, metavar='N',
        help="Wrap output to approximately N chars per line.")
    opts, _args = parser.parse_args()
    sys.stdout.write(cssmin(sys.stdin.read(), wrap=opts.wrap))


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
High-level CSS and JS minification class for web2py.
Called by response.include_files()
Created by: Ross Peoples <ross.peoples@gmail.com>
Modified by: Massimo Di Pierro <massimo.dipierro@gmail.com>
"""
import cssmin
import jsmin
import os
def read_binary_file(filename):
    """Return the entire contents of *filename* as bytes.

    Bug fix: the previous version leaked the file handle if read()
    raised; the context manager guarantees the handle is closed.
    """
    with open(filename, 'rb') as f:
        return f.read()
def write_binary_file(filename, data):
    """Write *data* to *filename* in binary mode, truncating existing content.

    Bug fix: the previous version leaked the file handle if write()
    raised; the context manager guarantees the handle is closed (and the
    buffer flushed).
    """
    with open(filename, 'wb') as f:
        f.write(data)
def fix_links(css, static_path):
    """Rewrite relative `../` references so they resolve under *static_path*."""
    prefix = static_path + '/'
    return css.replace('../', prefix)
def minify(files, path_info, folder, optimize_css, optimize_js,
           ignore_concat = [],
           ignore_minify = ['/jquery.js', '/anytime.js']):
    """
    Input:
    files: is a list of URLs to JS and CSS files (not repeated)
    path_info: is the URL of a temp static folder
    folder: is the application folder
    optimize_css: is a string of the form 'concat|minify|inline'
    optimize_js: is a string of the form 'concat|minify|inline'
    (minify requires concat, inline requires concat also)
    Returns a new list of:
    - filename (absolute or relative, css or js, actual or temporary) or
    - ('css:inline','...css..')
    - ('js:inline','...js..')

    NOTE(review): ignore_concat/ignore_minify are mutable default
    arguments; safe only as long as they are never mutated here.
    """
    # Treat None as "no optimization requested".
    optimize_css = optimize_css or ''
    optimize_js = optimize_js or ''
    concat_css = 'concat' in optimize_css
    minify_css = 'minify' in optimize_css
    inline_css = 'inline' in optimize_css
    concat_js = 'concat' in optimize_js
    minify_js = 'minify' in optimize_js
    inline_js = 'inline' in optimize_js
    # path_info is ".../<temp>": split into the static URL prefix and the
    # temp subfolder name that receives the concatenated output files.
    static_path,temp = path_info.rsplit('/',1)
    new_files = []
    css = []
    js = []
    for k,filename in enumerate(files):
        # External URLs (no leading '/') and files excluded from
        # concatenation pass through untouched.
        if not filename.startswith('/') or \
                any(filename.endswith(x) for x in ignore_concat):
            new_files.append(filename)
            continue
        # Map the URL (minus the static prefix) onto the filesystem.
        abs_filename = os.path.join(folder,'static',
                                    filename[len(static_path)+1:])
        if filename.lower().endswith('.css'):
            if concat_css:
                contents = read_binary_file(abs_filename)
                if minify_css:
                    css.append(cssmin.cssmin(contents))
                else:
                    css.append(contents)
            else:
                # Concatenation disabled: remember the filename itself.
                css.append(filename)
        elif filename.lower().endswith('.js'):
            if concat_js:
                contents = read_binary_file(abs_filename)
                # Never re-minify pre-minified (.min.js) or blacklisted files.
                if minify_js and not filename.endswith('.min.js') and \
                        not any(filename.endswith(x) for x in ignore_minify):
                    js.append(jsmin.jsmin(contents))
                else:
                    js.append(contents)
            else:
                js.append(filename)
    if css and concat_css:
        # css currently holds file contents; join them into one stylesheet.
        css = '\n\n'.join(contents for contents in css)
        if inline_css:
            css = ('css:inline',fix_links(css,static_path))
        else:
            # Write the combined stylesheet into the temp static folder.
            temppath = os.path.join(folder,'static',temp)
            if not os.path.exists(temppath): os.mkdir(temppath)
            tempfile = os.path.join(temppath,'compressed.css')
            write_binary_file(tempfile,css)
            css = path_info+'/compressed.css'
        new_files.append(css)
    else:
        # css holds the original filenames; keep them individually.
        new_files += css
    if js and concat_js:
        # js currently holds file contents; join them into one script.
        js = '\n'.join(contents for contents in js)
        if inline_js:
            js = ('js:inline',js)
        else:
            # Write the combined script into the temp static folder.
            temppath = os.path.join(folder,'static',temp)
            if not os.path.exists(temppath): os.mkdir(temppath)
            tempfile = os.path.join(folder,'static',temp,'compressed.js')
            write_binary_file(tempfile,js)
            js = path_info+'/compressed.js'
        new_files.append(js)
    else:
        # js holds the original filenames; keep them individually.
        new_files += js
    return new_files
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.