code
stringlengths
1
1.72M
language
stringclasses
1 value
import os, logging
import webapp2
from google.appengine.ext.webapp import template
from app_settings import *

# Base directory of this module; templates live in the sibling 'templates' dir.
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE = os.path.join(ROOT_PATH, 'templates')


class MainPage(webapp2.RequestHandler):
    """Serve the landing page rendered from templates/index.html."""

    def get(self):
        tpl_path = os.path.join(TEMPLATE, 'index.html')
        # NOTE(review): this form posts to "/post", but no "/post" route is
        # registered on the WSGIApplication below -- submitting it will 404.
        form_html = """ <form action="/post" method="post"> </form> """
        ctx = {'form': form_html}
        self.response.out.write(template.render(tpl_path, ctx))


class AddData(webapp2.RequestHandler):
    """Show a data-entry form (GET) and accept its submission (POST)."""

    def get(self):
        # Inline HTML form; the textarea field is named "personas".
        self.response.out.write(""" <html> <body> <form action="/add_data" method="post"> <div><textarea name="personas" rows="3" cols="60"></textarea></div> <div><input type="submit" value="Sign Guestbook"></div> </form> </body> </html>""")

    def post(self):
        # NOTE(review): submitted data is currently discarded (stub handler).
        return


# URL routing: only "/" and "/add_data" are mapped.
app = webapp2.WSGIApplication([('/', MainPage), ('/add_data', AddData)], debug=True)
Python
# wtforms "form" module.
# NOTE: Python 2 only -- relies on dict.iteritems()/itervalues() and the
# __metaclass__ attribute, none of which work on Python 3.
__all__ = (
    'BaseForm',
    'Form',
)


class BaseForm(object):
    """
    Base Form Class.  Provides core behaviour like field construction,
    validation, and data and error proxying.
    """

    def __init__(self, fields, prefix=''):
        """
        :param fields:
            A dict or sequence of 2-tuples of partially-constructed fields.
        :param prefix:
            If provided, all fields will have their name prefixed with the
            value.
        """
        # Ensure the prefix is separated from field names by a delimiter.
        if prefix and prefix[-1] not in '-_;:/.':
            prefix += '-'
        self._prefix = prefix
        self._errors = None
        self._fields = {}
        if hasattr(fields, 'iteritems'):
            fields = fields.iteritems()
        translations = self._get_translations()
        # Bind each partially-constructed (unbound) field to this form.
        for name, unbound_field in fields:
            field = unbound_field.bind(form=self, name=name, prefix=prefix, translations=translations)
            self._fields[name] = field

    def __iter__(self):
        """ Iterate form fields in arbitrary order """
        return self._fields.itervalues()

    def __contains__(self, item):
        """ Returns `True` if the named field is a member of this form. """
        return (item in self._fields)

    def __getitem__(self, name):
        """ Dict-style access to this form's fields."""
        return self._fields[name]

    def __setitem__(self, name, value):
        """ Bind a field to this form. """
        self._fields[name] = value.bind(form=self, name=name, prefix=self._prefix)

    def __delitem__(self, name):
        """ Remove a field from this form. """
        del self._fields[name]

    def _get_translations(self):
        """
        Override in subclasses to provide alternate translations factory.

        Must return an object that provides gettext() and ngettext() methods.
        """
        return None

    def populate_obj(self, obj):
        """
        Populates the attributes of the passed `obj` with data from the form's
        fields.

        :note: This is a destructive operation; Any attribute with the same name
               as a field will be overridden. Use with caution.
        """
        for name, field in self._fields.iteritems():
            field.populate_obj(obj, name)

    def process(self, formdata=None, obj=None, **kwargs):
        """
        Take form, object data, and keyword arg input and have the fields
        process them.

        :param formdata:
            Used to pass data coming from the enduser, usually `request.POST`
            or equivalent.
        :param obj:
            If `formdata` has no data for a field, the form will try to get it
            from the passed object.
        :param `**kwargs`:
            If neither `formdata` or `obj` contains a value for a field, the
            form will assign the value of a matching keyword argument to the
            field, if provided.
        """
        # Webob multidicts expose getall() instead of getlist(); wrap them so
        # fields can always call getlist().
        if formdata is not None and not hasattr(formdata, 'getlist'):
            if hasattr(formdata, 'getall'):
                formdata = WebobInputWrapper(formdata)
            else:
                raise TypeError("formdata should be a multidict-type wrapper that supports the 'getlist' method")
        # Precedence per field: obj attribute, then keyword arg, then default.
        for name, field, in self._fields.iteritems():
            if obj is not None and hasattr(obj, name):
                field.process(formdata, getattr(obj, name))
            elif name in kwargs:
                field.process(formdata, kwargs[name])
            else:
                field.process(formdata)

    def validate(self, extra_validators=None):
        """
        Validates the form by calling `validate` on each field.

        :param extra_validators:
            If provided, is a dict mapping field names to a sequence of
            callables which will be passed as extra validators to the field's
            `validate` method.

        Returns `True` if no errors occur.
        """
        # Reset cached errors so the `errors` property recomputes them.
        self._errors = None
        success = True
        for name, field in self._fields.iteritems():
            if extra_validators is not None and name in extra_validators:
                extra = extra_validators[name]
            else:
                extra = tuple()
            if not field.validate(self, extra):
                success = False
        return success

    @property
    def data(self):
        # Mapping of field name -> processed field data.
        return dict((name, f.data) for name, f in self._fields.iteritems())

    @property
    def errors(self):
        # Lazily computed mapping of field name -> list of errors; only fields
        # with errors appear.
        if self._errors is None:
            self._errors = dict((name, f.errors) for name, f in self._fields.iteritems() if f.errors)
        return self._errors


class FormMeta(type):
    """
    The metaclass for `Form` and any subclasses of `Form`.

    `FormMeta`'s responsibility is to create the `_unbound_fields` list, which
    is a list of `UnboundField` instances sorted by their order of
    instantiation.  The list is created at the first instantiation of the form.
    If any fields are added/removed from the form, the list is cleared to be
    re-generated on the next instantiaton.

    Any properties which begin with an underscore or are not `UnboundField`
    instances are ignored by the metaclass.
    """

    def __init__(cls, name, bases, attrs):
        type.__init__(cls, name, bases, attrs)
        cls._unbound_fields = None

    def __call__(cls, *args, **kwargs):
        """
        Construct a new `Form` instance, creating `_unbound_fields` on the
        class if it is empty.
        """
        if cls._unbound_fields is None:
            fields = []
            for name in dir(cls):
                if not name.startswith('_'):
                    unbound_field = getattr(cls, name)
                    # Only attributes marked with _formfield are form fields.
                    if hasattr(unbound_field, '_formfield'):
                        fields.append((name, unbound_field))
            # We keep the name as the second element of the sort
            # to ensure a stable sort.
            fields.sort(key=lambda x: (x[1].creation_counter, x[0]))
            cls._unbound_fields = fields
        return type.__call__(cls, *args, **kwargs)

    def __setattr__(cls, name, value):
        """
        Add an attribute to the class, clearing `_unbound_fields` if needed.
        """
        if not name.startswith('_') and hasattr(value, '_formfield'):
            cls._unbound_fields = None
        type.__setattr__(cls, name, value)

    def __delattr__(cls, name):
        """
        Remove an attribute from the class, clearing `_unbound_fields` if
        needed.
        """
        if not name.startswith('_'):
            cls._unbound_fields = None
        type.__delattr__(cls, name)


class Form(BaseForm):
    """
    Declarative Form base class. Extends BaseForm's core behaviour allowing
    fields to be defined on Form subclasses as class attributes.

    In addition, form and instance input data are taken at construction time
    and passed to `process()`.
    """
    __metaclass__ = FormMeta

    def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
        """
        :param formdata:
            Used to pass data coming from the enduser, usually `request.POST`
            or equivalent.
        :param obj:
            If `formdata` has no data for a field, the form will try to get it
            from the passed object.
        :param prefix:
            If provided, all fields will have their name prefixed with the
            value.
        :param `**kwargs`:
            If neither `formdata` or `obj` contains a value for a field, the
            form will assign the value of a matching keyword argument to the
            field, if provided.
        """
        super(Form, self).__init__(self._unbound_fields, prefix=prefix)
        for name, field in self._fields.iteritems():
            # Set all the fields to attributes so that they obscure the class
            # attributes with the same names.
            setattr(self, name, field)
        self.process(formdata, obj, **kwargs)

    def __iter__(self):
        """ Iterate form fields in their order of definition on the form. """
        for name, _ in self._unbound_fields:
            if name in self._fields:
                yield self._fields[name]

    def __setitem__(self, name, value):
        # Declarative forms are fixed at class definition time.
        raise TypeError('Fields may not be added to Form instances, only classes.')

    def __delitem__(self, name):
        del self._fields[name]
        # Re-expose the shadowed class attribute slot as None.
        setattr(self, name, None)

    def __delattr__(self, name):
        try:
            self.__delitem__(name)
        except KeyError:
            super(Form, self).__delattr__(name)

    def validate(self):
        """
        Validates the form by calling `validate` on each field, passing any
        extra `Form.validate_<fieldname>` validators to the field validator.
        """
        extra = {}
        for name in self._fields:
            inline = getattr(self.__class__, 'validate_%s' % name, None)
            if inline is not None:
                extra[name] = [inline]
        return super(Form, self).validate(extra)


class WebobInputWrapper(object):
    """
    Wrap a webob MultiDict for use as passing as `formdata` to Field.

    Since for consistency, we have decided in WTForms to support as input a
    small subset of the API provided in common between cgi.FieldStorage,
    Django's QueryDict, and Werkzeug's MultiDict, we need to wrap Webob, the
    only supported framework whose multidict does not fit this API, but is
    nevertheless used by a lot of frameworks.

    While we could write a full wrapper to support all the methods, this will
    undoubtedly result in bugs due to some subtle differences between the
    various wrappers. So we will keep it simple.
    """

    def __init__(self, multidict):
        self._wrapped = multidict

    def __iter__(self):
        return iter(self._wrapped)

    def __len__(self):
        return len(self._wrapped)

    def __contains__(self, name):
        return (name in self._wrapped)

    def getlist(self, name):
        # Translate the common getlist() call to webob's getall().
        return self._wrapped.getall(name)
Python
# wtforms "widgets" module.
# NOTE: Python 2 only -- subclasses `unicode` and uses u'' literals.
# NOTE(review): cgi.escape is deprecated (and the cgi module was removed in
# Python 3.13); a port would use html.escape instead.
from cgi import escape

__all__ = (
    'CheckboxInput', 'FileInput', 'HiddenInput', 'ListWidget', 'PasswordInput',
    'RadioInput', 'Select', 'SubmitInput', 'TableWidget', 'TextArea',
    'TextInput',
)


def html_params(**kwargs):
    """
    Generate HTML parameters from inputted keyword arguments.

    The output value is sorted by the passed keys, to provide consistent
    output each time this function is called with the same parameters.
    Because of the frequent use of the normally reserved keywords `class` and
    `for`, suffixing these with an underscore will allow them to be used.

    >>> html_params(name='text1', id='f', class_='text')
    u'class="text" id="f" name="text1"'
    """
    params = []
    for k, v in sorted(kwargs.iteritems()):
        # Strip one trailing underscore so class_/for_ map to class/for
        # (and class__ maps to class_).
        if k in ('class_', 'class__', 'for_'):
            k = k[:-1]
        params.append(u'%s="%s"' % (unicode(k), escape(unicode(v), quote=True)))
    return u' '.join(params)


class HTMLString(unicode):
    # Marks a string as pre-escaped HTML for template engines that honor the
    # __html__ protocol (e.g. Jinja2's Markup).
    def __html__(self):
        return self


class ListWidget(object):
    """
    Renders a list of fields as a `ul` or `ol` list.

    This is used for fields which encapsulate many inner fields as subfields.
    The widget will try to iterate the field to get access to the subfields and
    call them to render them.

    If `prefix_label` is set, the subfield's label is printed before the field,
    otherwise afterwards. The latter is useful for iterating radios or
    checkboxes.
    """
    def __init__(self, html_tag='ul', prefix_label=True):
        assert html_tag in ('ol', 'ul')
        self.html_tag = html_tag
        self.prefix_label = prefix_label

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        html = [u'<%s %s>' % (self.html_tag, html_params(**kwargs))]
        for subfield in field:
            if self.prefix_label:
                html.append(u'<li>%s: %s</li>' % (subfield.label, subfield()))
            else:
                html.append(u'<li>%s %s</li>' % (subfield(), subfield.label))
        html.append(u'</%s>' % self.html_tag)
        return HTMLString(u''.join(html))


class TableWidget(object):
    """
    Renders a list of fields as a set of table rows with th/td pairs.

    If `with_table_tag` is True, then an enclosing <table> is placed around the
    rows.

    Hidden fields will not be displayed with a row, instead the field will be
    pushed into a subsequent table row to ensure XHTML validity. Hidden fields
    at the end of the field list will appear outside the table.
    """
    def __init__(self, with_table_tag=True):
        self.with_table_tag = with_table_tag

    def __call__(self, field, **kwargs):
        html = []
        if self.with_table_tag:
            kwargs.setdefault('id', field.id)
            html.append(u'<table %s>' % html_params(**kwargs))
        hidden = u''
        for subfield in field:
            if subfield.type == 'HiddenField':
                # Buffer hidden fields and emit them inside the next row.
                hidden += unicode(subfield)
            else:
                html.append(u'<tr><th>%s</th><td>%s%s</td></tr>' % (unicode(subfield.label), hidden, unicode(subfield)))
                hidden = u''
        if self.with_table_tag:
            html.append(u'</table>')
        if hidden:
            # Trailing hidden fields end up after the table.
            html.append(hidden)
        return HTMLString(u''.join(html))


class Input(object):
    """
    Render a basic ``<input>`` field.

    This is used as the basis for most of the other input fields.

    By default, the `_value()` method will be called upon the associated field
    to provide the ``value=`` HTML attribute.
    """
    def __init__(self, input_type=None):
        # Subclasses set input_type as a class attribute; an explicit argument
        # overrides it per-instance.
        if input_type is not None:
            self.input_type = input_type

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        kwargs.setdefault('type', self.input_type)
        if 'value' not in kwargs:
            kwargs['value'] = field._value()
        return HTMLString(u'<input %s />' % html_params(name=field.name, **kwargs))


class TextInput(Input):
    """
    Render a single-line text input.
    """
    input_type = 'text'


class PasswordInput(Input):
    """
    Render a password input.

    For security purposes, this field will not reproduce the value on a form
    submit by default. To have the value filled in, set `hide_value` to
    `False`.
    """
    input_type = 'password'

    def __init__(self, hide_value=True):
        self.hide_value = hide_value

    def __call__(self, field, **kwargs):
        if self.hide_value:
            kwargs['value'] = ''
        return super(PasswordInput, self).__call__(field, **kwargs)


class HiddenInput(Input):
    """
    Render a hidden input.
    """
    input_type = 'hidden'


class CheckboxInput(Input):
    """
    Render a checkbox.

    The ``checked`` HTML attribute is set if the field's data is a non-false
    value.
    """
    input_type = 'checkbox'

    def __call__(self, field, **kwargs):
        # Prefer an explicit `checked` attribute on the field, else truthiness
        # of the field's data.
        if getattr(field, 'checked', field.data):
            kwargs['checked'] = u'checked'
        return super(CheckboxInput, self).__call__(field, **kwargs)


class RadioInput(Input):
    """
    Render a single radio button.

    This widget is most commonly used in conjunction with ListWidget or some
    other listing, as singular radio buttons are not very useful.
    """
    input_type = 'radio'

    def __call__(self, field, **kwargs):
        if field.checked:
            kwargs['checked'] = u'checked'
        return super(RadioInput, self).__call__(field, **kwargs)


class FileInput(object):
    """
    Renders a file input chooser field.
    """
    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        value = field._value()
        if value:
            kwargs.setdefault('value', value)
        return HTMLString(u'<input %s />' % html_params(name=field.name, type=u'file', **kwargs))


class SubmitInput(Input):
    """
    Renders a submit button.

    The field's label is used as the text of the submit button instead of the
    data on the field.
    """
    input_type = 'submit'

    def __call__(self, field, **kwargs):
        kwargs.setdefault('value', field.label.text)
        return super(SubmitInput, self).__call__(field, **kwargs)


class TextArea(object):
    """
    Renders a multi-line text area.

    `rows` and `cols` ought to be passed as keyword args when rendering.
    """
    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        return HTMLString(u'<textarea %s>%s</textarea>' % (html_params(name=field.name, **kwargs), escape(unicode(field._value()))))


class Select(object):
    """
    Renders a select field.

    If `multiple` is True, then the `size` property should be specified on
    rendering to make the field useful.

    The field must provide an `iter_choices()` method which the widget will
    call on rendering; this method must yield tuples of `(value, label,
    selected)`.
    """
    def __init__(self, multiple=False):
        self.multiple = multiple

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        if self.multiple:
            kwargs['multiple'] = 'multiple'
        html = [u'<select %s>' % html_params(name=field.name, **kwargs)]
        for val, label, selected in field.iter_choices():
            html.append(self.render_option(val, label, selected))
        html.append(u'</select>')
        return HTMLString(u''.join(html))

    @classmethod
    def render_option(cls, value, label, selected):
        # Shared by Select and the standalone Option widget.
        options = {'value': value}
        if selected:
            options['selected'] = u'selected'
        return HTMLString(u'<option %s>%s</option>' % (html_params(**options), escape(unicode(label))))


class Option(object):
    """
    Renders the individual option from a select field.

    This is just a convenience for various custom rendering situations, and an
    option by itself does not constitute an entire field.
    """
    def __call__(self, field, **kwargs):
        return Select.render_option(field._value(), field.label.text, field.checked)
Python
# wtforms "fields" module.
# NOTE: Python 2 only -- uses `except X, e` syntax, `unicode`, u'' literals
# and dict.iteritems(), none of which are valid/available on Python 3.
import datetime
import decimal
import itertools
import time

from wtforms import widgets
from wtforms.validators import StopValidation

__all__ = (
    'BooleanField', 'DecimalField', 'DateField', 'DateTimeField', 'FieldList',
    'FileField', 'FloatField', 'FormField', 'HiddenField', 'IntegerField',
    'PasswordField', 'RadioField', 'SelectField', 'SelectMultipleField',
    'SubmitField', 'TextField', 'TextAreaField',
)

# Sentinel meaning "no data was provided"; distinct from None, which is a
# legitimate data value.
_unset_value = object()


class DummyTranslations(object):
    # No-op translations object used when a form provides none.
    def gettext(self, string):
        return string

    def ngettext(self, singular, plural, n):
        if n == 1:
            return singular
        return plural


class Field(object):
    """
    Field base class
    """
    widget = None
    errors = tuple()
    process_errors = tuple()
    _formfield = True
    _translations = DummyTranslations()

    def __new__(cls, *args, **kwargs):
        # When constructed without a form/name (i.e. declaratively on a Form
        # class body), return an UnboundField placeholder instead.
        if '_form' in kwargs and '_name' in kwargs:
            return super(Field, cls).__new__(cls)
        else:
            return UnboundField(cls, *args, **kwargs)

    def __init__(self, label=None, validators=None, filters=tuple(), description=u'', id=None, default=None, widget=None, _form=None, _name=None, _prefix='', _translations=None):
        """
        Construct a new field.

        :param label:
            The label of the field.
        :param validators:
            A sequence of validators to call when `validate` is called.
        :param filters:
            A sequence of filters which are run on input data by `process`.
        :param description:
            A description for the field, typically used for help text.
        :param id:
            An id to use for the field. A reasonable default is set by the
            form, and you shouldn't need to set this manually.
        :param default:
            The default value to assign to the field, if no form or object
            input is provided. May be a callable.
        :param widget:
            If provided, overrides the widget used to render the field.
        :param _form:
            The form holding this field. It is passed by the form itself
            during construction. You should never pass this value yourself.
        :param _name:
            The name of this field, passed by the enclosing form during its
            construction. You should never pass this value yourself.
        :param _prefix:
            The prefix to prepend to the form name of this field, passed by
            the enclosing form during construction.

        If `_form` and `_name` isn't provided, an :class:`UnboundField` will
        be returned instead. Call its :func:`bind` method with a form instance
        and a name to construct the field.
        """
        self.short_name = _name
        self.name = _prefix + _name
        if _translations is not None:
            self._translations = _translations
        self.id = id or self.name
        if label is None:
            # Derive a human-readable label from the attribute name.
            label = _name.replace('_', ' ').title()
        self.label = Label(self.id, label)
        if validators is None:
            validators = []
        self.validators = validators
        self.filters = filters
        self.description = description
        self.type = type(self).__name__
        self.default = default
        self.raw_data = None
        if widget:
            self.widget = widget
        # Collect flags (e.g. 'required') advertised by validators.
        self.flags = Flags()
        for v in validators:
            flags = getattr(v, 'field_flags', ())
            for f in flags:
                setattr(self.flags, f, True)

    def __unicode__(self):
        """
        Returns a HTML representation of the field. For more powerful
        rendering, see the `__call__` method.
        """
        return self()

    def __str__(self):
        """
        Returns a HTML representation of the field. For more powerful
        rendering, see the `__call__` method.
        """
        return self()

    def __html__(self):
        """
        Returns a HTML representation of the field. For more powerful
        rendering, see the `__call__` method.
        """
        return self()

    def __call__(self, **kwargs):
        """
        Render this field as HTML, using keyword args as additional attributes.

        Any HTML attribute passed to the method will be added to the tag and
        entity-escaped properly.
        """
        return self.widget(self, **kwargs)

    def gettext(self, string):
        return self._translations.gettext(string)

    def ngettext(self, singular, plural, n):
        return self._translations.ngettext(singular, plural, n)

    def validate(self, form, extra_validators=tuple()):
        """
        Validates the field and returns True or False. `self.errors` will
        contain any errors raised during validation. This is usually only
        called by `Form.validate`.

        Subfields shouldn't override this, but rather override either
        `pre_validate`, `post_validate` or both, depending on needs.

        :param form: The form the field belongs to.
        :param extra_validators: A list of extra validators to run.
        """
        # Start from errors accumulated during process().
        self.errors = list(self.process_errors)
        stop_validation = False

        # Call pre_validate
        try:
            self.pre_validate(form)
        except StopValidation, e:
            # A StopValidation with no message just halts silently.
            if e.args and e.args[0]:
                self.errors.append(e.args[0])
            stop_validation = True
        except ValueError, e:
            self.errors.append(e.args[0])

        # Run validators
        if not stop_validation:
            for validator in itertools.chain(self.validators, extra_validators):
                try:
                    validator(form, self)
                except StopValidation, e:
                    if e.args and e.args[0]:
                        self.errors.append(e.args[0])
                    stop_validation = True
                    break
                except ValueError, e:
                    self.errors.append(e.args[0])

        # Call post_validate
        try:
            self.post_validate(form, stop_validation)
        except ValueError, e:
            self.errors.append(e.args[0])

        return len(self.errors) == 0

    def pre_validate(self, form):
        """
        Override if you need field-level validation. Runs before any other
        validators.

        :param form: The form the field belongs to.
        """
        pass

    def post_validate(self, form, validation_stopped):
        """
        Override if you need to run any field-level validation tasks after
        normal validation. This shouldn't be needed in most cases.

        :param form: The form the field belongs to.
        :param validation_stopped:
            `True` if any validator raised StopValidation.
        """
        pass

    def process(self, formdata, data=_unset_value):
        """
        Process incoming data, calling process_data, process_formdata as
        needed, and run filters.

        If `data` is not provided, process_data will be called on the field's
        default.

        Field subclasses usually won't override this, instead overriding the
        process_formdata and process_data methods. Only override this for
        special advanced processing, such as when a field encapsulates many
        inputs.
        """
        self.process_errors = []
        if data is _unset_value:
            # The default may be a callable producing the value.
            try:
                data = self.default()
            except TypeError:
                data = self.default

        try:
            self.process_data(data)
        except ValueError, e:
            self.process_errors.append(e.args[0])

        if formdata:
            try:
                if self.name in formdata:
                    self.raw_data = formdata.getlist(self.name)
                else:
                    self.raw_data = []
                self.process_formdata(self.raw_data)
            except ValueError, e:
                self.process_errors.append(e.args[0])

        # NOTE(review): 'filter' shadows the builtin; kept as-is.
        for filter in self.filters:
            try:
                self.data = filter(self.data)
            except ValueError, e:
                self.process_errors.append(e.args[0])

    def process_data(self, value):
        """
        Process the Python data applied to this field and store the result.

        This will be called during form construction by the form's `kwargs` or
        `obj` argument.

        :param value: The python object containing the value to process.
        """
        self.data = value

    def process_formdata(self, valuelist):
        """
        Process data received over the wire from a form.

        This will be called during form construction with data supplied
        through the `formdata` argument.

        :param valuelist: A list of strings to process.
        """
        if valuelist:
            self.data = valuelist[0]

    def populate_obj(self, obj, name):
        """
        Populates `obj.<name>` with the field's data.

        :note: This is a destructive operation. If `obj.<name>` already
               exists, it will be overridden. Use with caution.
        """
        setattr(obj, name, self.data)


class UnboundField(object):
    # Placeholder for a field declared on a Form class body; records the
    # declaration order via creation_counter so fields can be sorted.
    _formfield = True
    creation_counter = 0

    def __init__(self, field_class, *args, **kwargs):
        UnboundField.creation_counter += 1
        self.field_class = field_class
        self.args = args
        self.kwargs = kwargs
        self.creation_counter = UnboundField.creation_counter

    def bind(self, form, name, prefix='', translations=None, **kwargs):
        # Construct the real field, merging declaration-time kwargs with
        # bind-time kwargs (bind-time wins).
        return self.field_class(_form=form, _prefix=prefix, _name=name, _translations=translations, *self.args, **dict(self.kwargs, **kwargs))

    def __repr__(self):
        return '<UnboundField(%s, %r, %r)>' % (self.field_class.__name__, self.args, self.kwargs)


class Flags(object):
    """
    Holds a set of boolean flags as attributes.

    Accessing a non-existing attribute returns False for its value.
    """
    def __getattr__(self, name):
        return False

    def __contains__(self, name):
        return getattr(self, name)

    def __repr__(self):
        flags = (name for name in dir(self) if not name.startswith('_'))
        return '<wtforms.fields.Flags: {%s}>' % ', '.join(flags)


class Label(object):
    """
    An HTML form label.
    """
    def __init__(self, field_id, text):
        self.field_id = field_id
        self.text = text

    def __str__(self):
        return self()

    def __unicode__(self):
        return self()

    def __html__(self):
        return self()

    def __call__(self, text=None, **kwargs):
        kwargs['for'] = self.field_id
        attributes = widgets.html_params(**kwargs)
        return widgets.HTMLString(u'<label %s>%s</label>' % (attributes, text or self.text))

    def __repr__(self):
        return 'Label(%r, %r)' % (self.field_id, self.text)


class SelectFieldBase(Field):
    """
    Base class for fields which can be iterated to produce options.

    This isn't a field, but an abstract base class for fields which want to
    provide this functionality.
    """
    option_widget = widgets.Option()

    def __init__(self, label=None, validators=None, option_widget=None, **kwargs):
        super(SelectFieldBase, self).__init__(label, validators, **kwargs)
        if option_widget is not None:
            self.option_widget = option_widget

    def iter_choices(self):
        """
        Provides data for choice widget rendering. Must return a sequence or
        iterable of (value, label, selected) tuples.
        """
        raise NotImplementedError()

    def __iter__(self):
        # Yield one _Option pseudo-field per choice, for custom rendering.
        opts = dict(widget=self.option_widget, _name=self.name, _form=None)
        for i, (value, label, checked) in enumerate(self.iter_choices()):
            opt = self._Option(label=label, id=u'%s-%d' % (self.id, i), **opts)
            opt.process(None, value)
            opt.checked = checked
            yield opt

    class _Option(Field):
        # Minimal field representing a single option of the parent field.
        checked = False

        def _value(self):
            return self.data


class SelectField(SelectFieldBase):
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, coerce=unicode, choices=None, **kwargs):
        super(SelectField, self).__init__(label, validators, **kwargs)
        self.coerce = coerce
        self.choices = choices

    def iter_choices(self):
        for value, label in self.choices:
            yield (value, label, self.coerce(value) == self.data)

    def process_data(self, value):
        try:
            self.data = self.coerce(value)
        except (ValueError, TypeError):
            # Uncoercible object data means "no selection".
            self.data = None

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                self.data = self.coerce(valuelist[0])
            except ValueError:
                raise ValueError(self.gettext(u'Invalid Choice: could not coerce'))

    def pre_validate(self, form):
        # Reject values not present in the declared choices.
        for v, _ in self.choices:
            if self.data == v:
                break
        else:
            raise ValueError(self.gettext(u'Not a valid choice'))


class SelectMultipleField(SelectField):
    """
    No different from a normal select field, except this one can take (and
    validate) multiple choices. You'll need to specify the HTML `rows`
    attribute to the select field when rendering.
    """
    widget = widgets.Select(multiple=True)

    def iter_choices(self):
        for value, label in self.choices:
            selected = self.data is not None and self.coerce(value) in self.data
            yield (value, label, selected)

    def process_data(self, value):
        try:
            self.data = list(self.coerce(v) for v in value)
        except (ValueError, TypeError):
            self.data = None

    def process_formdata(self, valuelist):
        try:
            self.data = list(self.coerce(x) for x in valuelist)
        except ValueError:
            raise ValueError(self.gettext(u'Invalid choice(s): one or more data inputs could not be coerced'))

    def pre_validate(self, form):
        if self.data:
            values = list(c[0] for c in self.choices)
            for d in self.data:
                if d not in values:
                    raise ValueError(self.gettext(u"'%(value)s' is not a valid choice for this field") % dict(value=d))


class RadioField(SelectField):
    """
    Like a SelectField, except displays a list of radio buttons.

    Iterating the field will produce subfields (each containing a label as
    well) in order to allow custom rendering of the individual radio fields.
    """
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.RadioInput()


class TextField(Field):
    """
    This field is the base for most of the more complicated fields, and
    represents an ``<input type="text">``.
    """
    widget = widgets.TextInput()

    def process_formdata(self, valuelist):
        if valuelist:
            self.data = valuelist[0]
        else:
            self.data = u''

    def _value(self):
        return self.data is not None and unicode(self.data) or u''


class HiddenField(TextField):
    """
    Represents an ``<input type="hidden">``.
    """
    widget = widgets.HiddenInput()


class TextAreaField(TextField):
    """
    This field represents an HTML ``<textarea>`` and can be used to take
    multi-line input.
    """
    widget = widgets.TextArea()


class PasswordField(TextField):
    """
    Represents an ``<input type="password">``.
    """
    widget = widgets.PasswordInput()


class FileField(TextField):
    """
    Can render a file-upload field. Will take any passed filename value, if
    any is sent by the browser in the post params.

    This field will NOT actually handle the file upload portion, as wtforms
    does not deal with individual frameworks' file handling capabilities.
    """
    widget = widgets.FileInput()


class IntegerField(TextField):
    """
    A text field, except all input is coerced to an integer. Erroneous input
    is ignored and will not be accepted as a value.
    """
    def __init__(self, label=None, validators=None, **kwargs):
        super(IntegerField, self).__init__(label, validators, **kwargs)

    def _value(self):
        # Prefer echoing the raw submitted text so invalid input is preserved
        # for the user to correct.
        if self.raw_data:
            return self.raw_data[0]
        elif self.data is not None:
            return unicode(self.data)
        else:
            return u''

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                self.data = int(valuelist[0])
            except ValueError:
                raise ValueError(self.gettext(u'Not a valid integer value'))


class DecimalField(TextField):
    """
    A text field which displays and coerces data of the `decimal.Decimal`
    type.

    :param places:
        How many decimal places to quantize the value to for display on form.
        If None, does not quantize value.
    :param rounding:
        How to round the value during quantize, for example
        `decimal.ROUND_UP`. If unset, uses the rounding value from the
        current thread's context.
    """
    def __init__(self, label=None, validators=None, places=2, rounding=None, **kwargs):
        super(DecimalField, self).__init__(label, validators, **kwargs)
        self.places = places
        self.rounding = rounding

    def _value(self):
        if self.raw_data:
            return self.raw_data[0]
        elif self.data is not None:
            if self.places is not None:
                if hasattr(self.data, 'quantize'):
                    exp = decimal.Decimal('.1') ** self.places
                    quantized = self.data.quantize(exp, rounding=self.rounding)
                    return unicode(quantized)
                else:
                    # If for some reason, data is a float or int, then format
                    # as we would for floats using string formatting.
                    format = u'%%0.%df' % self.places
                    return format % self.data
            else:
                return unicode(self.data)
        else:
            return u''

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                self.data = decimal.Decimal(valuelist[0])
            except (decimal.InvalidOperation, ValueError):
                raise ValueError(self.gettext(u'Not a valid decimal value'))


class FloatField(TextField):
    """
    A text field, except all input is coerced to an float. Erroneous input is
    ignored and will not be accepted as a value.
    """
    def __init__(self, label=None, validators=None, **kwargs):
        super(FloatField, self).__init__(label, validators, **kwargs)

    def _value(self):
        if self.raw_data:
            return self.raw_data[0]
        elif self.data is not None:
            return unicode(self.data)
        else:
            return u''

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                self.data = float(valuelist[0])
            except ValueError:
                raise ValueError(self.gettext(u'Not a valid float value'))


class BooleanField(Field):
    """
    Represents an ``<input type="checkbox">``.
    """
    widget = widgets.CheckboxInput()

    def __init__(self, label=None, validators=None, **kwargs):
        super(BooleanField, self).__init__(label, validators, **kwargs)

    def process_data(self, value):
        self.data = bool(value)

    def process_formdata(self, valuelist):
        # Presence of any submitted value means "checked".
        self.data = bool(valuelist)

    def _value(self):
        if self.raw_data:
            return unicode(self.raw_data[0])
        else:
            return u'y'


class DateTimeField(Field):
    """
    A text field which stores a `datetime.datetime` matching a format.
    """
    widget = widgets.TextInput()

    def __init__(self, label=None, validators=None, format='%Y-%m-%d %H:%M:%S', **kwargs):
        super(DateTimeField, self).__init__(label, validators, **kwargs)
        self.format = format

    def _value(self):
        if self.raw_data:
            return u' '.join(self.raw_data)
        else:
            return self.data and self.data.strftime(self.format) or u''

    def process_formdata(self, valuelist):
        if valuelist:
            # Multiple inputs (e.g. separate date and time boxes) are joined
            # with spaces before parsing.
            date_str = u' '.join(valuelist)
            try:
                timetuple = time.strptime(date_str, self.format)
                self.data = datetime.datetime(*timetuple[:6])
            except ValueError:
                self.data = None
                raise


class DateField(DateTimeField):
    """
    Same as DateTimeField, except stores a `datetime.date`.
    """
    def __init__(self, label=None, validators=None, format='%Y-%m-%d', **kwargs):
        super(DateField, self).__init__(label, validators, format, **kwargs)

    def process_formdata(self, valuelist):
        if valuelist:
            date_str = u' '.join(valuelist)
            try:
                timetuple = time.strptime(date_str, self.format)
                self.data = datetime.date(*timetuple[:3])
            except ValueError:
                self.data = None
                raise


class SubmitField(BooleanField):
    """
    Represents an ``<input type="submit">``. This allows checking if a given
    submit button has been pressed.
    """
    widget = widgets.SubmitInput()


class FormField(Field):
    """
    Encapsulate a form as a field in another form.

    :param form_class:
        A subclass of Form that will be encapsulated.
    :param separator:
        A string which will be suffixed to this field's name to create the
        prefix to enclosed fields. The default is fine for most uses.
    """
    widget = widgets.TableWidget()

    def __init__(self, form_class, label=None, validators=None, separator='-', **kwargs):
        super(FormField, self).__init__(label, validators, **kwargs)
        self.form_class = form_class
        self.separator = separator
        self._obj = None
        # Filters/validators belong on the enclosed form's fields, not here.
        if self.filters:
            raise TypeError('FormField cannot take filters, as the encapsulated data is not mutable.')
        if validators:
            raise TypeError('FormField does not accept any validators. Instead, define them on the enclosed form.')

    def process(self, formdata, data=_unset_value):
        if data is _unset_value:
            try:
                data = self.default()
            except TypeError:
                data = self.default
            self._obj = data
        prefix = self.name + self.separator
        if isinstance(data, dict):
            self.form = self.form_class(formdata=formdata, prefix=prefix, **data)
        else:
            self.form = self.form_class(formdata=formdata, obj=data, prefix=prefix)

    def validate(self, form, extra_validators=tuple()):
        if extra_validators:
            raise TypeError('FormField does not accept in-line validators, as it gets errors from the enclosed form.')
        return self.form.validate()

    def populate_obj(self, obj, name):
        candidate = getattr(obj, name, None)
        if candidate is None:
            if self._obj is None:
                raise TypeError('populate_obj: cannot find a value to populate from the provided obj or input data/defaults')
            candidate = self._obj
            setattr(obj, name, candidate)
        self.form.populate_obj(candidate)

    def __iter__(self):
        return iter(self.form)

    def __getitem__(self, name):
        return self.form[name]

    def __getattr__(self, name):
        # Delegate unknown attributes to the enclosed form.
        return getattr(self.form, name)

    @property
    def data(self):
        return self.form.data

    @property
    def errors(self):
        return self.form.errors


class FieldList(Field):
    """
    Encapsulate an ordered list of multiple instances of the same field type,
    keeping data as a list.

    >>> authors = FieldList(TextField('Name', [validators.required()]))

    :param unbound_field:
        A partially-instantiated field definition, just like that would be
        defined on a form directly.
    :param min_entries:
        if provided, always have at least this many entries on the field,
        creating blank ones if the provided input does not specify a
        sufficient amount.
    :param max_entries:
        accept no more than this many entries as input, even if more exist in
        formdata.
    """
    widget=widgets.ListWidget()

    def __init__(self, unbound_field, label=None, validators=None, min_entries=0, max_entries=None, default=tuple(), **kwargs):
        super(FieldList, self).__init__(label, validators, default=default, **kwargs)
        if self.filters:
            raise TypeError('FieldList does not accept any filters. Instead, define them on the enclosed field.')
        if validators:
            raise TypeError('FieldList does not accept any validators. Instead, define them on the enclosed field.')
        assert isinstance(unbound_field, UnboundField), 'Field must be unbound, not a field class'
        self.unbound_field = unbound_field
        self.min_entries = min_entries
        self.max_entries = max_entries
        self.last_index = -1
        self._prefix = kwargs.get('_prefix', '')

    def process(self, formdata, data=_unset_value):
        self.entries = []
        if data is _unset_value or not data:
            try:
                data = self.default()
            except TypeError:
                data = self.default

        if formdata:
            # Entries present in the submitted form data drive the indices;
            # pair them positionally with any object data.
            indices = sorted(set(self._extract_indices(self.name, formdata)))
            if self.max_entries:
                indices = indices[:self.max_entries]
            idata = iter(data)
            for index in indices:
                try:
                    obj_data = idata.next()
                except StopIteration:
                    obj_data = _unset_value
                self._add_entry(formdata, obj_data, index=index)
        else:
            for obj_data in data:
                self._add_entry(formdata, obj_data)
        while len(self.entries) < self.min_entries:
            self._add_entry(formdata)

    def _extract_indices(self, prefix, formdata):
        """
        Yield indices of any keys with given prefix.

        formdata must be an object which will produce keys when iterated. For
        example, if field 'foo' contains keys 'foo-0-bar', 'foo-1-baz', then
        the numbers 0 and 1 will be yielded, but not neccesarily in order.
        """
        # NOTE(review): SOURCE is truncated here; the body of this method (and
        # the rest of FieldList) is not visible in this chunk. TODO: restore
        # from the full file.
""" offset = len(prefix) + 1 for k in formdata: if k.startswith(prefix): k = k[offset:].split('-', 1)[0] if k.isdigit(): yield int(k) def validate(self, form, extra_validators=tuple()): self.errors = [] success = True for subfield in self.entries: if not subfield.validate(form): success = False self.errors.append(subfield.errors) return success def populate_obj(self, obj, name): values = getattr(obj, name, None) try: ivalues = iter(values) except TypeError: ivalues = iter([]) candidates = itertools.chain(ivalues, itertools.repeat(None)) _fake = type('_fake', (object, ), {}) output = [] for field, data in itertools.izip(self.entries, candidates): fake_obj = _fake() fake_obj.data = data field.populate_obj(fake_obj, 'data') output.append(fake_obj.data) setattr(obj, name, output) def _add_entry(self, formdata=None, data=_unset_value, index=None): assert not self.max_entries or len(self.entries) < self.max_entries, \ 'You cannot have more than max_entries entries in this FieldList' new_index = self.last_index = index or (self.last_index + 1) name = '%s-%d' % (self.short_name, new_index) id = '%s-%d' % (self.id, new_index) field = self.unbound_field.bind(form=None, name=name, prefix=self._prefix, id=id) field.process(formdata, data) self.entries.append(field) return field def append_entry(self, data=_unset_value): """ Create a new entry with optional default data. Entries added in this way will *not* receive formdata however, and can only receive object data. """ return self._add_entry(data=data) def pop_entry(self): """ Removes the last entry from the list and returns it. """ entry = self.entries.pop() self.last_index -= 1 return entry def __iter__(self): return iter(self.entries) def __len__(self): return len(self.entries) def __getitem__(self, index): return self.entries[index] @property def data(self): return [f.data for f in self.entries]
Python
import decimal

from wtforms import fields, widgets


class ReferencePropertyField(fields.SelectFieldBase):
    """
    A field for ``db.ReferenceProperty``. The list items are rendered in a
    select.

    :param reference_class:
        A db.Model class which will be used to generate the default query to
        make the list of items. If this is not specified, The `query`
        property must be overridden before validation.
    :param label_attr:
        If specified, use this attribute on the model class as the label
        associated with each option. Otherwise, the model object's `__str__`
        or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, reference_class=None,
                 label_attr=None, allow_blank=False, blank_text=u'',
                 **kwargs):
        super(ReferencePropertyField, self).__init__(label, validators,
                                                     **kwargs)
        self.label_attr = label_attr
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            self.query = reference_class.all()

    def _get_data(self):
        # Resolve pending form data (a stringified entity key) to the actual
        # model instance lazily, on first access.
        if self._formdata is not None:
            for obj in self.query:
                if str(obj.key()) == self._formdata:
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def iter_choices(self):
        """Yield (value, label, selected) tuples for rendering the select."""
        if self.allow_blank:
            yield (u'__None', self.blank_text, self.data is None)

        for obj in self.query:
            key = str(obj.key())
            label = self.label_attr and getattr(obj, self.label_attr) or obj
            yield (key, label,
                   bool(self.data and (self.data.key() == obj.key())))

    def process_formdata(self, valuelist):
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                # Defer resolution of the key to _get_data().
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        """
        Ensure the submitted value refers to an entity in the query.

        :raises ValueError: if the value is not a valid choice, or if no
            value was chosen and blanks are not allowed.
        """
        if self.data is not None:
            for obj in self.query:
                if str(self.data.key()) == str(obj.key()):
                    break
            else:
                raise ValueError(self.gettext(u'Not a valid choice'))
        elif not self.allow_blank:
            # BUG FIX: the original condition evaluated
            # ``str(self.data.key())`` with self.data == None here, raising
            # AttributeError instead of a validation error.
            raise ValueError(self.gettext(u'Not a valid choice'))


class StringListPropertyField(fields.TextAreaField):
    """
    A field for ``db.StringListProperty``. The list items are rendered in a
    textarea.
    """
    def _value(self):
        if self.raw_data:
            return self.raw_data[0]
        else:
            return self.data and unicode("\n".join(self.data)) or u''

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                # One list entry per line of input.
                self.data = valuelist[0].splitlines()
            except ValueError:
                raise ValueError(self.gettext(u'Not a valid list'))


class GeoPtPropertyField(fields.TextField):
    """
    A field for ``db.GeoPtProperty``; accepts a "lat, lon" pair and
    normalizes it to a canonical ``u'lat,lon'`` string.
    """

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                lat, lon = valuelist[0].split(',')
                # Decimal() both validates and normalizes each coordinate.
                self.data = u'%s,%s' % (decimal.Decimal(lat.strip()),
                                        decimal.Decimal(lon.strip()),)
            except (decimal.InvalidOperation, ValueError):
                raise ValueError(u'Not a valid coordinate location')
Python
""" Form generation utilities for App Engine's ``db.Model`` class. The goal of ``model_form()`` is to provide a clean, explicit and predictable way to create forms based on ``db.Model`` classes. No malabarism or black magic should be necessary to generate a form for models, and to add custom non-model related fields: ``model_form()`` simply generates a form class that can be used as it is, or that can be extended directly or even be used to create other forms using ``model_form()``. Example usage: .. code-block:: python from google.appengine.ext import db from tipfy.ext.model.form import model_form # Define an example model and add a record. class Contact(db.Model): name = db.StringProperty(required=True) city = db.StringProperty() age = db.IntegerProperty(required=True) is_admin = db.BooleanProperty(default=False) new_entity = Contact(key_name='test', name='Test Name', age=17) new_entity.put() # Generate a form based on the model. ContactForm = model_form(Contact) # Get a form populated with entity data. entity = Contact.get_by_key_name('test') form = ContactForm(obj=entity) Properties from the model can be excluded from the generated form, or it can include just a set of properties. For example: .. code-block:: python # Generate a form based on the model, excluding 'city' and 'is_admin'. ContactForm = model_form(Contact, exclude=('city', 'is_admin')) # or... # Generate a form based on the model, only including 'name' and 'age'. ContactForm = model_form(Contact, only=('name', 'age')) The form can be generated setting field arguments: .. code-block:: python ContactForm = model_form(Contact, only=('name', 'age'), field_args={ 'name': { 'label': 'Full name', 'description': 'Your name', }, 'age': { 'label': 'Age', 'validators': [validators.NumberRange(min=14, max=99)], } }) The class returned by ``model_form()`` can be used as a base class for forms mixing non-model fields and/or other model forms. For example: .. 
code-block:: python # Generate a form based on the model. BaseContactForm = model_form(Contact) # Generate a form based on other model. ExtraContactForm = model_form(MyOtherModel) class ContactForm(BaseContactForm): # Add an extra, non-model related field. subscribe_to_news = f.BooleanField() # Add the other model form as a subform. extra = f.FormField(ExtraContactForm) The class returned by ``model_form()`` can also extend an existing form class: .. code-block:: python class BaseContactForm(Form): # Add an extra, non-model related field. subscribe_to_news = f.BooleanField() # Generate a form based on the model. ContactForm = model_form(Contact, base_class=BaseContactForm) """ from wtforms import Form, validators, widgets, fields as f from wtforms.ext.appengine.fields import GeoPtPropertyField, ReferencePropertyField, StringListPropertyField def get_TextField(kwargs): """ Returns a ``TextField``, applying the ``db.StringProperty`` length limit of 500 bytes. """ kwargs['validators'].append(validators.length(max=500)) return f.TextField(**kwargs) def get_IntegerField(kwargs): """ Returns an ``IntegerField``, applying the ``db.IntegerProperty`` range limits. 
""" v = validators.NumberRange(min=-0x8000000000000000, max=0x7fffffffffffffff) kwargs['validators'].append(v) return f.IntegerField(**kwargs) def convert_StringProperty(model, prop, kwargs): """Returns a form field for a ``db.StringProperty``.""" if prop.multiline: kwargs['validators'].append(validators.length(max=500)) return f.TextAreaField(**kwargs) else: return get_TextField(kwargs) def convert_ByteStringProperty(model, prop, kwargs): """Returns a form field for a ``db.ByteStringProperty``.""" return get_TextField(kwargs) def convert_BooleanProperty(model, prop, kwargs): """Returns a form field for a ``db.BooleanProperty``.""" return f.BooleanField(**kwargs) def convert_IntegerProperty(model, prop, kwargs): """Returns a form field for a ``db.IntegerProperty``.""" return get_IntegerField(kwargs) def convert_FloatProperty(model, prop, kwargs): """Returns a form field for a ``db.FloatProperty``.""" return f.FloatField(**kwargs) def convert_DateTimeProperty(model, prop, kwargs): """Returns a form field for a ``db.DateTimeProperty``.""" if prop.auto_now or prop.auto_now_add: return None return f.DateTimeField(format='%Y-%m-%d %H:%M:%S', **kwargs) def convert_DateProperty(model, prop, kwargs): """Returns a form field for a ``db.DateProperty``.""" if prop.auto_now or prop.auto_now_add: return None return f.DateField(format='%Y-%m-%d', **kwargs) def convert_TimeProperty(model, prop, kwargs): """Returns a form field for a ``db.TimeProperty``.""" if prop.auto_now or prop.auto_now_add: return None return f.DateTimeField(format='%H:%M:%S', **kwargs) def convert_ListProperty(model, prop, kwargs): """Returns a form field for a ``db.ListProperty``.""" return None def convert_StringListProperty(model, prop, kwargs): """Returns a form field for a ``db.StringListProperty``.""" return StringListPropertyField(**kwargs) def convert_ReferenceProperty(model, prop, kwargs): """Returns a form field for a ``db.ReferenceProperty``.""" kwargs['reference_class'] = prop.reference_class 
return ReferencePropertyField(**kwargs) def convert_SelfReferenceProperty(model, prop, kwargs): """Returns a form field for a ``db.SelfReferenceProperty``.""" return None def convert_UserProperty(model, prop, kwargs): """Returns a form field for a ``db.UserProperty``.""" return None def convert_BlobProperty(model, prop, kwargs): """Returns a form field for a ``db.BlobProperty``.""" return f.FileField(**kwargs) def convert_TextProperty(model, prop, kwargs): """Returns a form field for a ``db.TextProperty``.""" return f.TextAreaField(**kwargs) def convert_CategoryProperty(model, prop, kwargs): """Returns a form field for a ``db.CategoryProperty``.""" return get_TextField(kwargs) def convert_LinkProperty(model, prop, kwargs): """Returns a form field for a ``db.LinkProperty``.""" kwargs['validators'].append(validators.url()) return get_TextField(kwargs) def convert_EmailProperty(model, prop, kwargs): """Returns a form field for a ``db.EmailProperty``.""" kwargs['validators'].append(validators.email()) return get_TextField(kwargs) def convert_GeoPtProperty(model, prop, kwargs): """Returns a form field for a ``db.GeoPtProperty``.""" return GeoPtPropertyField(**kwargs) def convert_IMProperty(model, prop, kwargs): """Returns a form field for a ``db.IMProperty``.""" return None def convert_PhoneNumberProperty(model, prop, kwargs): """Returns a form field for a ``db.PhoneNumberProperty``.""" return get_TextField(kwargs) def convert_PostalAddressProperty(model, prop, kwargs): """Returns a form field for a ``db.PostalAddressProperty``.""" return get_TextField(kwargs) def convert_RatingProperty(model, prop, kwargs): """Returns a form field for a ``db.RatingProperty``.""" kwargs['validators'].append(validators.NumberRange(min=0, max=100)) return f.IntegerField(**kwargs) class ModelConverter(object): """ Converts properties from a ``db.Model`` class to form fields. 
Default conversions between properties and fields: +====================+===================+==============+==================+ | Property subclass | Field subclass | datatype | notes | +====================+===================+==============+==================+ | StringProperty | TextField | unicode | TextArea | | | | | if multiline | +--------------------+-------------------+--------------+------------------+ | ByteStringProperty | TextField | str | | +--------------------+-------------------+--------------+------------------+ | BooleanProperty | BooleanField | bool | | +--------------------+-------------------+--------------+------------------+ | IntegerProperty | IntegerField | int or long | | +--------------------+-------------------+--------------+------------------+ | FloatProperty | TextField | float | | +--------------------+-------------------+--------------+------------------+ | DateTimeProperty | DateTimeField | datetime | skipped if | | | | | auto_now[_add] | +--------------------+-------------------+--------------+------------------+ | DateProperty | DateField | date | skipped if | | | | | auto_now[_add] | +--------------------+-------------------+--------------+------------------+ | TimeProperty | DateTimeField | time | skipped if | | | | | auto_now[_add] | +--------------------+-------------------+--------------+------------------+ | ListProperty | None | list | always skipped | +--------------------+-------------------+--------------+------------------+ | StringListProperty | TextAreaField | list of str | | +--------------------+-------------------+--------------+------------------+ | ReferenceProperty | ReferencePropertyF| db.Model | | +--------------------+-------------------+--------------+------------------+ | SelfReferenceP. 
| ReferencePropertyF| db.Model | | +--------------------+-------------------+--------------+------------------+ | UserProperty | None | users.User | always skipped | +--------------------+-------------------+--------------+------------------+ | BlobProperty | FileField | str | | +--------------------+-------------------+--------------+------------------+ | TextProperty | TextAreaField | unicode | | +--------------------+-------------------+--------------+------------------+ | CategoryProperty | TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | LinkProperty | TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | EmailProperty | TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | GeoPtProperty | TextField | db.GeoPt | | +--------------------+-------------------+--------------+------------------+ | IMProperty | None | db.IM | always skipped | +--------------------+-------------------+--------------+------------------+ | PhoneNumberProperty| TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | PostalAddressP. 
| TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | RatingProperty | IntegerField | int or long | | +--------------------+-------------------+--------------+------------------+ | _ReverseReferenceP.| None | <iterable> | always skipped | +====================+===================+==============+==================+ """ default_converters = { 'StringProperty': convert_StringProperty, 'ByteStringProperty': convert_ByteStringProperty, 'BooleanProperty': convert_BooleanProperty, 'IntegerProperty': convert_IntegerProperty, 'FloatProperty': convert_FloatProperty, 'DateTimeProperty': convert_DateTimeProperty, 'DateProperty': convert_DateProperty, 'TimeProperty': convert_TimeProperty, 'ListProperty': convert_ListProperty, 'StringListProperty': convert_StringListProperty, 'ReferenceProperty': convert_ReferenceProperty, 'SelfReferenceProperty': convert_SelfReferenceProperty, 'UserProperty': convert_UserProperty, 'BlobProperty': convert_BlobProperty, 'TextProperty': convert_TextProperty, 'CategoryProperty': convert_CategoryProperty, 'LinkProperty': convert_LinkProperty, 'EmailProperty': convert_EmailProperty, 'GeoPtProperty': convert_GeoPtProperty, 'IMProperty': convert_IMProperty, 'PhoneNumberProperty': convert_PhoneNumberProperty, 'PostalAddressProperty': convert_PostalAddressProperty, 'RatingProperty': convert_RatingProperty, } def __init__(self, converters=None): """ Constructs the converter, setting the converter callables. :param converters: A dictionary of converter callables for each property type. The callable must accept the arguments (model, prop, kwargs). """ self.converters = converters or self.default_converters def convert(self, model, prop, field_args): """ Returns a form field for a single model property. :param model: The ``db.Model`` class that contains the property. :param prop: The model property: a ``db.Property`` instance. :param field_args: Optional keyword arguments to construct the field. 
""" kwargs = { 'label': prop.name.replace('_', ' ').title(), 'default': prop.default_value(), 'validators': [], } if field_args: kwargs.update(field_args) if prop.required: kwargs['validators'].append(validators.required()) if prop.choices: # Use choices in a select field. kwargs['choices'] = [(v, v) for v in prop.choices] return f.SelectField(**kwargs) else: converter = self.converters.get(type(prop).__name__, None) if converter is not None: return converter(model, prop, kwargs) def model_fields(model, only=None, exclude=None, field_args=None, converter=None): """ Extracts and returns a dictionary of form fields for a given ``db.Model`` class. :param model: The ``db.Model`` class to extract fields from. :param only: An optional iterable with the property names that should be included in the form. Only these properties will have fields. :param exclude: An optional iterable with the property names that should be excluded from the form. All other properties will have fields. :param field_args: An optional dictionary of field names mapping to a keyword arguments used to construct each field object. :param converter: A converter to generate the fields based on the model properties. If not set, ``ModelConverter`` is used. """ converter = converter or ModelConverter() field_args = field_args or {} # Get the field names we want to include or exclude, starting with the # full list of model properties. props = model.properties() field_names = props.keys() if only: field_names = list(f for f in only if f in field_names) elif exclude: field_names = list(f for f in field_names if f not in exclude) # Create all fields. field_dict = {} for name in field_names: field = converter.convert(model, props[name], field_args.get(name)) if field is not None: field_dict[name] = field return field_dict def model_form(model, base_class=Form, only=None, exclude=None, field_args=None, converter=None): """ Creates and returns a dynamic ``wtforms.Form`` class for a given ``db.Model`` class. 
The form class can be used as it is or serve as a base for extended form classes, which can then mix non-model related fields, subforms with other model forms, among other possibilities. :param model: The ``db.Model`` class to generate a form for. :param base_class: Base form class to extend from. Must be a ``wtforms.Form`` subclass. :param only: An optional iterable with the property names that should be included in the form. Only these properties will have fields. :param exclude: An optional iterable with the property names that should be excluded from the form. All other properties will have fields. :param field_args: An optional dictionary of field names mapping to keyword arguments used to construct each field object. :param converter: A converter to generate the fields based on the model properties. If not set, ``ModelConverter`` is used. """ # Extract the fields from the model. field_dict = model_fields(model, only, exclude, field_args, converter) # Return a dynamically created form class, extending from base_class and # including the created fields as properties. return type(model.kind() + 'Form', (base_class,), field_dict)
Python
""" Tools for generating forms based on SQLAlchemy models. """ import inspect from wtforms import fields as f from wtforms import validators from wtforms.form import Form __all__ = ( 'model_fields', 'model_form', ) def converts(*args): def _inner(func): func._converter_for = frozenset(args) return func return _inner class ModelConverterBase(object): def __init__(self, converters, use_mro=True): self.use_mro = use_mro if not converters: converters = {} for name in dir(self): obj = getattr(self, name) if hasattr(obj, '_converter_for'): for classname in obj._converter_for: converters[classname] = obj self.converters = converters def convert(self, model, mapper, prop, field_args): if not hasattr(prop, 'columns'): # XXX We don't support anything but ColumnProperty at the moment. return elif len(prop.columns) != 1: raise TypeError('Do not know how to convert multiple-column properties currently') column = prop.columns[0] kwargs = { 'validators': [], 'filters': [], 'default': column.default, } if field_args: kwargs.update(field_args) if column.nullable: kwargs['validators'].append(validators.Optional()) if self.use_mro: types = inspect.getmro(type(column.type)) else: types = [type(column.type)] converter = None for col_type in types: type_string = '%s.%s' % (col_type.__module__, col_type.__name__) if type_string.startswith('sqlalchemy'): type_string = type_string[11:] if type_string in self.converters: converter = self.converters[type_string] break else: for col_type in types: if col_type.__name__ in self.converters: converter = self.converters[col_type.__name__] break else: return return converter(model=model, mapper=mapper, prop=prop, column=column, field_args=kwargs) class ModelConverter(ModelConverterBase): def __init__(self, extra_converters=None): super(ModelConverter, self).__init__(extra_converters) @classmethod def _string_common(cls, column, field_args, **extra): if column.type.length: field_args['validators'].append(validators.Length(max=column.type.length)) 
@converts('String', 'Unicode') def conv_String(self, field_args, **extra): self._string_common(field_args=field_args, **extra) return f.TextField(**field_args) @converts('Text', 'UnicodeText', 'types.LargeBinary', 'types.Binary') def conv_Text(self, field_args, **extra): self._string_common(field_args=field_args, **extra) return f.TextAreaField(**field_args) @converts('Boolean') def conv_Boolean(self, field_args, **extra): return f.BooleanField(**field_args) @converts('Date') def conv_Date(self, field_args, **extra): return f.DateField(**field_args) @converts('DateTime') def conv_DateTime(self, field_args, **extra): return f.DateTimeField(**field_args) @converts('Integer', 'SmallInteger') def handle_integer_types(self, column, field_args, **extra): unsigned = getattr(column.type, 'unsigned', False) if unsigned: field_args['validators'].append(validators.NumberRange(min=0)) return f.IntegerField(**field_args) @converts('Numeric', 'Float') def handle_decimal_types(self, column, field_args, **extra): places = getattr(column.type, 'scale', 2) if places is not None: field_args['places'] = places return f.DecimalField(**field_args) @converts('databases.mysql.MSYear') def conv_MSYear(self, field_args, **extra): field_args['validators'].append(validators.NumberRange(min=1901, max=2155)) return f.TextField(**field_args) @converts('databases.postgres.PGInet', 'dialects.postgresql.base.INET') def conv_PGInet(self, field_args, **extra): kwargs.setdefault('label', u'IP Address') kwargs['validators'].append(validators.IPAddress()) return f.TextField(**kwargs) def model_fields(model, only=None, exclude=None, field_args=None, converter=None): """ Generate a dictionary of fields for a given SQLAlchemy model. See `model_form` docstring for description of parameters. 
""" if not hasattr(model, '_sa_class_manager'): raise TypeError('model must be a sqlalchemy mapped model') mapper = model._sa_class_manager.mapper converter = converter or ModelConverter() field_args = field_args or {} properties = ((p.key, p) for p in mapper.iterate_properties) if only: properties = (x for x in properties if x[0] in only) elif exclude: properties = (x for x in properties if x[0] not in exclude) field_dict = {} for name, prop in properties: field = converter.convert(model, mapper, prop, field_args.get(name)) if field is not None: field_dict[name] = field return field_dict def model_form(model, base_class=Form, only=None, exclude=None, field_args=None, converter=None): """ Create a wtforms Form for a given SQLAlchemy model class:: from wtforms.ext.sqlalchemy.orm import model_form from myapp.models import User UserForm = model_form(User) :param model: A SQLAlchemy mapped model class. :param base_class: Base form class to extend from. Must be a ``wtforms.Form`` subclass. :param only: An optional iterable with the property names that should be included in the form. Only these properties will have fields. :param exclude: An optional iterable with the property names that should be excluded from the form. All other properties will have fields. :param field_args: An optional dictionary of field names mapping to keyword arguments used to construct each field object. :param converter: A converter to generate the fields based on the model properties. If not set, ``ModelConverter`` is used. """ field_dict = model_fields(model, only, exclude, field_args, converter) return type(model.__name__ + 'Form', (base_class, ), field_dict)
Python
""" Useful form fields for use with SQLAlchemy ORM. """ import operator from wtforms import widgets from wtforms.fields import SelectFieldBase from wtforms.validators import ValidationError try: from sqlalchemy.orm.util import identity_key has_identity_key = True except ImportError: has_identity_key = False __all__ = ( 'QuerySelectField', 'QuerySelectMultipleField', ) class QuerySelectField(SelectFieldBase): """ Will display a select drop-down field to choose between ORM results in a sqlalchemy `Query`. The `data` property actually will store/keep an ORM model instance, not the ID. Submitting a choice which is not in the query will result in a validation error. This field only works for queries on models whose primary key column(s) have a consistent string representation. This means it mostly only works for those composed of string, unicode, and integer types. For the most part, the primary keys will be auto-detected from the model, alternately pass a one-argument callable to `get_pk` which can return a unique comparable key. The `query` property on the field can be set from within a view to assign a query per-instance to the field. If the property is not set, the `query_factory` callable passed to the field constructor will be called to obtain a query. Specify `get_label` to customize the label associated with each option. If a string, this is the name of an attribute on the model object to use as the label text. If a one-argument callable, this callable will be passed model instance and expected to return the label text. Otherwise, the model object's `__str__` or `__unicode__` will be used. If `allow_blank` is set to `True`, then a blank choice will be added to the top of the list. Selecting this choice will result in the `data` property being `None`. The label for this blank choice can be set by specifying the `blank_text` parameter. 
""" widget = widgets.Select() def __init__(self, label=None, validators=None, query_factory=None, get_pk=None, get_label=None, allow_blank=False, blank_text=u'', **kwargs): super(QuerySelectField, self).__init__(label, validators, **kwargs) self.query_factory = query_factory if get_pk is None: if not has_identity_key: raise Exception('The sqlalchemy identity_key function could not be imported.') self.get_pk = get_pk_from_identity else: self.get_pk = get_pk if get_label is None: self.get_label = lambda x: x elif isinstance(get_label, basestring): self.get_label = operator.attrgetter(get_label) else: self.get_label = get_label self.allow_blank = allow_blank self.blank_text = blank_text self.query = None self._object_list = None def _get_data(self): if self._formdata is not None: for pk, obj in self._get_object_list(): if pk == self._formdata: self._set_data(obj) break return self._data def _set_data(self, data): self._data = data self._formdata = None data = property(_get_data, _set_data) def _get_object_list(self): if self._object_list is None: query = self.query or self.query_factory() get_pk = self.get_pk self._object_list = list((unicode(get_pk(obj)), obj) for obj in query) return self._object_list def iter_choices(self): if self.allow_blank: yield (u'__None', self.blank_text, self.data is None) for pk, obj in self._get_object_list(): yield (pk, self.get_label(obj), obj == self.data) def process_formdata(self, valuelist): if valuelist: if self.allow_blank and valuelist[0] == u'__None': self.data = None else: self._data = None self._formdata = valuelist[0] def pre_validate(self, form): if not self.allow_blank or self.data is not None: for pk, obj in self._get_object_list(): if self.data == obj: break else: raise ValidationError(self.gettext(u'Not a valid choice')) class QuerySelectMultipleField(QuerySelectField): """ Very similar to QuerySelectField with the difference that this will display a multiple select. 
The data property will hold a list with ORM model instances and will be an empty list when no value is selected. If any of the items in the data list or submitted form data cannot be found in the query, this will result in a validation error. """ widget = widgets.Select(multiple=True) def __init__(self, label=None, validators=None, default=None, **kwargs): if default is None: default = [] super(QuerySelectMultipleField, self).__init__(label, validators, default=default, **kwargs) self._invalid_formdata = False def _get_data(self): formdata = self._formdata if formdata is not None: data = [] for pk, obj in self._get_object_list(): if not formdata: break elif pk in formdata: formdata.remove(pk) data.append(obj) if formdata: self._invalid_formdata = True self._set_data(data) return self._data def _set_data(self, data): self._data = data self._formdata = None data = property(_get_data, _set_data) def iter_choices(self): for pk, obj in self._get_object_list(): yield (pk, self.get_label(obj), obj in self.data) def process_formdata(self, valuelist): self._formdata = set(valuelist) def pre_validate(self, form): if self._invalid_formdata: raise ValidationError(self.gettext(u'Not a valid choice')) elif self.data: obj_list = list(x[1] for x in self._get_object_list()) for v in self.data: if v not in obj_list: raise ValidationError(self.gettext('Not a valid choice')) def get_pk_from_identity(obj): cls, key = identity_key(instance=obj) return u':'.join(unicode(x) for x in key)
Python
""" A DateTimeField and DateField that use the `dateutil` package for parsing. """ from dateutil import parser from wtforms.fields import Field from wtforms.validators import ValidationError from wtforms.widgets import TextInput __all__ = ( 'DateTimeField', 'DateField', ) class DateTimeField(Field): """ DateTimeField represented by a text input, accepts all input text formats that `dateutil.parser.parse` will. :param parse_kwargs: A dictionary of keyword args to pass to the dateutil parse() function. See dateutil docs for available keywords. :param display_format: A format string to pass to strftime() to format dates for display. """ widget = TextInput() def __init__(self, label=None, validators=None, parse_kwargs=None, display_format='%Y-%m-%d %H:%M', **kwargs): super(DateTimeField, self).__init__(label, validators, **kwargs) if parse_kwargs is None: parse_kwargs = {} self.parse_kwargs = parse_kwargs self.display_format = display_format def _value(self): if self.raw_data: return u' '.join(self.raw_data) else: return self.data and self.data.strftime(self.display_format) or u'' def process_formdata(self, valuelist): if valuelist: date_str = u' '.join(valuelist) if not date_str: self.data = None raise ValidationError(self.gettext(u'Please input a date/time value')) parse_kwargs = self.parse_kwargs.copy() if 'default' not in parse_kwargs: try: parse_kwargs['default'] = self.default() except TypeError: parse_kwargs['default'] = self.default try: self.data = parser.parse(date_str, **parse_kwargs) except ValueError: self.data = None raise ValidationError(self.gettext(u'Invalid date/time input')) class DateField(DateTimeField): """ Same as the DateTimeField, but stores only the date portion. 
""" def __init__(self, label=None, validators=None, parse_kwargs=None, display_format='%Y-%m-%d', **kwargs): super(DateField, self).__init__(label, validators, parse_kwargs=parse_kwargs, display_format=display_format, **kwargs) def process_formdata(self, valuelist): super(DateField, self).process_formdata(valuelist) if self.data is not None and hasattr(self.data, 'date'): self.data = self.data.date()
Python
""" Tools for generating forms based on Django models. """ from wtforms import fields as f from wtforms import Form from wtforms import validators from wtforms.ext.django.fields import ModelSelectField __all__ = ( 'model_fields', 'model_form', ) class ModelConverterBase(object): def __init__(self, converters): self.converters = converters def convert(self, model, field, field_args): kwargs = { 'label': field.verbose_name, 'description': field.help_text, 'validators': [], 'filters': [], 'default': field.default, } if field_args: kwargs.update(field_args) if field.blank: kwargs['validators'].append(validators.Optional()) if field.max_length is not None and field.max_length > 0: kwargs['validators'].append(validators.Length(max=field.max_length)) ftype = type(field).__name__ if field.choices: kwargs['choices'] = field.choices return f.SelectField(**kwargs) elif ftype in self.converters: return self.converters[ftype](model, field, kwargs) else: converter = getattr(self, 'conv_%s' % ftype, None) if converter is not None: return converter(model, field, kwargs) class ModelConverter(ModelConverterBase): DEFAULT_SIMPLE_CONVERSIONS = { f.IntegerField: ['AutoField', 'IntegerField', 'SmallIntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField'], f.DecimalField: ['DecimalField', 'FloatField'], f.FileField: ['FileField', 'FilePathField', 'ImageField'], f.DateTimeField: ['DateTimeField'], f.DateField : ['DateField'], f.BooleanField: ['BooleanField'], f.TextField: ['CharField', 'PhoneNumberField', 'SlugField'], f.TextAreaField: ['TextField', 'XMLField'], } def __init__(self, extra_converters=None, simple_conversions=None): converters = {} if simple_conversions is None: simple_conversions = self.DEFAULT_SIMPLE_CONVERSIONS for field_type, django_fields in simple_conversions.iteritems(): converter = self.make_simple_converter(field_type) for name in django_fields: converters[name] = converter if extra_converters: converters.update(extra_converters) super(ModelConverter, 
self).__init__(converters) def make_simple_converter(self, field_type): def _converter(model, field, kwargs): return field_type(**kwargs) return _converter def conv_ForeignKey(self, model, field, kwargs): return ModelSelectField(model=field.rel.to, **kwargs) def conv_TimeField(self, model, field, kwargs): def time_only(obj): try: return obj.time() except AttributeError: return obj kwargs['filters'].append(time_only) return f.DateTimeField(format='%H:%M:%S', **kwargs) def conv_EmailField(self, model, field, kwargs): kwargs['validators'].append(validators.email()) return f.TextField(**kwargs) def conv_IPAddressField(self, model, field, kwargs): kwargs['validators'].append(validators.ip_address()) return f.TextField(**kwargs) def conv_URLField(self, model, field, kwargs): kwargs['validators'].append(validators.url()) return f.TextField(**kwargs) def conv_USStateField(self, model, field, kwargs): try: from django.contrib.localflavor.us.us_states import STATE_CHOICES except ImportError: STATE_CHOICES = [] return f.SelectField(choices=STATE_CHOICES, **kwargs) def conv_NullBooleanField(self, model, field, kwargs): def coerce_nullbool(value): d = {'None': None, None: None, 'True': True, 'False': False} if value in d: return d[value] else: return bool(int(value)) choices = ((None, 'Unknown'), (True, 'Yes'), (False, 'No')) return f.SelectField(choices=choices, coerce=coerce_nullbool, **kwargs) def model_fields(model, only=None, exclude=None, field_args=None, converter=None): """ Generate a dictionary of fields for a given Django model. See `model_form` docstring for description of parameters. 
""" converter = converter or ModelConverter() field_args = field_args or {} model_fields = ((f.attname, f) for f in model._meta.fields) if only: model_fields = (x for x in model_fields if x[0] in only) elif exclude: model_fields = (x for x in model_fields if x[0] not in exclude) field_dict = {} for name, model_field in model_fields: field = converter.convert(model, model_field, field_args.get(name)) if field is not None: field_dict[name] = field return field_dict def model_form(model, base_class=Form, only=None, exclude=None, field_args=None, converter=None): """ Create a wtforms Form for a given Django model class:: from wtforms.ext.django.orm import model_form from myproject.myapp.models import User UserForm = model_form(User) :param model: A Django ORM model class :param base_class: Base form class to extend from. Must be a ``wtforms.Form`` subclass. :param only: An optional iterable with the property names that should be included in the form. Only these properties will have fields. :param exclude: An optional iterable with the property names that should be excluded from the form. All other properties will have fields. :param field_args: An optional dictionary of field names mapping to keyword arguments used to construct each field object. :param converter: A converter to generate the fields based on the model properties. If not set, ``ModelConverter`` is used. """ field_dict = model_fields(model, only, exclude, field_args, converter) return type(model._meta.object_name + 'Form', (base_class, ), field_dict)
Python
""" Template tags for easy WTForms access in Django templates. """ import re from django import template from django.conf import settings from django.template import Variable register = template.Library() class FormFieldNode(template.Node): def __init__(self, field_var, html_attrs): self.field_var = field_var self.html_attrs = html_attrs def render(self, context): try: if '.' in self.field_var: base, field_name = self.field_var.rsplit('.', 1) field = getattr(Variable(base).resolve(context), field_name) else: field = context[self.field_var] except (template.VariableDoesNotExist, KeyError, AttributeError): return settings.TEMPLATE_STRING_IF_INVALID h_attrs = {} for k, v in self.html_attrs.iteritems(): try: h_attrs[k] = v.resolve(context) except template.VariableDoesNotExist: h_attrs[k] = settings.TEMPLATE_STRING_IF_INVALID return field(**h_attrs) @register.tag(name='form_field') def do_form_field(parser, token): """ Render a WTForms form field allowing optional HTML attributes. Invocation looks like this: {% form_field form.username class="big_text" onclick="alert('hello')" %} where form.username is the path to the field value we want. Any number of key="value" arguments are supported. Unquoted values are resolved as template variables. """ parts = token.contents.split(' ', 2) if len(parts) < 2: raise template.TemplateSyntaxError('%r tag must have the form field name as the first value, followed by optional key="value" attributes.' % parts[0]) html_attrs = {} if len(parts) == 3: raw_args = list(args_split(parts[2])) if (len(raw_args) % 2) != 0: raise template.TemplateSyntaxError('%r tag received the incorrect number of key=value arguments.' % parts[0]) for x in range(0, len(raw_args), 2): html_attrs[str(raw_args[x])] = Variable(raw_args[x+1]) return FormFieldNode(parts[1], html_attrs) args_split_re = re.compile(ur'''("(?:[^"\\]*(?:\\.[^"\\]*)*)"|'(?:[^'\\]*(?:\\.[^'\\]*)*)'|[^\s=]+)''') def args_split(text): """ Split space-separated key=value arguments. 
Keeps quoted strings intact. """ for bit in args_split_re.finditer(text): bit = bit.group(0) if bit[0] == '"' and bit[-1] == '"': yield '"' + bit[1:-1].replace('\\"', '"').replace('\\\\', '\\') + '"' elif bit[0] == "'" and bit[-1] == "'": yield "'" + bit[1:-1].replace("\\'", "'").replace("\\\\", "\\") + "'" else: yield bit
Python
""" Useful form fields for use with the Django ORM. """ import operator import warnings from wtforms import widgets from wtforms.fields import SelectFieldBase from wtforms.validators import ValidationError __all__ = ( 'ModelSelectField', 'QuerySetSelectField', ) class QuerySetSelectField(SelectFieldBase): """ Given a QuerySet either at initialization or inside a view, will display a select drop-down field of choices. The `data` property actually will store/keep an ORM model instance, not the ID. Submitting a choice which is not in the queryset will result in a validation error. Specify `get_label` to customize the label associated with each option. If a string, this is the name of an attribute on the model object to use as the label text. If a one-argument callable, this callable will be passed model instance and expected to return the label text. Otherwise, the model object's `__str__` or `__unicode__` will be used. If `allow_blank` is set to `True`, then a blank choice will be added to the top of the list. Selecting this choice will result in the `data` property being `None`. The label for the blank choice can be set by specifying the `blank_text` parameter. 
""" widget = widgets.Select() def __init__(self, label=None, validators=None, queryset=None, get_label=None, label_attr=None, allow_blank=False, blank_text=u'', **kwargs): super(QuerySetSelectField, self).__init__(label, validators, **kwargs) self.allow_blank = allow_blank self.blank_text = blank_text self._set_data(None) if queryset is not None: self.queryset = queryset.all() # Make sure the queryset is fresh if label_attr is not None: warnings.warn('label_attr= will be removed in WTForms 0.7, use get_label= instead.', DeprecationWarning) self.get_label = operator.attrgetter(label_attr) elif get_label is None: self.get_label = lambda x: x elif isinstance(get_label, basestring): self.get_label = operator.attrgetter(get_label) else: self.get_label = get_label def _get_data(self): if self._formdata is not None: for obj in self.queryset: if obj.pk == self._formdata: self._set_data(obj) break return self._data def _set_data(self, data): self._data = data self._formdata = None data = property(_get_data, _set_data) def iter_choices(self): if self.allow_blank: yield (u'__None', self.blank_text, self.data is None) for obj in self.queryset: yield (obj.pk, self.get_label(obj), obj == self.data) def process_formdata(self, valuelist): if valuelist: if valuelist[0] == '__None': self.data = None else: self._data = None self._formdata = int(valuelist[0]) def pre_validate(self, form): if not self.allow_blank or self.data is not None: for obj in self.queryset: if self.data == obj: break else: raise ValidationError(self.gettext('Not a valid choice')) class ModelSelectField(QuerySetSelectField): """ Like a QuerySetSelectField, except takes a model class instead of a queryset and lists everything in it. """ def __init__(self, label=None, validators=None, model=None, **kwargs): super(ModelSelectField, self).__init__(label, validators, queryset=model._default_manager.all(), **kwargs)
Python
""" WTForms ======= WTForms is a flexible forms validation and rendering library for python web development. :copyright: Copyright (c) 2010 by Thomas Johansson, James Crasta and others. :license: BSD, see LICENSE.txt for details. """ from wtforms import validators, widgets from wtforms.fields import * from wtforms.form import Form from wtforms.validators import ValidationError __version__ = '0.6.3'
Python
"""
Standard WTForms validators.

Each validator is a callable taking ``(form, field)`` that raises
``ValidationError`` (or ``StopValidation``) on failure.  Validator
instances are typically shared between fields, forms, and requests, so
no callable below mutates instance state while computing its default
error message.
"""
import re

try:
    basestring
except NameError:
    # Python 3 compatibility shim: text checks collapse to str there.
    basestring = str

__all__ = (
    'Email', 'email', 'EqualTo', 'equal_to', 'IPAddress', 'ip_address',
    'Length', 'length', 'NumberRange', 'number_range', 'Optional', 'optional',
    'Required', 'required', 'Regexp', 'regexp', 'URL', 'url', 'AnyOf',
    'any_of', 'NoneOf', 'none_of'
)


class ValidationError(ValueError):
    """
    Raised when a validator fails to validate its input.
    """
    def __init__(self, message=u'', *args, **kwargs):
        ValueError.__init__(self, message, *args, **kwargs)


class StopValidation(Exception):
    """
    Causes the validation chain to stop.

    If StopValidation is raised, no more validators in the validation chain
    are called. If raised with a message, the message will be added to the
    errors list.
    """
    def __init__(self, message=u'', *args, **kwargs):
        Exception.__init__(self, message, *args, **kwargs)


class EqualTo(object):
    """
    Compares the values of two fields.

    :param fieldname:
        The name of the other field to compare to.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated with `%(other_label)s` and `%(other_name)s` to provide a
        more helpful error.
    """
    def __init__(self, fieldname, message=None):
        self.fieldname = fieldname
        self.message = message

    def __call__(self, form, field):
        try:
            other = form[self.fieldname]
        except KeyError:
            raise ValidationError(field.gettext(u"Invalid field name '%s'.") % self.fieldname)
        if field.data != other.data:
            d = {
                'other_label': hasattr(other, 'label') and other.label.text or self.fieldname,
                'other_name': self.fieldname
            }
            # BUGFIX: compute the default message locally instead of caching
            # it on self.message -- validator instances are shared, and
            # storing a gettext'd default leaked translated state between
            # fields and requests.
            message = self.message
            if message is None:
                message = field.gettext(u'Field must be equal to %(other_name)s.')
            raise ValidationError(message % d)


class Length(object):
    """
    Validates the length of a string.

    :param min:
        The minimum required length of the string. If not provided, minimum
        length will not be checked.
    :param max:
        The maximum length of the string. If not provided, maximum length
        will not be checked.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated using `%(min)d` and `%(max)d` if desired. Useful
        defaults are provided depending on the existence of min and max.
    """
    def __init__(self, min=-1, max=-1, message=None):
        assert min != -1 or max != -1, 'At least one of `min` or `max` must be specified.'
        assert max == -1 or min <= max, '`min` cannot be more than `max`.'
        self.min = min
        self.max = max
        self.message = message

    def __call__(self, form, field):
        l = field.data and len(field.data) or 0
        if l < self.min or self.max != -1 and l > self.max:
            # default message is picked per-call, never stored on self
            message = self.message
            if message is None:
                if self.max == -1:
                    message = field.ngettext(u'Field must be at least %(min)d character long.',
                                             u'Field must be at least %(min)d characters long.', self.min)
                elif self.min == -1:
                    message = field.ngettext(u'Field cannot be longer than %(max)d character.',
                                             u'Field cannot be longer than %(max)d characters.', self.max)
                else:
                    message = field.gettext(u'Field must be between %(min)d and %(max)d characters long.')
            raise ValidationError(message % dict(min=self.min, max=self.max))


class NumberRange(object):
    """
    Validates that a number is of a minimum and/or maximum value, inclusive.
    This will work with any comparable number type, such as floats and
    decimals, not just integers.

    :param min:
        The minimum required value of the number. If not provided, minimum
        value will not be checked.
    :param max:
        The maximum value of the number. If not provided, maximum value will
        not be checked.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated using `%(min)s` and `%(max)s` if desired. Useful
        defaults are provided depending on the existence of min and max.
    """
    def __init__(self, min=None, max=None, message=None):
        self.min = min
        self.max = max
        self.message = message

    def __call__(self, form, field):
        data = field.data
        if data is None or (self.min is not None and data < self.min) or \
            (self.max is not None and data > self.max):
            message = self.message
            if message is None:
                # we use %(min)s interpolation to support floats, None, and
                # Decimals without throwing a formatting exception.
                if self.max is None:
                    message = field.gettext(u'Number must be greater than %(min)s.')
                elif self.min is None:
                    message = field.gettext(u'Number must be less than %(max)s.')
                else:
                    message = field.gettext(u'Number must be between %(min)s and %(max)s.')
            raise ValidationError(message % dict(min=self.min, max=self.max))


class Optional(object):
    """
    Allows empty input and stops the validation chain from continuing.

    If input is empty, also removes prior errors (such as processing errors)
    from the field.
    """
    field_flags = ('optional', )

    def __call__(self, form, field):
        if not field.raw_data or isinstance(field.raw_data[0], basestring) and not field.raw_data[0].strip():
            field.errors[:] = []
            raise StopValidation()


class Required(object):
    """
    Validates that the field contains data. This validator will stop the
    validation chain on error.

    :param message:
        Error message to raise in case of a validation error.
    """
    field_flags = ('required', )

    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        if not field.data or isinstance(field.data, basestring) and not field.data.strip():
            message = self.message
            if message is None:
                message = field.gettext(u'This field is required.')
            field.errors[:] = []
            raise StopValidation(message)


class Regexp(object):
    """
    Validates the field against a user provided regexp.

    :param regex:
        The regular expression string to use. Can also be a compiled regular
        expression pattern.
    :param flags:
        The regexp flags to use, for example re.IGNORECASE. Ignored if
        `regex` is not a string.
    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, regex, flags=0, message=None):
        if isinstance(regex, basestring):
            regex = re.compile(regex, flags)
        self.regex = regex
        self.message = message

    def __call__(self, form, field, message=None):
        # `message` lets subclasses pass their own default without mutating
        # shared state; an explicit self.message still takes precedence in
        # the subclasses below.
        if not self.regex.match(field.data or u''):
            if message is None:
                message = self.message
            if message is None:
                message = field.gettext(u'Invalid input.')
            raise ValidationError(message)


class Email(Regexp):
    """
    Validates an email address. Note that this uses a very primitive regular
    expression and should only be used in instances where you later verify by
    other means, such as email activation or lookups.

    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, message=None):
        super(Email, self).__init__(r'^.+@[^.].*\.[a-z]{2,10}$', re.IGNORECASE, message)

    def __call__(self, form, field):
        message = self.message
        if message is None:
            message = field.gettext(u'Invalid email address.')
        super(Email, self).__call__(form, field, message)


class IPAddress(Regexp):
    """
    Validates an IP(v4) address.

    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, message=None):
        super(IPAddress, self).__init__(r'^([0-9]{1,3}\.){3}[0-9]{1,3}$', message=message)

    def __call__(self, form, field):
        message = self.message
        if message is None:
            message = field.gettext(u'Invalid IP address.')
        super(IPAddress, self).__call__(form, field, message)


class URL(Regexp):
    """
    Simple regexp based url validation. Much like the email validator, you
    probably want to validate the url later by other means if the url must
    resolve.

    :param require_tld:
        If true, then the domain-name portion of the URL must contain a .tld
        suffix. Set this to false if you want to allow domains like
        `localhost`.
    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, require_tld=True, message=None):
        # NOTE: written as escaped u-strings instead of ur'' literals (a
        # syntax error on Python 3); the pattern text is byte-identical.
        tld_part = (require_tld and u'\\.[a-z]{2,10}' or u'')
        regex = u'^[a-z]+://([^/:]+%s|([0-9]{1,3}\\.){3}[0-9]{1,3})(:[0-9]+)?(\\/.*)?$' % tld_part
        super(URL, self).__init__(regex, re.IGNORECASE, message)

    def __call__(self, form, field):
        message = self.message
        if message is None:
            message = field.gettext(u'Invalid URL.')
        super(URL, self).__call__(form, field, message)


class AnyOf(object):
    """
    Compares the incoming data to a sequence of valid inputs.

    :param values:
        A sequence of valid inputs.
    :param message:
        Error message to raise in case of a validation error. `%(values)s`
        contains the list of values.
    :param values_formatter:
        Function used to format the list of values in the error message.
    """
    def __init__(self, values, message=None, values_formatter=None):
        self.values = values
        self.message = message
        if values_formatter is None:
            values_formatter = lambda v: u', '.join(v)
        self.values_formatter = values_formatter

    def __call__(self, form, field):
        if field.data not in self.values:
            message = self.message
            if message is None:
                message = field.gettext(u'Invalid value, must be one of: %(values)s.')
            # BUGFIX: raise ValidationError (a ValueError subclass) instead
            # of bare ValueError, consistent with every other validator.
            raise ValidationError(message % dict(values=self.values_formatter(self.values)))


class NoneOf(object):
    """
    Compares the incoming data to a sequence of invalid inputs.

    :param values:
        A sequence of invalid inputs.
    :param message:
        Error message to raise in case of a validation error. `%(values)s`
        contains the list of values.
    :param values_formatter:
        Function used to format the list of values in the error message.
    """
    def __init__(self, values, message=None, values_formatter=None):
        self.values = values
        self.message = message
        if values_formatter is None:
            values_formatter = lambda v: u', '.join(v)
        self.values_formatter = values_formatter

    def __call__(self, form, field):
        if field.data in self.values:
            message = self.message
            if message is None:
                message = field.gettext(u'Invalid value, can\'t be any of: %(values)s.')
            # BUGFIX: ValidationError instead of bare ValueError (see AnyOf).
            raise ValidationError(message % dict(values=self.values_formatter(self.values)))


email = Email
equal_to = EqualTo
ip_address = IPAddress
length = Length
number_range = NumberRange
optional = Optional
required = Required
regexp = Regexp
url = URL
any_of = AnyOf
none_of = NoneOf
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) This is a WSGI handler for Apache Requires apache+mod_wsgi. In httpd.conf put something like: LoadModule wsgi_module modules/mod_wsgi.so WSGIScriptAlias / /path/to/wsgihandler.py """ # change these parameters as required LOGGING = False SOFTCRON = False import sys import os path = os.path.dirname(os.path.abspath(__file__)) os.chdir(path) sys.path = [path]+[p for p in sys.path if not p==path] sys.stdout=sys.stderr import gluon.main if LOGGING: application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, logfilename='httpserver.log', profilerfilename=None) else: application = gluon.main.wsgibase if SOFTCRON: from gluon.settings import global_settings global_settings.web2py_crontype = 'soft'
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) This is a handler for lighttpd+fastcgi This file has to be in the PYTHONPATH Put something like this in the lighttpd.conf file: server.port = 8000 server.bind = '127.0.0.1' server.event-handler = 'freebsd-kqueue' server.modules = ('mod_rewrite', 'mod_fastcgi') server.error-handler-404 = '/test.fcgi' server.document-root = '/somewhere/web2py' server.errorlog = '/tmp/error.log' fastcgi.server = ('.fcgi' => ('localhost' => ('min-procs' => 1, 'socket' => '/tmp/fcgi.sock' ) ) ) """ LOGGING = False SOFTCRON = False import sys import os path = os.path.dirname(os.path.abspath(__file__)) os.chdir(path) sys.path = [path]+[p for p in sys.path if not p==path] import gluon.main import gluon.contrib.gateways.fcgi as fcgi if LOGGING: application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, logfilename='httpserver.log', profilerfilename=None) else: application = gluon.main.wsgibase if SOFTCRON: from gluon.settings import global_settings global_settings.web2py_crontype = 'soft' fcgi.WSGIServer(application, bindAddress='/tmp/fcgi.sock').run()
Python
password="cfcd208495d565ef66e7dff9f98764da"
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ scgihandler.py - handler for SCGI protocol Modified by Michele Comitini <michele.comitini@glisco.it> from fcgihandler.py to support SCGI fcgihandler has the following copyright: " This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) " This is a handler for lighttpd+scgi This file has to be in the PYTHONPATH Put something like this in the lighttpd.conf file: server.document-root="/var/www/web2py/" # for >= linux-2.6 server.event-handler = "linux-sysepoll" url.rewrite-once = ( "^(/.+?/static/.+)$" => "/applications$1", "(^|/.*)$" => "/handler_web2py.scgi$1", ) scgi.server = ( "/handler_web2py.scgi" => ("handler_web2py" => ( "host" => "127.0.0.1", "port" => "4000", "check-local" => "disable", # don't forget to set "disable"! ) ) ) """ LOGGING = False SOFTCRON = False import sys import os path = os.path.dirname(os.path.abspath(__file__)) os.chdir(path) sys.path = [path]+[p for p in sys.path if not p==path] import gluon.main # uncomment one of the two imports below depending on the SCGIWSGI server installed #import paste.util.scgiserver as scgi from wsgitools.scgi.forkpool import SCGIServer if LOGGING: application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, logfilename='httpserver.log', profilerfilename=None) else: application = gluon.main.wsgibase if SOFTCRON: from gluon.settings import global_settings global_settings.web2py_crontype = 'soft' # uncomment one of the two rows below depending on the SCGIWSGI server installed #scgi.serve_application(application, '', 4000).run() SCGIServer(application, port=4000).run()
Python
#!/usr/bin/python # -*- coding: utf-8 -*- # when web2py is run as a windows service (web2py.exe -W) # it does not load the command line options but it # expects to find conifguration settings in a file called # # web2py/options.py # # this file is an example for options.py import socket import os ip = '0.0.0.0' port = 80 interfaces=[('0.0.0.0',80),('0.0.0.0',443,'ssl_private_key.pem','ssl_certificate.pem')] password = '<recycle>' # ## <recycle> means use the previous password pid_filename = 'httpserver.pid' log_filename = 'httpserver.log' profiler_filename = None #ssl_certificate = 'ssl_certificate.pem' # ## path to certificate file #ssl_private_key = 'ssl_private_key.pem' # ## path to private key file #numthreads = 50 # ## deprecated; remove minthreads = None maxthreads = None server_name = socket.gethostname() request_queue_size = 5 timeout = 30 shutdown_timeout = 5 folder = os.getcwd() extcron = None nocron = None
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

This file is based, althought a rewrite, on MIT code from the Bottle web
framework.
"""
import os, sys, optparse

# make the web2py directory the working dir and the first sys.path entry
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]

import gluon.main
from gluon.fileutils import read_file, write_file


class Servers:
    """Registry of server adapters: each static method serves `app` forever.

    Methods are looked up by name via getattr(Servers, name), so the method
    names double as legal values of the --server command line option.
    """

    @staticmethod
    def cgi(app, address=None, **options):
        from wsgiref.handlers import CGIHandler
        CGIHandler().run(app)  # Just ignore host and port here

    @staticmethod
    def flup(app, address, **options):
        import flup.server.fcgi
        flup.server.fcgi.WSGIServer(app, bindAddress=address).run()

    @staticmethod
    def wsgiref(app, address, **options):  # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler

        class QuietHandler(WSGIRequestHandler):
            # suppress the default per-request logging to stderr
            def log_request(*args, **kw):
                pass
        options['handler_class'] = QuietHandler
        srv = make_server(address[0], address[1], app, **options)
        srv.serve_forever()

    @staticmethod
    def cherrypy(app, address, **options):
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer(address, app)
        server.start()

    @staticmethod
    def rocket(app, address, **options):
        from gluon.rocket import CherryPyWSGIServer
        server = CherryPyWSGIServer(address, app)
        server.start()

    @staticmethod
    def rocket_with_repoze_profiler(app, address, **options):
        from gluon.rocket import CherryPyWSGIServer
        from repoze.profile.profiler import AccumulatingProfileMiddleware
        from gluon.settings import global_settings
        global_settings.web2py_crontype = 'none'
        wrapped = AccumulatingProfileMiddleware(
            app,
            log_filename='wsgi.prof',
            discard_first_request=True,
            flush_at_shutdown=True,
            path='/__profile__'
        )
        server = CherryPyWSGIServer(address, wrapped)
        server.start()

    @staticmethod
    def paste(app, address, **options):
        from paste import httpserver
        httpserver.serve(app, host=address[0], port=address[1], **options)

    @staticmethod
    def fapws(app, address, **options):
        import fapws._evwsgi as evwsgi
        from fapws import base
        evwsgi.start(address[0], str(address[1]))
        evwsgi.set_base_module(base)

        # BUGFIX: the original wrapper was itself named `app`, shadowing the
        # WSGI app and recursing into itself forever on the first request.
        def wsgiapp(environ, start_response):
            environ['wsgi.multiprocess'] = False
            return app(environ, start_response)
        evwsgi.wsgi_cb(('', wsgiapp))
        evwsgi.run()

    @staticmethod
    def gevent(app, address, **options):
        from gevent import monkey
        monkey.patch_all()
        from gevent import pywsgi
        from gevent.pool import Pool
        # BUGFIX: this read `option.workers` (NameError: `option` was never
        # defined); `options` is the kwargs dict, so index it instead.
        if options.get('workers'):
            pool = Pool(int(options['workers']))
        else:
            pool = 'default'
        pywsgi.WSGIServer(address, app, spawn=pool).serve_forever()

    @staticmethod
    def bjoern(app, address, **options):
        import bjoern
        bjoern.run(app, *address)

    @staticmethod
    def tornado(app, address, **options):
        import tornado.wsgi
        import tornado.httpserver
        import tornado.ioloop
        container = tornado.wsgi.WSGIContainer(app)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(address=address[0], port=address[1])
        tornado.ioloop.IOLoop.instance().start()

    @staticmethod
    def twisted(app, address, **options):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app))
        reactor.listenTCP(address[1], factory, interface=address[0])
        reactor.run()

    @staticmethod
    def diesel(app, address, **options):
        from diesel.protocols.wsgi import WSGIApplication
        app = WSGIApplication(app, port=address[1])
        app.run()

    @staticmethod
    def gunicorn(app, address, **options):
        import gunicorn.arbiter
        gunicorn.arbiter.Arbiter(address, 4, app).run()

    # keep the original misspelled name as an alias so existing
    # "-s gnuicorn" invocations keep working
    gnuicorn = gunicorn

    @staticmethod
    def eventlet(app, address, **options):
        from eventlet import wsgi, listen
        wsgi.server(listen(address), app)


def run(servername, ip, port, softcron=True, logging=False, profiler=None, workers=None):
    """Build the web2py WSGI application and serve it with one adapter.

    :param servername: name of a `Servers` static method
    :param ip: interface to bind
    :param port: port to bind (string or int)
    :param softcron: enable web2py's soft cron scheduler
    :param logging: wrap the app in web2py's logging appfactory
    :param profiler: optional profiler output filename (with logging)
    :param workers: optional pool size, forwarded to adapters that accept it
        (e.g. gevent); new keyword with a default, so existing callers are
        unaffected -- previously -w was parsed but silently ignored
    """
    if logging:
        application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                            logfilename='httpserver.log',
                                            profilerfilename=profiler)
    else:
        application = gluon.main.wsgibase
    if softcron:
        from gluon.settings import global_settings
        global_settings.web2py_crontype = 'soft'
    options = {}
    if workers:
        options['workers'] = workers
    getattr(Servers, servername)(application, (ip, int(port)), **options)


def main():
    usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P"
    try:
        version = read_file('VERSION')
    except IOError:
        version = ''
    parser = optparse.OptionParser(usage, None, optparse.Option, version)
    parser.add_option('-l', '--logging', action='store_true', default=False,
                      dest='logging', help='log into httpserver.log')
    parser.add_option('-P', '--profiler', default=False,
                      dest='profiler', help='profiler filename')
    servers = ', '.join(x for x in dir(Servers) if not x[0] == '_')
    parser.add_option('-s', '--server', default='rocket',
                      dest='server', help='server name (%s)' % servers)
    parser.add_option('-i', '--ip', default='127.0.0.1',
                      dest='ip', help='ip address')
    parser.add_option('-p', '--port', default='8000',
                      dest='port', help='port number')
    parser.add_option('-w', '--workers', default='',
                      dest='workers', help='number of workers')
    (options, args) = parser.parse_args()
    # print(...) call form behaves identically on Python 2 and 3
    print('starting %s on %s:%s...' % (options.server, options.ip, options.port))
    run(options.server, options.ip, options.port,
        logging=options.logging, profiler=options.profiler,
        workers=options.workers or None)

if __name__ == '__main__':
    main()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Usage: Install py2exe: http://sourceforge.net/projects/py2exe/files/ Copy script to the web2py directory c:\bin\python26\python build_windows_exe.py py2exe Adapted from http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/view/head:/static/scripts/tools/standalone_exe.py """ from distutils.core import setup import py2exe from gluon.import_all import base_modules, contributed_modules from gluon.fileutils import readlines_file from glob import glob import fnmatch import os import shutil import sys import re import zipfile #read web2py version from VERSION file web2py_version_line = readlines_file('VERSION')[0] #use regular expression to get just the version number v_re = re.compile('[0-9]+\.[0-9]+\.[0-9]+') web2py_version = v_re.search(web2py_version_line).group(0) #pull in preferences from config file import ConfigParser Config = ConfigParser.ConfigParser() Config.read('setup_exe.conf') remove_msft_dlls = Config.getboolean("Setup", "remove_microsoft_dlls") copy_apps = Config.getboolean("Setup", "copy_apps") copy_site_packages = Config.getboolean("Setup", "copy_site_packages") copy_scripts = Config.getboolean("Setup", "copy_scripts") make_zip = Config.getboolean("Setup", "make_zip") zip_filename = Config.get("Setup", "zip_filename") remove_build_files = Config.getboolean("Setup", "remove_build_files") # Python base version python_version = sys.version[:3] # List of modules deprecated in python2.6 that are in the above set py26_deprecated = ['mhlib', 'multifile', 'mimify', 'sets', 'MimeWriter'] if python_version == '2.6': base_modules += ['json', 'multiprocessing'] base_modules = list(set(base_modules).difference(set(py26_deprecated))) #I don't know if this is even necessary if python_version == '2.6': # Python26 compatibility: http://www.py2exe.org/index.cgi/Tutorial#Step52 try: shutil.copytree('C:\Bin\Microsoft.VC90.CRT', 'dist/') except: print "You MUST copy Microsoft.VC90.CRT folder into the dist directory" 
# Run py2exe: builds web2py.exe (console) and web2py_no_console.exe into dist/
setup(
    console=['web2py.py'],
    windows=[{'script':'web2py.py',
              'dest_base':'web2py_no_console'  # MUST NOT be just 'web2py' otherwise it overrides the standard web2py.exe
              }],
    name="web2py",
    version=web2py_version,
    description="web2py web framework",
    author="Massimo DiPierro",
    license = "LGPL v3",
    data_files=[
        'ABOUT',
        'LICENSE',
        'VERSION',
        'splashlogo.gif',
        'logging.example.conf',
        'options_std.py',
        'app.example.yaml',
        'queue.example.yaml'
    ],
    options={'py2exe': {
        'packages': contributed_modules,
        'includes': base_modules,
    }},
)
print "web2py binary successfully built"


def copy_folders(source, destination):
    """Copy files & folders from source to destination (within dist/).

    Any pre-existing dist/<destination> tree is removed first so the
    copy always reflects the current source.
    """
    if os.path.exists(os.path.join('dist', destination)):
        shutil.rmtree(os.path.join('dist', destination))
    shutil.copytree(os.path.join(source), os.path.join('dist', destination))


#should we remove Windows OS dlls user is unlikely to be able to distribute
if remove_msft_dlls:
    print "Deleted Microsoft files not licensed for open source distribution"
    print "You are still responsible for making sure you have the rights to distribute any other included files!"
    #delete the API-MS-Win-Core DLLs
    for f in glob ('dist/API-MS-Win-*.dll'):
        os.unlink (f)
    #then delete some other files belonging to Microsoft
    other_ms_files = ['KERNELBASE.dll', 'MPR.dll', 'MSWSOCK.dll',
                      'POWRPROF.dll']
    for f in other_ms_files:
        try:
            os.unlink(os.path.join('dist', f))
        except:
            # abort the build: shipping these DLLs would be a licensing issue
            print "unable to delete dist/" + f
            sys.exit(1)

#Should we include applications?
if copy_apps:
    # ship every application currently installed
    copy_folders('applications', 'applications')
    print "Your application(s) have been added"
else:
    #only copy web2py's default applications
    copy_folders('applications/admin', 'applications/admin')
    copy_folders('applications/welcome', 'applications/welcome')
    copy_folders('applications/examples', 'applications/examples')
    print "Only web2py's admin, examples & welcome applications have been added"

#should we copy project's site-packages into dist/site-packages
if copy_site_packages:
    #copy site-packages
    copy_folders('site-packages', 'site-packages')
else:
    #no worries, web2py will create the (empty) folder first run
    print "Skipping site-packages"
    pass

#should we copy project's scripts into dist/scripts
if copy_scripts:
    #copy scripts
    copy_folders('scripts', 'scripts')
else:
    #no worries, web2py will create the (empty) folder first run
    print "Skipping scripts"
    pass


#borrowed from http://bytes.com/topic/python/answers/851018-how-zip-directory-python-using-zipfile
def recursive_zip(zipf, directory, folder = ""):
    """Recursively add `directory` to open ZipFile `zipf` under `folder`.

    NOTE(review): entry names are built as folder + os.sep + item, so
    top-level entries start with a path separator and use the OS-native
    separator rather than '/'; kept as-is to match the published zips.
    """
    for item in os.listdir(directory):
        if os.path.isfile(os.path.join(directory, item)):
            zipf.write(os.path.join(directory, item),
                       folder + os.sep + item)
        elif os.path.isdir(os.path.join(directory, item)):
            recursive_zip(zipf, os.path.join(directory, item),
                          folder + os.sep + item)


#should we create a zip file of the build?
if make_zip:
    #to keep consistent with how official web2py windows zip file is setup,
    #create a web2py folder & copy dist's files into it
    shutil.copytree('dist', 'zip_temp/web2py')
    #create zip file
    #use filename specified via command line
    zipf = zipfile.ZipFile(zip_filename + ".zip", "w",
                           compression=zipfile.ZIP_DEFLATED)
    path = 'zip_temp'  # just temp so the web2py directory is included in our zip file
    recursive_zip(zipf, path)  # leave the first folder as None, as path is root.
    zipf.close()
    shutil.rmtree('zip_temp')
    print "Your Windows binary version of web2py can be found in " + \
        zip_filename + ".zip"
    print "You may extract the archive anywhere and then run web2py/web2py.exe"

#should py2exe build files be removed?
if remove_build_files:
    shutil.rmtree('build')
    shutil.rmtree('deposit')
    shutil.rmtree('dist')
    print "py2exe build files removed"

#final info
if not make_zip and not remove_build_files:
    print "Your Windows binary & associated files can also be found in /dist"

print "Finished!"
print "Enjoy web2py " + web2py_version_line
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

The widget is called from web2py.
"""

import sys
import cStringIO
import time
import thread
import re
import os
import socket
import signal
import math
import logging
import newcron
import main

from fileutils import w2p_pack, read_file, write_file
from shell import run, test
from settings import global_settings

# GUI/Windows-only dependencies; absence simply disables the Tk GUI,
# the taskbar icon and the Windows-service handler
try:
    import Tkinter, tkMessageBox
    import contrib.taskbar_widget
    from winservice import web2py_windows_service_handler
except:
    pass

# python2.4 has no BaseException
try:
    BaseException
except NameError:
    BaseException = Exception

ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-2011'
ProgramVersion = read_file('VERSION').strip()

ProgramInfo = '''%s %s %s''' % (ProgramName, ProgramAuthor, ProgramVersion)

if not sys.version[:3] in ['2.4', '2.5', '2.6', '2.7']:
    msg = 'Warning: web2py requires Python 2.4, 2.5 (recommended), 2.6 or 2.7 but you are running:\n%s'
    msg = msg % sys.version
    sys.stderr.write(msg)

logger = logging.getLogger("web2py")


class IO(object):
    """stdout tee: echoes writes to the real stdout and either to a
    registered `callback` attribute (if set) or to an internal buffer."""

    def __init__(self):
        """Create the internal capture buffer."""
        self.buffer = cStringIO.StringIO()

    def write(self, data):
        """Write `data` to the real stdout and forward it to the
        callback if one has been attached, otherwise buffer it."""
        sys.__stdout__.write(data)
        if hasattr(self, 'callback'):
            self.callback(data)
        else:
            self.buffer.write(data)


def try_start_browser(url):
    """ Try to start the default browser """
    try:
        import webbrowser
        webbrowser.open(url)
    except:
        # best-effort only: headless boxes have no browser
        print 'warning: unable to detect your browser'


def start_browser(ip, port):
    """ Starts the default browser """
    print 'please visit:'
    print '\thttp://%s:%s' % (ip, port)
    print 'starting browser...'
    try_start_browser('http://%s:%s' % (ip, port))


def presentation(root):
    """ Draw the splash screen """
    root.withdraw()

    dx = root.winfo_screenwidth()
    dy = root.winfo_screenheight()

    # borderless 500x300 dialog centered on screen
    dialog = Tkinter.Toplevel(root, bg='white')
    dialog.geometry('%ix%i+%i+%i' % (500, 300, dx / 2 - 200, dy / 2 - 150))

    dialog.overrideredirect(1)
    dialog.focus_force()

    canvas = Tkinter.Canvas(dialog,
                            background='white',
                            width=500,
                            height=300)
    canvas.pack()
    root.update()

    img = Tkinter.PhotoImage(file='splashlogo.gif')
    pnl = Tkinter.Label(canvas, image=img, background='white', bd=0)
    pnl.pack(side='top', fill='both', expand='yes')
    # Prevent garbage collection of img
    pnl.image = img

    def add_label(text='Change Me', font_size=12, foreground='#195866',
                  height=1):
        return Tkinter.Label(
            master=canvas,
            width=250,
            height=height,
            text=text,
            font=('Helvetica', font_size),
            anchor=Tkinter.CENTER,
            foreground=foreground,
            background='white'
        )

    add_label('Welcome to...').pack(side='top')
    add_label(ProgramName, 18, '#FF5C1F', 2).pack()
    add_label(ProgramAuthor).pack()
    add_label(ProgramVersion).pack()

    root.update()
    time.sleep(5)  # keep the splash visible for five seconds
    dialog.destroy()
    return


class web2pyDialog(object):
    """ Main window dialog """

    def __init__(self, root, options):
        """ web2pyDialog constructor  """
        # the dialog lives in its own Toplevel; `root` stays withdrawn
        root.title('web2py server')
        self.root = Tkinter.Toplevel(root)
        self.options = options
        self.menu = Tkinter.Menu(self.root)
        servermenu = Tkinter.Menu(self.menu, tearoff=0)
        httplog = os.path.join(self.options.folder, 'httpserver.log')

        # Building the Menu
        item = lambda: try_start_browser(httplog)
        servermenu.add_command(label='View httpserver.log',
                               command=item)
        servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
                               command=self.quit)

        self.menu.add_cascade(label='Server', menu=servermenu)

        self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
        self.menu.add_cascade(label='Pages', menu=self.pagesmenu)

        helpmenu = Tkinter.Menu(self.menu, tearoff=0)

        # Home Page
        item = lambda: try_start_browser('http://www.web2py.com')
        helpmenu.add_command(label='Home Page',
                             command=item)

        # About
        item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
        helpmenu.add_command(label='About',
                             command=item)

        self.menu.add_cascade(label='Info', menu=helpmenu)

        self.root.config(menu=self.menu)

        if options.taskbar:
            # closing the window only hides it when a taskbar icon exists
            self.root.protocol('WM_DELETE_WINDOW',
                               lambda: self.quit(True))
        else:
            self.root.protocol('WM_DELETE_WINDOW', self.quit)

        sticky = Tkinter.NW

        # IP
        Tkinter.Label(self.root,
                      text='Server IP:',
                      justify=Tkinter.LEFT).grid(row=0,
                                                 column=0,
                                                 sticky=sticky)
        self.ip = Tkinter.Entry(self.root)
        self.ip.insert(Tkinter.END, self.options.ip)
        self.ip.grid(row=0, column=1, sticky=sticky)

        # Port
        Tkinter.Label(self.root,
                      text='Server Port:',
                      justify=Tkinter.LEFT).grid(row=1,
                                                 column=0,
                                                 sticky=sticky)
        self.port_number = Tkinter.Entry(self.root)
        self.port_number.insert(Tkinter.END, self.options.port)
        self.port_number.grid(row=1, column=1, sticky=sticky)

        # Password
        Tkinter.Label(self.root,
                      text='Choose Password:',
                      justify=Tkinter.LEFT).grid(row=2,
                                                 column=0,
                                                 sticky=sticky)
        self.password = Tkinter.Entry(self.root, show='*')
        self.password.bind('<Return>', lambda e: self.start())
        self.password.focus_force()
        self.password.grid(row=2, column=1, sticky=sticky)

        # Prepare the canvas (live request-rate graph, see update_canvas)
        self.canvas = Tkinter.Canvas(self.root,
                                     width=300,
                                     height=100,
                                     bg='black')
        self.canvas.grid(row=3, column=0, columnspan=2)
        self.canvas.after(1000, self.update_canvas)

        # Prepare the frame
        frame = Tkinter.Frame(self.root)
        frame.grid(row=4, column=0, columnspan=2)

        # Start button
        self.button_start = Tkinter.Button(frame,
                                           text='start server',
                                           command=self.start)
        self.button_start.grid(row=0, column=0)

        # Stop button
        self.button_stop = Tkinter.Button(frame,
                                          text='stop server',
                                          command=self.stop)
        self.button_stop.grid(row=0, column=1)
        self.button_stop.configure(state='disabled')

        if options.taskbar:
            self.tb = contrib.taskbar_widget.TaskBarIcon()
            self.checkTaskBar()

            if options.password != '<ask>':
                # password given on the command line: auto-start hidden
                self.password.insert(0, options.password)
                self.start()
                self.root.withdraw()
        else:
            self.tb = None

    def checkTaskBar(self):
        """ Check taskbar status (polled every second) """
        if self.tb.status:
            if self.tb.status[0] == self.tb.EnumStatus.QUIT:
                self.quit()
            elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
                if self.root.state() == 'withdrawn':
                    self.root.deiconify()
                else:
                    self.root.withdraw()
            elif self.tb.status[0] == self.tb.EnumStatus.STOP:
                self.stop()
            elif self.tb.status[0] == self.tb.EnumStatus.START:
                self.start()
            elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
                self.stop()
                self.start()
            del self.tb.status[0]

        self.root.after(1000, self.checkTaskBar)

    def update(self, text):
        """ Update app text """
        try:
            self.text.configure(state='normal')
            self.text.insert('end', text)
            self.text.configure(state='disabled')
        except:
            pass  # ## this should only happen in case app is destroyed

    def connect_pages(self):
        """ Populate the Pages menu with one entry per installed app """
        for arq in os.listdir('applications/'):
            if os.path.exists('applications/%s/__init__.py' % arq):
                url = self.url + '/' + arq
                # bind url as a default arg to avoid the late-binding trap
                start_browser = lambda u = url: try_start_browser(u)
                self.pagesmenu.add_command(label=url,
                                           command=start_browser)

    def quit(self, justHide=False):
        """ Finish the program execution """
        if justHide:
            self.root.withdraw()
        else:
            # best-effort shutdown: server/taskbar may never have started
            try:
                self.server.stop()
            except:
                pass

            try:
                self.tb.Destroy()
            except:
                pass

            self.root.destroy()
            sys.exit()

    def error(self, message):
        """ Show error message """
        tkMessageBox.showerror('web2py start server', message)

    def start(self):
        """ Start web2py server from the GUI fields """
        password = self.password.get()

        if not password:
            self.error('no password, no web admin interface')

        ip = self.ip.get()

        regexp = '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
        if ip and not re.compile(regexp).match(ip):
            return self.error('invalid host ip address')

        try:
            port = int(self.port_number.get())
        except:
            return self.error('invalid port number')

        self.url = 'http://%s:%s' % (ip, port)
        self.connect_pages()
        self.button_start.configure(state='disabled')

        try:
            options = self.options
            req_queue_size = options.request_queue_size
            self.server = main.HttpServer(
                ip,
                port,
                password,
                pid_filename=options.pid_filename,
                log_filename=options.log_filename,
                profiler_filename=options.profiler_filename,
                ssl_certificate=options.ssl_certificate,
                ssl_private_key=options.ssl_private_key,
                min_threads=options.minthreads,
                max_threads=options.maxthreads,
                server_name=options.server_name,
                request_queue_size=req_queue_size,
                timeout=options.timeout,
                shutdown_timeout=options.shutdown_timeout,
                path=options.folder,
                interfaces=options.interfaces)

            # serve in a background thread so the GUI stays responsive
            thread.start_new_thread(self.server.start, ())
        except Exception, e:
            self.button_start.configure(state='normal')
            return self.error(str(e))

        self.button_stop.configure(state='normal')

        if not options.taskbar:
            thread.start_new_thread(start_browser, (ip, port))

        # freeze the connection fields while the server is running
        self.password.configure(state='readonly')
        self.ip.configure(state='readonly')
        self.port_number.configure(state='readonly')

        if self.tb:
            self.tb.SetServerRunning()

    def stop(self):
        """ Stop web2py server """
        self.button_start.configure(state='normal')
        self.button_stop.configure(state='disabled')
        self.password.configure(state='normal')
        self.ip.configure(state='normal')
        self.port_number.configure(state='normal')
        self.server.stop()

        if self.tb:
            self.tb.SetServerStopped()

    def update_canvas(self):
        """ Redraw the request-rate graph from httpserver.log growth """
        try:
            t1 = os.path.getsize('httpserver.log')
        except:
            # no log yet: retry in a second
            self.canvas.after(1000, self.update_canvas)
            return

        try:
            # read only the bytes appended since the last poll and
            # scroll the graph left by one sample
            fp = open('httpserver.log', 'r')
            fp.seek(self.t0)
            data = fp.read(t1 - self.t0)
            fp.close()
            value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
            self.p0 = value

            for i in xrange(len(self.p0) - 1):
                c = self.canvas.coords(self.q0[i])
                self.canvas.coords(self.q0[i], (c[0],
                                                self.p0[i],
                                                c[2],
                                                self.p0[i + 1]))
            self.t0 = t1
        except BaseException:
            # first call (self.t0/p0/q0 not yet set): initialize the graph
            self.t0 = time.time()
            self.t0 = t1
            self.p0 = [100] * 300
            self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
                       fill='green') for i in xrange(len(self.p0) - 1)]

        self.canvas.after(1000, self.update_canvas)


def console():
    """ Defines the behavior of the console web2py execution.

    Builds the optparse parser, parses sys.argv (with special handling
    for -A/--args pass-through), normalizes the options and prepares the
    applications folder. Returns (options, args).
    """
    import optparse
    import textwrap

    usage = "python web2py.py"

    description = """\
    web2py Web Framework startup script. ATTENTION: unless a password
    is specified (-a 'passwd') web2py will attempt to run a GUI.
    In this case command line options are ignored."""

    description = textwrap.dedent(description)

    parser = optparse.OptionParser(usage, None, optparse.Option,
                                   ProgramVersion)

    parser.description = description

    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help='ip address of the server (127.0.0.1)')

    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      type='int',
                      help='port of server (8000)')

    msg = 'password to be used for administration'
    msg += ' (use -a "<recycle>" to reuse the last password))'
    parser.add_option('-a',
                      '--password',
                      default='<ask>',
                      dest='password',
                      help=msg)

    parser.add_option('-c',
                      '--ssl_certificate',
                      default='',
                      dest='ssl_certificate',
                      help='file that contains ssl certificate')

    parser.add_option('-k',
                      '--ssl_private_key',
                      default='',
                      dest='ssl_private_key',
                      help='file that contains ssl private key')

    parser.add_option('-d',
                      '--pid_filename',
                      default='httpserver.pid',
                      dest='pid_filename',
                      help='file to store the pid of the server')

    parser.add_option('-l',
                      '--log_filename',
                      default='httpserver.log',
                      dest='log_filename',
                      help='file to log connections')

    parser.add_option('-n',
                      '--numthreads',
                      default=None,
                      type='int',
                      dest='numthreads',
                      help='number of threads (deprecated)')

    parser.add_option('--minthreads',
                      default=None,
                      type='int',
                      dest='minthreads',
                      help='minimum number of server threads')

    parser.add_option('--maxthreads',
                      default=None,
                      type='int',
                      dest='maxthreads',
                      help='maximum number of server threads')

    parser.add_option('-s',
                      '--server_name',
                      default=socket.gethostname(),
                      dest='server_name',
                      help='server name for the web server')

    msg = 'max number of queued requests when server unavailable'
    parser.add_option('-q',
                      '--request_queue_size',
                      default='5',
                      type='int',
                      dest='request_queue_size',
                      help=msg)

    parser.add_option('-o',
                      '--timeout',
                      default='10',
                      type='int',
                      dest='timeout',
                      help='timeout for individual request (10 seconds)')

    parser.add_option('-z',
                      '--shutdown_timeout',
                      default='5',
                      type='int',
                      dest='shutdown_timeout',
                      help='timeout on shutdown of server (5 seconds)')

    parser.add_option('-f',
                      '--folder',
                      default=os.getcwd(),
                      dest='folder',
                      help='folder from which to run web2py')

    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      dest='verbose',
                      default=False,
                      help='increase --test verbosity')

    parser.add_option('-Q',
                      '--quiet',
                      action='store_true',
                      dest='quiet',
                      default=False,
                      help='disable all output')

    msg = 'set debug output level (0-100, 0 means all, 100 means none;'
    msg += ' default is 30)'
    parser.add_option('-D',
                      '--debug',
                      dest='debuglevel',
                      default=30,
                      type='int',
                      help=msg)

    msg = 'run web2py in interactive shell or IPython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += ' APPNAME like a/c/f (c,f optional)'
    parser.add_option('-S',
                      '--shell',
                      dest='shell',
                      metavar='APPNAME',
                      help=msg)

    msg = 'run web2py in interactive shell or bpython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += '\n Use combined with --shell'
    parser.add_option('-B',
                      '--bpython',
                      action='store_true',
                      default=False,
                      dest='bpython',
                      help=msg)

    msg = 'only use plain python shell; should be used with --shell option'
    parser.add_option('-P',
                      '--plain',
                      action='store_true',
                      default=False,
                      dest='plain',
                      help=msg)

    msg = 'auto import model files; default is False; should be used'
    msg += ' with --shell option'
    parser.add_option('-M',
                      '--import_models',
                      action='store_true',
                      default=False,
                      dest='import_models',
                      help=msg)

    msg = 'run PYTHON_FILE in web2py environment;'
    msg += ' should be used with --shell option'
    parser.add_option('-R',
                      '--run',
                      dest='run',
                      metavar='PYTHON_FILE',
                      default='',
                      help=msg)

    msg = 'run doctests in web2py environment; ' +\
        'TEST_PATH like a/c/f (c,f optional)'
    parser.add_option('-T',
                      '--test',
                      dest='test',
                      metavar='TEST_PATH',
                      default=None,
                      help=msg)

    parser.add_option('-W',
                      '--winservice',
                      dest='winservice',
                      default='',
                      help='-W install|start|stop as Windows service')

    msg = 'trigger a cron run manually; usually invoked from a system crontab'
    parser.add_option('-C',
                      '--cron',
                      action='store_true',
                      dest='extcron',
                      default=False,
                      help=msg)

    msg = 'triggers the use of softcron'
    parser.add_option('--softcron',
                      action='store_true',
                      dest='softcron',
                      default=False,
                      help=msg)

    parser.add_option('-N',
                      '--no-cron',
                      action='store_true',
                      dest='nocron',
                      default=False,
                      help='do not start cron automatically')

    parser.add_option('-J',
                      '--cronjob',
                      action='store_true',
                      dest='cronjob',
                      default=False,
                      help='identify cron-initiated command')

    parser.add_option('-L',
                      '--config',
                      dest='config',
                      default='',
                      help='config file')

    parser.add_option('-F',
                      '--profiler',
                      dest='profiler_filename',
                      default=None,
                      help='profiler filename')

    parser.add_option('-t',
                      '--taskbar',
                      action='store_true',
                      dest='taskbar',
                      default=False,
                      help='use web2py gui and run in taskbar (system tray)')

    parser.add_option('',
                      '--nogui',
                      action='store_true',
                      default=False,
                      dest='nogui',
                      help='text-only, no GUI')

    parser.add_option('-A',
                      '--args',
                      action='store',
                      dest='args',
                      default=None,
                      help='should be followed by a list of arguments to be passed to script, to be used with -S, -A must be the last option')

    parser.add_option('--no-banner',
                      action='store_true',
                      default=False,
                      dest='nobanner',
                      help='Do not print header banner')

    msg = 'listen on multiple addresses: "ip:port:cert:key;ip2:port2:cert2:key2;..." (:cert:key optional; no spaces)'
    parser.add_option('--interfaces',
                      action='store',
                      dest='interfaces',
                      default=None,
                      help=msg)

    # everything after -A/--args is forwarded verbatim to the -S script,
    # so split sys.argv before optparse ever sees those tokens
    if '-A' in sys.argv:
        k = sys.argv.index('-A')
    elif '--args' in sys.argv:
        k = sys.argv.index('--args')
    else:
        k = len(sys.argv)
    sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
    (options, args) = parser.parse_args()
    options.args = [options.run] + other_args
    global_settings.cmd_options = options
    global_settings.cmd_args = args

    if options.quiet:
        # swallow stdout and silence the logger completely
        capture = cStringIO.StringIO()
        sys.stdout = capture
        logger.setLevel(logging.CRITICAL + 1)
    else:
        logger.setLevel(options.debuglevel)

    # -L accepts "config.py" or "config"; __import__ wants no extension
    if options.config[-3:] == '.py':
        options.config = options.config[:-3]

    if options.cronjob:
        global_settings.cronjob = True  # tell the world
        options.nocron = True  # don't start cron jobs
        options.plain = True  # cronjobs use a plain shell

    options.folder = os.path.abspath(options.folder)

    #  accept --interfaces in the form
    #  "ip:port:cert:key;ip2:port2;ip3:port3:cert3:key3"
    #  (no spaces; optional cert:key indicate SSL)
    if isinstance(options.interfaces, str):
        options.interfaces = [
            interface.split(':') for interface in
            options.interfaces.split(';')]
        for interface in options.interfaces:
            interface[1] = int(interface[1])  # numeric port
        options.interfaces = [
            tuple(interface) for interface in options.interfaces]

    if options.numthreads is not None and options.minthreads is None:
        options.minthreads = options.numthreads  # legacy

    if not options.cronjob:
        # If we have the applications package or if we should upgrade
        if not os.path.exists('applications/__init__.py'):
            write_file('applications/__init__.py', '')

        if not os.path.exists('welcome.w2p') or \
                os.path.exists('NEWINSTALL'):
            try:
                w2p_pack('welcome.w2p', 'applications/welcome')
                os.unlink('NEWINSTALL')
            except:
                msg = "New installation: unable to create welcome.w2p file"
                sys.stderr.write(msg)

    return (options, args)


def start(cron=True):
    """ Start server: dispatches on the parsed command-line options to
    run doctests (-T), a shell (-S), cron (-C), a Windows service (-W),
    the Tk GUI, or the plain HTTP server. """

    # ## get command line arguments

    (options, args) = console()

    if not options.nobanner:
        print ProgramName
        print ProgramAuthor
        print ProgramVersion

    from dal import drivers
    if not options.nobanner:
        print 'Database drivers available: %s' % ', '.join(drivers)

    # ## if -L load options from options.config file
    if options.config:
        try:
            options2 = __import__(options.config, {}, {}, '')
        except Exception:
            try:
                # Jython doesn't like the extra stuff
                options2 = __import__(options.config)
            except Exception:
                print 'Cannot import config file [%s]' % options.config
                sys.exit(1)
        # config values override command-line values of the same name
        for key in dir(options2):
            if hasattr(options, key):
                setattr(options, key, getattr(options2, key))

    # ## if -T run doctests (no cron)
    if hasattr(options, 'test') and options.test:
        test(options.test, verbose=options.verbose)
        return

    # ## if -S start interactive shell (also no cron)
    if options.shell:
        if options.args != None:
            sys.argv[:] = options.args
        run(options.shell, plain=options.plain, bpython=options.bpython,
            import_models=options.import_models, startfile=options.run)
        return

    # ## if -C start cron run (extcron) and exit
    # ## if -N or not cron disable cron in this *process*
    # ## if --softcron use softcron
    # ## use hardcron in all other cases
    if options.extcron:
        print 'Starting extcron...'
        global_settings.web2py_crontype = 'external'
        extcron = newcron.extcron(options.folder)
        extcron.start()
        extcron.join()
        return
    elif cron and not options.nocron and options.softcron:
        print 'Using softcron (but this is not very efficient)'
        global_settings.web2py_crontype = 'soft'
    elif cron and not options.nocron:
        print 'Starting hardcron...'
        global_settings.web2py_crontype = 'hard'
        newcron.hardcron(options.folder).start()

    # ## if -W install/start/stop web2py as service
    if options.winservice:
        if os.name == 'nt':
            web2py_windows_service_handler(['', options.winservice],
                                           options.config)
        else:
            print 'Error: Windows services not supported on this platform'
            sys.exit(1)
        return

    # ## if no password provided and havetk start Tk interface
    # ## or start interface if we want to put in taskbar (system tray)

    try:
        options.taskbar
    except:
        options.taskbar = False

    if options.taskbar and os.name != 'nt':
        print 'Error: taskbar not supported on this platform'
        sys.exit(1)

    root = None

    if not options.nogui:
        try:
            import Tkinter
            havetk = True
        except ImportError:
            logger.warn(
                'GUI not available because Tk library is not installed')
            havetk = False

        if options.password == '<ask>' and havetk or \
                options.taskbar and havetk:
            try:
                root = Tkinter.Tk()
            except:
                pass

    if root:
        root.focus_force()
        if not options.quiet:
            presentation(root)
        master = web2pyDialog(root, options)
        signal.signal(signal.SIGTERM, lambda a, b: master.quit())

        try:
            root.mainloop()
        except:
            master.quit()

        sys.exit()

    # ## if no tk and no password, ask for a password

    if not root and options.password == '<ask>':
        options.password = raw_input('choose a password:')

    if not options.password and not options.nobanner:
        print 'no password, no admin interface'

    # ## start server

    (ip, port) = (options.ip, int(options.port))

    if not options.nobanner:
        print 'please visit:'
        print '\thttp://%s:%s' % (ip, port)
        print 'use "kill -SIGTERM %i" to shutdown the web2py server' % \
            os.getpid()

    server = main.HttpServer(ip=ip,
                             port=port,
                             password=options.password,
                             pid_filename=options.pid_filename,
                             log_filename=options.log_filename,
                             profiler_filename=options.profiler_filename,
                             ssl_certificate=options.ssl_certificate,
                             ssl_private_key=options.ssl_private_key,
                             min_threads=options.minthreads,
                             max_threads=options.maxthreads,
                             server_name=options.server_name,
                             request_queue_size=options.request_queue_size,
                             timeout=options.timeout,
                             shutdown_timeout=options.shutdown_timeout,
                             path=options.folder,
                             interfaces=options.interfaces)

    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()
    logging.shutdown()
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking.
#                  Requires python 1.5.2 or better.

"""
Cross-platform (posix/nt) API for flock-style file locking.

Synopsis:

   import portalocker
   file = open(\"somefile\", \"r+\")
   portalocker.lock(file, portalocker.LOCK_EX)
   file.seek(12)
   file.write(\"foo\")
   file.close()

If you know what you're doing, you may choose to

   portalocker.unlock(file)

before closing the file, but why?

Methods:

   lock( file, flags )
   unlock( file )

Constants:

   LOCK_EX
   LOCK_SH
   LOCK_NB

I learned the win32 technique for locking files from sample code
provided by John Nielsen <nielsenjf@my-deja.com> in the documentation
that accompanies the win32 modules.

Author: Jonathan Feinberg <jdf@pobox.com>
Version: $Id: portalocker.py,v 1.3 2001/05/29 18:47:55 Administrator Exp $
"""

import os
import logging
import platform
logger = logging.getLogger("web2py")

# Detect the available locking backend:
#   'posix'   - fcntl.flock is importable
#   'windows' - pywin32 (win32con/win32file/pywintypes) is importable
#   None      - neither: lock/unlock become no-ops below
os_locking = None
try:
    import fcntl
    os_locking = 'posix'
except:
    pass
try:
    import win32con
    import win32file
    import pywintypes
    os_locking = 'windows'
except:
    pass

if os_locking == 'windows':
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_SH = 0  # the default
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY

    # is there any reason not to reuse the following structure?
    __overlapped = pywintypes.OVERLAPPED()

    def lock(file, flags):
        # lock 0x7fff0000 bytes starting at the current position
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.LockFileEx(hfile, flags, 0, 0x7fff0000, __overlapped)

    def unlock(file):
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.UnlockFileEx(hfile, 0, 0x7fff0000, __overlapped)

elif os_locking == 'posix':
    LOCK_EX = fcntl.LOCK_EX
    LOCK_SH = fcntl.LOCK_SH
    LOCK_NB = fcntl.LOCK_NB

    def lock(file, flags):
        fcntl.flock(file.fileno(), flags)

    def unlock(file):
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)

else:
    # no backend available: warn (loudly on Windows, where pywin32 is
    # installable) and degrade to no-op locking
    if platform.system() == 'Windows':
        logger.error('no file locking, you must install the win32 extensions from: http://sourceforge.net/projects/pywin32/files/')
    else:
        logger.debug('no file locking, this will cause problems')

    LOCK_EX = None
    LOCK_SH = None
    LOCK_NB = None

    def lock(file, flags):
        pass

    def unlock(file):
        pass

if __name__ == '__main__':
    # manual smoke test: hold an exclusive lock on log.txt until Enter
    from time import time, strftime, localtime
    import sys

    log = open('log.txt', 'a+')
    lock(log, LOCK_EX)

    timestamp = strftime('%m/%d/%Y %H:%M:%S\n', localtime(time()))
    log.write(timestamp)

    print 'Wrote lines. Hit enter to release lock.'
    dummy = sys.stdin.readline()

    log.close()
Python
""" This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ from storage import Storage global_settings = Storage() settings = global_settings # legacy compatibility
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

__all__ = ['HTTP', 'redirect']

# canonical reason phrases for the status codes web2py emits
defined_status = {
    200: 'OK',
    201: 'CREATED',
    202: 'ACCEPTED',
    203: 'NON-AUTHORITATIVE INFORMATION',
    204: 'NO CONTENT',
    205: 'RESET CONTENT',
    206: 'PARTIAL CONTENT',
    301: 'MOVED PERMANENTLY',
    302: 'FOUND',
    303: 'SEE OTHER',
    304: 'NOT MODIFIED',
    305: 'USE PROXY',
    307: 'TEMPORARY REDIRECT',
    400: 'BAD REQUEST',
    401: 'UNAUTHORIZED',
    403: 'FORBIDDEN',
    404: 'NOT FOUND',
    405: 'METHOD NOT ALLOWED',
    406: 'NOT ACCEPTABLE',
    407: 'PROXY AUTHENTICATION REQUIRED',
    408: 'REQUEST TIMEOUT',
    409: 'CONFLICT',
    410: 'GONE',
    411: 'LENGTH REQUIRED',
    412: 'PRECONDITION FAILED',
    413: 'REQUEST ENTITY TOO LARGE',
    414: 'REQUEST-URI TOO LONG',
    415: 'UNSUPPORTED MEDIA TYPE',
    416: 'REQUESTED RANGE NOT SATISFIABLE',
    417: 'EXPECTATION FAILED',
    500: 'INTERNAL SERVER ERROR',
    501: 'NOT IMPLEMENTED',
    502: 'BAD GATEWAY',
    503: 'SERVICE UNAVAILABLE',
    504: 'GATEWAY TIMEOUT',
    505: 'HTTP VERSION NOT SUPPORTED',
}

# If web2py is executed with python2.4 we need
# to use Exception instead of BaseException
try:
    BaseException
except NameError:
    BaseException = Exception


class HTTP(BaseException):
    """Exception that carries a complete HTTP response.

    Raising HTTP anywhere in a request aborts execution; the framework
    catches it and delivers status, headers and body to the client.

    :param status: numeric HTTP status code
    :param body: response body (string or iterable of strings)
    :param headers: response headers, passed as keyword arguments
    """

    def __init__(self, status, body='', **headers):
        self.status = status
        self.body = body
        self.headers = headers

    def to(self, responder):
        """Send this response through the WSGI start_response-style
        callable `responder` and return the iterable response body."""
        code = self.status
        if code in defined_status:
            status = '%d %s' % (code, defined_status[code])
        else:
            status = str(code) + ' '

        # default the content type unless the caller supplied one
        self.headers.setdefault('Content-Type', 'text/html; charset=UTF-8')

        body = self.body
        if status[:1] == '4':
            # client errors: fall back to the status line as body and
            # advertise an explicit Content-Length
            if not body:
                body = status
            if isinstance(body, str):
                if len(body) < 512 and \
                        self.headers['Content-Type'].startswith('text/html'):
                    body += '<!-- %s //-->' % ('x' * 512)  # ## trick IE
                self.headers['Content-Length'] = len(body)

        # flatten list-valued headers into repeated (key, value) pairs
        flattened = []
        for key, value in self.headers.items():
            values = value if isinstance(value, list) else [value]
            for single in values:
                flattened.append((key, str(single)))
        responder(status, flattened)

        # pass iterables straight through; everything else is wrapped
        # (the isinstance guard keeps strings from being iterated char-wise)
        if hasattr(body, '__iter__') and not isinstance(self.body, str):
            return body
        return [str(body)]

    @property
    def message(self):
        '''
        compose a message describing this exception

            "status defined_status [web2py_error]"

        message elements that are not defined are omitted
        '''
        template = '%(status)d'
        if self.status in defined_status:
            template = '%(status)d %(defined_status)s'
        if 'web2py_error' in self.headers:
            template += ' [%(web2py_error)s]'
        return template % dict(
            status=self.status,
            defined_status=defined_status.get(self.status),
            web2py_error=self.headers.get('web2py_error'))

    def __str__(self):
        "stringify me"
        return self.message


def redirect(location, how=303):
    """Abort the current request and redirect the client to `location`.

    CR/LF in `location` are percent-encoded to prevent header injection.
    """
    location = location.replace('\r', '%0D').replace('\n', '%0A')
    raise HTTP(how,
               'You are being redirected <a href="%s">here</a>' % location,
               Location=location)
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

import logging
import pdb
import Queue
import sys

logger = logging.getLogger("web2py")


class Pipe(Queue.Queue):
    """
    File-like wrapper around Queue.Queue used as stdin/stdout for the
    embedded pdb instance below. write()/read() block on the queue, so the
    writer and the reader synchronize on every message exchanged.
    """

    def __init__(self, name, mode='r', *args, **kwargs):
        # `name` only labels log messages; `mode` is accepted for file-API
        # compatibility but is otherwise unused
        self.__name = name
        Queue.Queue.__init__(self, *args, **kwargs)

    def write(self, data):
        logger.debug("debug %s writting %s" % (self.__name, data))
        self.put(data)

    def flush(self):
        # mark checkpoint (complete message): None is the terminator that
        # reading loops (see communicate below) use to detect end-of-output
        logger.debug("debug %s flushing..." % self.__name)
        self.put(None)
        # wait until every queued item was consumed before returning
        self.join()
        logger.debug("debug %s flush done" % self.__name)

    def read(self, count=None, timeout=None):
        # `count` is ignored: each read yields one whole queued message
        logger.debug("debug %s reading..." % (self.__name, ))
        data = self.get(block=True, timeout=timeout)
        # signal that we are ready (this is what unblocks a flush() join)
        self.task_done()
        logger.debug("debug %s read %s" % (self.__name, data))
        return data

    def readline(self):
        logger.debug("debug %s readline..." % (self.__name, ))
        return self.read()


# command/output channels shared with the debugger instance below
pipe_in = Pipe('in')
pipe_out = Pipe('out')

# pdb wired to the pipes; completekey=None disables readline tab
# completion, which cannot work over a queue-backed stream
debugger = pdb.Pdb(completekey=None,
                   stdin=pipe_in,
                   stdout=pipe_out,)


def set_trace():
    "breakpoint shortcut (like pdb)"
    logger.info("DEBUG: set_trace!")
    # start debugging in the caller's frame, not inside this helper
    debugger.set_trace(sys._getframe().f_back)


def stop_trace():
    "stop waiting for the debugger (called atexit)"
    # this should prevent communicate() from waiting forever for a command
    # result once the main thread has finished
    logger.info("DEBUG: stop_trace!")
    pipe_out.write("debug finished!")
    pipe_out.write(None)
    #pipe_out.flush()


def communicate(command=None):
    "send a command to the debugger, wait for the complete result"
    if command is not None:
        logger.info("DEBUG: sending command %s" % command)
        pipe_in.write(command)
        #pipe_in.flush()
    result = []
    while True:
        data = pipe_out.read()
        if data is None:
            # None is the end-of-message marker written by flush()/stop_trace()
            break
        result.append(data)
    logger.info("DEBUG: result %s" % repr(result))
    return ''.join(result)
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

This file specifically includes utilities for security.
"""

import hashlib
import hmac
import uuid
import random
import time
import os
import logging

logger = logging.getLogger("web2py")


def md5_hash(text):
    """ Generate a md5 hash with the given text """
    return hashlib.md5(text).hexdigest()


def simple_hash(text, digest_alg='md5'):
    """
    Generates hash with the given text using the specified
    digest hashing algorithm

    :param text: the (byte) string to hash
    :param digest_alg: a hashlib algorithm name, or a callable returning
        a digest object (e.g. hashlib.md5)
    :raises RuntimeError: if digest_alg is falsy
    """
    if not digest_alg:
        # call-style raise: the old `raise X, msg` statement form is
        # python2-only syntax
        raise RuntimeError("simple_hash with digest_alg=None")
    elif not isinstance(digest_alg, str):
        # a digest constructor was passed directly
        h = digest_alg(text)
    else:
        h = hashlib.new(digest_alg)
        h.update(text)
    return h.hexdigest()


def get_digest(value):
    """
    Returns a hashlib digest algorithm from a string

    Non-string values are returned unchanged (assumed to already be a
    digest constructor).

    :raises ValueError: for unrecognized algorithm names
    """
    if not isinstance(value, str):
        return value
    # table lookup instead of a long if/elif chain
    digests = {
        'md5': hashlib.md5,
        'sha1': hashlib.sha1,
        'sha224': hashlib.sha224,
        'sha256': hashlib.sha256,
        'sha384': hashlib.sha384,
        'sha512': hashlib.sha512,
        }
    try:
        return digests[value.lower()]
    except KeyError:
        raise ValueError("Invalid digest algorithm")


def hmac_hash(value, key, digest_alg='md5', salt=None):
    """
    HMAC of `value` under `key`; a key of the form 'alg:secret' selects
    the digest algorithm inline.
    """
    if ':' in key:
        # split only on the first colon so the key material itself may
        # contain ':' (the original split(':') raised on such keys)
        digest_alg, key = key.split(':', 1)
    digest_alg = get_digest(digest_alg)
    d = hmac.new(key, value, digest_alg)
    if salt:
        d.update(str(salt))
    return d.hexdigest()


### compute constant ctokens

def initialize_urandom():
    """
    This function and the web2py_uuid follow from the following discussion:
    http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09

    At startup web2py compute a unique ID that identifies the machine by adding
    uuid.getnode() + int(time.time() * 1e3)

    This is a 48-bit number. It converts the number into 16 8-bit tokens.
    It uses this value to initialize the entropy source ('/dev/urandom') and to seed random.

    If os.random() is not supported, it falls back to using random and issues a warning.
    """
    node_id = uuid.getnode()
    microseconds = int(time.time() * 1e6)
    ctokens = [((node_id + microseconds) >> ((i % 6) * 8)) % 256
               for i in range(16)]
    random.seed(node_id + microseconds)
    try:
        os.urandom(1)
        try:
            # try to add process-specific entropy
            frandom = open('/dev/urandom', 'wb')
            try:
                frandom.write(''.join(chr(t) for t in ctokens))
            finally:
                frandom.close()
        except (IOError, OSError, TypeError, ValueError):
            # works anyway: seeding the OS pool is strictly best-effort, so
            # swallow any failure of open()/write() (unwritable or missing
            # device, device rejecting the data, ...), not just IOError
            pass
    except NotImplementedError:
        logger.warning(
"""Cryptographically secure session management is not possible on your system because
your system does not provide a cryptographically secure entropy source.
This is not specific to web2py; consider deploying on a different operating system.""")
    return ctokens

ctokens = initialize_urandom()


def web2py_uuid():
    """
    This function follows from the following discussion:
    http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09

    It works like uuid.uuid4 except that tries to use os.urandom() if possible
    and it XORs the output with the tokens uniquely associated with this machine.
    """
    # renamed from `bytes` so the builtin is not shadowed
    rand_bytes = [random.randrange(256) for i in range(16)]
    try:
        # use /dev/urandom if possible
        ubytes = [ord(c) for c in os.urandom(16)]
        rand_bytes = [rand_bytes[i] ^ ubytes[i] for i in range(16)]
    except NotImplementedError:
        pass
    ## xor the random bytes with the constant per-machine ctokens
    packed = ''.join(chr(c ^ ctokens[i]) for i, c in enumerate(rand_bytes))
    return str(uuid.UUID(bytes=packed, version=4))
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Provides:

- List; like list but returns None instead of IndexOutOfBounds
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""

import cPickle
import portalocker

__all__ = ['List', 'Storage', 'Settings', 'Messages',
           'StorageList', 'load_storage', 'save_storage']


class List(list):
    """
    Like a regular python list but a[i] if i is out of bounds return None
    instead of IndexOutOfBounds
    """

    def __call__(self, i, default=None):
        # call-style access mylist(i) never raises; indices outside
        # [0, len) (including negatives) yield `default`
        if 0 <= i < len(self):
            return self[i]
        else:
            return default


class Storage(dict):

    """
    A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`.

        >>> o = Storage(a=1)
        >>> print o.a
        1

        >>> o['a']
        1

        >>> o.a = 2
        >>> print o['a']
        2

        >>> del o.a
        >>> print o.a
        None

    """

    def __getattr__(self, key):
        # missing attributes resolve to None instead of AttributeError
        if key in self:
            return self[key]
        else:
            return None

    def __setattr__(self, key, value):
        # fix: identity test (`is None`) instead of `value == None`;
        # equality can be overloaded by stored objects and misfire,
        # silently deleting the key
        if value is None:
            if key in self:
                del self[key]
        else:
            self[key] = value

    def __delattr__(self, key):
        if key in self:
            del self[key]
        else:
            # call-style raise: the `raise X, msg` form is python2-only
            raise AttributeError("missing key=%s" % key)

    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'

    def __getstate__(self):
        # pickle as a plain dict
        return dict(self)

    def __setstate__(self, value):
        for (k, v) in value.items():
            self[k] = v

    def getlist(self, key):
        """Return a Storage value as a list.

        If the value is a list it will be returned as-is. If object is
        None, an empty list will be returned. Otherwise, [value] will be
        returned.

        Example output for a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlist('x')
        ['abc']
        >>> request.vars.getlist('y')
        ['abc', 'def']
        >>> request.vars.getlist('z')
        []
        """
        value = self.get(key, None)
        if isinstance(value, (list, tuple)):
            return value
        elif value is None:
            return []
        return [value]

    def getfirst(self, key):
        """Return the first or only value when given a request.vars-style key.

        If the value is a list, its first item will be returned;
        otherwise, the value will be returned as-is.

        Example output for a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getfirst('x')
        'abc'
        >>> request.vars.getfirst('y')
        'abc'
        >>> request.vars.getfirst('z')
        """
        value = self.getlist(key)
        if len(value):
            return value[0]
        return None

    def getlast(self, key):
        """Returns the last or only single value when given a request.vars-style key.

        If the value is a list, the last item will be returned;
        otherwise, the value will be returned as-is.

        Simulated output with a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlast('x')
        'abc'
        >>> request.vars.getlast('y')
        'def'
        >>> request.vars.getlast('z')
        """
        value = self.getlist(key)
        if len(value):
            return value[-1]
        return None


class StorageList(Storage):
    """
    like Storage but missing elements default to [] instead of None
    """

    def __getattr__(self, key):
        if key in self:
            return self[key]
        else:
            # materialize the default so subsequent mutations are kept
            self[key] = []
            return self[key]


def load_storage(filename):
    """Load a Storage pickled by save_storage, under an exclusive file lock."""
    fp = open(filename, 'rb')
    try:
        portalocker.lock(fp, portalocker.LOCK_EX)
        storage = cPickle.load(fp)
        portalocker.unlock(fp)
    finally:
        fp.close()
    return Storage(storage)


def save_storage(storage, filename):
    """Pickle `storage` as a plain dict to `filename`, under an exclusive lock."""
    fp = open(filename, 'wb')
    try:
        portalocker.lock(fp, portalocker.LOCK_EX)
        cPickle.dump(dict(storage), fp)
        portalocker.unlock(fp)
    finally:
        fp.close()


class Settings(Storage):
    """
    Storage whose key set and values can be frozen by setting the special
    `lock_keys` / `lock_values` entries. Note that, unlike Storage,
    assigning None stores None instead of deleting the key.
    """

    def __setattr__(self, key, value):
        if key != 'lock_keys' and self.get('lock_keys', None)\
                and not key in self:
            raise SyntaxError('setting key \'%s\' does not exist' % key)
        if key != 'lock_values' and self.get('lock_values', None):
            raise SyntaxError('setting value cannot be changed: %s' % key)
        self[key] = value


class Messages(Storage):
    """
    Settings-like container whose string values are passed through the
    stored translator T on attribute access.
    """

    def __init__(self, T):
        # keep the translator under the 'T' item so it is in place before
        # the lock checks in __setattr__ can apply
        self['T'] = T

    def __setattr__(self, key, value):
        # same locking rules as Settings
        if key != 'lock_keys' and self.get('lock_keys', None)\
                and not key in self:
            raise SyntaxError('setting key \'%s\' does not exist' % key)
        if key != 'lock_values' and self.get('lock_values', None):
            raise SyntaxError('setting value cannot be changed: %s' % key)
        self[key] = value

    def __getattr__(self, key):
        # unlike Storage, a missing key raises KeyError here; string
        # values are translated through T before being returned
        value = self[key]
        if isinstance(value, str):
            return str(self['T'](value))
        return value


if __name__ == '__main__':
    import doctest
    doctest.testmod()
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

import re

# All patterns are raw strings: the originals were plain literals relying
# on python passing unknown escapes such as \w and \s through unchanged,
# which is deprecated and warning-generating on modern interpreters. The
# compiled regexes are byte-for-byte identical.

# pattern to find defined tables: matches "<db>.define_table('<name>'"
# at the start of a line, capturing the table name
regex_tables = re.compile(
    r"""^[\w]+\.define_table\(\s*[\'\"](?P<name>[\w_]+)[\'\"]""",
    flags=re.M)

# pattern to find exposed functions in controller: zero-argument, public
# (or single-leading-underscore) defs at the start of a line
regex_expose = re.compile(
    r'^def\s+(?P<name>(?:[a-zA-Z0-9]\w*)|(?:_[a-zA-Z0-9]\w*))\(\)\s*:',
    flags=re.M)

# pattern to find {{include 'view'}} template directives anywhere
regex_include = re.compile(
    r'(?P<all>\{\{\s*include\s+[\'"](?P<name>[^\'"]*)[\'"]\s*\}\})')

# pattern to find {{extend 'layout'}} template directives, which must be
# the first non-whitespace content on their line
regex_extend = re.compile(
    r'^\s*(?P<all>\{\{\s*extend\s+[\'"](?P<name>[^\'"]+)[\'"]\s*\}\})',
    re.MULTILINE)
Python
#!/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Thanks to
    * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support
    * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support
    * Denes
    * Chris Clark
    * clach05
    * Denes Lengyel
    * and many others who have contributed to current and previous versions

This file contains the DAL support for many relational databases,
including:
- SQLite
- MySQL
- Postgres
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql

Example of usage:

>>> # from dal import DAL, Field

### create DAL connection (and create DB if not exists)
>>> db=DAL(('mysql://a:b@locahost/x','sqlite://storage.sqlite'),folder=None)

### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))

### insert a record
>>> id = person.insert(name='James')

### retrieve it by id
>>> james = person(id)

### retrieve it by name
>>> james = person(name='James')

### retrieve it by arbitrary query
>>> query = (person.name=='James')&(person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]

### update one record
>>> james.update_record(name='Jim')

### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1

### delete records by query
>>> db(person.name.lower()=='jim').delete()
0

### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,groupby=person.name,limitby=(0,100))

### further filter them
>>> james = people.find(lambda row: row.name=='James').first()
>>> print james.id, james.name
1 James

### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1

### delete one record
>>> james.delete_record()
1

### delete (drop) entire database table
>>> person.drop()

Supported field types:
id string text boolean integer double decimal password upload blob time date datetime,

Supported DAL URI strings:
'sqlite://test.db'
'sqlite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:none@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2://DSN=dsn;UID=user;PWD=pass'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'google:datastore' # for google app engine datastore
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass' # experimental

For more info:
help(DAL)
help(Field)
"""

###################################################################################
# this file only exposes DAL and Field
###################################################################################

__all__ = ['DAL', 'Field']

MAXCHARLENGTH = 512
INFINITY = 2**15 # not quite but reasonable default max char length

import re
import sys
import locale
import os
import types
import cPickle
import datetime
import threading
import time
import cStringIO
import csv
import copy
import socket
import logging
import copy_reg
import base64
import shutil
import marshal
import decimal
import struct
import urllib
import hashlib
import uuid
import glob

# callable types accepted wherever the DAL allows lazy/computed values
CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)

###################################################################################
# following checks allows running of dal without web2py as a standalone module
################################################################################### try: from utils import web2py_uuid except ImportError: import uuid def web2py_uuid(): return str(uuid.uuid4()) try: import portalocker have_portalocker = True except ImportError: have_portalocker = False try: import serializers have_serializers = True except ImportError: have_serializers = False try: import validators have_validators = True except ImportError: have_validators = False logger = logging.getLogger("web2py.dal") DEFAULT = lambda:0 sql_locker = threading.RLock() thread = threading.local() # internal representation of tables with field # <table>.<field>, tables and fields may only be [a-zA-Z0-0_] regex_dbname = re.compile('^(\w+)(\:\w+)*') table_field = re.compile('^[\w_]+\.[\w_]+$') regex_content = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') regex_cleanup_fn = re.compile('[\'"\s;]+') string_unpack=re.compile('(?<!\|)\|(?!\|)') regex_python_keywords = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') # list of drivers will be built on the fly # and lists only what is available drivers = [] try: from new import classobj from google.appengine.ext import db as gae from google.appengine.api import namespace_manager, rdbms from google.appengine.api.datastore_types import Key ### needed for belongs on ID from google.appengine.ext.db.polymodel import PolyModel drivers.append('google') except ImportError: pass if not 'google' in drivers: try: from pysqlite2 import dbapi2 as sqlite3 drivers.append('pysqlite2') except ImportError: try: from sqlite3 import dbapi2 as sqlite3 drivers.append('SQLite3') except ImportError: logger.debug('no sqlite3 or pysqlite2.dbapi2 driver') try: import contrib.pymysql as pymysql drivers.append('pymysql') except ImportError: logger.debug('no pymysql driver') try: import 
psycopg2 drivers.append('PostgreSQL') except ImportError: logger.debug('no psycopg2 driver') try: import cx_Oracle drivers.append('Oracle') except ImportError: logger.debug('no cx_Oracle driver') try: import pyodbc drivers.append('MSSQL/DB2') except ImportError: logger.debug('no MSSQL/DB2 driver') try: import kinterbasdb drivers.append('Interbase') except ImportError: logger.debug('no kinterbasdb driver') try: import firebirdsql drivers.append('Firebird') except ImportError: logger.debug('no Firebird driver') try: import informixdb drivers.append('Informix') logger.warning('Informix support is experimental') except ImportError: logger.debug('no informixdb driver') try: import sapdb drivers.append('SAPDB') logger.warning('SAPDB support is experimental') except ImportError: logger.debug('no sapdb driver') try: import cubriddb drivers.append('Cubrid') logger.warning('Cubrid support is experimental') except ImportError: logger.debug('no cubriddb driver') try: from com.ziclix.python.sql import zxJDBC import java.sql # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ from org.sqlite import JDBC # required by java.sql; ensure we have it drivers.append('zxJDBC') logger.warning('zxJDBC support is experimental') is_jdbc = True except ImportError: logger.debug('no zxJDBC driver') is_jdbc = False try: import ingresdbi drivers.append('Ingres') except ImportError: logger.debug('no Ingres driver') # NOTE could try JDBC....... try: import couchdb drivers.append('CouchDB') except ImportError: logger.debug('no couchdb driver') try: import pymongo drivers.append('mongoDB') except: logger.debug('no mongoDB driver') if 'google' in drivers: is_jdbc = False class GAEDecimalProperty(gae.Property): """ GAE decimal implementation """ data_type = decimal.Decimal def __init__(self, precision, scale, **kwargs): super(GAEDecimalProperty, self).__init__(self, **kwargs) d = '1.' 
for x in range(scale): d += '0' self.round = decimal.Decimal(d) def get_value_for_datastore(self, model_instance): value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance) if value: return str(value) else: return None def make_value_from_datastore(self, value): if value: return decimal.Decimal(value).quantize(self.round) else: return None def validate(self, value): value = super(GAEDecimalProperty, self).validate(value) if value is None or isinstance(value, decimal.Decimal): return value elif isinstance(value, basestring): return decimal.Decimal(value) raise gae.BadValueError("Property %s must be a Decimal or string." % self.name) ################################################################################### # class that handles connection pooling (all adapters derived form this one) ################################################################################### class ConnectionPool(object): pools = {} @staticmethod def set_folder(folder): thread.folder = folder # ## this allows gluon to commit/rollback all dbs in this thread @staticmethod def close_all_instances(action): """ to close cleanly databases in a multithreaded environment """ if not hasattr(thread,'instances'): return while thread.instances: instance = thread.instances.pop() getattr(instance,action)() # ## if you want pools, recycle this connection really = True if instance.pool_size: sql_locker.acquire() pool = ConnectionPool.pools[instance.uri] if len(pool) < instance.pool_size: pool.append(instance.connection) really = False sql_locker.release() if really: getattr(instance,'close')() return def find_or_make_work_folder(self): """ this actually does not make the folder. 
it has to be there """ if hasattr(thread,'folder'): self.folder = thread.folder else: self.folder = thread.folder = '' # Creating the folder if it does not exist if False and self.folder and not os.path.exists(self.folder): os.mkdir(self.folder) def pool_connection(self, f): if not self.pool_size: self.connection = f() else: uri = self.uri sql_locker.acquire() if not uri in ConnectionPool.pools: ConnectionPool.pools[uri] = [] if ConnectionPool.pools[uri]: self.connection = ConnectionPool.pools[uri].pop() sql_locker.release() else: sql_locker.release() self.connection = f() if not hasattr(thread,'instances'): thread.instances = [] thread.instances.append(self) ################################################################################### # this is a generic adapter that does nothing; all others are derived form this one ################################################################################### class BaseAdapter(ConnectionPool): driver = None maxcharlength = INFINITY commit_on_alter_table = False support_distributed_transaction = False uploads_in_blob = False types = { 'boolean': 'CHAR(1)', 'string': 'CHAR(%(length)s)', 'text': 'TEXT', 'password': 'CHAR(%(length)s)', 'blob': 'BLOB', 'upload': 'CHAR(%(length)s)', 'integer': 'INTEGER', 'double': 'DOUBLE', 'decimal': 'DOUBLE', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'TEXT', 'list:string': 'TEXT', 'list:reference': 'TEXT', } def integrity_error(self): return self.driver.IntegrityError def file_exists(self, filename): """ to be used ONLY for files that on GAE may not be on filesystem """ return os.path.exists(filename) def file_open(self, filename, mode='rb', lock=True): """ to be used ONLY for files that on GAE may not be on filesystem """ fileobj = open(filename,mode) if have_portalocker and lock: if mode in ('r','rb'): 
portalocker.lock(fileobj,portalocker.LOCK_SH) elif mode in ('w','wb','a'): portalocker.lock(fileobj,portalocker.LOCK_EX) else: fileobj.close() raise RuntimeError, "Unsupported file_open mode" return fileobj def file_close(self, fileobj, unlock=True): """ to be used ONLY for files that on GAE may not be on filesystem """ if fileobj: if have_portalocker and unlock: portalocker.unlock(fileobj) fileobj.close() def file_delete(self, filename): os.unlink(filename) def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "None" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec class Dummy(object): lastrowid = 1 def __getattr__(self, value): return lambda *a, **b: [] self.connection = Dummy() self.cursor = Dummy() def sequence_name(self,tablename): return '%s_sequence' % tablename def trigger_name(self,tablename): return '%s_sequence' % tablename def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None): fields = [] sql_fields = {} sql_fields_aux = {} TFK = {} tablename = table._tablename sortable = 0 for field in table: sortable += 1 k = field.name if isinstance(field.type,SQLCustomType): ftype = field.type.native or field.type.type elif field.type.startswith('reference'): referenced = field.type[10:].strip() constraint_name = self.constraint_name(tablename, field.name) if hasattr(table,'_primarykey'): rtablename,rfieldname = referenced.split('.') rtable = table._db[rtablename] rfield = rtable[rfieldname] # must be PK reference or unique if rfieldname in rtable._primarykey or rfield.unique: ftype = self.types[rfield.type[:9]] % dict(length=rfield.length) # multicolumn primary key reference? 
if not rfield.unique and len(rtable._primarykey)>1 : # then it has to be a table level FK if rtablename not in TFK: TFK[rtablename] = {} TFK[rtablename][rfieldname] = field.name else: ftype = ftype + \ self.types['reference FK'] %dict(\ constraint_name=constraint_name, table_name=tablename, field_name=field.name, foreign_key='%s (%s)'%(rtablename, rfieldname), on_delete_action=field.ondelete) else: # make a guess here for circular references id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id' ftype = self.types[field.type[:9]]\ % dict(table_name=tablename, field_name=field.name, constraint_name=constraint_name, foreign_key=referenced + ('(%s)' % id_fieldname), on_delete_action=field.ondelete) elif field.type.startswith('list:reference'): ftype = self.types[field.type[:14]] elif field.type.startswith('decimal'): precision, scale = map(int,field.type[8:-1].split(',')) ftype = self.types[field.type[:7]] % \ dict(precision=precision,scale=scale) elif not field.type in self.types: raise SyntaxError, 'Field: unknown field type: %s for %s' % \ (field.type, field.name) else: ftype = self.types[field.type]\ % dict(length=field.length) if not field.type.startswith('id') and not field.type.startswith('reference'): if field.notnull: ftype += ' NOT NULL' else: ftype += self.ALLOW_NULL() if field.unique: ftype += ' UNIQUE' # add to list of fields sql_fields[field.name] = dict(sortable=sortable, type=str(field.type), sql=ftype) if isinstance(field.default,(str,int,float)): # caveat: sql_fields and sql_fields_aux differ for default values # sql_fields is used to trigger migrations and sql_fields_aux # are used for create table # the reason is that we do not want to trigger a migration simply # because a default value changes not_null = self.NOT_NULL(field.default,field.type) ftype = ftype.replace('NOT NULL',not_null) sql_fields_aux[field.name] = dict(sql=ftype) fields.append('%s %s' % (field.name, ftype)) other = ';' # backend-specific extensions to 
fields if self.dbengine == 'mysql': if not hasattr(table, "_primarykey"): fields.append('PRIMARY KEY(%s)' % table._id.name) other = ' ENGINE=InnoDB CHARACTER SET utf8;' fields = ',\n '.join(fields) for rtablename in TFK: rfields = TFK[rtablename] pkeys = table._db[rtablename]._primarykey fkeys = [ rfields[k] for k in pkeys ] fields = fields + ',\n ' + \ self.types['reference TFK'] %\ dict(table_name=tablename, field_name=', '.join(fkeys), foreign_table=rtablename, foreign_key=', '.join(pkeys), on_delete_action=field.ondelete) if hasattr(table,'_primarykey'): query = '''CREATE TABLE %s(\n %s,\n %s) %s''' % \ (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other) else: query = '''CREATE TABLE %s(\n %s\n)%s''' % \ (tablename, fields, other) if self.uri.startswith('sqlite:///'): path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8' dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding) else: dbpath = self.folder if not migrate: return query elif self.uri.startswith('sqlite:memory'): table._dbt = None elif isinstance(migrate, str): table._dbt = os.path.join(dbpath, migrate) else: table._dbt = os.path.join(dbpath, '%s_%s.table' \ % (table._db._uri_hash, tablename)) if table._dbt: table._loggername = os.path.join(dbpath, 'sql.log') logfile = self.file_open(table._loggername, 'a') else: logfile = None if not table._dbt or not self.file_exists(table._dbt): if table._dbt: logfile.write('timestamp: %s\n' % datetime.datetime.today().isoformat()) logfile.write(query + '\n') if not fake_migrate: self.create_sequence_and_triggers(query,table) table._db.commit() if table._dbt: tfile = self.file_open(table._dbt, 'w') cPickle.dump(sql_fields, tfile) self.file_close(tfile) if fake_migrate: logfile.write('faked!\n') else: logfile.write('success!\n') else: tfile = self.file_open(table._dbt, 'r') try: sql_fields_old = cPickle.load(tfile) except EOFError: self.file_close(tfile) self.file_close(logfile) raise 
RuntimeError, 'File %s appears corrupted' % table._dbt
        self.file_close(tfile)
        if sql_fields != sql_fields_old:
            self.migrate_table(table,
                               sql_fields, sql_fields_old,
                               sql_fields_aux, logfile,
                               fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query

    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        """
        Diff the requested schema (sql_fields) against the schema recorded in
        the table's .dbt metadata file (sql_fields_old) and emit/execute the
        ALTER TABLE statements needed to reconcile them.

        :param table: the Table being migrated
        :param sql_fields: desired columns, {name: {'type':..., 'sql':...}}
        :param sql_fields_old: columns as last persisted in table._dbt
        :param sql_fields_aux: per-engine SQL fragments used to build ALTERs
        :param logfile: open file object; every statement is logged to it
        :param fake_migrate: if True, only log and update metadata, do not
            actually execute any SQL
        """
        tablename = table._tablename
        def fix(item):
            # Normalize a metadata entry: legacy .dbt files stored a bare SQL
            # string instead of a dict; wrap it so both formats compare alike.
            k,v=item
            if not isinstance(v,dict):
                v=dict(type='unkown',sql=v)
            return k.lower(),v
        ### make sure all field names are lower case to avoid conflicts
        sql_fields = dict(map(fix,sql_fields.items()))
        sql_fields_old = dict(map(fix,sql_fields_old.items()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.items()))

        # union of new and old column names, new ones first
        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        # mssql cannot chain ", ADD col" clauses; it needs a fresh ALTER TABLE
        if self.dbengine == 'mssql':
            new_add = '; ALTER TABLE %s ADD ' % tablename
        else:
            new_add = ', ADD '

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                # column is new: ADD it
                sql_fields_current[key] = sql_fields[key]
                query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine == 'sqlite':
                # sqlite cannot alter column types; just record new metadata
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                # column was removed: DROP it (firebird uses DROP w/o COLUMN)
                del sql_fields_current[key]
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                  and not isinstance(table[key].type, SQLCustomType) \
                  and not (table[key].type.startswith('reference') and \
                      sql_fields[key]['sql'].startswith('INT,') and \
                      sql_fields_old[key]['sql'].startswith('INT NOT NULL,')):
                # column definition changed: copy data through a __tmp column
                # since most engines cannot change a column type in place
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                # logical type changed but SQL is identical: metadata only
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                logfile.write('timestamp: %s\n'
                               % datetime.datetime.today().isoformat())
                table._db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # caveat. mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if table._db._adapter.commit_on_alter_table:
                            table._db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            cPickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                            logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                # no SQL to run but the .dbt metadata must be refreshed
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)

        if metadata_change and \
                not (query and self.dbengine in ('mysql','oracle','firebird')):
            # engines above already committed per-statement in the loop
            table._db.commit()
            tfile = self.file_open(table._dbt, 'w')
            cPickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)

    # --- SQL fragment builders; subclasses override for dialect differences ---

    def LOWER(self,first):
        return 'LOWER(%s)' % self.expand(first)

    def UPPER(self,first):
        return 'UPPER(%s)' % self.expand(first)

    def EXTRACT(self,first,what):
        return "EXTRACT(%s FROM %s)" % (what, self.expand(first))

    def AGGREGATE(self,first,what):
        return "%s(%s)" % (what,self.expand(first))

    def JOIN(self):
        return 'JOIN'

    def LEFT_JOIN(self):
        return 'LEFT JOIN'

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)

    def COALESCE_ZERO(self,first):
        return 'COALESCE(%s,0)' % self.expand(first)

    def ALLOW_NULL(self):
        return ''

    def SUBSTRING(self,field,parameters):
        return 'SUBSTR(%s,%s,%s)' % (self.expand(field),
                                     parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY(%s)' % key

    def _drop(self,table,mode):
        # returns the list of statements; subclasses add engine extras
        return ['DROP TABLE %s;' % table]

    def drop(self, table, mode=''):
        """Drop the table, commit, and detach it from the DAL instance."""
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                logfile.write(query + '\n')
            self.execute(query)
        table._db.commit()
        del table._db[table._tablename]
        del table._db.tables[table._db.tables.index(table._tablename)]
        table._db._update_referenced_by(table._tablename)
        if table._dbt:
            # remove the migration metadata file as well
            self.file_delete(table._dbt)
            logfile.write('success!\n')

    def _insert(self,table,fields):
        # fields is a sequence of (Field, value) pairs
        keys = ','.join(f.name for f,v in fields)
        values = ','.join(self.expand(v,f.type) for f,v in fields)
        return \
            'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)

    def insert(self,table,fields):
        """
        Execute an INSERT; returns None on an integrity error, a dict of
        primary-key values for keyed tables, or a Reference to the new id.
        """
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception, e:
            # swallow only integrity errors (e.g. duplicate key); re-raise rest
            if isinstance(e,self.integrity_error_class()):
                return None
            raise e
        if hasattr(table,'_primarykey'):
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            return id
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)  # lazy record fetch
        return rid

    def bulk_insert(self,table,items):
        # naive one-by-one insert; subclasses may override with a batch form
        return [self.insert(table,item) for item in items]

    # --- boolean / comparison operators used by Query expansion ---

    def NOT(self,first):
        return '(NOT %s)' % self.expand(first)

    def AND(self,first,second):
        return '(%s AND %s)' % (self.expand(first),self.expand(second))

    def OR(self,first,second):
        return '(%s OR %s)' % (self.expand(first),self.expand(second))

    def BELONGS(self,first,second):
        if isinstance(second,str):
            # second is a nested-select string; strip its trailing ';'
            return '(%s IN (%s))' % (self.expand(first),second[:-1])
        elif second==[] or second==():
            return '(0)'  # empty IN-list: always false
        items =','.join(self.expand(item,first.type) for item in second)
        return '(%s IN (%s))' % (self.expand(first),items)

    def LIKE(self,first,second):
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand('%'+second,'string'))

    def CONTAINS(self,first,second):
        # list: fields are stored as |item|item|, hence the |-delimited key
        if first.type in ('string','text'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand(key,'string'))

    def EQ(self,first,second=None):
        if second is None:
            return '(%s IS NULL)' % self.expand(first)
        return '(%s = %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def NE(self,first,second=None):
        if second is None:
            return '(%s IS NOT NULL)' % self.expand(first)
        return '(%s <> %s)' % \
            (self.expand(first),self.expand(second,first.type))

    def LT(self,first,second=None):
        return '(%s < %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def LE(self,first,second=None):
        return '(%s <= %s)' % (self.expand(first),
                               self.expand(second,first.type))

    def GT(self,first,second=None):
        return '(%s > %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def GE(self,first,second=None):
        return '(%s >= %s)' % (self.expand(first),
                               self.expand(second,first.type))

    def ADD(self,first,second):
        return '(%s + %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def SUB(self,first,second):
        return '(%s - %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def MUL(self,first,second):
        return '(%s * %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def DIV(self,first,second):
        return '(%s / %s)' % (self.expand(first),
                              self.expand(second,first.type))

    def MOD(self,first,second):
        return '(%s %% %s)' % (self.expand(first),
                               self.expand(second,first.type))

    def AS(self,first,second):
        return '%s AS %s' % (self.expand(first),second)

    def ON(self,first,second):
        return '%s ON %s' % (self.expand(first),self.expand(second))

    def INVERT(self,first):
        # used by ~field in orderby to mean descending order
        return '%s DESC' % self.expand(first)

    def COMMA(self,first,second):
        return '%s, %s' % (self.expand(first),self.expand(second))

    def expand(self,expression,field_type=None):
        """
        Recursively turn a Field/Expression/Query/literal into its SQL text.
        Literals are quoted via represent() when a field_type is known.
        """
        if isinstance(expression,Field):
            return str(expression)
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) \
                                 for item in expression])
        else:
            return str(expression)

    def alias(self,table,alias):
        """
        given a table object, makes a new table object
        with alias name.
        """
        other = copy.copy(table)
        other['_ot'] = other._tablename   # remember the original table name
        other['ALL'] = SQLALL(other)
        other['_tablename'] = alias
        for fieldname in other.fields:
            # fields must be re-bound so they render with the alias name
            other[fieldname] = copy.copy(other[fieldname])
            other[fieldname]._tablename = alias
            other[fieldname].tablename = alias
            other[fieldname].table = other
        table._db[alias] = other
        return other

    def _truncate(self,table,mode = ''):
        tablename = table._tablename
        return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]

    def truncate(self,table,mode= ' '):
        """Empty the table, logging each statement to the table's logfile."""
        # Prepare functions "write_to_logfile" and "close_logfile"
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        else:
            # no migration metadata: use a no-op logfile stand-in
            class Logfile(object):
                def write(self, value):
                    pass
                def close(self):
                    pass
            logfile = Logfile()
        try:
            queries = table._db._adapter._truncate(table, mode)
            for query in queries:
                logfile.write(query + '\n')
                self.execute(query)
            table._db.commit()
            logfile.write('success!\n')
        finally:
            logfile.close()

    def _update(self,tablename,query,fields):
        # fields is a sequence of (Field, value) pairs
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_v = ','.join(['%s=%s' % (field.name,
                                     self.expand(value,field.type)) \
                              for (field,value) in fields])
        return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)

    def update(self,tablename,query,fields):
        """Execute an UPDATE; returns affected row count when available."""
        sql = self._update(tablename,query,fields)
        self.execute(sql)
        try:
            return self.cursor.rowcount
        except:
            # some drivers do not implement rowcount
            return None

    def _delete(self,tablename, query):
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        return 'DELETE FROM %s%s;' % (tablename, sql_w)

    def delete(self,tablename,query):
        """
        Execute a DELETE; returns affected row count when available.
        On sqlite, emulates ON DELETE CASCADE for referencing tables.
        """
        sql = self._delete(tablename,query)
        ### special code to handle CASCADE in SQLite
        db = self.db
        table = db[tablename]
        if self.dbengine=='sqlite' and table._referenced_by:
            # collect ids before deleting so cascades can target them
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite
        if self.dbengine=='sqlite' and counter:
            for tablename,fieldname in table._referenced_by:
                f = db[tablename][fieldname]
                if f.type=='reference '+table._tablename \
                        and f.ondelete=='CASCADE':
                    db(db[tablename][fieldname].belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite
        return counter

    def get_table(self,query):
        """Return the single table a query refers to, or raise."""
        tablenames = self.tables(query)
        if len(tablenames)==1:
            return tablenames[0]
        elif len(tablenames)<1:
            raise RuntimeError, "No table selected"
        else:
            raise RuntimeError, "Too many tables selected"

    def _select(self, query, fields, attributes):
        """
        Build the SELECT statement for query/fields plus the supported
        attributes (orderby, groupby, limitby, left, join, distinct, having,
        cache, required). Raises SyntaxError on unknown attributes.
        """
        for key in set(attributes.keys())-set(('orderby','groupby','limitby',
                                               'required','cache','left',
                                               'distinct','having', 'join')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        # ## if not fields specified take them all from the requested tables
        new_fields = []
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table   # expand table.ALL into its fields
            else:
                new_fields.append(item)
        fields = new_fields
        tablenames = self.tables(query)
        query = self.filter_tenant(query,tablenames)
        if not fields:
            for table in tablenames:
                for field in self.db[table]:
                    fields.append(field)
        else:
            for field in fields:
                # allow "table.field" strings in the field list
                if isinstance(field,basestring) and table_field.match(field):
                    tn,fn = field.split('.')
                    field = self.db[tn][fn]
                for tablename in self.tables(field):
                    if not tablename in tablenames:
                        tablenames.append(tablename)
        if len(tablenames) < 1:
            raise SyntaxError, 'Set: no tables selected'
        sql_f = ', '.join(map(self.expand,fields))
        self._colnames = [c.strip() for c in sql_f.split(', ')]
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_o = ''
        sql_s = ''
        left = attributes.get('left', False)
        inner_join = attributes.get('join', False)
        distinct = attributes.get('distinct', False)
        groupby = attributes.get('groupby', False)
        orderby = attributes.get('orderby', False)
        having = attributes.get('having', False)
        limitby = attributes.get('limitby', False)
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            icommand = self.JOIN()
            if not \
isinstance(inner_join, (tuple, list)): inner_join = [inner_join] ijoint = [t._tablename for t in inner_join if not isinstance(t,Expression)] ijoinon = [t for t in inner_join if isinstance(t, Expression)] ijoinont = [t.first._tablename for t in ijoinon] iexcluded = [t for t in tablenames if not t in ijoint + ijoinont] if left: join = attributes['left'] command = self.LEFT_JOIN() if not isinstance(join, (tuple, list)): join = [join] joint = [t._tablename for t in join if not isinstance(t,Expression)] joinon = [t for t in join if isinstance(t, Expression)] #patch join+left patch (solves problem with ordering in left joins) tables_to_merge={} [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon] joinont = [t.first._tablename for t in joinon] [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge] important_tablenames = joint + joinont + tables_to_merge.keys() excluded = [t for t in tablenames if not t in important_tablenames ] def alias(t): return str(self.db[t]) if inner_join and not left: sql_t = ', '.join(alias(t) for t in iexcluded) for t in ijoinon: sql_t += ' %s %s' % (icommand, str(t)) elif not inner_join and left: sql_t = ', '.join([alias(t) for t in excluded + tables_to_merge.keys()]) if joint: sql_t += ' %s %s' % (command, ','.join([t for t in joint])) for t in joinon: sql_t += ' %s %s' % (command, str(t)) elif inner_join and left: sql_t = ','.join([alias(t) for t in excluded + \ tables_to_merge.keys() if t in iexcluded ]) for t in ijoinon: sql_t += ' %s %s' % (icommand, str(t)) if joint: sql_t += ' %s %s' % (command, ','.join([t for t in joint])) for t in joinon: sql_t += ' %s %s' % (command, str(t)) else: sql_t = ', '.join(alias(t) for t in tablenames) if groupby: if isinstance(groupby, (list, tuple)): groupby = xorify(groupby) sql_o += ' GROUP BY %s' % self.expand(groupby) if having: sql_o += ' HAVING %s' % attributes['having'] if orderby: if isinstance(orderby, (list, tuple)): orderby = xorify(orderby) if str(orderby) == 
'<random>': sql_o += ' ORDER BY %s' % self.RANDOM() else: sql_o += ' ORDER BY %s' % self.expand(orderby) if limitby: if not orderby and tablenames: sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in ((hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])]) # oracle does not support limitby return self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby) def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def select(self,query,fields,attributes): """ Always returns a Rows object, even if it may be empty """ def response(sql): self.execute(sql) return self.cursor.fetchall() sql = self._select(query,fields,attributes) if attributes.get('cache', None): (cache_model, time_expire) = attributes['cache'] del attributes['cache'] key = self.uri + '/' + sql key = (key<=200) and key or hashlib.md5(key).hexdigest() rows = cache_model(key, lambda: response(sql), time_expire) else: rows = response(sql) if isinstance(rows,tuple): rows = list(rows) limitby = attributes.get('limitby',None) or (0,) rows = self.rowslice(rows,limitby[0],None) return self.parse(rows,self._colnames) def _count(self,query,distinct=None): tablenames = self.tables(query) if query: sql_w = ' WHERE ' + self.expand(query) else: sql_w = '' sql_t = ','.join(tablenames) if distinct: if isinstance(distinct,(list,tuple)): distinct = xorify(distinct) sql_d = self.expand(distinct) return 'SELECT count(DISTINCT %s) FROM %s%s' % (sql_d, sql_t, sql_w) return 'SELECT count(*) FROM %s%s' % (sql_t, sql_w) def count(self,query,distinct=None): self.execute(self._count(query,distinct)) return self.cursor.fetchone()[0] def tables(self,query): tables = set() if isinstance(query, Field): tables.add(query.tablename) elif isinstance(query, (Expression, Query)): if query.first!=None: 
tables = tables.union(self.tables(query.first)) if query.second!=None: tables = tables.union(self.tables(query.second)) return list(tables) def commit(self): return self.connection.commit() def rollback(self): return self.connection.rollback() def close(self): return self.connection.close() def distributed_transaction_begin(self,key): return def prepare(self,key): self.connection.prepare() def commit_prepared(self,key): self.connection.commit() def rollback_prepared(self,key): self.connection.rollback() def concat_add(self,table): return ', ADD ' def constraint_name(self, table, fieldname): return '%s_%s__constraint' % (table,fieldname) def create_sequence_and_triggers(self, query, table, **args): self.execute(query) def log_execute(self,*a,**b): self.db._lastsql = a[0] t0 = time.time() ret = self.cursor.execute(*a,**b) self.db._timings.append((a[0],time.time()-t0)) return ret def execute(self,*a,**b): return self.log_execute(*a, **b) def represent(self, obj, fieldtype): if isinstance(obj,CALLABLETYPES): obj = obj() if isinstance(fieldtype, SQLCustomType): return fieldtype.encoder(obj) if isinstance(obj, (Expression, Field)): return str(obj) if fieldtype.startswith('list:'): if not obj: obj = [] if not isinstance(obj, (list, tuple)): obj = [obj] if isinstance(obj, (list, tuple)): obj = bar_encode(obj) if obj is None: return 'NULL' if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']: return 'NULL' r = self.represent_exceptions(obj,fieldtype) if r != None: return r if fieldtype == 'boolean': if obj and not str(obj)[:1].upper() in ['F', '0']: return "'T'" else: return "'F'" if fieldtype == 'id' or fieldtype == 'integer': return str(int(obj)) if fieldtype.startswith('decimal'): return str(obj) elif fieldtype.startswith('reference'): # reference if fieldtype.find('.')>0: return repr(obj) elif isinstance(obj, (Row, Reference)): return str(obj['id']) return str(int(obj)) elif fieldtype == 'double': return repr(float(obj)) if isinstance(obj, unicode): obj = 
obj.encode(self.db_codec) if fieldtype == 'blob': obj = base64.b64encode(str(obj)) elif fieldtype == 'date': if isinstance(obj, (datetime.date, datetime.datetime)): obj = obj.isoformat()[:10] else: obj = str(obj) elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): obj = obj.isoformat()[:19].replace('T',' ') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+' 00:00:00' else: obj = str(obj) elif fieldtype == 'time': if isinstance(obj, datetime.time): obj = obj.isoformat()[:10] else: obj = str(obj) if not isinstance(obj,str): obj = str(obj) try: obj.decode(self.db_codec) except: obj = obj.decode('latin1').encode(self.db_codec) return "'%s'" % obj.replace("'", "''") def represent_exceptions(self, obj, fieldtype): return None def lastrowid(self,table): return None def integrity_error_class(self): return type(None) def rowslice(self,rows,minimum=0,maximum=None): """ by default this function does nothing, overload when db does not do slicing """ return rows def parse(self, rows, colnames, blob_decode=True): db = self.db virtualtables = [] new_rows = [] for (i,row) in enumerate(rows): new_row = Row() for j,colname in enumerate(colnames): value = row[j] if not table_field.match(colnames[j]): if not '_extra' in new_row: new_row['_extra'] = Row() new_row['_extra'][colnames[j]] = value select_as_parser = re.compile("\s+AS\s+(\S+)") new_column_name = select_as_parser.search(colnames[j]) if not new_column_name is None: column_name = new_column_name.groups(0) setattr(new_row,column_name[0],value) continue (tablename, fieldname) = colname.split('.') table = db[tablename] field = table[fieldname] field_type = field.type if field.type != 'blob' and isinstance(value, str): try: value = value.decode(db._db_codec) except Exception: pass if isinstance(value, unicode): value = value.encode('utf-8') if not tablename in new_row: colset = new_row[tablename] = Row() if tablename not in virtualtables: virtualtables.append(tablename) else: colset = 
new_row[tablename]

                # coerce the raw driver value to the DAL field type
                if isinstance(field_type, SQLCustomType):
                    colset[fieldname] = field_type.decoder(value)
                    # field_type = field_type.type
                elif not isinstance(field_type, str) or value is None:
                    colset[fieldname] = value
                elif isinstance(field_type, str) and \
                        field_type.startswith('reference'):
                    referee = field_type[10:].strip()
                    if not '.' in referee:
                        # reference by id: wrap in a lazy Reference
                        colset[fieldname] = rid = Reference(value)
                        (rid._table, rid._record) = (db[referee], None)
                    else: ### reference not by id
                        colset[fieldname] = value
                elif field_type == 'boolean':
                    if value == True or str(value)[:1].lower() == 't':
                        colset[fieldname] = True
                    else:
                        colset[fieldname] = False
                elif field_type == 'date' \
                        and (not isinstance(value, datetime.date)\
                                 or isinstance(value, datetime.datetime)):
                    (y, m, d) = map(int, str(value)[:10].strip().split('-'))
                    colset[fieldname] = datetime.date(y, m, d)
                elif field_type == 'time' \
                        and not isinstance(value, datetime.time):
                    time_items = map(int,
                                     str(value)[:8].strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]   # seconds omitted
                    colset[fieldname] = datetime.time(h, mi, s)
                elif field_type == 'datetime'\
                        and not isinstance(value, datetime.datetime):
                    (y, m, d) = map(int,str(value)[:10].strip().split('-'))
                    time_items = map(int,
                                     str(value)[11:19].strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    colset[fieldname] = datetime.datetime(y, m, d, h, mi, s)
                elif field_type == 'blob' and blob_decode:
                    colset[fieldname] = base64.b64decode(str(value))
                elif field_type.startswith('decimal'):
                    decimals = int(field_type[8:-1].split(',')[-1])
                    if self.dbengine == 'sqlite':
                        # sqlite returns floats; format to declared precision
                        value = ('%.' + str(decimals) + 'f') % value
                    if not isinstance(value, decimal.Decimal):
                        value = decimal.Decimal(str(value))
                    colset[fieldname] = value
                elif field_type.startswith('list:integer'):
                    if not self.dbengine=='google:datastore':
                        colset[fieldname] = bar_decode_integer(value)
                    else:
                        colset[fieldname] = value
                elif field_type.startswith('list:reference'):
                    if not self.dbengine=='google:datastore':
                        colset[fieldname] = bar_decode_integer(value)
                    else:
                        colset[fieldname] = value
                elif field_type.startswith('list:string'):
                    if not self.dbengine=='google:datastore':
                        colset[fieldname] = bar_decode_string(value)
                    else:
                        colset[fieldname] = value
                else:
                    colset[fieldname] = value
                if field_type == 'id':
                    # attach row-level helpers bound to this record's id
                    id = colset[field.name]
                    colset.update_record = lambda _ = (colset, table, id), \
                        **a: update_record(_, a)
                    colset.delete_record = lambda t = table, i = id: \
                        t._db(t._id==i).delete()
                    for (referee_table, referee_name) in \
                            table._referenced_by:
                        s = db[referee_table][referee_name]
                        if not referee_table in colset:
                            # for backward compatibility
                            colset[referee_table] = Set(db, s == id)
                        ### add new feature?
                        ### colset[referee_table+'_by_'+refree_name] = Set(db, s == id)
                    colset['id'] = id
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
        for tablename in virtualtables:
            for item in db[tablename].virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except KeyError:
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj

    def filter_tenant(self,query,tablenames):
        """AND the multi-tenant default filter onto queries that touch a
        table containing the request-tenant field."""
        fieldname = self.db._request_tenant
        for tablename in tablenames:
            table = self.db[tablename]
            if fieldname in table:
                default = table[fieldname].default
                if default!=None:
                    query = query&(table[fieldname]==default)
        return query

###################################################################################
# List of all the available adapters, they all extend BaseAdapter
###################################################################################

class SQLiteAdapter(BaseAdapter):
    """Adapter for sqlite3 (file-based or :memory: databases)."""

    driver = globals().get('sqlite3',None)

    def EXTRACT(self,field,what):
        # sqlite has no EXTRACT; delegate to the registered UDF below
        return "web2py_extract('%s',%s)" % (what,self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """UDF: pull a date/time component out of an ISO-formatted string
        by fixed slice positions; returns None on any failure."""
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            (i, j) = table[lookup]
            return int(s[i:j])
        except:
            return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                # relative path: resolve against the application folder
                dbpath = os.path.join(
                    self.folder.decode(path_encoding).encode('utf8'),dbpath)
        if not 'check_same_thread' in driver_args:
            # web2py serves requests from multiple threads
            driver_args['check_same_thread'] = False
        def connect(dbpath=dbpath, driver_args=driver_args):
            return \
self.driver.Connection(dbpath, **driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # register the EXTRACT-emulation UDF on every new connection
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def _truncate(self,table,mode = ''):
        # sqlite has no TRUNCATE; DELETE all rows and reset autoincrement
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self,table):
        return self.cursor.lastrowid


class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite over zxJDBC (Jython)."""

    driver = globals().get('zxJDBC',None)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                dbpath = os.path.join(
                    self.folder.decode(path_encoding).encode('utf8'),dbpath)
        def connect(dbpath=dbpath,driver_args=driver_args):
            return self.driver.connect(
                java.sql.DriverManager.getConnection(
                    'jdbc:sqlite:'+dbpath),**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        # self.connection.create_function('web2py_extract', 2,
        #                                 SQLiteAdapter.web2py_extract)

    def execute(self,a):
        # JDBC driver rejects the trailing ';' web2py appends
        return self.log_execute(a[:-1])


class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL via pymysql."""

    driver = globals().get('pymysql',None)
    maxcharlength = 255
    commit_on_alter_table = True
    support_distributed_transaction = True
    # mapping of DAL field types to MySQL column DDL
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT '
            'NOT NULL',
        'reference': 'INT, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        }

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;',
                'DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    # two-phase commit via MySQL XA transactions
    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,ley):
        # NOTE(review): parameter name 'ley' looks like a typo for 'key';
        # kept as-is since it is unused and renaming could break keyword
        # callers — confirm before changing.
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    def concat_add(self,table):
        return '; ALTER TABLE %s ADD ' % table

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # parse mysql://user:password@host:port/db?set_encoding=charset
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(dict(db=db,
                                user=credential_decoder(user),
                                passwd=credential_decoder(password),
                                host=host,
                                port=port,
charset=charset)) def connect(driver_args=driver_args): return self.driver.connect(**driver_args) self.pool_connection(connect) self.cursor = self.connection.cursor() self.execute('SET FOREIGN_KEY_CHECKS=1;') self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';") def lastrowid(self,table): self.execute('select last_insert_id();') return int(self.cursor.fetchone()[0]) class PostgreSQLAdapter(BaseAdapter): driver = globals().get('psycopg2',None) support_distributed_transaction = True types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'TEXT', 'password': 'VARCHAR(%(length)s)', 'blob': 'BYTEA', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INTEGER', 'double': 'FLOAT8', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'SERIAL PRIMARY KEY', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'TEXT', 'list:string': 'TEXT', 'list:reference': 'TEXT', } def sequence_name(self,table): return '%s_id_Seq' % table def RANDOM(self): return 'RANDOM()' def distributed_transaction_begin(self,key): return def prepare(self,key): self.execute("PREPARE TRANSACTION '%s';" % key) def commit_prepared(self,key): self.execute("COMMIT PREPARED '%s';" % key) def rollback_prepared(self,key): self.execute("ROLLBACK PREPARED '%s';" % key) def create_sequence_and_triggers(self, query, table, **args): # following lines should only be executed if table._sequence_name does not exist # self.execute('CREATE SEQUENCE %s;' % table._sequence_name) # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ # % (table._tablename, table._fieldname, table._sequence_name)) self.execute(query) def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "postgres" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec 
self.find_or_make_work_folder() uri = uri.split('://')[1] m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri) if not m: raise SyntaxError, "Invalid URI string in DAL" user = credential_decoder(m.group('user')) if not user: raise SyntaxError, 'User required' password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError, 'Host name required' db = m.group('db') if not db: raise SyntaxError, 'Database name required' port = m.group('port') or '5432' sslmode = m.group('sslmode') if sslmode: msg = ("dbname='%s' user='%s' host='%s'" "port=%s password='%s' sslmode='%s'") \ % (db, user, host, port, password, sslmode) else: msg = ("dbname='%s' user='%s' host='%s'" "port=%s password='%s'") \ % (db, user, host, port, password) def connect(msg=msg,driver_args=driver_args): return self.driver.connect(msg,**driver_args) self.pool_connection(connect) self.connection.set_client_encoding('UTF8') self.cursor = self.connection.cursor() self.execute('BEGIN;') self.execute("SET CLIENT_ENCODING TO 'UNICODE';") self.execute("SET standard_conforming_strings=on;") def lastrowid(self,table): self.execute("select currval('%s')" % table._sequence_name) return int(self.cursor.fetchone()[0]) def LIKE(self,first,second): return '(%s ILIKE %s)' % (self.expand(first),self.expand(second,'string')) def STARTSWITH(self,first,second): return '(%s ILIKE %s)' % (self.expand(first),self.expand(second+'%','string')) def ENDSWITH(self,first,second): return '(%s ILIKE %s)' % (self.expand(first),self.expand('%'+second,'string')) def CONTAINS(self,first,second): if first.type in ('string','text'): key = '%'+str(second).replace('%','%%')+'%' elif first.type.startswith('list:'): key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%' return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string')) class 
JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL over zxJDBC (Jython)."""

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = m.group('port') or '5432'
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db),
               user, password)
        def connect(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")


class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via cx_Oracle. Ids are driven by an explicit
    sequence + BEFORE INSERT trigger created per table."""

    driver = globals().get('cx_Oracle',None)

    commit_on_alter_table = False
    # mapping of DAL field types to Oracle column DDL
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def sequence_name(self,tablename):
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        # oracle requires DEFAULT before NOT NULL
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode),
                'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # oracle has no LIMIT/OFFSET; emulate with nested ROWNUM selects
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        # oracle identifiers are limited to 30 characters
        constraint_name = BaseAdapter.constraint_name(self, tablename,
                                                      fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10],
                                                     fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific literals: CLOB placeholders and to_date()."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connect(uri=uri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # make date/timestamp rendering match web2py's ISO expectations
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # matches the :CLOB('...') placeholders emitted by represent_exceptions,
    # skipping over quoted strings that precede them
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command):
        """Rewrite inline :CLOB('...') literals into numbered bind
        variables and pass their values as positional args."""
        args = []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + \
                command[m.end('clob'):]
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        # strip the trailing ';' — cx_Oracle rejects it
        return self.log_execute(command[:-1], args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus its id sequence and BEFORE INSERT trigger."""
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name)
        self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])


class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server via pyodbc."""

    driver = globals().get('pyodbc',None)

    # mapping of DAL field types to MSSQL column DDL
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', 
CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 'list:integer': 'TEXT', 'list:string': 'TEXT', 'list:reference': 'TEXT', } def EXTRACT(self,field,what): return "DATEPART(%s,%s)" % (what, self.expand(field)) def LEFT_JOIN(self): return 'LEFT OUTER JOIN' def RANDOM(self): return 'NEWID()' def ALLOW_NULL(self): return ' NULL' def SUBSTRING(self,field,parameters): return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) def PRIMARY_KEY(self,key): return 'PRIMARY KEY CLUSTERED (%s)' % key def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_s += ' TOP %i' % lmax return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def represent_exceptions(self, obj, fieldtype): if fieldtype == 'boolean': if obj and not str(obj)[0].upper() == 'F': return '1' else: return '0' return None def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}, fake_connect=False): self.db = db self.dbengine = "mssql" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.find_or_make_work_folder() # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 uri = uri.split('://')[1] if '@' not in uri: try: m = re.compile('^(?P<dsn>.+)$').match(uri) if not m: raise SyntaxError, \ 'Parsing uri string(%s) has no result' % self.uri dsn = m.group('dsn') if not dsn: raise SyntaxError, 'DSN required' except SyntaxError, e: logger.error('NdGpatch error') raise e cnxn = 'DSN=%s' % dsn else: m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri) if not m: raise SyntaxError, \ 
"Invalid URI string in DAL: %s" % uri user = credential_decoder(m.group('user')) if not user: raise SyntaxError, 'User required' password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError, 'Host name required' db = m.group('db') if not db: raise SyntaxError, 'Database name required' port = m.group('port') or '1433' # Parse the optional url name-value arg pairs after the '?' # (in the form of arg1=value1&arg2=value2&...) # Default values (drivers like FreeTDS insist on uppercase parameter keys) argsdict = { 'DRIVER':'{SQL Server}' } urlargs = m.group('urlargs') or '' argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') for argmatch in argpattern.finditer(urlargs): argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue') urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()]) cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \ % (host, port, db, user, password, urlargs) def connect(cnxn=cnxn,driver_args=driver_args): return self.driver.connect(cnxn,**driver_args) if not fake_connect: self.pool_connection(connect) self.cursor = self.connection.cursor() def lastrowid(self,table): #self.execute('SELECT @@IDENTITY;') self.execute('SELECT SCOPE_IDENTITY();') return int(self.cursor.fetchone()[0]) def integrity_error_class(self): return pyodbc.IntegrityError def rowslice(self,rows,minimum=0,maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] class MSSQL2Adapter(MSSQLAdapter): types = { 'boolean': 'CHAR(1)', 'string': 'NVARCHAR(%(length)s)', 'text': 'NTEXT', 'password': 'NVARCHAR(%(length)s)', 'blob': 'IMAGE', 'upload': 'NVARCHAR(%(length)s)', 'integer': 'INT', 'double': 'FLOAT', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATETIME', 'time': 'CHAR(8)', 'datetime': 'DATETIME', 'id': 'INT IDENTITY PRIMARY KEY', 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES 
%(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 'list:integer': 'NTEXT', 'list:string': 'NTEXT', 'list:reference': 'NTEXT', } def represent(self, obj, fieldtype): value = BaseAdapter.represent(self, obj, fieldtype) if (fieldtype == 'string' or fieldtype == 'text') and value[:1]=="'": value = 'N'+value return value def execute(self,a): return self.log_execute(a.decode('utf8')) class FireBirdAdapter(BaseAdapter): driver = globals().get('pyodbc',None) commit_on_alter_table = False support_distributed_transaction = True types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'BLOB SUB_TYPE 1', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB SUB_TYPE 0', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INTEGER', 'double': 'DOUBLE PRECISION', 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INTEGER PRIMARY KEY', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'BLOB SUB_TYPE 1', 'list:string': 'BLOB SUB_TYPE 1', 'list:reference': 'BLOB SUB_TYPE 1', } def sequence_name(self,tablename): return 'genid_%s' % tablename def trigger_name(self,tablename): return 'trg_id_%s' % tablename def RANDOM(self): return 'RAND()' def NOT_NULL(self,default,field_type): return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) def SUBSTRING(self,field,parameters): return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1]) def _drop(self,table,mode): sequence_name = table._sequence_name return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name] def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, 
limitby): if limitby: (lmin, lmax) = limitby sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def _truncate(self,table,mode = ''): return ['DELETE FROM %s;' % table._tablename, 'SET GENERATOR %s TO 0;' % table._sequence_name] def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "firebird" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.find_or_make_work_folder() uri = uri.split('://')[1] m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri) if not m: raise SyntaxError, "Invalid URI string in DAL: %s" % uri user = credential_decoder(m.group('user')) if not user: raise SyntaxError, 'User required' password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError, 'Host name required' port = int(m.group('port') or 3050) db = m.group('db') if not db: raise SyntaxError, 'Database name required' charset = m.group('charset') or 'UTF8' driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db), user = credential_decoder(user), password = credential_decoder(password), charset = charset)) if adapter_args.has_key('driver_name'): if adapter_args['driver_name'] == 'kinterbasdb': self.driver = kinterbasdb elif adapter_args['driver_name'] == 'firebirdsql': self.driver = firebirdsql else: self.driver = kinterbasdb def connect(driver_args=driver_args): return self.driver.connect(**driver_args) self.pool_connection(connect) self.cursor = self.connection.cursor() def create_sequence_and_triggers(self, query, table, **args): tablename = table._tablename sequence_name = table._sequence_name trigger_name = table._trigger_name self.execute(query) self.execute('create generator %s;' % 
sequence_name) self.execute('set generator %s to 0;' % sequence_name) self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name)) def lastrowid(self,table): sequence_name = table._sequence_name self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name) return int(self.cursor.fetchone()[0]) class FireBirdEmbeddedAdapter(FireBirdAdapter): def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "firebird" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.find_or_make_work_folder() uri = uri.split('://')[1] m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri) if not m: raise SyntaxError, \ "Invalid URI string in DAL: %s" % self.uri user = credential_decoder(m.group('user')) if not user: raise SyntaxError, 'User required' password = credential_decoder(m.group('password')) if not password: password = '' pathdb = m.group('path') if not pathdb: raise SyntaxError, 'Path required' charset = m.group('charset') if not charset: charset = 'UTF8' host = '' driver_args.update(dict(host=host, database=pathdb, user=credential_decoder(user), password=credential_decoder(password), charset=charset)) #def connect(driver_args=driver_args): # return kinterbasdb.connect(**driver_args) if adapter_args.has_key('driver_name'): if adapter_args['driver_name'] == 'kinterbasdb': self.driver = kinterbasdb elif adapter_args['driver_name'] == 'firebirdsql': self.driver = firebirdsql else: self.driver = kinterbasdb def connect(driver_args=driver_args): return self.driver.connect(**driver_args) self.pool_connection(connect) self.cursor = self.connection.cursor() class InformixAdapter(BaseAdapter): driver = globals().get('informixdb',None) 
types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'BLOB SUB_TYPE 1', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB SUB_TYPE 0', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INTEGER', 'double': 'FLOAT', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'CHAR(8)', 'datetime': 'DATETIME', 'id': 'SERIAL', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', 'list:integer': 'BLOB SUB_TYPE 1', 'list:string': 'BLOB SUB_TYPE 1', 'list:reference': 'BLOB SUB_TYPE 1', } def RANDOM(self): return 'Random()' def NOT_NULL(self,default,field_type): return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby fetch_amt = lmax - lmin dbms_version = int(self.connection.dbms_version.split('.')[0]) if lmin and (dbms_version >= 10): # Requires Informix 10.0+ sql_s += ' SKIP %d' % (lmin, ) if fetch_amt and (dbms_version >= 9): # Requires Informix 9.0+ sql_s += ' FIRST %d' % (fetch_amt, ) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def represent_exceptions(self, obj, fieldtype): if fieldtype == 'date': if isinstance(obj, (datetime.date, datetime.datetime)): obj = obj.isoformat()[:10] else: obj = str(obj) return "to_date('%s','yyyy-mm-dd')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): obj = obj.isoformat()[:19].replace('T',' ') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+' 00:00:00' else: obj = str(obj) return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj return None def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 
credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "informix" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.find_or_make_work_folder() uri = uri.split('://')[1] m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri) if not m: raise SyntaxError, \ "Invalid URI string in DAL: %s" % self.uri user = credential_decoder(m.group('user')) if not user: raise SyntaxError, 'User required' password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError, 'Host name required' db = m.group('db') if not db: raise SyntaxError, 'Database name required' user = credential_decoder(user) password = credential_decoder(password) dsn = '%s@%s' % (db,host) driver_args.update(dict(user=user,password=password,autocommit=True)) def connect(dsn=dsn,driver_args=driver_args): return self.driver.connect(dsn,**driver_args) self.pool_connection(connect) self.cursor = self.connection.cursor() def execute(self,command): if command[-1:]==';': command = command[:-1] return self.log_execute(command) def lastrowid(self,table): return self.cursor.sqlerrd[1] def integrity_error_class(self): return informixdb.IntegrityError class DB2Adapter(BaseAdapter): driver = globals().get('pyodbc',None) types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'CLOB', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INT', 'double': 'DOUBLE', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE 
%(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 'list:integer': 'CLOB', 'list:string': 'CLOB', 'list:reference': 'CLOB', } def LEFT_JOIN(self): return 'LEFT OUTER JOIN' def RANDOM(self): return 'RAND()' def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def represent_exceptions(self, obj, fieldtype): if fieldtype == 'blob': obj = base64.b64encode(str(obj)) return "BLOB('%s')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): obj = obj.isoformat()[:19].replace('T','-').replace(':','.') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+'-00.00.00' return "'%s'" % obj return None def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "db2" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.find_or_make_work_folder() cnxn = uri.split('://', 1)[1] def connect(cnxn=cnxn,driver_args=driver_args): return self.driver.connect(cnxn,**driver_args) self.pool_connection(connect) self.cursor = self.connection.cursor() def execute(self,command): if command[-1:]==';': command = command[:-1] return self.log_execute(command) def lastrowid(self,table): self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table) return int(self.cursor.fetchone()[0]) def rowslice(self,rows,minimum=0,maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] class TeradataAdapter(DB2Adapter): driver = globals().get('pyodbc',None) types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'CLOB', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB', 'upload': 
'VARCHAR(%(length)s)', 'integer': 'INT', 'double': 'DOUBLE', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 'list:integer': 'CLOB', 'list:string': 'CLOB', 'list:reference': 'CLOB', } def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "teradata" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.find_or_make_work_folder() cnxn = uri.split('://', 1)[1] def connect(cnxn=cnxn,driver_args=driver_args): return self.driver.connect(cnxn,**driver_args) self.pool_connection(connect) self.cursor = self.connection.cursor() INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name # (ANSI-SQL wants this form of name # to be a delimited identifier) class IngresAdapter(BaseAdapter): driver = globals().get('ingresdbi',None) types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'CLOB', 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 'blob': 'BLOB', 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 'integer': 'INTEGER4', # or int8... 
        # Remainder of IngresAdapter.types (the dict literal is opened just
        # above): maps web2py field types -> Ingres column DDL fragments.
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        # 'id' columns default to the shared sequence named by INGRES_SEQNAME;
        # create_sequence_and_triggers() below substitutes a per-table
        # sequence name into the generated CREATE TABLE statement.
        'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        # Ingres requires the explicit OUTER keyword.
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        # SQL expression used when ordering rows randomly.
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT statement, applying limitby=(lmin, lmax)
        via Ingres FIRST n / OFFSET m clauses when provided."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Open a pooled connection to a local Ingres database.

        The uri is reduced to a bare database name; vnode/servertype are
        hard-wired for a local ingres server and driver tracing is disabled.
        """
        self.db = db
        self.dbengine = "ingres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # NOTE(review): only self.uri is assigned above, never self._uri --
        # this read looks like a latent AttributeError; confirm against the
        # base class before relying on this adapter.
        connstr = self._uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        database_name=connstr # Assume only (local) dbname is passed in
        vnode = '(local)'
        servertype = 'ingres'
        trace = (0, None) # No tracing
        driver_args.update(dict(database=database_name,
                                vnode=vnode,
                                servertype=servertype,
                                trace=trace))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            # keyed table: no auto-id sequence needed, just reorganise
            # the table to a unique btree on the primary key columns
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            # id-based table: create a per-table sequence and patch the
            # CREATE TABLE statement to use it instead of INGRES_SEQNAME
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        # Reads back the per-table sequence created above; relies on
        # str(table) yielding the table name.
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return int(self.cursor.fetchone()[0]) # don't really need int type cast here...

    def integrity_error_class(self):
        return ingresdbi.IntegrityError

class IngresUnicodeAdapter(IngresAdapter):
    # Same adapter, but with N-prefixed (Unicode) column types.

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
'double': 'FLOAT8', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'ANSIDATE', 'time': 'TIME WITHOUT TIME ZONE', 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 'id': 'integer4 not null unique with default next value for %s'% INGRES_SEQNAME, 'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO 'list:integer': 'NCLOB', 'list:string': 'NCLOB', 'list:reference': 'NCLOB', } class SAPDBAdapter(BaseAdapter): driver = globals().get('sapdb',None) support_distributed_transaction = False types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'LONG', 'password': 'VARCHAR(%(length)s)', 'blob': 'LONG', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INT', 'double': 'FLOAT', 'decimal': 'FIXED(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INT PRIMARY KEY', 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'LONG', 'list:string': 'LONG', 'list:reference': 'LONG', } def sequence_name(self,table): return '%s_id_Seq' % table def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby if len(sql_w) > 1: sql_w_row = sql_w + ' AND w_row > %i' % lmin else: sql_w_row = 'WHERE w_row > %i' % lmin return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def create_sequence_and_triggers(self, query, table, **args): # following lines should only 
be executed if table._sequence_name does not exist self.execute('CREATE SEQUENCE %s;' % table._sequence_name) self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ % (table._tablename, table._id.name, table._sequence_name)) self.execute(query) def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "sapdb" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.find_or_make_work_folder() uri = uri.split('://')[1] m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri) if not m: raise SyntaxError, "Invalid URI string in DAL" user = credential_decoder(m.group('user')) if not user: raise SyntaxError, 'User required' password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError, 'Host name required' db = m.group('db') if not db: raise SyntaxError, 'Database name required' def connect(user=user,password=password,database=db, host=host,driver_args=driver_args): return self.driver.Connection(user,password,database, host,**driver_args) self.pool_connection(connect) # self.connection.set_client_encoding('UTF8') self.cursor = self.connection.cursor() def lastrowid(self,table): self.execute("select %s.NEXTVAL from dual" % table._sequence_name) return int(self.cursor.fetchone()[0]) class CubridAdapter(MySQLAdapter): driver = globals().get('cubriddb',None) def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "cubrid" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.find_or_make_work_folder() uri = uri.split('://')[1] m = 
re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri) if not m: raise SyntaxError, \ "Invalid URI string in DAL: %s" % self.uri user = credential_decoder(m.group('user')) if not user: raise SyntaxError, 'User required' password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError, 'Host name required' db = m.group('db') if not db: raise SyntaxError, 'Database name required' port = int(m.group('port') or '30000') charset = m.group('charset') or 'utf8' user=credential_decoder(user), passwd=credential_decoder(password), def connect(host,port,db,user,passwd,driver_args=driver_args): return self.driver.connect(host,port,db,user,passwd,**driver_args) self.pool_connection(connect) self.cursor = self.connection.cursor() self.execute('SET FOREIGN_KEY_CHECKS=1;') self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';") ######## GAE MySQL ########## class DatabaseStoredFile: web2py_filesystem = False def __init__(self,db,filename,mode): if db._adapter.dbengine != 'mysql': raise RuntimeError, "only MySQL can store metadata .table files in database for now" self.db = db self.filename = filename self.mode = mode if not self.web2py_filesystem: self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;") DatabaseStoredFile.web2py_filesystem = True self.p=0 self.data = '' if mode in ('r','rw','a'): query = "SELECT content FROM web2py_filesystem WHERE path='%s'" % filename rows = self.db.executesql(query) if rows: self.data = rows[0][0] elif os.path.exists(filename): datafile = open(filename, 'r') try: self.data = datafile.read() finally: datafile.close() elif mode in ('r','rw'): raise RuntimeError, "File %s does not exist" % filename def read(self, bytes): data = self.data[self.p:self.p+bytes] self.p += len(data) return data def 
readline(self): i = self.data.find('\n',self.p)+1 if i>0: data, self.p = self.data[self.p:i], i else: data, self.p = self.data[self.p:], len(self.data) return data def write(self,data): self.data += data def close(self): self.db.executesql("DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename) query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')" % \ (self.filename, self.data.replace("'","''")) self.db.executesql(query) self.db.commit() @staticmethod def exists(db,filename): if os.path.exists(filename): return True query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename if db.executesql(query): return True return False class UseDatabaseStoredFile: def file_exists(self, filename): return DatabaseStoredFile.exists(self.db,filename) def file_open(self, filename, mode='rb', lock=True): return DatabaseStoredFile(self.db,filename,mode) def file_close(self, fileobj, unlock=True): fileobj.close() def file_delete(self,filename): query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename self.db.executesql(query) self.db.commit() class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter): def __init__(self, db, uri='google:sql://realm:domain/database', pool_size=0, folder=None, db_codec='UTF-8', check_reserved=None, migrate=True, fake_migrate=False, credential_decoder = lambda x:x, driver_args={}, adapter_args={}): self.db = db self.dbengine = "mysql" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self.folder = folder or '$HOME/'+thread.folder.split('/applications/',1)[1] m = re.compile('^(?P<instance>.*)/(?P<db>.*)$').match(self.uri[len('google:sql://'):]) if not m: raise SyntaxError, "Invalid URI string in SQLDB: %s" % self._uri instance = credential_decoder(m.group('instance')) db = credential_decoder(m.group('db')) driver_args['instance'] = instance if not migrate: driver_args['database'] = db def connect(driver_args=driver_args): return rdbms.connect(**driver_args) 
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        if migrate:
            # self.execute('DROP DATABASE %s' % db)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % db)
            self.execute('USE %s' % db)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

class NoSQLAdapter(BaseAdapter):
    """Common base for non-relational back-ends (e.g. Google Datastore).

    Provides value marshalling (represent), debug-string builders for the
    _insert/_select/... hooks, no-op transaction methods, and (further
    below) stubs that reject SQL-only operations.
    """

    @staticmethod
    def to_unicode(obj):
        # Coerce any value to unicode; str values are assumed utf8-encoded.
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def represent(self, obj, fieldtype):
        """Convert a Python value into the form the back-end stores for
        the given web2py fieldtype (may also be an SQLCustomType or a
        gae.Property instance)."""
        if isinstance(obj,CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError, "non supported on GAE"
        # NOTE(review): this is a chained comparison, i.e.
        # (self.dbengine == 'google:datastore') and ('google:datastore' in globals());
        # 'google:datastore' is not a valid identifier so the second arm
        # appears to always be False, making the branch dead -- confirm
        # the intended condition.
        if self.dbengine=='google:datastore' in globals():
            if isinstance(fieldtype, gae.Property):
                return obj
        if fieldtype.startswith('list:'):
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        # empty string maps to None except for string-ish field types
        # (string/text/password/upload)
        if obj == '' and not fieldtype[:2] in ['st','te','pa','up']:
            return None
        if obj != None:
            if isinstance(obj, list) and not fieldtype.startswith('list'):
                # plain list against a scalar type: convert element-wise
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif fieldtype.startswith('reference'):
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # anything except falsy or a leading 'F'/'f' counts as True
                if obj and not str(obj)[0].upper() == 'F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    # parse 'YYYY-MM-DD'
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    # strip the time component from datetimes
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    # parse 'HH:MM[:SS]'; missing seconds default to 0
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    # parse 'YYYY-MM-DD HH:MM:SS' (missing time parts -> 0)
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif fieldtype.startswith('list:string'):
                return map(self.to_unicode,obj)
            elif fieldtype.startswith('list:'):
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj

    # The following _insert/_count/_select/_delete/_update methods build
    # human-readable debug strings only -- they are not executable queries.
    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename), repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    # these functions should never be called!
def OR(self,first,second): raise SyntaxError, "Not supported" def AND(self,first,second): raise SyntaxError, "Not supported" def AS(self,first,second): raise SyntaxError, "Not supported" def ON(self,first,second): raise SyntaxError, "Not supported" def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported" def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported" def ADD(self,first,second): raise SyntaxError, "Not supported" def SUB(self,first,second): raise SyntaxError, "Not supported" def MUL(self,first,second): raise SyntaxError, "Not supported" def DIV(self,first,second): raise SyntaxError, "Not supported" def LOWER(self,first): raise SyntaxError, "Not supported" def UPPER(self,first): raise SyntaxError, "Not supported" def EXTRACT(self,first,what): raise SyntaxError, "Not supported" def AGGREGATE(self,first,what): raise SyntaxError, "Not supported" def LEFT_JOIN(self): raise SyntaxError, "Not supported" def RANDOM(self): raise SyntaxError, "Not supported" def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported" def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported" def LIKE(self,first,second): raise SyntaxError, "Not supported" def drop(self,table,mode): raise SyntaxError, "Not supported" def alias(self,table,alias): raise SyntaxError, "Not supported" def migrate_table(self,*a,**b): raise SyntaxError, "Not supported" def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported" def prepare(self,key): raise SyntaxError, "Not supported" def commit_prepared(self,key): raise SyntaxError, "Not supported" def rollback_prepared(self,key): raise SyntaxError, "Not supported" def concat_add(self,table): raise SyntaxError, "Not supported" def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported" def create_sequence_and_triggers(self, query, table, **args): pass def log_execute(self,*a,**b): raise SyntaxError, "Not supported" def execute(self,*a,**b): raise SyntaxError, "Not 
supported" def represent_exceptions(self, obj, fieldtype): raise SyntaxError, "Not supported" def lastrowid(self,table): raise SyntaxError, "Not supported" def integrity_error_class(self): raise SyntaxError, "Not supported" def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError, "Not supported" class GAEF(object): def __init__(self,name,op,value,apply): self.name=name=='id' and '__key__' or name self.op=op self.value=value self.apply=apply def __repr__(self): return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value)) class GoogleDatastoreAdapter(NoSQLAdapter): uploads_in_blob = True types = {} def file_exists(self, filename): pass def file_open(self, filename, mode='rb', lock=True): pass def file_close(self, fileobj, unlock=True): pass def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.types.update({ 'boolean': gae.BooleanProperty, 'string': (lambda: gae.StringProperty(multiline=True)), 'text': gae.TextProperty, 'password': gae.StringProperty, 'blob': gae.BlobProperty, 'upload': gae.StringProperty, 'integer': gae.IntegerProperty, 'double': gae.FloatProperty, 'decimal': GAEDecimalProperty, 'date': gae.DateProperty, 'time': gae.TimeProperty, 'datetime': gae.DateTimeProperty, 'id': None, 'reference': gae.IntegerProperty, 'list:string': (lambda: gae.StringListProperty(default=None)), 'list:integer': (lambda: gae.ListProperty(int,default=None)), 'list:reference': (lambda: gae.ListProperty(int,default=None)), }) self.db = db self.uri = uri self.dbengine = 'google:datastore' self.folder = folder db['_lastsql'] = '' self.db_codec = 'UTF-8' self.pool_size = 0 match = re.compile('.*://(?P<namespace>.+)').match(uri) if match: namespace_manager.set_namespace(match.group('namespace')) def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None): myfields = {} for k in table.fields: if isinstance(polymodel,Table) and k in polymodel.fields(): 
continue field = table[k] attr = {} if isinstance(field.type, SQLCustomType): ftype = self.types[field.type.native or field.type.type](**attr) elif isinstance(field.type, gae.Property): ftype = field.type elif field.type.startswith('id'): continue elif field.type.startswith('decimal'): precision, scale = field.type[7:].strip('()').split(',') precision = int(precision) scale = int(scale) ftype = GAEDecimalProperty(precision, scale, **attr) elif field.type.startswith('reference'): if field.notnull: attr = dict(required=True) referenced = field.type[10:].strip() ftype = self.types[field.type[:9]](table._db[referenced]) elif field.type.startswith('list:reference'): if field.notnull: attr = dict(required=True) referenced = field.type[15:].strip() ftype = self.types[field.type[:14]](**attr) elif field.type.startswith('list:'): ftype = self.types[field.type](**attr) elif not field.type in self.types\ or not self.types[field.type]: raise SyntaxError, 'Field: unknown field type: %s' % field.type else: ftype = self.types[field.type](**attr) myfields[field.name] = ftype if not polymodel: table._tableobj = classobj(table._tablename, (gae.Model, ), myfields) elif polymodel==True: table._tableobj = classobj(table._tablename, (PolyModel, ), myfields) elif isinstance(polymodel,Table): table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields) else: raise SyntaxError, "polymodel must be None, True, a table or a tablename" return None def expand(self,expression,field_type=None): if isinstance(expression,Field): if expression.type in ('text','blob'): raise SyntaxError, 'AppEngine does not index by: %s' % expression.type return expression.name elif isinstance(expression, (Expression, Query)): if not expression.second is None: return expression.op(expression.first, expression.second) elif not expression.first is None: return expression.op(expression.first) else: return expression.op() elif field_type: return self.represent(expression,field_type) elif 
isinstance(expression,(list,tuple)): return ','.join([self.represent(item,field_type) for item in expression]) else: return str(expression) ### TODO from gql.py Expression def AND(self,first,second): a = self.expand(first) b = self.expand(second) if b[0].name=='__key__' and a[0].name!='__key__': return b+a return a+b def EQ(self,first,second=None): if isinstance(second, Key): return [GAEF(first.name,'=',second,lambda a,b:a==b)] return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)] def NE(self,first,second=None): if first.type != 'id': return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'!=',second,lambda a,b:a!=b)] def LT(self,first,second=None): if first.type != 'id': return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'<',second,lambda a,b:a<b)] def LE(self,first,second=None): if first.type != 'id': return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'<=',second,lambda a,b:a<=b)] def GT(self,first,second=None): if first.type != 'id' or second==0 or second == '0': return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'>',second,lambda a,b:a>b)] def GE(self,first,second=None): if first.type != 'id': return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'>=',second,lambda a,b:a>=b)] def INVERT(self,first): return '-%s' % first.name def COMMA(self,first,second): return '%s, %s' % (self.expand(first),self.expand(second)) def BELONGS(self,first,second=None): if not isinstance(second,(list, 
tuple)): raise SyntaxError, "Not supported" if first.type != 'id': return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] else: second = [Key.from_path(first._tablename, i) for i in second] return [GAEF(first.name,'in',second,lambda a,b:a in b)] def CONTAINS(self,first,second): if not first.type.startswith('list:'): raise SyntaxError, "Not supported" return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:a in b)] def NOT(self,first): nops = { self.EQ: self.NE, self.NE: self.EQ, self.LT: self.GE, self.GT: self.LE, self.LE: self.GT, self.GE: self.LT} if not isinstance(first,Query): raise SyntaxError, "Not suported" nop = nops.get(first.op,None) if not nop: raise SyntaxError, "Not suported %s" % first.op.__name__ first.op = nop return self.expand(first) def truncate(self,table,mode): self.db(table._id > 0).delete() def select_raw(self,query,fields=[],attributes={}): new_fields = [] for item in fields: if isinstance(item,SQLALL): new_fields += item.table else: new_fields.append(item) fields = new_fields if query: tablename = self.get_table(query) elif fields: tablename = fields[0].tablename query = fields[0].table._id>0 else: raise SyntaxError, "Unable to determine a tablename" query = self.filter_tenant(query,[tablename]) tableobj = self.db[tablename]._tableobj items = tableobj.all() filters = self.expand(query) for filter in filters: if filter.name=='__key__' and filter.op=='>' and filter.value==0: continue elif filter.name=='__key__' and filter.op=='=': if filter.value==0: items = [] elif isinstance(filter.value, Key): item = tableobj.get(filter.value) items = (item and [item]) or [] else: item = tableobj.get_by_id(filter.value) items = (item and [item]) or [] elif isinstance(items,list): # i.e. there is a single record! 
items = [i for i in items if filter.apply(getattr(item,filter.name), filter.value)] else: if filter.name=='__key__': items.order('__key__') items = items.filter('%s %s' % (filter.name,filter.op),filter.value) if not isinstance(items,list): if attributes.get('left', None): raise SyntaxError, 'Set: no left join in appengine' if attributes.get('groupby', None): raise SyntaxError, 'Set: no groupby in appengine' orderby = attributes.get('orderby', False) if orderby: ### THIS REALLY NEEDS IMPROVEMENT !!! if isinstance(orderby, (list, tuple)): orderby = xorify(orderby) if isinstance(orderby,Expression): orderby = self.expand(orderby) orders = orderby.split(', ') for order in orders: order={'-id':'-__key__','id':'__key__'}.get(order,order) items = items.order(order) if attributes.get('limitby', None): (lmin, lmax) = attributes['limitby'] (limit, offset) = (lmax - lmin, lmin) items = items.fetch(limit, offset=offset) fields = self.db[tablename].fields return (items, tablename, fields) def select(self,query,fields,attributes): (items, tablename, fields) = self.select_raw(query,fields,attributes) # self.db['_lastsql'] = self._select(query,fields,attributes) rows = [ [t=='id' and int(item.key().id()) or getattr(item, t) for t in fields] for item in items] colnames = ['%s.%s' % (tablename, t) for t in fields] return self.parse(rows, colnames, False) def count(self,query,distinct=None): if distinct: raise RuntimeError, "COUNT DISTINCT not supported" (items, tablename, fields) = self.select_raw(query) # self.db['_lastsql'] = self._count(query) try: return len(items) except TypeError: return items.count(limit=None) def delete(self,tablename, query): """ This function was changed on 2010-05-04 because according to http://code.google.com/p/googleappengine/issues/detail?id=3119 GAE no longer support deleting more than 1000 records. 
""" # self.db['_lastsql'] = self._delete(tablename,query) (items, tablename, fields) = self.select_raw(query) # items can be one item or a query if not isinstance(items,list): counter = items.count(limit=None) leftitems = items.fetch(1000) while len(leftitems): gae.delete(leftitems) leftitems = items.fetch(1000) else: counter = len(items) gae.delete(items) return counter def update(self,tablename,query,update_fields): # self.db['_lastsql'] = self._update(tablename,query,update_fields) (items, tablename, fields) = self.select_raw(query) counter = 0 for item in items: for field, value in update_fields: setattr(item, field.name, self.represent(value,field.type)) item.put() counter += 1 logger.info(str(counter)) return counter def insert(self,table,fields): dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) # table._db['_lastsql'] = self._insert(table,fields) tmp = table._tableobj(**dfields) tmp.put() rid = Reference(tmp.key().id()) (rid._table, rid._record) = (table, None) return rid def bulk_insert(self,table,items): parsed_items = [] for item in items: dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) parsed_items.append(table._tableobj(**dfields)) gae.put(parsed_items) return True def uuid2int(uuidv): return uuid.UUID(uuidv).int def int2uuid(n): return str(uuid.UUID(int=n)) class CouchDBAdapter(NoSQLAdapter): uploads_in_blob = True types = { 'boolean': bool, 'string': str, 'text': str, 'password': str, 'blob': str, 'upload': str, 'integer': long, 'double': float, 'date': datetime.date, 'time': datetime.time, 'datetime': datetime.datetime, 'id': long, 'reference': long, 'list:string': list, 'list:integer': list, 'list:reference': list, } def file_exists(self, filename): pass def file_open(self, filename, mode='rb', lock=True): pass def file_close(self, fileobj, unlock=True): pass def expand(self,expression,field_type=None): if isinstance(expression,Field): if expression.type=='id': return "%s._id" % expression.tablename return 
BaseAdapter.expand(self,expression,field_type) def AND(self,first,second): return '(%s && %s)' % (self.expand(first),self.expand(second)) def OR(self,first,second): return '(%s || %s)' % (self.expand(first),self.expand(second)) def EQ(self,first,second): if second is None: return '(%s == null)' % self.expand(first) return '(%s == %s)' % (self.expand(first),self.expand(second,first.type)) def NE(self,first,second): if second is None: return '(%s != null)' % self.expand(first) return '(%s != %s)' % (self.expand(first),self.expand(second,first.type)) def COMMA(self,first,second): return '%s + %s' % (self.expand(first),self.expand(second)) def represent(self, obj, fieldtype): value = NoSQLAdapter.represent(self, obj, fieldtype) if fieldtype=='id': return repr(str(int(value))) return repr(not isinstance(value,unicode) and value or value.encode('utf8')) def __init__(self,db,uri='couchdb://127.0.0.1:5984', pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.uri = uri self.dbengine = 'couchdb' self.folder = folder db['_lastsql'] = '' self.db_codec = 'UTF-8' self.pool_size = pool_size url='http://'+uri[10:] def connect(url=url,driver_args=driver_args): return couchdb.Server(url,**driver_args) self.pool_connection(connect) def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None): if migrate: try: self.connection.create(table._tablename) except: pass def insert(self,table,fields): id = uuid2int(web2py_uuid()) ctable = self.connection[table._tablename] values = dict((k.name,NoSQLAdapter.represent(self,v,k.type)) for k,v in fields) values['_id'] = str(id) ctable.save(values) return id def _select(self,query,fields,attributes): if not isinstance(query,Query): raise SyntaxError, "Not Supported" for key in set(attributes.keys())-set(('orderby','groupby','limitby', 'required','cache','left', 'distinct','having')): raise SyntaxError, 'invalid select attribute: %s' % key new_fields=[] for 
item in fields: if isinstance(item,SQLALL): new_fields += item.table else: new_fields.append(item) def uid(fd): return fd=='id' and '_id' or fd def get(row,fd): return fd=='id' and int(row['_id']) or row.get(fd,None) fields = new_fields tablename = self.get_table(query) fieldnames = [f.name for f in (fields or self.db[tablename])] colnames = ['%s.%s' % (tablename,k) for k in fieldnames] fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) fn="function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);}" %\ dict(t=tablename, query=self.expand(query), order='%s._id' % tablename, fields=fields) return fn, colnames def select(self,query,fields,attributes): if not isinstance(query,Query): raise SyntaxError, "Not Supported" fn, colnames = self._select(query,fields,attributes) tablename = colnames[0].split('.')[0] ctable = self.connection[tablename] rows = [cols['value'] for cols in ctable.query(fn)] return self.parse(rows, colnames, False) def delete(self,tablename,query): if not isinstance(query,Query): raise SyntaxError, "Not Supported" if query.first.type=='id' and query.op==self.EQ: id = query.second tablename = query.first.tablename assert(tablename == query.first.tablename) ctable = self.connection[tablename] try: del ctable[str(id)] return 1 except couchdb.http.ResourceNotFound: return 0 else: tablename = self.get_table(query) rows = self.select(query,[self.db[tablename]._id],{}) ctable = self.connection[tablename] for row in rows: del ctable[str(row.id)] return len(rows) def update(self,tablename,query,fields): if not isinstance(query,Query): raise SyntaxError, "Not Supported" if query.first.type=='id' and query.op==self.EQ: id = query.second tablename = query.first.tablename ctable = self.connection[tablename] try: doc = ctable[str(id)] for key,value in fields: doc[key.name] = NoSQLAdapter.represent(self,value,self.db[tablename][key.name].type) ctable.save(doc) return 1 except couchdb.http.ResourceNotFound: return 0 else: tablename = 
self.get_table(query) rows = self.select(query,[self.db[tablename]._id],{}) ctable = self.connection[tablename] table = self.db[tablename] for row in rows: doc = ctable[str(row.id)] for key,value in fields: doc[key.name] = NoSQLAdapter.represent(self,value,table[key.name].type) ctable.save(doc) return len(rows) def count(self,query,distinct=None): if distinct: raise RuntimeError, "COUNT DISTINCT not supported" if not isinstance(query,Query): raise SyntaxError, "Not Supported" tablename = self.get_table(query) rows = self.select(query,[self.db[tablename]._id],{}) return len(rows) def cleanup(text): """ validates that the given text is clean: only contains [0-9a-zA-Z_] """ if re.compile('[^0-9a-zA-Z_]').findall(text): raise SyntaxError, \ 'only [0-9a-zA-Z_] allowed in table and field names, received %s' \ % text return text class MongoDBAdapter(NoSQLAdapter): uploads_in_blob = True types = { 'boolean': bool, 'string': str, 'text': str, 'password': str, 'blob': str, 'upload': str, 'integer': long, 'double': float, 'date': datetime.date, 'time': datetime.time, 'datetime': datetime.datetime, 'id': long, 'reference': long, 'list:string': list, 'list:integer': list, 'list:reference': list, } def __init__(self,db,uri='mongodb://127.0.0.1:5984/db', pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=lambda x:x, driver_args={}, adapter_args={}): self.db = db self.uri = uri self.dbengine = 'mongodb' self.folder = folder db['_lastsql'] = '' self.db_codec = 'UTF-8' self.pool_size = pool_size m = re.compile('^(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self._uri[10:]) if not m: raise SyntaxError, "Invalid URI string in DAL: %s" % self._uri host = m.group('host') if not host: raise SyntaxError, 'mongodb: host name required' dbname = m.group('db') if not dbname: raise SyntaxError, 'mongodb: db name required' port = m.group('port') or 27017 driver_args.update(dict(host=host,port=port)) def connect(dbname=dbname,driver_args=driver_args): return 
pymongo.Connection(**driver_args)[dbname] self.pool_connection(connect) def insert(self,table,fields): ctable = self.connection[table._tablename] values = dict((k,self.represent(v,table[k].type)) for k,v in fields) ctable.insert(values) return uuid2int(id) def count(self,query): raise RuntimeError, "Not implemented" def select(self,query,fields,attributes): raise RuntimeError, "Not implemented" def delete(self,tablename, query): raise RuntimeError, "Not implemented" def update(self,tablename,query,fields): raise RuntimeError, "Not implemented" ######################################################################## # end of adapters ######################################################################## ADAPTERS = { 'sqlite': SQLiteAdapter, 'sqlite:memory': SQLiteAdapter, 'mysql': MySQLAdapter, 'postgres': PostgreSQLAdapter, 'oracle': OracleAdapter, 'mssql': MSSQLAdapter, 'mssql2': MSSQL2Adapter, 'db2': DB2Adapter, 'teradata': TeradataAdapter, 'informix': InformixAdapter, 'firebird': FireBirdAdapter, 'firebird_embedded': FireBirdAdapter, 'ingres': IngresAdapter, 'ingresu': IngresUnicodeAdapter, 'sapdb': SAPDBAdapter, 'cubrid': CubridAdapter, 'jdbc:sqlite': JDBCSQLiteAdapter, 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 'jdbc:postgres': JDBCPostgreSQLAdapter, 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 'google:datastore': GoogleDatastoreAdapter, 'google:sql': GoogleSQLAdapter, 'couchdb': CouchDBAdapter, 'mongodb': MongoDBAdapter, } def sqlhtml_validators(field): """ Field type validation, using web2py's validators mechanism. 
makes sure the content of a field is in line with the declared fieldtype """ if not have_validators: return [] field_type, field_length = field.type, field.length if isinstance(field_type, SQLCustomType): if hasattr(field_type, 'validator'): return field_type.validator else: field_type = field_type.type elif not isinstance(field_type,str): return [] requires=[] def ff(r,id): row=r(id) if not row: return id elif hasattr(r, '_format') and isinstance(r._format,str): return r._format % row elif hasattr(r, '_format') and callable(r._format): return r._format(row) else: return id if field_type == 'string': requires.append(validators.IS_LENGTH(field_length)) elif field_type == 'text': requires.append(validators.IS_LENGTH(field_length)) elif field_type == 'password': requires.append(validators.IS_LENGTH(field_length)) elif field_type == 'double': requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100)) elif field_type == 'integer': requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100)) elif field_type.startswith('decimal'): requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10)) elif field_type == 'date': requires.append(validators.IS_DATE()) elif field_type == 'time': requires.append(validators.IS_TIME()) elif field_type == 'datetime': requires.append(validators.IS_DATETIME()) elif field.db and field_type.startswith('reference') and \ field_type.find('.') < 0 and \ field_type[10:] in field.db.tables: referenced = field.db[field_type[10:]] def repr_ref(id, r=referenced, f=ff): return f(r, id) field.represent = field.represent or repr_ref if hasattr(referenced, '_format') and referenced._format: requires = validators.IS_IN_DB(field.db,referenced._id, referenced._format) if field.unique: requires._and = validators.IS_NOT_IN_DB(field.db,field) if field.tablename == field_type[10:]: return validators.IS_EMPTY_OR(requires) return requires elif field.db and field_type.startswith('list:reference') and \ field_type.find('.') < 0 and \ field_type[15:] in 
field.db.tables: referenced = field.db[field_type[15:]] def list_ref_repr(ids, r=referenced, f=ff): if not ids: return None refs = r._db(r._id.belongs(ids)).select(r._id) return (refs and ', '.join(str(f(r,ref.id)) for ref in refs) or '') field.represent = field.represent or list_ref_repr if hasattr(referenced, '_format') and referenced._format: requires = validators.IS_IN_DB(field.db,referenced._id, referenced._format,multiple=True) else: requires = validators.IS_IN_DB(field.db,referenced._id, multiple=True) if field.unique: requires._and = validators.IS_NOT_IN_DB(field.db,field) return requires elif field_type.startswith('list:'): def repr_list(values): return', '.join(str(v) for v in (values or [])) field.represent = field.represent or repr_list if field.unique: requires.insert(0,validators.IS_NOT_IN_DB(field.db,field)) sff = ['in', 'do', 'da', 'ti', 'de', 'bo'] if field.notnull and not field_type[:2] in sff: requires.insert(0, validators.IS_NOT_EMPTY()) elif not field.notnull and field_type[:2] in sff and requires: requires[-1] = validators.IS_EMPTY_OR(requires[-1]) return requires def bar_escape(item): return str(item).replace('|', '||') def bar_encode(items): return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip()) def bar_decode_integer(value): return [int(x) for x in value.split('|') if x.strip()] def bar_decode_string(value): return [x.replace('||', '|') for x in string_unpack.split(value[1:-1]) if x.strip()] class Row(dict): """ a dictionary that lets you do d['a'] as well as d.a this is only used to store a Row """ def __getitem__(self, key): key=str(key) if key in self.get('_extra',{}): return self._extra[key] return dict.__getitem__(self, key) def __call__(self,key): return self.__getitem__(key) def __setitem__(self, key, value): dict.__setitem__(self, str(key), value) def __getattr__(self, key): return self[key] def __setattr__(self, key, value): self[key] = value def __repr__(self): return '<Row ' + dict.__repr__(self) + '>' 
def __int__(self): return dict.__getitem__(self,'id') def __eq__(self,other): try: return self.as_dict() == other.as_dict() except AttributeError: return False def __ne__(self,other): return not (self == other) def __copy__(self): return Row(dict(self)) def as_dict(self,datetime_to_str=False): SERIALIZABLE_TYPES = (str,unicode,int,long,float,bool,list) d = dict(self) for k in copy.copy(d.keys()): v=d[k] if d[k] is None: continue elif isinstance(v,Row): d[k]=v.as_dict() elif isinstance(v,Reference): d[k]=int(v) elif isinstance(v,decimal.Decimal): d[k]=float(v) elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)): if datetime_to_str: d[k] = v.isoformat().replace('T',' ')[:19] elif not isinstance(v,SERIALIZABLE_TYPES): del d[k] return d def Row_unpickler(data): return Row(cPickle.loads(data)) def Row_pickler(data): return Row_unpickler, (cPickle.dumps(data.as_dict(datetime_to_str=False)),) copy_reg.pickle(Row, Row_pickler, Row_unpickler) ################################################################################ # Everything below should be independent on the specifics of the # database and should for RDBMs and some NoSQL databases ################################################################################ class SQLCallableList(list): def __call__(self): return copy.copy(self) class DAL(dict): """ an instance of this class represents a database connection Example:: db = DAL('sqlite://test.db') db.define_table('tablename', Field('fieldname1'), Field('fieldname2')) """ @staticmethod def set_folder(folder): """ # ## this allows gluon to set a folder for this thread # ## <<<<<<<<< Should go away as new DAL replaces old sql.py """ BaseAdapter.set_folder(folder) @staticmethod def distributed_transaction_begin(*instances): if not instances: return thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] instances = enumerate(instances) for (i, db) in instances: if not 
db._adapter.support_distributed_transaction(): raise SyntaxError, \ 'distributed transaction not suported by %s' % db._dbname for (i, db) in instances: db._adapter.distributed_transaction_begin(keys[i]) @staticmethod def distributed_transaction_commit(*instances): if not instances: return instances = enumerate(instances) thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] for (i, db) in instances: if not db._adapter.support_distributed_transaction(): raise SyntaxError, \ 'distributed transaction not suported by %s' % db._dbanme try: for (i, db) in instances: db._adapter.prepare(keys[i]) except: for (i, db) in instances: db._adapter.rollback_prepared(keys[i]) raise RuntimeError, 'failure to commit distributed transaction' else: for (i, db) in instances: db._adapter.commit_prepared(keys[i]) return def __init__(self, uri='sqlite://dummy.db', pool_size=0, folder=None, db_codec='UTF-8', check_reserved=None, migrate=True, fake_migrate=False, migrate_enabled=True, fake_migrate_all=False, decode_credentials=False, driver_args=None, adapter_args={}, attempts=5, auto_import=False): """ Creates a new Database Abstraction Layer instance. Keyword arguments: :uri: string that contains information for connecting to a database. (default: 'sqlite://dummy.db') :pool_size: How many open connections to make to the database object. :folder: <please update me> :db_codec: string encoding of the database (default: 'UTF-8') :check_reserved: list of adapters to check tablenames and column names against sql reserved keywords. (Default None) * 'common' List of sql keywords that are common to all database types such as "SELECT, INSERT". (recommended) * 'all' Checks against all known SQL keywords. (not recommended) <adaptername> Checks against the specific adapters list of keywords (recommended) * '<adaptername>_nonreserved' Checks against the specific adapters list of nonreserved keywords. 
(if available) :migrate (defaults to True) sets default migrate behavior for all tables :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables :migrate_enabled (defaults to True). If set to False disables ALL migrations :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables :attempts (defaults to 5). Number of times to attempt connecting """ if not decode_credentials: credential_decoder = lambda cred: cred else: credential_decoder = lambda cred: urllib.unquote(cred) if folder: self.set_folder(folder) self._uri = uri self._pool_size = pool_size self._db_codec = db_codec self._lastsql = '' self._timings = [] self._pending_references = {} self._request_tenant = 'request_tenant' self._common_fields = [] if not str(attempts).isdigit() or attempts < 0: attempts = 5 if uri: uris = isinstance(uri,(list,tuple)) and uri or [uri] error = '' connected = False for k in range(attempts): for uri in uris: try: if is_jdbc and not uri.startswith('jdbc:'): uri = 'jdbc:'+uri self._dbname = regex_dbname.match(uri).group() if not self._dbname in ADAPTERS: raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname # notice that driver args or {} else driver_args defaults to {} global, not correct args = (self,uri,pool_size,folder,db_codec,credential_decoder,driver_args or {}, adapter_args) self._adapter = ADAPTERS[self._dbname](*args) connected = True break except SyntaxError: raise except Exception, error: sys.stderr.write('DEBUG_c: Exception %r' % ((Exception, error,),)) if connected: break else: time.sleep(1) if not connected: raise RuntimeError, "Failure to connect, tried %d times:\n%s" % (attempts, error) else: args = (self,'None',0,folder,db_codec) self._adapter = BaseAdapter(*args) migrate = fake_migrate = False adapter = self._adapter self._uri_hash = hashlib.md5(adapter.uri).hexdigest() self.tables = SQLCallableList() self.check_reserved = check_reserved if self.check_reserved: from reserved_sql_keywords 
import ADAPTERS as RSK self.RSK = RSK self._migrate = migrate self._fake_migrate = fake_migrate self._migrate_enabled = migrate_enabled self._fake_migrate_all = fake_migrate_all if auto_import: self.import_table_definitions(adapter.folder) def import_table_definitions(self,path,migrate=False,fake_migrate=False): pattern = os.path.join(path,self._uri_hash+'_*.table') for filename in glob.glob(pattern): tfile = self._adapter.file_open(filename, 'r') try: sql_fields = cPickle.load(tfile) name = filename[len(pattern)-7:-6] mf = [(value['sortable'],Field(key,type=value['type'])) \ for key, value in sql_fields.items()] mf.sort(lambda a,b: cmp(a[0],b[0])) self.define_table(name,*[item[1] for item in mf], **dict(migrate=migrate,fake_migrate=fake_migrate)) finally: self._adapter.file_close(tfile) def check_reserved_keyword(self, name): """ Validates ``name`` against SQL keywords Uses self.check_reserve which is a list of operators to use. self.check_reserved ['common', 'postgres', 'mysql'] self.check_reserved ['all'] """ for backend in self.check_reserved: if name.upper() in self.RSK[backend]: raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper()) def __contains__(self, tablename): if self.has_key(tablename): return True else: return False def parse_as_rest(self,patterns,args,vars,query=None,nested_select=True): """ EXAMPLE: db.define_table('person',Field('name'),Field('info')) db.define_table('pet',Field('person',db.person),Field('name'),Field('info')) @request.restful() def index(): def GET(*kargs,**kvars): patterns = [ "/persons[person]", "/{person.name.startswith}", "/{person.name}/:field", "/{person.name}/pets[pet.person]", "/{person.name}/pet[pet.person]/{pet.name}", "/{person.name}/pet[pet.person]/{pet.name}/:field" ] parser = db.parse_as_rest(patterns,kargs,kvars) if parser.status == 200: return dict(content=parser.response) else: raise HTTP(parser.status,parser.error) def POST(table_name,**kvars): if table_name 
== 'person': return db.person.validate_and_insert(**kvars) elif table_name == 'pet': return db.pet.validate_and_insert(**kvars) else: raise HTTP(400) return locals() """ db = self re1 = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') re2 = re.compile('^.+\[.+\]$') def auto_table(table,base='',depth=0): patterns = [] for field in db[table].fields: if base: tag = '%s/%s' % (base,field.replace('_','-')) else: tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) f = db[table][field] if not f.readable: continue if f.type=='id' or 'slug' in field or f.type.startswith('reference'): tag += '/{%s.%s}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type.startswith('boolean'): tag += '/{%s.%s}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type.startswith('double') or f.type.startswith('integer'): tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type.startswith('list:'): tag += '/{%s.%s.contains}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type in ('date','datetime'): tag+= '/{%s.%s.year}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') tag+='/{%s.%s.month}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') tag+='/{%s.%s.day}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') if f.type in ('datetime','time'): tag+= '/{%s.%s.hour}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') tag+='/{%s.%s.minute}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') tag+='/{%s.%s.second}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') if depth>0: for rtable,rfield in db[table]._referenced_by: tag+='/%s[%s.%s]' % (rtable,rtable,rfield) patterns.append(tag) patterns += auto_table(rtable,base=tag,depth=depth-1) return patterns if 
patterns=='auto': patterns=[] for table in db.tables: if not table.startswith('auth_'): patterns += auto_table(table,base='',depth=1) else: i = 0 while i<len(patterns): pattern = patterns[i] tokens = pattern.split('/') if tokens[-1].startswith(':auto') and re2.match(tokens[-1]): new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],'/'.join(tokens[:-1])) patterns = patterns[:i]+new_patterns+patterns[i+1:] i += len(new_patterns) else: i += 1 if '/'.join(args) == 'patterns': return Row({'status':200,'pattern':'list', 'error':None,'response':patterns}) for pattern in patterns: otable=table=None dbset=db(query) i=0 tags = pattern[1:].split('/') # print pattern if len(tags)!=len(args): continue for tag in tags: # print i, tag, args[i] if re1.match(tag): # print 're1:'+tag tokens = tag[1:-1].split('.') table, field = tokens[0], tokens[1] if not otable or table == otable: if len(tokens)==2 or tokens[2]=='eq': query = db[table][field]==args[i] elif tokens[2]=='ne': query = db[table][field]!=args[i] elif tokens[2]=='lt': query = db[table][field]<args[i] elif tokens[2]=='gt': query = db[table][field]>args[i] elif tokens[2]=='ge': query = db[table][field]>=args[i] elif tokens[2]=='le': query = db[table][field]<=args[i] elif tokens[2]=='year': query = db[table][field].year()==args[i] elif tokens[2]=='month': query = db[table][field].month()==args[i] elif tokens[2]=='day': query = db[table][field].day()==args[i] elif tokens[2]=='hour': query = db[table][field].hour()==args[i] elif tokens[2]=='minute': query = db[table][field].minutes()==args[i] elif tokens[2]=='second': query = db[table][field].seconds()==args[i] elif tokens[2]=='startswith': query = db[table][field].startswith(args[i]) elif tokens[2]=='contains': query = db[table][field].contains(args[i]) else: raise RuntimeError, "invalid pattern: %s" % pattern if len(tokens)==4 and tokens[3]=='not': query = ~query elif len(tokens)>=4: raise RuntimeError, "invalid pattern: %s" % pattern dbset=dbset(query) else: 
raise RuntimeError, "missing relation in pattern: %s" % pattern elif otable and re2.match(tag) and args[i]==tag[:tag.find('[')]: # print 're2:'+tag ref = tag[tag.find('[')+1:-1] if '.' in ref: table,field = ref.split('.') # print table,field if nested_select: try: dbset=db(db[table][field].belongs(dbset._select(db[otable]._id))) except ValueError: return Row({'status':400,'pattern':pattern, 'error':'invalid path','response':None}) else: items = [item.id for item in dbset.select(db[otable]._id)] dbset=db(db[table][field].belongs(items)) else: dbset=dbset(db[ref]) elif tag==':field' and table: # # print 're3:'+tag field = args[i] if not field in db[table]: break try: item = dbset.select(db[table][field],limitby=(0,1)).first() except ValueError: return Row({'status':400,'pattern':pattern, 'error':'invalid path','response':None}) if not item: return Row({'status':404,'pattern':pattern, 'error':'record not found','response':None}) else: return Row({'status':200,'response':item[field], 'pattern':pattern}) elif tag != args[i]: break otable = table i += 1 if i==len(tags) and table: otable,ofield = vars.get('order','%s.%s' % (table,field)).split('.',1) try: if otable[:1]=='~': orderby = ~db[otable[1:]][ofield] else: orderby = db[otable][ofield] except KeyError: return Row({'status':400,'error':'invalid orderby','response':None}) fields = [field for field in db[table] if field.readable] count = dbset.count() try: limits = (int(vars.get('min',0)),int(vars.get('max',1000))) if limits[0]<0 or limits[1]<limits[0]: raise ValueError except ValueError: Row({'status':400,'error':'invalid limits','response':None}) if count > limits[1]-limits[0]: Row({'status':400,'error':'too many records','response':None}) try: response = dbset.select(limitby=limits,orderby=orderby,*fields) except ValueError: return Row({'status':400,'pattern':pattern, 'error':'invalid path','response':None}) return Row({'status':200,'response':response,'pattern':pattern}) return Row({'status':400,'error':'no 
matching pattern','response':None}) def define_table( self, tablename, *fields, **args ): for key in args: if key not in [ 'migrate', 'primarykey', 'fake_migrate', 'format', 'trigger_name', 'sequence_name', 'polymodel']: raise SyntaxError, 'invalid table "%s" attribute: %s' % (tablename, key) migrate = self._migrate_enabled and args.get('migrate',self._migrate) fake_migrate = self._fake_migrate_all or args.get('fake_migrate',self._fake_migrate) format = args.get('format',None) trigger_name = args.get('trigger_name', None) sequence_name = args.get('sequence_name', None) primarykey=args.get('primarykey',None) polymodel=args.get('polymodel',None) if not isinstance(tablename,str): raise SyntaxError, "missing table name" tablename = cleanup(tablename) lowertablename = tablename.lower() if tablename.startswith('_') or hasattr(self,lowertablename) or \ regex_python_keywords.match(tablename): raise SyntaxError, 'invalid table name: %s' % tablename elif lowertablename in self.tables: raise SyntaxError, 'table already defined: %s' % tablename elif self.check_reserved: self.check_reserved_keyword(tablename) if self._common_fields: fields = [f for f in fields] + [f for f in self._common_fields] t = self[tablename] = Table(self, tablename, *fields, **dict(primarykey=primarykey, trigger_name=trigger_name, sequence_name=sequence_name)) # db magic if self._uri in (None,'None'): return t t._create_references() if migrate or self._adapter.dbengine=='google:datastore': try: sql_locker.acquire() self._adapter.create_table(t,migrate=migrate, fake_migrate=fake_migrate, polymodel=polymodel) finally: sql_locker.release() else: t._dbt = None self.tables.append(tablename) t._format = format return t def __iter__(self): for tablename in self.tables: yield self[tablename] def __getitem__(self, key): return dict.__getitem__(self, str(key)) def __setitem__(self, key, value): dict.__setitem__(self, str(key), value) def __getattr__(self, key): return self[key] def __setattr__(self, key, value): 
if key[:1]!='_' and key in self: raise SyntaxError, \ 'Object %s exists and cannot be redefined' % key self[key] = value def __repr__(self): return '<DAL ' + dict.__repr__(self) + '>' def __call__(self, query=None): if isinstance(query,Table): query = query._id>0 elif isinstance(query,Field): query = query!=None return Set(self, query) def commit(self): self._adapter.commit() def rollback(self): self._adapter.rollback() def executesql(self, query, placeholders=None, as_dict=False): """ placeholders is optional and will always be None when using DAL if using raw SQL with placeholders, placeholders may be a sequence of values to be substituted in or, *if supported by the DB driver*, a dictionary with keys matching named placeholders in your SQL. Added 2009-12-05 "as_dict" optional argument. Will always be None when using DAL. If using raw SQL can be set to True and the results cursor returned by the DB driver will be converted to a sequence of dictionaries keyed with the db field names. Tested with SQLite but should work with any database since the cursor.description used to get field names is part of the Python dbi 2.0 specs. Results returned with as_dict = True are the same as those returned when applying .to_list() to a DAL query. [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] --bmeredyk """ if placeholders: self._adapter.execute(query, placeholders) else: self._adapter.execute(query) if as_dict: if not hasattr(self._adapter.cursor,'description'): raise RuntimeError, "database does not support executesql(...,as_dict=True)" # Non-DAL legacy db query, converts cursor results to dict. # sequence of 7-item sequences. each sequence tells about a column. 
# first item is always the field name according to Python Database API specs columns = self._adapter.cursor.description # reduce the column info down to just the field names fields = [f[0] for f in columns] # will hold our finished resultset in a list data = self._adapter.cursor.fetchall() # convert the list for each row into a dictionary so it's # easier to work with. row['field_name'] rather than row[0] return [dict(zip(fields,row)) for row in data] # see if any results returned from database try: return self._adapter.cursor.fetchall() except: return None def _update_referenced_by(self, other): for tablename in self.tables: by = self[tablename]._referenced_by by[:] = [item for item in by if not item[0] == other] def export_to_csv_file(self, ofile, *args, **kwargs): for table in self.tables: ofile.write('TABLE %s\r\n' % table) self(self[table]._id > 0).select().export_to_csv_file(ofile, *args, **kwargs) ofile.write('\r\n\r\n') ofile.write('END') def import_from_csv_file(self, ifile, id_map={}, null='<NULL>', unique='uuid', *args, **kwargs): for line in ifile: line = line.strip() if not line: continue elif line == 'END': return elif not line.startswith('TABLE ') or not line[6:] in self.tables: raise SyntaxError, 'invalid file format' else: tablename = line[6:] self[tablename].import_from_csv_file(ifile, id_map, null, unique, *args, **kwargs) class SQLALL(object): """ Helper class providing a comma-separated string having all the field names (prefixed by table name and '.') normally only called from within gluon.sql """ def __init__(self, table): self.table = table def __str__(self): return ', '.join([str(field) for field in self.table]) class Reference(int): def __allocate(self): if not self._record: self._record = self._table[int(self)] if not self._record: raise RuntimeError, "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self)) def __getattr__(self, key): if key == 'id': return int(self) self.__allocate() return 
self._record.get(key, None) def __setattr__(self, key, value): if key.startswith('_'): int.__setattr__(self, key, value) return self.__allocate() self._record[key] = value def __getitem__(self, key): if key == 'id': return int(self) self.__allocate() return self._record.get(key, None) def __setitem__(self,key,value): self.__allocate() self._record[key] = value def Reference_unpickler(data): return marshal.loads(data) def Reference_pickler(data): try: marshal_dump = marshal.dumps(int(data)) except AttributeError: marshal_dump = 'i%s' % struct.pack('<i', int(data)) return (Reference_unpickler, (marshal_dump,)) copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler) class Table(dict): """ an instance of this class represents a database table Example:: db = DAL(...) db.define_table('users', Field('name')) db.users.insert(name='me') # print db.users._insert(...) to see SQL db.users.drop() """ def __init__( self, db, tablename, *fields, **args ): """ Initializes the table and performs checking on the provided fields. Each table will have automatically an 'id'. If a field is of type Table, the fields (excluding 'id') from that table will be used instead. :raises SyntaxError: when a supplied field is of incorrect type. 
""" self._tablename = tablename self._sequence_name = args.get('sequence_name',None) or \ db and db._adapter.sequence_name(tablename) self._trigger_name = args.get('trigger_name',None) or \ db and db._adapter.trigger_name(tablename) primarykey = args.get('primarykey', None) fieldnames,newfields=set(),[] if primarykey: if not isinstance(primarykey,list): raise SyntaxError, \ "primarykey must be a list of fields from table '%s'" \ % tablename self._primarykey = primarykey elif not [f for f in fields if isinstance(f,Field) and f.type=='id']: field = Field('id', 'id') newfields.append(field) fieldnames.add('id') self._id = field for field in fields: if not isinstance(field, (Field, Table)): raise SyntaxError, \ 'define_table argument is not a Field or Table: %s' % field elif isinstance(field, Field) and not field.name in fieldnames: if hasattr(field, '_db'): field = copy.copy(field) newfields.append(field) fieldnames.add(field.name) if field.type=='id': self._id = field elif isinstance(field, Table): table = field for field in table: if not field.name in fieldnames and not field.type=='id': newfields.append(copy.copy(field)) fieldnames.add(field.name) else: # let's ignore new fields with duplicated names!!! 
pass fields = newfields self._db = db tablename = tablename self.fields = SQLCallableList() self.virtualfields = [] fields = list(fields) if db and self._db._adapter.uploads_in_blob==True: for field in fields: if isinstance(field, Field) and field.type == 'upload'\ and field.uploadfield is True: tmp = field.uploadfield = '%s_blob' % field.name fields.append(self._db.Field(tmp, 'blob', default='')) lower_fieldnames = set() reserved = dir(Table) + ['fields'] for field in fields: if db and db.check_reserved: db.check_reserved_keyword(field.name) elif field.name in reserved: raise SyntaxError, "field name %s not allowed" % field.name if field.name.lower() in lower_fieldnames: raise SyntaxError, "duplicate field %s in table %s" \ % (field.name, tablename) else: lower_fieldnames.add(field.name.lower()) self.fields.append(field.name) self[field.name] = field if field.type == 'id': self['id'] = field field.tablename = field._tablename = tablename field.table = field._table = self field.db = field._db = self._db if self._db and field.type!='text' and \ self._db._adapter.maxcharlength < field.length: field.length = self._db._adapter.maxcharlength if field.requires == DEFAULT: field.requires = sqlhtml_validators(field) self.ALL = SQLALL(self) if hasattr(self,'_primarykey'): for k in self._primarykey: if k not in self.fields: raise SyntaxError, \ "primarykey must be a list of fields from table '%s " % tablename else: self[k].notnull = True def _validate(self,**vars): errors = Row() for key,value in vars.items(): value,error = self[key].validate(value) if error: errors[key] = error return errors def _create_references(self): pr = self._db._pending_references self._referenced_by = [] for fieldname in self.fields: field=self[fieldname] if isinstance(field.type,str) and field.type[:10] == 'reference ': ref = field.type[10:].strip() if not ref.split(): raise SyntaxError, 'Table: reference to nothing: %s' %ref refs = ref.split('.') rtablename = refs[0] if not rtablename in self._db: 
pr[rtablename] = pr.get(rtablename,[]) + [field] continue rtable = self._db[rtablename] if len(refs)==2: rfieldname = refs[1] if not hasattr(rtable,'_primarykey'): raise SyntaxError,\ 'keyed tables can only reference other keyed tables (for now)' if rfieldname not in rtable.fields: raise SyntaxError,\ "invalid field '%s' for referenced table '%s' in table '%s'" \ % (rfieldname, rtablename, self._tablename) rtable._referenced_by.append((self._tablename, field.name)) for referee in pr.get(self._tablename,[]): self._referenced_by.append((referee._tablename,referee.name)) def _filter_fields(self, record, id=False): return dict([(k, v) for (k, v) in record.items() if k in self.fields and (self[k].type!='id' or id)]) def _build_query(self,key): """ for keyed table only """ query = None for k,v in key.iteritems(): if k in self._primarykey: if query: query = query & (self[k] == v) else: query = (self[k] == v) else: raise SyntaxError, \ 'Field %s is not part of the primary key of %s' % \ (k,self._tablename) return query def __getitem__(self, key): if not key: return None elif isinstance(key, dict): """ for keyed table """ query = self._build_query(key) rows = self._db(query).select() if rows: return rows[0] return None elif str(key).isdigit(): return self._db(self._id == key).select(limitby=(0,1)).first() elif key: return dict.__getitem__(self, str(key)) def __call__(self, key=DEFAULT, **kwargs): if key!=DEFAULT: if isinstance(key, Query): record = self._db(key).select(limitby=(0,1)).first() elif not str(key).isdigit(): record = None else: record = self._db(self._id == key).select(limitby=(0,1)).first() if record: for k,v in kwargs.items(): if record[k]!=v: return None return record elif kwargs: query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.items()]) return self._db(query).select(limitby=(0,1)).first() else: return None def __setitem__(self, key, value): if isinstance(key, dict) and isinstance(value, dict): """ option for keyed table """ if set(key.keys()) == 
set(self._primarykey): value = self._filter_fields(value) kv = {} kv.update(value) kv.update(key) if not self.insert(**kv): query = self._build_query(key) self._db(query).update(**self._filter_fields(value)) else: raise SyntaxError,\ 'key must have all fields from primary key: %s'%\ (self._primarykey) elif str(key).isdigit(): if key == 0: self.insert(**self._filter_fields(value)) elif not self._db(self._id == key)\ .update(**self._filter_fields(value)): raise SyntaxError, 'No such record: %s' % key else: if isinstance(key, dict): raise SyntaxError,\ 'value must be a dictionary: %s' % value dict.__setitem__(self, str(key), value) def __delitem__(self, key): if isinstance(key, dict): query = self._build_query(key) if not self._db(query).delete(): raise SyntaxError, 'No such record: %s' % key elif not str(key).isdigit() or not self._db(self._id == key).delete(): raise SyntaxError, 'No such record: %s' % key def __getattr__(self, key): return self[key] def __setattr__(self, key, value): if key in self: raise SyntaxError, 'Object exists and cannot be redefined: %s' % key self[key] = value def __iter__(self): for fieldname in self.fields: yield self[fieldname] def __repr__(self): return '<Table ' + dict.__repr__(self) + '>' def __str__(self): if self.get('_ot', None): return '%s AS %s' % (self._ot, self._tablename) return self._tablename def _drop(self, mode = ''): return self._db._adapter._drop(self, mode) def drop(self, mode = ''): return self._db._adapter.drop(self,mode) def _listify(self,fields,update=False): new_fields = [] new_fields_names = [] for name in fields: if not name in self.fields: if name != 'id': raise SyntaxError, 'Field %s does not belong to the table' % name else: new_fields.append((self[name],fields[name])) new_fields_names.append(name) for ofield in self: if not ofield.name in new_fields_names: if not update and ofield.default!=None: new_fields.append((ofield,ofield.default)) elif update and ofield.update!=None: 
new_fields.append((ofield,ofield.update)) for ofield in self: if not ofield.name in new_fields_names and ofield.compute: try: new_fields.append((ofield,ofield.compute(Row(fields)))) except KeyError: pass if not update and ofield.required and not ofield.name in new_fields_names: raise SyntaxError,'Table: missing required field: %s' % ofield.name return new_fields def _insert(self, **fields): return self._db._adapter._insert(self,self._listify(fields)) def insert(self, **fields): return self._db._adapter.insert(self,self._listify(fields)) def validate_and_insert(self,**fields): response = Row() response.errors = self._validate(**fields) if not response.errors: response.id = self.insert(**fields) else: response.id = None return response def update_or_insert(self, key=DEFAULT, **values): if key==DEFAULT: record = self(**values) else: record = self(key) if record: record.update_record(**values) newid = None else: newid = self.insert(**values) return newid def bulk_insert(self, items): """ here items is a list of dictionaries """ items = [self._listify(item) for item in items] return self._db._adapter.bulk_insert(self,items) def _truncate(self, mode = None): return self._db._adapter._truncate(self, mode) def truncate(self, mode = None): return self._db._adapter.truncate(self, mode) def import_from_csv_file( self, csvfile, id_map=None, null='<NULL>', unique='uuid', *args, **kwargs ): """ import records from csv file. Column headers must have same names as table fields. field 'id' is ignored. If column names read 'table.file' the 'table.' prefix is ignored. 
'unique' argument is a field which must be unique (typically a uuid field) """ delimiter = kwargs.get('delimiter', ',') quotechar = kwargs.get('quotechar', '"') quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting) colnames = None if isinstance(id_map, dict): if not self._tablename in id_map: id_map[self._tablename] = {} id_map_self = id_map[self._tablename] def fix(field, value, id_map): if value == null: value = None elif field.type=='blob': value = base64.b64decode(value) elif field.type=='double': if not value.strip(): value = None else: value = float(value) elif field.type=='integer': if not value.strip(): value = None else: value = int(value) elif field.type.startswith('list:string'): value = bar_decode_string(value) elif field.type.startswith('list:reference'): ref_table = field.type[10:].strip() value = [id_map[ref_table][int(v)] \ for v in bar_decode_string(value)] elif field.type.startswith('list:'): value = bar_decode_integer(value) elif id_map and field.type.startswith('reference'): try: value = id_map[field.type[9:].strip()][value] except KeyError: pass return (field.name, value) def is_id(colname): if colname in self: return self[colname].type == 'id' else: return False for line in reader: if not line: break if not colnames: colnames = [x.split('.',1)[-1] for x in line][:len(line)] cols, cid = [], [] for i,colname in enumerate(colnames): if is_id(colname): cid = i else: cols.append(i) if colname == unique: unique_idx = i else: items = [fix(self[colnames[i]], line[i], id_map) \ for i in cols if colnames[i] in self.fields] # Validation. Check for duplicate of 'unique' &, # if present, update instead of insert. 
if not unique or unique not in colnames: new_id = self.insert(**dict(items)) else: unique_value = line[unique_idx] query = self._db[self][unique] == unique_value record = self._db(query).select().first() if record: record.update_record(**dict(items)) new_id = record[self._id.name] else: new_id = self.insert(**dict(items)) if id_map and cid != []: id_map_self[line[cid]] = new_id def with_alias(self, alias): return self._db._adapter.alias(self,alias) def on(self, query): return Expression(self._db,self._db._adapter.ON,self,query) class Expression(object): def __init__( self, db, op, first=None, second=None, type=None, ): self.db = db self.op = op self.first = first self.second = second ### self._tablename = first._tablename ## CHECK if not type and first and hasattr(first,'type'): self.type = first.type else: self.type = type def sum(self): return Expression(self.db, self.db._adapter.AGGREGATE, self, 'SUM', self.type) def max(self): return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MAX', self.type) def min(self): return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MIN', self.type) def len(self): return Expression(self.db, self.db._adapter.AGGREGATE, self, 'LENGTH', 'integer') def lower(self): return Expression(self.db, self.db._adapter.LOWER, self, None, self.type) def upper(self): return Expression(self.db, self.db._adapter.UPPER, self, None, self.type) def year(self): return Expression(self.db, self.db._adapter.EXTRACT, self, 'year', 'integer') def month(self): return Expression(self.db, self.db._adapter.EXTRACT, self, 'month', 'integer') def day(self): return Expression(self.db, self.db._adapter.EXTRACT, self, 'day', 'integer') def hour(self): return Expression(self.db, self.db._adapter.EXTRACT, self, 'hour', 'integer') def minutes(self): return Expression(self.db, self.db._adapter.EXTRACT, self, 'minute', 'integer') def coalesce_zero(self): return Expression(self.db, self.db._adapter.COALESCE_ZERO, self, None, self.type) def seconds(self): 
return Expression(self.db, self.db._adapter.EXTRACT, self, 'second', 'integer') def __getslice__(self, start, stop): if start < 0: pos0 = '(%s - %d)' % (self.len(), abs(start) - 1) else: pos0 = start + 1 if stop < 0: length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0) elif stop == sys.maxint: length = self.len() else: length = '(%s - %s)' % (stop + 1, pos0) return Expression(self.db,self.db._adapter.SUBSTRING, self, (pos0, length), self.type) def __getitem__(self, i): return self[i:i + 1] def __str__(self): return self.db._adapter.expand(self,self.type) def __or__(self, other): # for use in sortby return Expression(self.db,self.db._adapter.COMMA,self,other,self.type) def __invert__(self): if hasattr(self,'_op') and self.op == self.db._adapter.INVERT: return self.first return Expression(self.db,self.db._adapter.INVERT,self,type=self.type) def __add__(self, other): return Expression(self.db,self.db._adapter.ADD,self,other,self.type) def __sub__(self, other): if self.type == 'integer': result_type = 'integer' elif self.type in ['date','time','datetime','double']: result_type = 'double' else: raise SyntaxError, "subtraction operation not supported for type" return Expression(self.db,self.db._adapter.SUB,self,other, result_type) def __mul__(self, other): return Expression(self.db,self.db._adapter.MUL,self,other,self.type) def __div__(self, other): return Expression(self.db,self.db._adapter.DIV,self,other,self.type) def __mod__(self, other): return Expression(self.db,self.db._adapter.MOD,self,other,self.type) def __eq__(self, value): return Query(self.db, self.db._adapter.EQ, self, value) def __ne__(self, value): return Query(self.db, self.db._adapter.NE, self, value) def __lt__(self, value): return Query(self.db, self.db._adapter.LT, self, value) def __le__(self, value): return Query(self.db, self.db._adapter.LE, self, value) def __gt__(self, value): return Query(self.db, self.db._adapter.GT, self, value) def __ge__(self, value): return Query(self.db, 
self.db._adapter.GE, self, value) def like(self, value): return Query(self.db, self.db._adapter.LIKE, self, value) def belongs(self, value): return Query(self.db, self.db._adapter.BELONGS, self, value) def startswith(self, value): if not self.type in ('string', 'text'): raise SyntaxError, "startswith used with incompatible field type" return Query(self.db, self.db._adapter.STARTSWITH, self, value) def endswith(self, value): if not self.type in ('string', 'text'): raise SyntaxError, "endswith used with incompatible field type" return Query(self.db, self.db._adapter.ENDSWITH, self, value) def contains(self, value): if not self.type in ('string', 'text') and not self.type.startswith('list:'): raise SyntaxError, "contains used with incompatible field type" return Query(self.db, self.db._adapter.CONTAINS, self, value) def with_alias(self,alias): return Expression(self.db,self.db._adapter.AS,self,alias,self.type) # for use in both Query and sortby class SQLCustomType(object): """ allows defining of custom SQL types Example:: decimal = SQLCustomType( type ='double', native ='integer', encoder =(lambda x: int(float(x) * 100)), decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) ) db.define_table( 'example', Field('value', type=decimal) ) :param type: the web2py type (default = 'string') :param native: the backend type :param encoder: how to encode the value to store it in the backend :param decoder: how to decode the value retrieved from the backend :param validator: what validators to use ( default = None, will use the default validator for type) """ def __init__( self, type='string', native=None, encoder=None, decoder=None, validator=None, _class=None, ): self.type = type self.native = native self.encoder = encoder or (lambda x: x) self.decoder = decoder or (lambda x: x) self.validator = validator self._class = _class or type def startswith(self, dummy=None): return False def __getslice__(self, a=0, b=100): return None def __getitem__(self, i): return 
None def __str__(self): return self._class class Field(Expression): """ an instance of this class represents a database field example:: a = Field(name, 'string', length=32, default=None, required=False, requires=IS_NOT_EMPTY(), ondelete='CASCADE', notnull=False, unique=False, uploadfield=True, widget=None, label=None, comment=None, uploadfield=True, # True means store on disk, # 'a_field_name' means store in this field in db # False means file content will be discarded. writable=True, readable=True, update=None, authorize=None, autodelete=False, represent=None, uploadfolder=None, uploadseparate=False # upload to separate directories by uuid_keys # first 2 character and tablename.fieldname # False - old behavior # True - put uploaded file in # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2] # directory) to be used as argument of DAL.define_table allowed field types: string, boolean, integer, double, text, blob, date, time, datetime, upload, password strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql) fields should have a default or they will be required in SQLFORMs the requires argument is used to validate the field input in SQLFORMs """ def __init__( self, fieldname, type='string', length=None, default=DEFAULT, required=False, requires=DEFAULT, ondelete='CASCADE', notnull=False, unique=False, uploadfield=True, widget=None, label=None, comment=None, writable=True, readable=True, update=None, authorize=None, autodelete=False, represent=None, uploadfolder=None, uploadseparate=False, compute=None, custom_store=None, custom_retrieve=None, custom_delete=None, ): self.db = None self.op = None self.first = None self.second = None if not isinstance(fieldname,str): raise SyntaxError, "missing field name" if fieldname.startswith(':'): fieldname,readable,writable=fieldname[1:],False,False elif fieldname.startswith('.'): fieldname,readable,writable=fieldname[1:],False,False if '=' in fieldname: fieldname,default = fieldname.split('=',1) 
    def store(self, file, filename=None, path=None):
        """
        Save an uploaded file and return the encoded name under which it
        was stored.

        The generated name packs tablename, fieldname, a uuid key and the
        b16-encoded original filename, so retrieve() can later recover the
        original client filename from the stored name.

        :param file: open file-like object holding the upload content
        :param filename: original client filename (defaults to file.name)
        :param path: destination directory; overrides uploadfolder when given
        """
        # delegate entirely when a custom storage callback was configured
        if self.custom_store:
            return self.custom_store(file,filename,path)
        if not filename:
            filename = file.name
        # normalize both separator styles, then drop any directory part
        filename = os.path.basename(filename.replace('/', os.sep)\
                                        .replace('\\', os.sep))
        m = re.compile('\.(?P<e>\w{1,5})$').search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        # b16 keeps the stored name filesystem/DB safe but reversible
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        # cap the name length, then re-attach the real extension
        newfilename = newfilename[:200] + '.' + extension
        if isinstance(self.uploadfield,Field):
            # file content goes into a blob field of another table
            blob_uploadfield_name = self.uploadfield.uploadfield
            keys={self.uploadfield.name: newfilename,
                  blob_uploadfield_name: file.read()}
            self.uploadfield.table.insert(**keys)
        elif self.uploadfield == True:
            # file content goes on the filesystem; resolve target directory
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = os.path.join(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError, "you must specify a Field(...,uploadfolder=...)"
            if self.uploadseparate:
                # shard uploads into per-field subdirectories by uuid prefix
                path = os.path.join(path,"%s.%s" % (self._tablename,
                                                    self.name),uuid_key[:2])
            if not os.path.exists(path):
                os.makedirs(path)
            pathfilename = os.path.join(path, newfilename)
            dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            finally:
                # always close the destination even if the copy fails
                dest_file.close()
        return newfilename
class Query(object):
    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()
    """

    def __init__(self, db, op, first=None, second=None):
        self.db = db
        self.op = op
        self.first = first
        self.second = second

    def __str__(self):
        # SQL generation is entirely delegated to the adapter
        return self.db._adapter.expand(self)

    def __and__(self, other):
        # (q1 & q2) builds an AND node with both queries as children
        adapter = self.db._adapter
        return Query(self.db, adapter.AND, self, other)

    def __or__(self, other):
        # (q1 | q2) builds an OR node with both queries as children
        adapter = self.db._adapter
        return Query(self.db, adapter.OR, self, other)

    def __invert__(self):
        # double negation collapses back to the original query
        adapter = self.db._adapter
        if self.op == adapter.NOT:
            return self.first
        return Query(self.db, adapter.NOT, self)
    def __init__(self, db, query):
        self.db = db
        self._db = db # for backward compatibility
        self.query = query

    def __call__(self, query):
        """Refine this Set with a further condition (returns a new Set)."""
        if isinstance(query,Table):
            # a bare table means "all records of that table"
            query = query._id>0
        elif isinstance(query,Field):
            # a bare field is turned into a condition via the field's
            # overloaded != operator -- presumably "field is not NULL";
            # the operator itself is defined on Expression (not shown here)
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query)
        else:
            return Set(self.db, query)

    def _count(self,distinct=None):
        # _-prefixed variants delegate to adapter._count; by naming
        # convention they build SQL without executing it -- confirm
        # against the adapter implementation
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        # see note on _count about the _-prefixed variants
        return self.db._adapter._select(self.query,fields,attributes)

    def _delete(self):
        # see note on _count about the _-prefixed variants
        tablename=self.db._adapter.get_table(self.query)
        return self.db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        # see note on _count about the _-prefixed variants
        tablename = self.db._adapter.get_table(self.query)
        fields = self.db[tablename]._listify(update_fields,update=True)
        return self.db._adapter._update(tablename,self.query,fields)

    def isempty(self):
        # fetch at most one record to test for existence cheaply
        return not self.select(limitby=(0,1))

    def count(self,distinct=None):
        return self.db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        """Execute the query and return the matching records."""
        return self.db._adapter.select(self.query,fields,attributes)

    def delete(self):
        """Delete all records in the set (removing autodelete uploads)."""
        tablename=self.db._adapter.get_table(self.query)
        self.delete_uploaded_files()
        return self.db._adapter.delete(tablename,self.query)

    def update(self, **update_fields):
        """Update all records in the set with the given field values."""
        tablename = self.db._adapter.get_table(self.query)
        fields = self.db[tablename]._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError, "No fields to update"
        # remove stored files that the update is about to orphan
        self.delete_uploaded_files(update_fields)
        return self.db._adapter.update(tablename,self.query,fields)
def update_record(pack, a=None):
    """
    Persist field values for one record and mirror them into its cached
    column set.

    :param pack: 3-tuple ``(colset, table, id)`` -- the cached row values,
        the table the record belongs to, and the record id.
    :param a: optional dict of new values; when empty/None the current
        colset values are re-written (default was a mutable ``{}``;
        ``None`` behaves identically since ``{} or x == x``).
    """
    (colset, table, record_id) = pack
    values = a or dict(colset)
    # only real, writable columns: skip unknown keys and the id field
    updatable = dict((k, v) for (k, v) in values.items()
                     if k in table.fields and table[k].type != 'id')
    table._db(table._id == record_id).update(**updatable)
    # keep the cached colset in sync with what was written
    for (k, v) in updatable.items():
        colset[k] = v
""" # ## TODO: this class still needs some work to care for ID/OID def __init__( self, db=None, records=[], colnames=[], compact=True, rawrows=None ): self.db = db self.records = records self.colnames = colnames self.compact = compact self.response = rawrows def setvirtualfields(self,**keyed_virtualfields): if not keyed_virtualfields: return self for row in self.records: for (tablename,virtualfields) in keyed_virtualfields.items(): attributes = dir(virtualfields) virtualfields.__dict__.update(row) if not tablename in row: box = row[tablename] = Row() else: box = row[tablename] for attribute in attributes: if attribute[0] != '_': method = getattr(virtualfields,attribute) if hasattr(method,'im_func') and method.im_func.func_code.co_argcount: box[attribute]=method() return self def __and__(self,other): if self.colnames!=other.colnames: raise Exception, 'Cannot & incompatible Rows objects' records = self.records+other.records return Rows(self.db,records,self.colnames) def __or__(self,other): if self.colnames!=other.colnames: raise Exception, 'Cannot | incompatible Rows objects' records = self.records records += [record for record in other.records \ if not record in records] return Rows(self.db,records,self.colnames) def __nonzero__(self): if len(self.records): return 1 return 0 def __len__(self): return len(self.records) def __getslice__(self, a, b): return Rows(self.db,self.records[a:b],self.colnames) def __getitem__(self, i): row = self.records[i] keys = row.keys() if self.compact and len(keys) == 1 and keys[0] != '_extra': return row[row.keys()[0]] return row def __iter__(self): """ iterator over records """ for i in xrange(len(self)): yield self[i] def __str__(self): """ serializes the table into a csv file """ s = cStringIO.StringIO() self.export_to_csv_file(s) return s.getvalue() def first(self): if not self.records: return None return self[0] def last(self): if not self.records: return None return self[-1] def find(self,f): """ returns a new Rows object, a 
    def exclude(self, f):
        """
        removes elements from the calling Rows object, filtered by the
        function f, and returns a new Rows object containing the removed
        elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        removed = []
        # manual index + while loop because the list shrinks in place:
        # deleting element i slides the next element into position i,
        # so i only advances when nothing was removed
        i=0
        while i<len(self):
            # self[i] applies the compact single-table unwrapping for the
            # predicate; self.records[i] is the raw stored row we keep
            row = self[i]
            if f(row):
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames)
    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)

        This will only work when exporting rows objects!!!!
        DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        # a proper csv starting with the column names
        writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                # references export as their integer id
                return int(value)
            elif hasattr(value, 'isoformat'):
                # date/datetime/time: isoformat truncated to seconds,
                # with the 'T' separator replaced by a space
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                if not table_field.match(col):
                    # not a 'table.field' name: value lives in _extra
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    # the record may or may not be nested by table name,
                    # depending on whether the select was compacted
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type=='blob' and value!=None:
                        # blobs are base64-armored so they survive csv
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)
def Rows_unpickler(data):
    """Reconstruct the payload produced by Rows_pickler."""
    return cPickle.loads(data)

def Rows_pickler(data):
    """
    Reduce a Rows object for pickling.

    The Rows is serialized as a plain list of dicts (the live db handle
    is dropped); unpickling therefore yields that list, not a Rows
    instance.
    """
    return Rows_unpickler, \
        (cPickle.dumps(data.as_list(storage_to_dict=True,
                                    datetime_to_str=False)),)

# register the custom reduce function for Rows with the pickle machinery
copy_reg.pickle(Rows, Rows_pickler, Rows_unpickler)
len(db().select(db.person.ALL)) commented 2 >>> me = db(db.person.id==person_id).select()[0] # test select >>> me.name 'Massimo' >>> db(db.person.name=='Massimo').update(name='massimo') # test update 1 >>> db(db.person.name=='Marco').select().first().delete_record() # test delete 1 Update a single record >>> me.update_record(name=\"Max\") >>> me.name 'Max' Examples of complex search conditions >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select()) 1 >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select()) 1 >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select()) 1 >>> me = db(db.person.id==person_id).select(db.person.name)[0] >>> me.name 'Max' Examples of search conditions using extract from date/datetime/time >>> len(db(db.person.birth.month()==12).select()) 1 >>> len(db(db.person.birth.year()>1900).select()) 1 Example of usage of NULL >>> len(db(db.person.birth==None).select()) ### test NULL 0 >>> len(db(db.person.birth!=None).select()) ### test NULL 1 Examples of search conditions using lower, upper, and like >>> len(db(db.person.name.upper()=='MAX').select()) 1 >>> len(db(db.person.name.like('%ax')).select()) 1 >>> len(db(db.person.name.upper().like('%AX')).select()) 1 >>> len(db(~db.person.name.upper().like('%AX')).select()) 0 orderby, groupby and limitby >>> people = db().select(db.person.name, orderby=db.person.name) >>> order = db.person.name|~db.person.birth >>> people = db().select(db.person.name, orderby=order) >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name) >>> people = db().select(db.person.name, orderby=order, limitby=(0,100)) Example of one 2 many relation >>> tmp = db.define_table('dog',\ Field('name'),\ Field('birth','date'),\ Field('owner',db.person),\ migrate='test_dog.table') >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id) 1 A simple JOIN >>> len(db(db.dog.owner==db.person.id).select()) 1 >>> 
len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id))) 1 Drop tables >>> db.dog.drop() >>> db.person.drop() Example of many 2 many relation and Set >>> tmp = db.define_table('author', Field('name'),\ migrate='test_author.table') >>> tmp = db.define_table('paper', Field('title'),\ migrate='test_paper.table') >>> tmp = db.define_table('authorship',\ Field('author_id', db.author),\ Field('paper_id', db.paper),\ migrate='test_authorship.table') >>> aid = db.author.insert(name='Massimo') >>> pid = db.paper.insert(title='QCD') >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid) Define a Set >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id)) >>> rows = authored_papers.select(db.author.name, db.paper.title) >>> for row in rows: print row.author.name, row.paper.title Massimo QCD Example of search condition using belongs >>> set = (1, 2, 3) >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL) >>> print rows[0].title QCD Example of search condition using nested select >>> nested_select = db()._select(db.authorship.paper_id) >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL) >>> print rows[0].title QCD Example of expressions >>> mynumber = db.define_table('mynumber', Field('x', 'integer')) >>> db(mynumber.id>0).delete() 0 >>> for i in range(10): tmp = mynumber.insert(x=i) >>> db(mynumber.id>0).select(mynumber.x.sum())[0](mynumber.x.sum()) 45 >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2) 5 Output in csv >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip() author.name,paper.title\r Massimo,QCD Delete all leftover tables >>> DAL.distributed_transaction_commit(db) >>> db.mynumber.drop() >>> db.authorship.drop() >>> db.author.drop() >>> db.paper.drop() """ ################################################################################ # deprecated since the new DAL; here only for backward compatibility 
################################################################################ SQLField = Field SQLTable = Table SQLXorable = Expression SQLQuery = Query SQLSet = Set SQLRows = Rows SQLStorage = Row SQLDB = DAL GQLDB = DAL DAL.Field = Field # was necessary in gluon/globals.py session.connect DAL.Table = Table # was necessary in gluon/globals.py session.connect ################################################################################ # run tests ################################################################################ if __name__ == '__main__': import doctest doctest.testmod()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- import __builtin__ import os import re import sys import threading # Install the new import function: def custom_import_install(web2py_path): global _web2py_importer global _web2py_path if _web2py_importer: return # Already installed _web2py_path = web2py_path _web2py_importer = _Web2pyImporter(web2py_path) __builtin__.__import__ = _web2py_importer def is_tracking_changes(): """ @return: True: neo_importer is tracking changes made to Python source files. False: neo_import does not reload Python modules. """ global _is_tracking_changes return _is_tracking_changes def track_changes(track=True): """ Tell neo_importer to start/stop tracking changes made to Python modules. @param track: True: Start tracking changes. False: Stop tracking changes. """ global _is_tracking_changes global _web2py_importer global _web2py_date_tracker_importer assert track is True or track is False, "Boolean expected." if track == _is_tracking_changes: return if track: if not _web2py_date_tracker_importer: _web2py_date_tracker_importer = \ _Web2pyDateTrackerImporter(_web2py_path) __builtin__.__import__ = _web2py_date_tracker_importer else: __builtin__.__import__ = _web2py_importer _is_tracking_changes = track _STANDARD_PYTHON_IMPORTER = __builtin__.__import__ # Keep standard importer _web2py_importer = None # The standard web2py importer _web2py_date_tracker_importer = None # The web2py importer with date tracking _web2py_path = None # Absolute path of the web2py directory _is_tracking_changes = False # The tracking mode class _BaseImporter(object): """ The base importer. Dispatch the import the call to the standard Python importer. """ def begin(self): """ Many imports can be made for a single import statement. This method help the management of this aspect. """ def __call__(self, name, globals={}, locals={}, fromlist=[], level=-1): """ The import method itself. 
    def __call__(self, name, globals={}, locals={}, fromlist=[], level=-1):
        """
        The import method itself.

        Wraps the base import with date tracking: module file dates are
        refreshed before and after the actual import, so a module loaded
        for the first time also gets a recorded timestamp.

        NOTE(review): the mutable default arguments are shared across
        calls, but they are only passed through, never mutated here.
        """
        # begin()/end() bracket only the outermost import of a chain;
        # nested imports reuse the already-initialized per-thread set
        call_begin_end = self._tl._modules_loaded == None
        if call_begin_end:
            self.begin()
        try:
            # stash the import context so _reload_check can re-import
            self._tl.globals = globals
            self._tl.locals = locals
            self._tl.level = level
            # Check the date and reload if needed:
            self._update_dates(name, fromlist)
            # Try to load the module and update the dates if it works:
            result = super(_DateTrackerImporter, self) \
                .__call__(name, globals, locals, fromlist, level)
            # Module maybe loaded for the 1st time so we need to set the date
            self._update_dates(name, fromlist)
            return result
        except Exception, e:
            raise e # Don't hide something that went wrong
        finally:
            if call_begin_end:
                self.end()
    @classmethod
    def _get_module_file(cls, module):
        """
        Get the absolute path file associated to the module or None.

        Always returns the source (.py) path, even when the module was
        loaded from a .pyc; for packages, returns the package directory.
        """
        file = getattr(module, "__file__", None)
        if file:
            # Make path absolute if not:
            #file = os.path.join(cls.web2py_path, file)
            file = os.path.splitext(file)[0]+".py" # Change .pyc for .py
            if file.endswith(cls._PACKAGE_PATH_SUFFIX):
                file = os.path.dirname(file) # Track dir for packages
        return file
    def _matchAppDir(self, file_path):
        """
        Does the file in a directory inside the "applications" directory?

        Returns the regex match object (truthy; group(1) is the
        'applications<sep><app_name>' part) when *file_path* lives under
        the web2py applications tree, otherwise False.
        """
        if file_path.startswith(self.__web2py_path_os_path_sep):
            # strip the leading '<web2py_path><sep>' prefix before matching
            file_path = file_path[self.__web2py_path_os_path_sep_len:]
            return self.__RE_APP_DIR.match(file_path)
        return False
    def __import__dot(self, prefix, name, globals, locals, fromlist, level):
        """
        Here we will import x.y.z as many imports like:
        from applications.app_name.modules import x
        from applications.app_name.modules.x import y
        from applications.app_name.modules.x.y import z.
        x will be the module returned.
        """
        result = None
        # NOTE: the loop variable deliberately shadows the 'name'
        # parameter; each pass imports one more dotted component
        for name in name.split("."):
            new_mod = super(_Web2pyImporter, self).__call__(prefix, globals,
                locals, [name], level)
            try:
                # 'result or ...' keeps the first component: that is what
                # a plain 'import x.y.z' binds in the caller's namespace
                result = result or new_mod.__dict__[name]
            except KeyError:
                raise ImportError()
            prefix += "." + name
        return result
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Basic caching classes and methods ================================= - Cache - The generic caching object interfacing with the others - CacheInRam - providing caching in ram - CacheInDisk - provides caches on disk Memcache is also available via a different module (see gluon.contrib.memcache) When web2py is running on Google App Engine, caching will be provided by the GAE memcache (see gluon.contrib.gae_memcache) """ import time import portalocker import shelve import thread import os import logging import re logger = logging.getLogger("web2py.cache") __all__ = ['Cache'] DEFAULT_TIME_EXPIRE = 300 class CacheAbstract(object): """ Abstract class for cache implementations. Main function is now to provide referenced api documentation. Use CacheInRam or CacheOnDisk instead which are derived from this class. """ cache_stats_name = 'web2py_cache_statistics' def __init__(self, request=None): """ Paremeters ---------- request: the global request object """ raise NotImplementedError def __call__(self, key, f, time_expire = DEFAULT_TIME_EXPIRE): """ Tries retrieve the value corresponding to `key` from the cache of the object exists and if it did not expire, else it called the function `f` and stores the output in the cache corresponding to `key`. In the case the output of the function is returned. :param key: the key of the object to be store or retrieved :param f: the function, whose output is to be cached :param time_expire: expiration of the cache in microseconds - `time_expire` is used to compare the current time with the time when the requested object was last saved in cache. It does not affect future requests. - Setting `time_expire` to 0 or negative value forces the cache to refresh. If the function `f` is `None` the cache is cleared. 
""" raise NotImplementedError def clear(self, regex=None): """ Clears the cache of all keys that match the provided regular expression. If no regular expression is provided, it clears all entries in cache. Parameters ---------- regex: if provided, only keys matching the regex will be cleared. Otherwise all keys are cleared. """ raise NotImplementedError def increment(self, key, value=1): """ Increments the cached value for the given key by the amount in value Parameters ---------- key: key for the cached object to be incremeneted value: amount of the increment (defaults to 1, can be negative) """ raise NotImplementedError def _clear(self, storage, regex): """ Auxiliary function called by `clear` to search and clear cache entries """ r = re.compile(regex) for (key, value) in storage.items(): if r.match(str(key)): del storage[key] class CacheInRam(CacheAbstract): """ Ram based caching This is implemented as global (per process, shared by all threads) dictionary. A mutex-lock mechanism avoid conflicts. """ locker = thread.allocate_lock() meta_storage = {} def __init__(self, request=None): self.locker.acquire() self.request = request if request: app = request.application else: app = '' if not app in self.meta_storage: self.storage = self.meta_storage[app] = {CacheAbstract.cache_stats_name: { 'hit_total': 0, 'misses': 0, }} else: self.storage = self.meta_storage[app] self.locker.release() def clear(self, regex=None): self.locker.acquire() storage = self.storage if regex == None: storage.clear() else: self._clear(storage, regex) if not CacheAbstract.cache_stats_name in storage.keys(): storage[CacheAbstract.cache_stats_name] = { 'hit_total': 0, 'misses': 0, } self.locker.release() def __call__(self, key, f, time_expire = DEFAULT_TIME_EXPIRE): """ Attention! cache.ram does not copy the cached object. It just stores a reference to it. 
Turns out the deepcopying the object has some problems: 1) would break backward compatibility 2) would be limiting because people may want to cache live objects 3) would work unless we deepcopy no storage and retrival which would make things slow. Anyway. You can deepcopy explicitly in the function generating the value to be cached. """ dt = time_expire self.locker.acquire() item = self.storage.get(key, None) if item and f == None: del self.storage[key] self.storage[CacheAbstract.cache_stats_name]['hit_total'] += 1 self.locker.release() if f is None: return None if item and (dt == None or item[0] > time.time() - dt): return item[1] value = f() self.locker.acquire() self.storage[key] = (time.time(), value) self.storage[CacheAbstract.cache_stats_name]['misses'] += 1 self.locker.release() return value def increment(self, key, value=1): self.locker.acquire() try: if key in self.storage: value = self.storage[key][1] + value self.storage[key] = (time.time(), value) except BaseException, e: self.locker.release() raise e self.locker.release() return value class CacheOnDisk(CacheAbstract): """ Disk based cache This is implemented as a shelve object and it is shared by multiple web2py processes (and threads) as long as they share the same filesystem. The file is locked wen accessed. Disk cache provides persistance when web2py is started/stopped but it slower than `CacheInRam` Values stored in disk cache must be pickable. 
""" speedup_checks = set() def __init__(self, request, folder=None): self.request = request # Lets test if the cache folder exists, if not # we are going to create it folder = folder or os.path.join(request.folder, 'cache') if not os.path.exists(folder): os.mkdir(folder) ### we need this because of a possible bug in shelve that may ### or may not lock self.locker_name = os.path.join(folder,'cache.lock') self.shelve_name = os.path.join(folder,'cache.shelve') locker, locker_locked = None, False speedup_key = (folder,CacheAbstract.cache_stats_name) if not speedup_key in self.speedup_checks or \ not os.path.exists(self.shelve_name): try: locker = open(self.locker_name, 'a') portalocker.lock(locker, portalocker.LOCK_EX) locker_locked = True storage = shelve.open(self.shelve_name) try: if not storage.has_key(CacheAbstract.cache_stats_name): storage[CacheAbstract.cache_stats_name] = { 'hit_total': 0, 'misses': 0, } storage.sync() finally: storage.close() self.speedup_checks.add(speedup_key) except ImportError: pass # no module _bsddb, ignoring exception now so it makes a ticket only if used except: logger.error('corrupted file %s, will try delete it!' 
\ % self.shelve_name) try: os.unlink(self.shelve_name) except IOError: logger.warn('unable to delete file %s' % self.shelve_name) if locker_locked: portalocker.unlock(locker) if locker: locker.close() def clear(self, regex=None): locker = open(self.locker_name,'a') portalocker.lock(locker, portalocker.LOCK_EX) storage = shelve.open(self.shelve_name) try: if regex == None: storage.clear() else: self._clear(storage, regex) if not CacheAbstract.cache_stats_name in storage.keys(): storage[CacheAbstract.cache_stats_name] = { 'hit_total': 0, 'misses': 0, } storage.sync() finally: storage.close() portalocker.unlock(locker) locker.close() def __call__(self, key, f, time_expire = DEFAULT_TIME_EXPIRE): dt = time_expire locker = open(self.locker_name,'a') portalocker.lock(locker, portalocker.LOCK_EX) storage = shelve.open(self.shelve_name) item = storage.get(key, None) if item and f == None: del storage[key] storage[CacheAbstract.cache_stats_name] = { 'hit_total': storage[CacheAbstract.cache_stats_name]['hit_total'] + 1, 'misses': storage[CacheAbstract.cache_stats_name]['misses'] } storage.sync() portalocker.unlock(locker) locker.close() if f is None: return None if item and (dt == None or item[0] > time.time() - dt): return item[1] value = f() locker = open(self.locker_name,'a') portalocker.lock(locker, portalocker.LOCK_EX) storage[key] = (time.time(), value) storage[CacheAbstract.cache_stats_name] = { 'hit_total': storage[CacheAbstract.cache_stats_name]['hit_total'], 'misses': storage[CacheAbstract.cache_stats_name]['misses'] + 1 } storage.sync() storage.close() portalocker.unlock(locker) locker.close() return value def increment(self, key, value=1): locker = open(self.locker_name,'a') portalocker.lock(locker, portalocker.LOCK_EX) storage = shelve.open(self.shelve_name) try: if key in storage: value = storage[key][1] + value storage[key] = (time.time(), value) storage.sync() finally: storage.close() portalocker.unlock(locker) locker.close() return value class Cache(object): 
""" Sets up generic caching, creating an instance of both CacheInRam and CacheOnDisk. In case of GAE will make use of gluon.contrib.gae_memcache. - self.ram is an instance of CacheInRam - self.disk is an instance of CacheOnDisk """ def __init__(self, request): """ Parameters ---------- request: the global request object """ # GAE will have a special caching import settings if settings.global_settings.web2py_runtime_gae: from contrib.gae_memcache import MemcacheClient self.ram=self.disk=MemcacheClient(request) else: # Otherwise use ram (and try also disk) self.ram = CacheInRam(request) try: self.disk = CacheOnDisk(request) except IOError: logger.warning('no cache.disk (IOError)') except AttributeError: # normally not expected anymore, as GAE has already # been accounted for logger.warning('no cache.disk (AttributeError)') def __call__(self, key = None, time_expire = DEFAULT_TIME_EXPIRE, cache_model = None): """ Decorator function that can be used to cache any function/method. Example:: @cache('key', 5000, cache.ram) def f(): return time.ctime() When the function f is called, web2py tries to retrieve the value corresponding to `key` from the cache of the object exists and if it did not expire, else it calles the function `f` and stores the output in the cache corresponding to `key`. In the case the output of the function is returned. :param key: the key of the object to be store or retrieved :param time_expire: expiration of the cache in microseconds :param cache_model: `cache.ram`, `cache.disk`, or other (like `cache.memcache` if defined). It defaults to `cache.ram`. Notes ----- `time_expire` is used to compare the curret time with the time when the requested object was last saved in cache. It does not affect future requests. Setting `time_expire` to 0 or negative value forces the cache to refresh. If the function `f` is an action, we suggest using `request.env.path_info` as key. 
""" if not cache_model: cache_model = self.ram def tmp(func): def action(): return cache_model(key, func, time_expire) action.__name___ = func.__name__ action.__doc__ = func.__doc__ return action return tmp
Python
#!/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Contains: - wsgibase: the gluon wsgi application """ import gc import cgi import cStringIO import Cookie import os import re import copy import sys import time import thread import datetime import signal import socket import tempfile import random import string import platform from fileutils import abspath, write_file from settings import global_settings from admin import add_path_first, create_missing_folders, create_missing_app_folders from globals import current from custom_import import custom_import_install from contrib.simplejson import dumps # Remarks: # calling script has inserted path to script directory into sys.path # applications_parent (path to applications/, site-packages/ etc) # defaults to that directory set sys.path to # ("", gluon_parent/site-packages, gluon_parent, ...) # # this is wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do not want the path to this file which may be Library.zip # gluon_parent is the directory containing gluon, web2py.py, logging.conf # and the handlers. 
# applications_parent (web2py_path) is the directory containing applications/ # and routes.py # The two are identical unless web2py_path is changed via the web2py.py -f folder option # main.web2py_path is the same as applications_parent (for backward compatibility) if not hasattr(os, 'mkdir'): global_settings.db_sessions = True if global_settings.db_sessions is not True: global_settings.db_sessions = set() global_settings.gluon_parent = os.environ.get('web2py_path', os.getcwd()) global_settings.applications_parent = global_settings.gluon_parent web2py_path = global_settings.applications_parent # backward compatibility global_settings.app_folders = set() global_settings.debugging = False custom_import_install(web2py_path) create_missing_folders() # set up logging for subsequent imports import logging import logging.config logpath = abspath("logging.conf") if os.path.exists(logpath): logging.config.fileConfig(abspath("logging.conf")) else: logging.basicConfig() logger = logging.getLogger("web2py") from restricted import RestrictedError from http import HTTP, redirect from globals import Request, Response, Session from compileapp import build_environment, run_models_in, \ run_controller_in, run_view_in from fileutils import copystream from contenttype import contenttype from dal import BaseAdapter from settings import global_settings from validators import CRYPT from cache import Cache from html import URL as Url import newcron import rewrite __all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] requests = 0 # gc timer # Security Checks: validate URL and session_id here, # accept_language is validated in languages # pattern used to validate client address regex_client = re.compile('[\w\-:]+(\.[\w\-]+)*\.?') # ## to account for IPV6 version_info = open(abspath('VERSION', gluon=True), 'r') web2py_version = version_info.read() version_info.close() try: import rocket except: if not global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket') 
rewrite.load()

def get_client(env):
    """
    guess the client address from the environment variables

    first tries 'http_x_forwarded_for', secondly 'remote_addr'
    if all fails, assume '127.0.0.1' (running locally)
    """
    g = regex_client.search(env.get('http_x_forwarded_for', ''))
    if g:
        return g.group()
    g = regex_client.search(env.get('remote_addr', ''))
    if g:
        return g.group()
    return '127.0.0.1'

def copystream_progress(request, chunk_size= 10**5):
    """
    copies request.env.wsgi_input into request.body
    and stores progress upload status in cache.ram
    X-Progress-ID:length and X-Progress-ID:uploaded
    """
    if not request.env.content_length:
        return cStringIO.StringIO()
    source = request.env.wsgi_input
    size = int(request.env.content_length)
    dest = tempfile.TemporaryFile()
    if not 'X-Progress-ID' in request.vars:
        # no progress tracking requested: plain copy
        copystream(source, dest, size, chunk_size)
        return dest
    cache_key = 'X-Progress-ID:'+request.vars['X-Progress-ID']
    cache = Cache(request)
    cache.ram(cache_key+':length', lambda: size, 0)
    cache.ram(cache_key+':uploaded', lambda: 0, 0)
    while size > 0:
        if size < chunk_size:
            data = source.read(size)
            cache.ram.increment(cache_key+':uploaded', size)
        else:
            data = source.read(chunk_size)
            cache.ram.increment(cache_key+':uploaded', chunk_size)
        length = len(data)
        if length > size:
            (data, length) = (data[:size], size)
        size -= length
        if length == 0:
            break
        dest.write(data)
        if length < chunk_size:
            break
    dest.seek(0)
    # calling with f=None clears the two progress entries from cache.ram
    cache.ram(cache_key+':length', None)
    cache.ram(cache_key+':uploaded', None)
    return dest

def serve_controller(request, response, session):
    """
    this function is used to generate a dynamic page.
    It first runs all models, then runs the function in the controller,
    and then tries to render the output using a view/template.
    this function must run from the [application] folder.
    A typical example would be the call to the url
    /[application]/[controller]/[function] that would result in a call
    to [function]() in applications/[application]/[controller].py
    rendered by applications/[application]/views/[controller]/[function].html

    Always raises HTTP (carrying the rendered page) rather than returning.
    """

    # ##################################################
    # build environment for controller and view
    # ##################################################

    environment = build_environment(request, response, session)

    # set default view, controller can override it

    response.view = '%s/%s.%s' % (request.controller,
                                  request.function,
                                  request.extension)

    # also, make sure the flash is passed through
    # ##################################################
    # process models, controller and view (if required)
    # ##################################################

    run_models_in(environment)
    response._view_environment = copy.copy(environment)
    page = run_controller_in(request.controller, request.function, environment)
    if isinstance(page, dict):
        # a dict result means "render me with the view"
        response._vars = page
        for key in page:
            response._view_environment[key] = page[key]
        run_view_in(response._view_environment)
        page = response.body.getvalue()
    # logic to garbage collect after exec, not always, once every 100 requests
    global requests
    requests = ('requests' in globals()) and (requests+1) % 100 or 0
    if not requests:
        gc.collect()
    # end garbage collection logic
    raise HTTP(response.status, page, **response.headers)

def start_response_aux(status, headers, exc_info, response=None):
    """
    in controller you can use::

    - request.wsgi.environ
    - request.wsgi.start_response

    to call third party WSGI applications
    """
    response.status = str(status).split(' ',1)[0]
    response.headers = dict(headers)
    return lambda *args, **kargs: response.write(escape=False,*args,**kargs)

def middleware_aux(request, response, *middleware_apps):
    """
    In you controller use::

        @request.wsgi.middleware(middleware1, middleware2, ...)

    to decorate actions with WSGI middleware. actions must return strings.
    uses a simulated environment so it may have weird behavior in some cases
    """
    def middleware(f):
        def app(environ, start_response):
            data = f()
            start_response(response.status,response.headers.items())
            if isinstance(data,list):
                return data
            return [data]
        for item in middleware_apps:
            app=item(app)
        def caller(app):
            return app(request.wsgi.environ,request.wsgi.start_response)
        return lambda caller=caller, app=app: caller(app)
    return middleware

def environ_aux(environ,request):
    # shallow copy of the WSGI environ with the buffered body plugged in
    new_environ = copy.copy(environ)
    new_environ['wsgi.input'] = request.body
    new_environ['wsgi.version'] = 1
    return new_environ

def parse_get_post_vars(request, environ):
    """Populate request.get_vars / post_vars / vars from the WSGI environ."""

    # always parse variables in URL for GET, POST, PUT, DELETE, etc. in get_vars
    dget = cgi.parse_qsl(request.env.query_string or '', keep_blank_values=1)
    for (key, value) in dget:
        if key in request.get_vars:
            if isinstance(request.get_vars[key], list):
                request.get_vars[key] += [value]
            else:
                request.get_vars[key] = [request.get_vars[key]] + [value]
        else:
            request.get_vars[key] = value
        request.vars[key] = request.get_vars[key]

    # parse POST variables on POST, PUT, BOTH only in post_vars
    request.body = copystream_progress(request) ### stores request body
    if (request.body and request.env.request_method in ('POST', 'PUT', 'BOTH')):
        dpost = cgi.FieldStorage(fp=request.body,environ=environ,keep_blank_values=1)
        # The same detection used by FieldStorage to detect multipart POSTs
        is_multipart = dpost.type[:10] == 'multipart/'
        request.body.seek(0)
        # Python <= 2.5 FieldStorage merges query-string and body values
        # differently, hence the version check below
        isle25 = sys.version_info[1] <= 5

        def listify(a):
            return (not isinstance(a,list) and [a]) or a
        try:
            keys = sorted(dpost)
        except TypeError:
            keys = []
        for key in keys:
            dpk = dpost[key]
            # if an element is not a file replace it with its value else leave it alone
            if isinstance(dpk, list):
                if not dpk[0].filename:
                    value = [x.value for x in dpk]
                else:
                    value = [x for x in dpk]
            elif not dpk.filename:
                value = dpk.value
            else:
                value = dpk
            pvalue = listify(value)
            if key in request.vars:
                gvalue = listify(request.vars[key])
                if isle25:
                    value = pvalue + gvalue
                elif is_multipart:
                    pvalue = pvalue[len(gvalue):]
                else:
                    pvalue = pvalue[:-len(gvalue)]
            request.vars[key] = value
            if len(pvalue):
                request.post_vars[key] = (len(pvalue)>1 and pvalue) or pvalue[0]

def wsgibase(environ, responder):
    """
    this is the gluon wsgi application. the first function called when a page
    is requested (static or dynamic). it can be called by paste.httpserver
    or by apache mod_wsgi.

      - fills request with info
      - the environment variables, replacing '.' with '_'
      - adds web2py path and version info
      - compensates for fcgi missing path_info and query_string
      - validates the path in url

    The url path must be either:

    1. for static pages:

      - /<application>/static/<file>

    2. for dynamic pages:

      - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>]
      - (sub may go several levels deep, currently 3 levels are supported:
         sub1/sub2/sub3)

    The naming conventions are:

      - application, controller, function and extension may only contain
        [a-zA-Z0-9_]
      - file and sub may also contain '-', '=', '.' and '/'
    """

    current.__dict__.clear()
    request = Request()
    response = Response()
    session = Session()
    request.env.web2py_path = global_settings.applications_parent
    request.env.web2py_version = web2py_version
    request.env.update(global_settings)
    static_file = False
    try:
        try:
            try:
                # ##################################################
                # handle fcgi missing path_info and query_string
                # select rewrite parameters
                # rewrite incoming URL
                # parse rewritten header variables
                # parse rewritten URL
                # serve file if static
                # ##################################################

                if not environ.get('PATH_INFO',None) and \
                        environ.get('REQUEST_URI',None):
                    # for fcgi, get path_info and query_string from request_uri
                    items = environ['REQUEST_URI'].split('?')
                    environ['PATH_INFO'] = items[0]
                    if len(items) > 1:
                        environ['QUERY_STRING'] = items[1]
                    else:
                        environ['QUERY_STRING'] = ''
                if not environ.get('HTTP_HOST',None):
                    environ['HTTP_HOST'] = '%s:%s' % (environ.get('SERVER_NAME'),
                                                      environ.get('SERVER_PORT'))

                (static_file, environ) = rewrite.url_in(request, environ)
                if static_file:
                    if request.env.get('query_string', '')[:10] == 'attachment':
                        response.headers['Content-Disposition'] = 'attachment'
                    # raises HTTP, caught by the handler below
                    response.stream(static_file, request=request)

                # ##################################################
                # fill in request items
                # ##################################################

                http_host = request.env.http_host.split(':',1)[0]

                local_hosts = [http_host,'::1','127.0.0.1','::ffff:127.0.0.1']
                if not global_settings.web2py_runtime_gae:
                    local_hosts += [socket.gethostname(),
                                    socket.gethostbyname(http_host)]
                request.client = get_client(request.env)
                request.folder = abspath('applications',
                                         request.application) + os.sep
                x_req_with = str(request.env.http_x_requested_with).lower()
                request.ajax = x_req_with == 'xmlhttprequest'
                request.cid = request.env.http_web2py_component_element
                request.is_local = request.env.remote_addr in local_hosts
                request.is_https = request.env.wsgi_url_scheme \
                    in ['https', 'HTTPS'] or request.env.https == 'on'

                # ##################################################
                # compute a request.uuid to be used for tickets and toolbar
                # ##################################################

                response.uuid = request.compute_uuid()

                # ##################################################
                # access the requested application
                # ##################################################

                if not os.path.exists(request.folder):
                    if request.application == rewrite.thread.routes.default_application and request.application != 'welcome':
                        request.application = 'welcome'
                        redirect(Url(r=request))
                    elif rewrite.thread.routes.error_handler:
                        redirect(Url(rewrite.thread.routes.error_handler['application'],
                                     rewrite.thread.routes.error_handler['controller'],
                                     rewrite.thread.routes.error_handler['function'],
                                     args=request.application))
                    else:
                        raise HTTP(404,
                                   rewrite.thread.routes.error_message % 'invalid request',
                                   web2py_error='invalid application')
                request.url = Url(r=request, args=request.args,
                                  extension=request.raw_extension)

                # ##################################################
                # build missing folders
                # ##################################################

                create_missing_app_folders(request)

                # ##################################################
                # get the GET and POST data
                # ##################################################

                parse_get_post_vars(request, environ)

                # ##################################################
                # expose wsgi hooks for convenience
                # ##################################################

                request.wsgi.environ = environ_aux(environ,request)
                request.wsgi.start_response = lambda status='200', headers=[], \
                    exec_info=None, response=response: \
                    start_response_aux(status, headers, exec_info, response)
                request.wsgi.middleware = lambda *a: middleware_aux(request,response,*a)

                # ##################################################
                # load cookies
                # ##################################################

                if request.env.http_cookie:
                    try:
                        request.cookies.load(request.env.http_cookie)
                    except Cookie.CookieError, e:
                        pass # invalid cookies

                # ##################################################
                # try load session or create new session file
                # ##################################################

                session.connect(request, response)

                # ##################################################
                # set no-cache headers
                # ##################################################

                response.headers['Content-Type'] = contenttype('.'+request.extension)
                response.headers['Cache-Control'] = \
                    'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'
                response.headers['Expires'] = \
                    time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
                response.headers['Pragma'] = 'no-cache'

                # ##################################################
                # run controller
                # ##################################################

                serve_controller(request, response, session)

            except HTTP, http_response:
                # the normal success path: serve_controller raises HTTP
                if static_file:
                    return http_response.to(responder)

                if request.body:
                    request.body.close()

                # ##################################################
                # on success, try store session in database
                # ##################################################
                session._try_store_in_db(request, response)

                # ##################################################
                # on success, commit database
                # ##################################################

                if response._custom_commit:
                    response._custom_commit()
                else:
                    BaseAdapter.close_all_instances('commit')

                # ##################################################
                # if session not in db try store session on filesystem
                # this must be done after trying to commit database!
                # ##################################################

                session._try_store_on_disk(request, response)

                # ##################################################
                # store cookies in headers
                # ##################################################

                if request.cid:
                    if response.flash and not 'web2py-component-flash' in http_response.headers:
                        http_response.headers['web2py-component-flash'] = \
                            str(response.flash).replace('\n','')
                    if response.js and not 'web2py-component-command' in http_response.headers:
                        http_response.headers['web2py-component-command'] = \
                            response.js.replace('\n','')
                if session._forget and \
                        response.session_id_name in response.cookies:
                    del response.cookies[response.session_id_name]
                elif session._secure:
                    response.cookies[response.session_id_name]['secure'] = True
                if len(response.cookies)>0:
                    http_response.headers['Set-Cookie'] = \
                        [str(cookie)[11:] for cookie in response.cookies.values()]
                ticket=None

            except RestrictedError, e:

                if request.body:
                    request.body.close()

                # ##################################################
                # on application error, rollback database
                # ##################################################

                ticket = e.log(request) or 'unknown'
                if response._custom_rollback:
                    response._custom_rollback()
                else:
                    BaseAdapter.close_all_instances('rollback')

                http_response = \
                    HTTP(500,
                         rewrite.thread.routes.error_message_ticket % dict(ticket=ticket),
                         web2py_error='ticket %s' % ticket)

        except:
            # framework-level failure (not an application error)

            if request.body:
                request.body.close()

            # ##################################################
            # on application error, rollback database
            # ##################################################

            try:
                if response._custom_rollback:
                    response._custom_rollback()
                else:
                    BaseAdapter.close_all_instances('rollback')
            except:
                pass

            e = RestrictedError('Framework', '', '', locals())
            ticket = e.log(request) or 'unrecoverable'
            http_response = \
                HTTP(500,
                     rewrite.thread.routes.error_message_ticket % dict(ticket=ticket),
                     web2py_error='ticket %s' % ticket)

    finally:
        if response and hasattr(response, 'session_file') \
                and response.session_file:
            response.session_file.close()
#        if global_settings.debugging:
#            import gluon.debug
#            gluon.debug.stop_trace()

    session._unlock(response)
    http_response, new_environ = rewrite.try_rewrite_on_error(
        http_response, request, environ, ticket)
    if not http_response:
        # routes_onerror produced a new URL: re-dispatch it
        return wsgibase(new_environ,responder)
    if global_settings.web2py_crontype == 'soft':
        newcron.softcron(global_settings.applications_parent).start()
    return http_response.to(responder)

def save_password(password, port):
    """
    used by main() to save the password in the parameters_port.py file.
    """

    password_file = abspath('parameters_%i.py' % port)
    if password == '<random>':
        # make up a new password
        chars = string.letters + string.digits
        password = ''.join([random.choice(chars) for i in range(8)])
        cpassword = CRYPT()(password)[0]
        print '******************* IMPORTANT!!! ************************'
        print 'your admin password is "%s"' % password
        print '*********************************************************'
    elif password == '<recycle>':
        # reuse the current password if any
        if os.path.exists(password_file):
            return
        else:
            password = ''
    elif password.startswith('<pam_user:'):
        # use the pam password for specified user
        cpassword = password[1:-1]
    else:
        # use provided password
        cpassword = CRYPT()(password)[0]
    fp = open(password_file, 'w')
    if password:
        fp.write('password="%s"\n' % cpassword)
    else:
        fp.write('password=None\n')
    fp.close()

def appfactory(wsgiapp=wsgibase,
               logfilename='httpserver.log',
               profilerfilename='profiler.log'):
    """
    generates a wsgi application
    that does logging and profiling and calls wsgibase

    .. function:: gluon.main.appfactory(
            [wsgiapp=wsgibase
            [, logfilename='httpserver.log'
            [, profilerfilename='profiler.log']]])

    """
    if profilerfilename and os.path.exists(profilerfilename):
        os.unlink(profilerfilename)
    locker = thread.allocate_lock()

    def app_with_logging(environ, responder):
        """
        a wsgi app that does logging and profiling and calls wsgibase
        """
        status_headers = []

        def responder2(s, h):
            """
            wsgi responder app
            """
            status_headers.append(s)
            status_headers.append(h)
            return responder(s, h)

        time_in = time.time()
        ret = [0]
        if not profilerfilename:
            ret[0] = wsgiapp(environ, responder2)
        else:
            import cProfile
            import pstats
            logger.warn('profiler is on. this makes web2py slower and serial')

            # the lock serializes requests while profiling
            locker.acquire()
            cProfile.runctx('ret[0] = wsgiapp(environ, responder2)',
                            globals(), locals(), profilerfilename+'.tmp')
            stat = pstats.Stats(profilerfilename+'.tmp')
            stat.stream = cStringIO.StringIO()
            stat.strip_dirs().sort_stats("time").print_stats(80)
            profile_out = stat.stream.getvalue()
            profile_file = open(profilerfilename, 'a')
            profile_file.write('%s\n%s\n%s\n%s\n\n' % \
               ('='*60, environ['PATH_INFO'], '='*60, profile_out))
            profile_file.close()
            locker.release()
        try:
            line = '%s, %s, %s, %s, %s, %s, %f\n' % (
                environ['REMOTE_ADDR'],
                datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
                environ['REQUEST_METHOD'],
                environ['PATH_INFO'].replace(',', '%2C'),
                environ['SERVER_PROTOCOL'],
                (status_headers[0])[:3],
                time.time() - time_in,
                )
            if not logfilename:
                sys.stdout.write(line)
            elif isinstance(logfilename, str):
                write_file(logfilename, line, 'a')
            else:
                logfilename.write(line)
        except:
            pass
        return ret[0]

    return app_with_logging

class HttpServer(object):
    """
    the web2py web server (Rocket)
    """

    def __init__(
        self,
        ip='127.0.0.1',
        port=8000,
        password='',
        pid_filename='httpserver.pid',
        log_filename='httpserver.log',
        profiler_filename=None,
        ssl_certificate=None,
        ssl_private_key=None,
        min_threads=None,
        max_threads=None,
        server_name=None,
        request_queue_size=5,
        timeout=10,
        shutdown_timeout=None, # Rocket does not use a shutdown timeout
        path=None,
        interfaces=None # Rocket is able to use several interfaces - must be list of socket-tuples as string
        ):
        """
        starts the web server.
        """

        if interfaces:
            # if interfaces is specified, it must be tested for rocket parameter correctness
            # not necessarily completely tested (e.g. content of tuples or ip-format)
            import types
            if isinstance(interfaces,types.ListType):
                for i in interfaces:
                    if not isinstance(i,types.TupleType):
                        # NOTE(review): raising a string is a TypeError on
                        # Python >= 2.6; should raise an Exception subclass
                        raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/"
            else:
                raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/"

        if path:
            # if a path is specified change the global variables so that web2py
            # runs from there instead of cwd or os.environ['web2py_path']
            global web2py_path
            path = os.path.normpath(path)
            web2py_path = path
            global_settings.applications_parent = path
            os.chdir(path)
            [add_path_first(p) for p in (path, abspath('site-packages'), "")]

        save_password(password, port)
        self.pid_filename = pid_filename
        if not server_name:
            server_name = socket.gethostname()
        logger.info('starting web server...')
        rocket.SERVER_NAME = server_name
        sock_list = [ip, port]
        if not ssl_certificate or not ssl_private_key:
            logger.info('SSL is off')
        elif not rocket.ssl:
            logger.warning('Python "ssl" module unavailable. SSL is OFF')
        elif not os.path.exists(ssl_certificate):
            logger.warning('unable to open SSL certificate. SSL is OFF')
        elif not os.path.exists(ssl_private_key):
            logger.warning('unable to open SSL private key. SSL is OFF')
        else:
            sock_list.extend([ssl_private_key, ssl_certificate])
            logger.info('SSL is ON')
        app_info = {'wsgi_app': appfactory(wsgibase,
                                           log_filename,
                                           profiler_filename) }

        self.server = rocket.Rocket(interfaces or tuple(sock_list),
                                    method='wsgi',
                                    app_info=app_info,
                                    min_threads=min_threads,
                                    max_threads=max_threads,
                                    queue_size=int(request_queue_size),
                                    timeout=int(timeout),
                                    handle_signals=False,
                                    )

    def start(self):
        """
        start the web server
        """
        try:
            signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop())
            signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop())
        except:
            pass
        write_file(self.pid_filename, str(os.getpid()))
        self.server.start()

    def stop(self, stoplogging=False):
        """
        stop cron and the web server
        """
        newcron.stopcron()
        self.server.stop(stoplogging)
        try:
            os.unlink(self.pid_filename)
        except:
            pass
# ---- Python module boundary (separator left by file concatenation) ----
# this file exists for backward compatibility __all__ = ['DAL','Field','drivers'] from dal import DAL, Field, Table, Query, Set, Expression, Row, Rows, drivers, BaseAdapter, SQLField, SQLTable, SQLXorable, SQLQuery, SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB, SQLALL, SQLCustomType
# ---- Python module boundary (separator left by file concatenation) ----
#!/usr/bin/env python # -*- coding: utf-8 -*- """ :: # from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496942 # Title: Cross-site scripting (XSS) defense # Submitter: Josh Goldfoot (other recipes) # Last Updated: 2006/08/05 # Version no: 1.0 """ from htmllib import HTMLParser from cgi import escape from urlparse import urlparse from formatter import AbstractFormatter from htmlentitydefs import entitydefs from xml.sax.saxutils import quoteattr __all__ = ['sanitize'] def xssescape(text): """Gets rid of < and > and & and, for good measure, :""" return escape(text, quote=True).replace(':', '&#58;') class XssCleaner(HTMLParser): def __init__( self, permitted_tags=[ 'a', 'b', 'blockquote', 'br/', 'i', 'li', 'ol', 'ul', 'p', 'cite', 'code', 'pre', 'img/', ], allowed_attributes={'a': ['href', 'title'], 'img': ['src', 'alt' ], 'blockquote': ['type']}, fmt=AbstractFormatter, strip_disallowed = False ): HTMLParser.__init__(self, fmt) self.result = '' self.open_tags = [] self.permitted_tags = [i for i in permitted_tags if i[-1] != '/'] self.requires_no_close = [i[:-1] for i in permitted_tags if i[-1] == '/'] self.permitted_tags += self.requires_no_close self.allowed_attributes = allowed_attributes # The only schemes allowed in URLs (for href and src attributes). # Adding "javascript" or "vbscript" to this list would not be smart. self.allowed_schemes = ['http', 'https', 'ftp'] #to strip or escape disallowed tags? 
self.strip_disallowed = strip_disallowed
        # True while inside a disallowed element in strip mode: its text
        # content is dropped by handle_data below.
        self.in_disallowed = False

    def handle_data(self, data):
        # Text content: escaped, unless we are stripping a disallowed element.
        if data and not self.in_disallowed:
            self.result += xssescape(data)

    def handle_charref(self, ref):
        # Numeric character references: pass through short decimal refs,
        # escape anything suspicious-looking.
        if self.in_disallowed:
            return
        elif len(ref) < 7 and ref.isdigit():
            self.result += '&#%s;' % ref
        else:
            self.result += xssescape('&#%s' % ref)

    def handle_entityref(self, ref):
        # Named entities: keep only the well-known ones from htmlentitydefs.
        if self.in_disallowed:
            return
        elif ref in entitydefs:
            self.result += '&%s;' % ref
        else:
            self.result += xssescape('&%s' % ref)

    def handle_comment(self, comment):
        # Comments are kept but escaped, so conditional-comment tricks
        # cannot reach the browser as markup.
        if self.in_disallowed:
            return
        elif comment:
            self.result += xssescape('<!--%s-->' % comment)

    def handle_starttag(
        self,
        tag,
        method,
        attrs,
        ):
        if tag not in self.permitted_tags:
            if self.strip_disallowed:
                # strip mode: drop the tag and its text content
                self.in_disallowed = True
            else:
                # escape mode: show the tag as literal text
                self.result += xssescape('<%s>' % tag)
        else:
            bt = '<' + tag
            if tag in self.allowed_attributes:
                attrs = dict(attrs)
                self.allowed_attributes_here = [x for x in
                        self.allowed_attributes[tag] if x in attrs
                        and len(attrs[x]) > 0]
                for attribute in self.allowed_attributes_here:
                    if attribute in ['href', 'src', 'background']:
                        # URL-bearing attributes only survive if the URL
                        # scheme is whitelisted (see url_is_acceptable).
                        if self.url_is_acceptable(attrs[attribute]):
                            bt += ' %s="%s"' % (attribute,
                                    attrs[attribute])
                    else:
                        bt += ' %s=%s' % (xssescape(attribute),
                                quoteattr(attrs[attribute]))
                # An <a> or <img> that lost all its attributes is useless:
                # drop it entirely.
                if bt == '<a' or bt == '<img':
                    return
            if tag in self.requires_no_close:
                bt += ' /'
            bt += '>'
            self.result += bt
            # Track open tags so strip() can close unbalanced ones.
            self.open_tags.insert(0, tag)

    def handle_endtag(self, tag, attrs):
        bracketed = '</%s>' % tag
        if tag not in self.permitted_tags:
            if self.strip_disallowed:
                # leaving a stripped element: resume emitting text
                self.in_disallowed = False
            else:
                self.result += xssescape(bracketed)
        elif tag in self.open_tags:
            self.result += bracketed
            self.open_tags.remove(tag)

    def unknown_starttag(self, tag, attributes):
        self.handle_starttag(tag, None, attributes)

    def unknown_endtag(self, tag):
        self.handle_endtag(tag, None)

    def url_is_acceptable(self, url):
        """
        Accepts relative and absolute urls
        """
        parsed = urlparse(url)
        return (parsed[0] in self.allowed_schemes and '.'
in parsed[1]) \
            or (parsed[0] == '' and parsed[2].startswith('/'))

    def strip(self, rawstring, escape=True):
        """
        Returns the argument stripped of potentially harmful
        HTML or Javascript code

        @type escape: boolean
        @param escape: If True (default) it escapes the potentially harmful
          content, otherwise remove it
        """
        if not isinstance(rawstring, str):
            return str(rawstring)
        # normalize '<br/>' style tags to '<br />' before parsing
        for tag in self.requires_no_close:
            rawstring = rawstring.replace("<%s/>" % tag, "<%s />" % tag)
        if not escape:
            self.strip_disallowed = True
        self.result = ''
        self.feed(rawstring)
        # close any tags the input left open, so the output is balanced
        for endtag in self.open_tags:
            if endtag not in self.requires_no_close:
                self.result += '</%s>' % endtag
        return self.result

    def xtags(self):
        """
        Returns a printable string informing the user which tags are allowed
        """
        tg = ''
        for x in sorted(self.permitted_tags):
            tg += '<' + x
            if x in self.allowed_attributes:
                for y in self.allowed_attributes[x]:
                    tg += ' %s=""' % y
            tg += '> '
        return xssescape(tg.strip())


def sanitize(text, permitted_tags=[
        'a',
        'b',
        'blockquote',
        'br/',
        'i',
        'li',
        'ol',
        'ul',
        'p',
        'cite',
        'code',
        'pre',
        'img/',
        'h1','h2','h3','h4','h5','h6',
        'table','tr','td','div',
        ],
        allowed_attributes = {
        'a': ['href', 'title'],
        'img': ['src', 'alt'],
        'blockquote': ['type'],
        'td': ['colspan'],
        },
        escape=True):
    # Convenience entry point: run ``text`` through a fresh XssCleaner
    # with the given whitelists. Non-str input is coerced with str().
    if not isinstance(text, str):
        return str(text)
    return XssCleaner(permitted_tags=permitted_tags,
                      allowed_attributes=allowed_attributes).strip(text, escape)
# ---- Python module boundary (separator left by file concatenation) ----
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created by Attila Csipa <web2py@csipa.in.rs> Modified by Massimo Di Pierro <mdipierro@cs.depaul.edu> """ import sys import os import threading import logging import time import sched import re import datetime import platform import portalocker import fileutils import cPickle from settings import global_settings logger = logging.getLogger("web2py.cron") _cron_stopping = False def stopcron(): "graceful shutdown of cron" global _cron_stopping _cron_stopping = True class extcron(threading.Thread): def __init__(self, applications_parent): threading.Thread.__init__(self) self.setDaemon(False) self.path = applications_parent crondance(self.path, 'external', startup=True) def run(self): if not _cron_stopping: logger.debug('external cron invocation') crondance(self.path, 'external', startup=False) class hardcron(threading.Thread): def __init__(self, applications_parent): threading.Thread.__init__(self) self.setDaemon(True) self.path = applications_parent crondance(self.path, 'hard', startup=True) def launch(self): if not _cron_stopping: logger.debug('hard cron invocation') crondance(self.path, 'hard', startup = False) def run(self): s = sched.scheduler(time.time, time.sleep) logger.info('Hard cron daemon started') while not _cron_stopping: now = time.time() s.enter(60 - now % 60, 1, self.launch, ()) s.run() class softcron(threading.Thread): def __init__(self, applications_parent): threading.Thread.__init__(self) self.path = applications_parent crondance(self.path, 'soft', startup=True) def run(self): if not _cron_stopping: logger.debug('soft cron invocation') crondance(self.path, 'soft', startup=False) class Token(object): def __init__(self,path): self.path = os.path.join(path, 'cron.master') if not os.path.exists(self.path): fileutils.write_file(self.path, '', 'wb') self.master = None self.now = time.time() def acquire(self,startup=False): """ returns the time when the lock is acquired or None if cron already running lock 
is implemented by writing a pickle (start, stop) in cron.master start is time when cron job starts and stop is time when cron completed stop == 0 if job started but did not yet complete if a cron job started within less than 60 seconds, acquire returns None if a cron job started before 60 seconds and did not stop, a warning is issue "Stale cron.master detected" """ if portalocker.LOCK_EX == None: logger.warning('WEB2PY CRON: Disabled because no file locking') return None self.master = open(self.path,'rb+') try: ret = None portalocker.lock(self.master,portalocker.LOCK_EX) try: (start, stop) = cPickle.load(self.master) except: (start, stop) = (0, 1) if startup or self.now - start > 59.99: ret = self.now if not stop: # this happens if previous cron job longer than 1 minute logger.warning('WEB2PY CRON: Stale cron.master detected') logger.debug('WEB2PY CRON: Acquiring lock') self.master.seek(0) cPickle.dump((self.now,0),self.master) finally: portalocker.unlock(self.master) if not ret: # do this so no need to release self.master.close() return ret def release(self): """ this function writes into cron.master the time when cron job was completed """ if not self.master.closed: portalocker.lock(self.master,portalocker.LOCK_EX) logger.debug('WEB2PY CRON: Releasing cron lock') self.master.seek(0) (start, stop) = cPickle.load(self.master) if start == self.now: # if this is my lock self.master.seek(0) cPickle.dump((self.now,time.time()),self.master) portalocker.unlock(self.master) self.master.close() def rangetolist(s, period='min'): retval = [] if s.startswith('*'): if period == 'min': s = s.replace('*', '0-59', 1) elif period == 'hr': s = s.replace('*', '0-23', 1) elif period == 'dom': s = s.replace('*', '1-31', 1) elif period == 'mon': s = s.replace('*', '1-12', 1) elif period == 'dow': s = s.replace('*', '0-6', 1) m = re.compile(r'(\d+)-(\d+)/(\d+)') match = m.match(s) if match: for i in range(int(match.group(1)), int(match.group(2)) + 1): if i % int(match.group(3)) == 0: 
retval.append(i) return retval def parsecronline(line): task = {} if line.startswith('@reboot'): line=line.replace('@reboot', '-1 * * * *') elif line.startswith('@yearly'): line=line.replace('@yearly', '0 0 1 1 *') elif line.startswith('@annually'): line=line.replace('@annually', '0 0 1 1 *') elif line.startswith('@monthly'): line=line.replace('@monthly', '0 0 1 * *') elif line.startswith('@weekly'): line=line.replace('@weekly', '0 0 * * 0') elif line.startswith('@daily'): line=line.replace('@daily', '0 0 * * *') elif line.startswith('@midnight'): line=line.replace('@midnight', '0 0 * * *') elif line.startswith('@hourly'): line=line.replace('@hourly', '0 * * * *') params = line.strip().split(None, 6) if len(params) < 7: return None daysofweek={'sun':0,'mon':1,'tue':2,'wed':3,'thu':4,'fri':5,'sat':6} for (s, id) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']): if not s in [None, '*']: task[id] = [] vals = s.split(',') for val in vals: if val != '-1' and '-' in val and '/' not in val: val = '%s/1' % val if '/' in val: task[id] += rangetolist(val, id) elif val.isdigit() or val=='-1': task[id].append(int(val)) elif id=='dow' and val[:3].lower() in daysofweek: task[id].append(daysofweek(val[:3].lower())) task['user'] = params[5] task['cmd'] = params[6] return task class cronlauncher(threading.Thread): def __init__(self, cmd, shell=True): threading.Thread.__init__(self) if platform.system() == 'Windows': shell = False elif isinstance(cmd,list): cmd = ' '.join(cmd) self.cmd = cmd self.shell = shell def run(self): import subprocess proc = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=self.shell) (stdoutdata,stderrdata) = proc.communicate() if proc.returncode != 0: logger.warning( 'WEB2PY CRON Call returned code %s:\n%s' % \ (proc.returncode, stdoutdata+stderrdata)) else: logger.debug('WEB2PY CRON Call returned success:\n%s' \ % stdoutdata) def crondance(applications_parent, ctype='soft', startup=False): 
apppath = os.path.join(applications_parent,'applications') cron_path = os.path.join(apppath,'admin','cron') token = Token(cron_path) cronmaster = token.acquire(startup=startup) if not cronmaster: return now_s = time.localtime() checks=(('min',now_s.tm_min), ('hr',now_s.tm_hour), ('mon',now_s.tm_mon), ('dom',now_s.tm_mday), ('dow',(now_s.tm_wday+1)%7)) apps = [x for x in os.listdir(apppath) if os.path.isdir(os.path.join(apppath, x))] for app in apps: if _cron_stopping: break; apath = os.path.join(apppath,app) cronpath = os.path.join(apath, 'cron') crontab = os.path.join(cronpath, 'crontab') if not os.path.exists(crontab): continue try: cronlines = fileutils.readlines_file(crontab, 'rt') lines = [x.strip() for x in cronlines if x.strip() and not x.strip().startswith('#')] tasks = [parsecronline(cline) for cline in lines] except Exception, e: logger.error('WEB2PY CRON: crontab read error %s' % e) continue for task in tasks: if _cron_stopping: break; commands = [sys.executable] w2p_path = fileutils.abspath('web2py.py', gluon=True) if os.path.exists(w2p_path): commands.append(w2p_path) if global_settings.applications_parent != global_settings.gluon_parent: commands.extend(('-f', global_settings.applications_parent)) citems = [(k in task and not v in task[k]) for k,v in checks] task_min= task.get('min',[]) if not task: continue elif not startup and task_min == [-1]: continue elif task_min != [-1] and reduce(lambda a,b: a or b, citems): continue logger.info('WEB2PY CRON (%s): %s executing %s in %s at %s' \ % (ctype, app, task.get('cmd'), os.getcwd(), datetime.datetime.now())) action, command, models = False, task['cmd'], '' if command.startswith('**'): (action,models,command) = (True,'',command[2:]) elif command.startswith('*'): (action,models,command) = (True,'-M',command[1:]) else: action=False if action and command.endswith('.py'): commands.extend(('-J', # cron job models, # import models? 
'-S', app, # app name '-a', '"<recycle>"', # password '-R', command)) # command shell = True elif action: commands.extend(('-J', # cron job models, # import models? '-S', app+'/'+command, # app name '-a', '"<recycle>"')) # password shell = True else: commands = command shell = False try: cronlauncher(commands, shell=shell).start() except Exception, e: logger.warning( 'WEB2PY CRON: Execution error for %s: %s' \ % (task.get('cmd'), e)) token.release()
# ---- Python module boundary (separator left by file concatenation) ----
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework (Copyrighted, 2007-2011). License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Author: Thadeus Burgess Contributors: - Thank you to Massimo Di Pierro for creating the original gluon/template.py - Thank you to Jonathan Lundell for extensively testing the regex on Jython. - Thank you to Limodou (creater of uliweb) who inspired the block-element support for web2py. """ import os import re import cgi import cStringIO import logging try: from restricted import RestrictedError except: def RestrictedError(a,b,c): logging.error(str(a)+':'+str(b)+':'+str(c)) return RuntimeError class Node(object): """ Basic Container Object """ def __init__(self, value = None, pre_extend = False): self.value = value self.pre_extend = pre_extend def __str__(self): return str(self.value) class SuperNode(Node): def __init__(self, name = '', pre_extend = False): self.name = name self.value = None self.pre_extend = pre_extend def __str__(self): if self.value: return str(self.value) else: raise SyntaxError("Undefined parent block ``%s``. \n" % self.name + \ "You must define a block before referencing it.\nMake sure you have not left out an ``{{end}}`` tag." ) def __repr__(self): return "%s->%s" % (self.name, self.value) class BlockNode(Node): """ Block Container. This Node can contain other Nodes and will render in a hierarchical order of when nodes were added. ie:: {{ block test }} This is default block test {{ end }} """ def __init__(self, name = '', pre_extend = False, delimiters = ('{{','}}')): """ name - Name of this Node. 
"""
        self.nodes = []
        self.name = name
        self.pre_extend = pre_extend
        # template tag delimiters, e.g. '{{' and '}}'
        self.left, self.right = delimiters

    def __repr__(self):
        lines = ['%sblock %s%s' % (self.left,self.name,self.right)]
        for node in self.nodes:
            lines.append(str(node))
        lines.append('%send%s' % (self.left, self.right))
        return ''.join(lines)

    def __str__(self):
        """
        Get this BlockNodes content, not including child Nodes
        """
        lines = []
        for node in self.nodes:
            if not isinstance(node, BlockNode):
                lines.append(str(node))
        return ''.join(lines)

    def append(self, node):
        """
        Add an element to the nodes.

        Keyword Arguments

        - node -- Node object or string to append.
        """
        if isinstance(node, str) or isinstance(node, Node):
            self.nodes.append(node)
        else:
            raise TypeError("Invalid type; must be instance of ``str`` or ``BlockNode``. %s" % node)

    def extend(self, other):
        """
        Extend the list of nodes with another BlockNode class.

        Keyword Arguments

        - other -- BlockNode or Content object to extend from.
        """
        if isinstance(other, BlockNode):
            self.nodes.extend(other.nodes)
        else:
            raise TypeError("Invalid type; must be instance of ``BlockNode``. %s" % other)

    def output(self, blocks):
        """
        Merges all nodes into a single string.

        blocks -- Dictionary of blocks that are extending
        from this template.
        """
        lines = []
        # Get each of our nodes
        for node in self.nodes:
            # If we have a block level node.
            if isinstance(node, BlockNode):
                # If we can override this block.
                if node.name in blocks:
                    # Override block from vars.
                    lines.append(blocks[node.name].output(blocks))
                # Else we take the default
                else:
                    lines.append(node.output(blocks))
            # Else its just a string
            else:
                lines.append(str(node))
        # Now combine all of our lines together.
        return ''.join(lines)


class Content(BlockNode):
    """
    Parent Container -- Used as the root level BlockNode.

    Contains functions that operate as such.
"""
    def __init__(self, name = "ContentBlock", pre_extend = False):
        """
        Keyword Arguments

        name -- Unique name for this BlockNode
        """
        self.name = name
        self.nodes = []
        # name -> BlockNode mapping of every block seen under this root
        self.blocks = {}
        self.pre_extend = pre_extend

    def __str__(self):
        lines = []
        # For each of our nodes
        for node in self.nodes:
            # If it is a block node.
            if isinstance(node, BlockNode):
                # And the node has a name that corresponds with a block in us
                if node.name in self.blocks:
                    # Use the overriding output.
                    lines.append(self.blocks[node.name].output(self.blocks))
                else:
                    # Otherwise we just use the nodes output.
                    lines.append(node.output(self.blocks))
            else:
                # It is just a string, so include it.
                lines.append(str(node))
        # Merge our list together.
        return ''.join(lines)

    def _insert(self, other, index = 0):
        """
        Inserts object at index.
        """
        if isinstance(other, str) or isinstance(other, Node):
            self.nodes.insert(index, other)
        else:
            raise TypeError("Invalid type, must be instance of ``str`` or ``Node``.")

    def insert(self, other, index = 0):
        """
        Inserts object at index.

        You may pass a list of objects and have them inserted.
        """
        if isinstance(other, (list, tuple)):
            # Must reverse so the order stays the same.
            other.reverse()
            for item in other:
                self._insert(item, index)
        else:
            self._insert(other, index)

    def append(self, node):
        """
        Adds a node to list. If it is a BlockNode then we assign a block for it.
        """
        if isinstance(node, str) or isinstance(node, Node):
            self.nodes.append(node)
            # register blocks so children can override them by name
            if isinstance(node, BlockNode):
                self.blocks[node.name] = node
        else:
            raise TypeError("Invalid type, must be instance of ``str`` or ``BlockNode``. %s" % node)

    def extend(self, other):
        """
        Extends the objects list of nodes with another objects nodes
        """
        if isinstance(other, BlockNode):
            self.nodes.extend(other.nodes)
            self.blocks.update(other.blocks)
        else:
            raise TypeError("Invalid type; must be instance of ``BlockNode``. 
%s" % other)

    def clear_content(self):
        self.nodes = []


class TemplateParser(object):

    # matches a whole {{...}} tag (non-greedy, across newlines)
    r_tag = re.compile(r'(\{\{.*?\}\})', re.DOTALL)

    # matches triple-quoted strings so newlines inside them can be escaped
    r_multiline = re.compile(r'(""".*?""")|(\'\'\'.*?\'\'\')', re.DOTALL)

    # These are used for re-indentation.
    # Indent + 1
    re_block = re.compile('^(elif |else:|except:|except |finally:).*$',
                          re.DOTALL)
    # Indent - 1
    re_unblock = re.compile('^(return|continue|break|raise)( .*)?$',
                            re.DOTALL)
    # Indent - 1
    re_pass = re.compile('^pass( .*)?$', re.DOTALL)

    def __init__(self, text,
                 name    = "ParserContainer",
                 context = dict(),
                 path    = 'views/',
                 writer  = 'response.write',
                 lexers  = {},
                 delimiters = ('{{','}}'),
                 _super_nodes = [],
                 ):
        """
        text -- text to parse
        context -- context to parse in
        path -- folder path to templates
        writer -- string of writer class to use
        lexers -- dict of custom lexers to use.
        delimiters -- for example ('{{','}}')
        _super_nodes -- a list of nodes to check for inclusion
                        this should only be set by "self.extend"
                        It contains a list of SuperNodes from a child
                        template that need to be handled.
        """
        # Keep a root level name.
        self.name = name
        # Raw text to start parsing.
        self.text = text
        # Writer to use (refer to the default for an example).
        # This will end up as
        # "%s(%s, escape=False)" % (self.writer, value)
        self.writer = writer

        # Dictionary of custom name lexers to use.
        if isinstance(lexers, dict):
            self.lexers = lexers
        else:
            self.lexers = {}

        # Path of templates
        self.path = path
        # Context for templates.
        self.context = context

        # allow optional alternative delimiters
        self.delimiters = delimiters
        if delimiters!=('{{','}}'):
            # rebuild the tag regex for the custom delimiters
            escaped_delimiters = (re.escape(delimiters[0]),
                                  re.escape(delimiters[1]))
            self.r_tag = re.compile(r'(%s.*?%s)' % escaped_delimiters,
                                    re.DOTALL)

        # Create a root level Content that everything will go into.
        self.content = Content(name=name)

        # Stack will hold our current stack of nodes.
        # As we descend into a node, it will be added to the stack
        # And when we leave, it will be removed from the stack.
# self.content should stay on the stack at all times. self.stack = [self.content] # This variable will hold a reference to every super block # that we come across in this template. self.super_nodes = [] # This variable will hold a reference to the child # super nodes that need handling. self.child_super_nodes = _super_nodes # This variable will hold a reference to every block # that we come across in this template self.blocks = {} # Begin parsing. self.parse(text) def to_string(self): """ Return the parsed template with correct indentation. Used to make it easier to port to python3. """ return self.reindent(str(self.content)) def __str__(self): "Make sure str works exactly the same as python 3" return self.to_string() def __unicode__(self): "Make sure str works exactly the same as python 3" return self.to_string() def reindent(self, text): """ Reindents a string of unindented python code. """ # Get each of our lines into an array. lines = text.split('\n') # Our new lines new_lines = [] # Keeps track of how many indents we have. # Used for when we need to drop a level of indentation # only to reindent on the next line. credit = 0 # Current indentation k = 0 ################# # THINGS TO KNOW ################# # k += 1 means indent # k -= 1 means unindent # credit = 1 means unindent on the next line. for raw_line in lines: line = raw_line.strip() # ignore empty lines if not line: continue # If we have a line that contains python code that # should be unindented for this line of code. # and then reindented for the next line. if TemplateParser.re_block.match(line): k = k + credit - 1 # We obviously can't have a negative indentation k = max(k,0) # Add the indentation! new_lines.append(' '*(4*k)+line) # Bank account back to 0 again :( credit = 0 # If we are a pass block, we obviously de-dent. if TemplateParser.re_pass.match(line): k -= 1 # If we are any of the following, de-dent. # However, we should stay on the same level # But the line right after us will be de-dented. 
# So we add one credit to keep us at the level # while moving back one indentation level. if TemplateParser.re_unblock.match(line): credit = 1 k -= 1 # If we are an if statement, a try, or a semi-colon we # probably need to indent the next line. if line.endswith(':') and not line.startswith('#'): k += 1 # This must come before so that we can raise an error with the # right content. new_text = '\n'.join(new_lines) if k > 0: self._raise_error('missing "pass" in view', new_text) elif k < 0: self._raise_error('too many "pass" in view', new_text) return new_text def _raise_error(self, message='', text=None): """ Raise an error using itself as the filename and textual content. """ raise RestrictedError(self.name, text or self.text, message) def _get_file_text(self, filename): """ Attempt to open ``filename`` and retrieve its text. This will use self.path to search for the file. """ # If they didn't specify a filename, how can we find one! if not filename.strip(): self._raise_error('Invalid template filename') # Get the filename; filename looks like ``"template.html"``. # We need to eval to remove the quotes and get the string type. filename = eval(filename, self.context) # Get the path of the file on the system. filepath = os.path.join(self.path, filename) # try to read the text. try: fileobj = open(filepath, 'rb') text = fileobj.read() fileobj.close() except IOError: self._raise_error('Unable to open included view file: ' + filepath) return text def include(self, content, filename): """ Include ``filename`` here. """ text = self._get_file_text(filename) t = TemplateParser(text, name = filename, context = self.context, path = self.path, writer = self.writer, delimiters = self.delimiters) content.append(t.content) def extend(self, filename): """ Extend ``filename``. Anything not declared in a block defined by the parent will be placed in the parent templates ``{{include}}`` block. 
"""
        text = self._get_file_text(filename)

        # Create out nodes list to send to the parent
        super_nodes = []
        # We want to include any non-handled nodes.
        super_nodes.extend(self.child_super_nodes)
        # And our nodes as well.
        super_nodes.extend(self.super_nodes)

        # parse the parent template, handing it our unresolved {{super}}
        # nodes so it can fill them in
        t = TemplateParser(text,
                           name    = filename,
                           context = self.context,
                           path    = self.path,
                           writer  = self.writer,
                           delimiters = self.delimiters,
                           _super_nodes = super_nodes)

        # Make a temporary buffer that is unique for parent
        # template.
        buf = BlockNode(name='__include__' + filename,
                        delimiters=self.delimiters)
        pre = []

        # Iterate through each of our nodes
        for node in self.content.nodes:
            # If a node is a block
            if isinstance(node, BlockNode):
                # That happens to be in the parent template
                if node.name in t.content.blocks:
                    # Do not include it
                    continue

            if isinstance(node, Node):
                # Or if the node was before the extension
                # we should not include it
                if node.pre_extend:
                    pre.append(node)
                    continue

                # Otherwise, it should go int the
                # Parent templates {{include}} section.
                buf.append(node)
            else:
                buf.append(node)

        # Clear our current nodes. We will be replacing this with
        # the parent nodes.
        self.content.nodes = []

        # Set our include, unique by filename
        t.content.blocks['__include__' + filename] = buf

        # Make sure our pre_extended nodes go first
        t.content.insert(pre)

        # Then we extend our blocks
        t.content.extend(self.content)

        # Work off the parent node.
        self.content = t.content

    def parse(self, text):

        # Basically, r_tag.split will split the text into
        # an array containing, 'non-tag', 'tag', 'non-tag', 'tag'
        # so if we alternate this variable, we know
        # what to look for. This is alternate to
        # line.startswith("{{")
        in_tag = False
        extend = None
        pre_extend = True

        # Use a list to store everything in
        # This is because later the code will "look ahead"
        # for missing strings or brackets.
ij = self.r_tag.split(text) # j = current index # i = current item for j in range(len(ij)): i = ij[j] if i: if len(self.stack) == 0: self._raise_error('The "end" tag is unmatched, please check if you have a starting "block" tag') # Our current element in the stack. top = self.stack[-1] if in_tag: line = i # If we are missing any strings!!!! # This usually happens with the following example # template code # # {{a = '}}'}} # or # {{a = '}}blahblah{{'}} # # This will fix these # This is commented out because the current template # system has this same limitation. Since this has a # performance hit on larger templates, I do not recommend # using this code on production systems. This is still here # for "i told you it *can* be fixed" purposes. # # # if line.count("'") % 2 != 0 or line.count('"') % 2 != 0: # # # Look ahead # la = 1 # nextline = ij[j+la] # # # As long as we have not found our ending # # brackets keep going # while '}}' not in nextline: # la += 1 # nextline += ij[j+la] # # clear this line, so we # # don't attempt to parse it # # this is why there is an "if i" # # around line 530 # ij[j+la] = '' # # # retrieve our index. # index = nextline.index('}}') # # # Everything before the new brackets # before = nextline[:index+2] # # # Everything after # after = nextline[index+2:] # # # Make the next line everything after # # so it parses correctly, this *should* be # # all html # ij[j+1] = after # # # Add everything before to the current line # line += before # Get rid of '{{' and '}}' line = line[2:-2].strip() # This is bad juju, but let's do it anyway if not line: continue # We do not want to replace the newlines in code, # only in block comments. def remove_newline(re_val): # Take the entire match and replace newlines with # escaped newlines. return re_val.group(0).replace('\n', '\\n') # Perform block comment escaping. 
# This performs escaping ON anything # in between """ and """ line = re.sub(TemplateParser.r_multiline, remove_newline, line) if line.startswith('='): # IE: {{=response.title}} name, value = '=', line[1:].strip() else: v = line.split(' ', 1) if len(v) == 1: # Example # {{ include }} # {{ end }} name = v[0] value = '' else: # Example # {{ block pie }} # {{ include "layout.html" }} # {{ for i in range(10): }} name = v[0] value = v[1] # This will replace newlines in block comments # with the newline character. This is so that they # retain their formatting, but squish down to one # line in the rendered template. # First check if we have any custom lexers if name in self.lexers: # Pass the information to the lexer # and allow it to inject in the environment # You can define custom names such as # '{{<<variable}}' which could potentially # write unescaped version of the variable. self.lexers[name](parser = self, value = value, top = top, stack = self.stack,) elif name == '=': # So we have a variable to insert into # the template buf = "\n%s(%s)" % (self.writer, value) top.append(Node(buf, pre_extend = pre_extend)) elif name == 'block' and not value.startswith('='): # Make a new node with name. node = BlockNode(name = value.strip(), pre_extend = pre_extend, delimiters = self.delimiters) # Append this node to our active node top.append(node) # Make sure to add the node to the stack. # so anything after this gets added # to this node. This allows us to # "nest" nodes. self.stack.append(node) elif name == 'end' and not value.startswith('='): # We are done with this node. # Save an instance of it self.blocks[top.name] = top # Pop it. self.stack.pop() elif name == 'super' and not value.startswith('='): # Get our correct target name # If they just called {{super}} without a name # attempt to assume the top blocks name. 
if value:
                            target_node = value
                        else:
                            target_node = top.name

                        # Create a SuperNode instance
                        node = SuperNode(name = target_node,
                                         pre_extend = pre_extend)

                        # Add this to our list to be taken care of
                        self.super_nodes.append(node)

                        # And put in in the tree
                        top.append(node)

                    elif name == 'include' and not value.startswith('='):
                        # If we know the target file to include
                        if value:
                            self.include(top, value)

                        # Otherwise, make a temporary include node
                        # That the child node will know to hook into.
                        else:
                            include_node = BlockNode(name = '__include__' + self.name,
                                                     pre_extend = pre_extend,
                                                     delimiters = self.delimiters)
                            top.append(include_node)

                    elif name == 'extend' and not value.startswith('='):
                        # We need to extend the following
                        # template.
                        extend = value
                        pre_extend = False

                    else:
                        # If we don't know where it belongs
                        # we just add it anyways without formatting.
                        if line and in_tag:

                            # Split on the newlines >.<
                            tokens = line.split('\n')

                            # We need to look for any instances of
                            # for i in range(10):
                            #   = i
                            # pass
                            # So we can properly put a response.write() in place.
                            continuation = False
                            len_parsed = 0
                            for k in range(len(tokens)):

                                tokens[k] = tokens[k].strip()
                                len_parsed += len(tokens[k])

                                if tokens[k].startswith('='):
                                    if tokens[k].endswith('\\'):
                                        continuation = True
                                        tokens[k] = "\n%s(%s" % (self.writer, tokens[k][1:].strip())
                                    else:
                                        tokens[k] = "\n%s(%s)" % (self.writer, tokens[k][1:].strip())
                                elif continuation:
                                    tokens[k] += ')'
                                    continuation = False

                            buf = "\n%s" % '\n'.join(tokens)
                            top.append(Node(buf, pre_extend = pre_extend))

                else:
                    # It is HTML so just include it.
                    buf = "\n%s(%r, escape=False)" % (self.writer, i)
                    top.append(Node(buf, pre_extend = pre_extend))

            # Remember: tag, not tag, tag, not tag
            in_tag = not in_tag

        # Make a list of items to remove from child
        to_rm = []

        # Go through each of the children nodes
        for node in self.child_super_nodes:
            # If we declared a block that this node wants to include
            if node.name in self.blocks:
                # Go ahead and include it!
node.value = self.blocks[node.name] # Since we processed this child, we don't need to # pass it along to the parent to_rm.append(node) # Remove some of the processed nodes for node in to_rm: # Since this is a pointer, it works beautifully. # Sometimes I miss C-Style pointers... I want my asterisk... self.child_super_nodes.remove(node) # If we need to extend a template. if extend: self.extend(extend) # We need this for integration with gluon def parse_template(filename, path = 'views/', context = dict(), lexers = {}, delimiters = ('{{','}}') ): """ filename can be a view filename in the views folder or an input stream path is the path of a views folder context is a dictionary of symbols used to render the template """ # First, if we have a str try to open the file if isinstance(filename, str): try: fp = open(os.path.join(path, filename), 'rb') text = fp.read() fp.close() except IOError: raise RestrictedError(filename, '', 'Unable to find the file') else: text = filename.read() # Use the file contents to get a parsed template and return it. return str(TemplateParser(text, context=context, path=path, lexers=lexers, delimiters=delimiters)) def get_parsed(text): """ Returns the indented python code of text. Useful for unit testing. """ return str(TemplateParser(text)) # And this is a generic render function. # Here for integration with gluon. 
def render(content='hello world',
           stream=None,
           filename=None,
           path=None,
           context=None,
           lexers=None,
           delimiters=('{{', '}}')):
    """
    Render template `content` (or `stream`/`filename`) in a web2py-like
    environment and return the generated output as a string.

    :param content: template source text (used when no stream/filename given).
    :param stream: file-like object to read the template from.
    :param filename: path of a template file to read.
    :param path: views folder passed through to TemplateParser.
    :param context: dict of symbols available to the template.  NOTE: this
        function writes into it ('NOESCAPE', 'response'); the old default of
        a shared `{}` literal therefore leaked state between calls, which is
        why the default is now None (a fresh dict per call).
    :param lexers: extra template lexers; same mutable-default fix applies.
    :param delimiters: pair of template tag delimiters.
    :raises SyntaxError: when no content, stream, or filename is supplied.

    >>> render()
    'hello world'
    >>> render(content='abc')
    'abc'
    >>> render(content='abc\\'')
    "abc'"
    >>> render(content='a"\\'bc')
    'a"\\'bc'
    >>> render(content='a\\nbc')
    'a\\nbc'
    >>> render(content='a"bcd"e')
    'a"bcd"e'
    >>> render(content="'''a\\nc'''")
    "'''a\\nc'''"
    >>> render(content="'''a\\'c'''")
    "'''a\'c'''"
    >>> render(content='{{for i in range(a):}}{{=i}}<br />{{pass}}', context=dict(a=5))
    '0<br />1<br />2<br />3<br />4<br />'
    >>> render(content='{%for i in range(a):%}{%=i%}<br />{%pass%}', context=dict(a=5),delimiters=('{%','%}'))
    '0<br />1<br />2<br />3<br />4<br />'
    >>> render(content="{{='''hello\\nworld'''}}")
    'hello\\nworld'
    >>> render(content='{{for i in range(3):\\n=i\\npass}}')
    '012'
    """
    # Fresh mutable defaults per call (fixes the shared-default-dict bug);
    # an explicitly passed dict is still used in place, so callers that
    # inspect context['response'] afterwards keep working.
    if context is None:
        context = {}
    if lexers is None:
        lexers = {}

    # Here to avoid circular imports.
    try:
        from globals import Response
    except:
        # Working standalone. Build a mock Response object.
        class Response():
            def __init__(self):
                self.body = cStringIO.StringIO()
            def write(self, data, escape=True):
                if not escape:
                    self.body.write(str(data))
                elif hasattr(data, 'xml') and callable(data.xml):
                    self.body.write(data.xml())
                else:
                    # make it a string
                    if not isinstance(data, (str, unicode)):
                        data = str(data)
                    elif isinstance(data, unicode):
                        data = data.encode('utf8', 'xmlcharrefreplace')
                    data = cgi.escape(data, True).replace("'", "&#x27;")
                    self.body.write(data)

    # A little helper to avoid escaping.
    class NOESCAPE():
        def __init__(self, text):
            self.text = text
        def xml(self):
            return self.text

    # Add it to the context so we can use it.
    context['NOESCAPE'] = NOESCAPE

    # If we don't have anything to render, why bother?
    if not content and not stream and not filename:
        # Call form (not `raise E, msg`) so the statement parses on
        # Python 3 as well; identical behavior on Python 2.
        raise SyntaxError("Must specify a stream or filename or content")

    # Here for legacy purposes, probably can be reduced to something more simple.
    close_stream = False
    if not stream:
        if filename:
            stream = open(filename, 'rb')
            close_stream = True
        elif content:
            stream = cStringIO.StringIO(content)

    # Get a response class.
    context['response'] = Response()

    # Execute the template.
    code = str(TemplateParser(stream.read(), context=context, path=path,
                              lexers=lexers, delimiters=delimiters))
    try:
        # Tuple form is special-cased by the Python 2 exec statement and is
        # the native call form on Python 3 -- same semantics as
        # `exec code in context`.
        exec(code, context)
    except Exception:
        # for i,line in enumerate(code.split('\n')): print i,line
        raise

    if close_stream:
        stream.close()

    # Return the rendered content.
    return context['response'].body.getvalue()


if __name__ == '__main__':
    import doctest
    doctest.testmod()
Python
import codecs, encodings

"""Caller will hand this library a buffer and ask it to either convert
it or auto-detect the type.

Based on http://code.activestate.com/recipes/52257/

Licensed under the PSF License
"""

# Maps the first four bytes of a document to an encoding name.
# None represents a potentially variable byte ("##" in the XML spec).
# NOTE: the detected name might not have an installed decoder (e.g. EBCDIC).
autodetect_dict = {
    (0x00, 0x00, 0xFE, 0xFF): "ucs4_be",
    (0xFF, 0xFE, 0x00, 0x00): "ucs4_le",
    (0xFE, 0xFF, None, None): "utf_16_be",
    (0xFF, 0xFE, None, None): "utf_16_le",
    (0x00, 0x3C, 0x00, 0x3F): "utf_16_be",
    (0x3C, 0x00, 0x3F, 0x00): "utf_16_le",
    (0x3C, 0x3F, 0x78, 0x6D): "utf_8",
    (0x4C, 0x6F, 0xA7, 0x94): "EBCDIC",
}


def autoDetectXMLEncoding(buffer):
    """ buffer -> encoding_name

    The buffer should be at least 4 bytes long.
    Returns None if encoding cannot be detected.
    Note that encoding_name might not have an installed
    decoder (e.g. EBCDIC)

    Accepts Python 2 byte strings as before, and now also Python 3
    ``bytes`` (whose elements iterate as ints rather than 1-char strings).
    """
    # A more efficient implementation would not decode the whole buffer at
    # once, but otherwise we'd have to decode a character at a time looking
    # for the quote character... that's a pain.
    encoding = "utf_8"  # according to the XML spec, this is the default

    # This code successively tries to refine the default; whenever it fails
    # to refine, it falls back to the last place encoding was set.
    if len(buffer) >= 4:
        # Normalize the 4-byte signature: elements of py3 bytes are already
        # ints; elements of py2 str (and any text string) need ord().
        sig = tuple(b if isinstance(b, int) else ord(b) for b in buffer[0:4])
        enc_info = autodetect_dict.get(sig, None)
        if not enc_info:
            # Try autodetection again, removing potentially variable bytes.
            sig = (sig[0], sig[1], None, None)
            enc_info = autodetect_dict.get(sig)
    else:
        enc_info = None

    if enc_info:
        encoding = enc_info  # we've got a guess... these are the new defaults

    # Try to find a more precise encoding using the xml declaration.
    secret_decoder_ring = codecs.lookup(encoding)[1]
    (decoded, length) = secret_decoder_ring(buffer)
    first_line = decoded.split("\n")[0]
    if first_line and first_line.startswith(u"<?xml"):
        encoding_pos = first_line.find(u"encoding")
        if encoding_pos != -1:
            # Look for double quote.
            quote_pos = first_line.find('"', encoding_pos)
            if quote_pos == -1:
                # Look for single quote.
                quote_pos = first_line.find("'", encoding_pos)
            if quote_pos > -1:
                quote_char, rest = (first_line[quote_pos],
                                    first_line[quote_pos + 1:])
                encoding = rest[:rest.find(quote_char)]
    return encoding


def decoder(buffer):
    """Decode *buffer* with its auto-detected encoding, re-encoded as UTF-8."""
    encoding = autoDetectXMLEncoding(buffer)
    return buffer.decode(encoding).encode('utf8')
Python
# encoding utf-8

__author__ = "Thadeus Burgess <thadeusb@thadeusb.com>"

# Reserved SQL keyword tables, one set per supported database adapter.
#
# We classify as "non-reserved" those key words that are explicitly known
# to the parser but are allowed as column or table names. Some key words
# that are otherwise non-reserved cannot be used as function or data type
# names and are in the nonreserved list. (Most of these words represent
# built-in functions or data types with special syntax. The function
# or type is still available but it cannot be redefined by the user.)

# Labeled "reserved" are those tokens that are not allowed as column or
# table names. Some reserved key words are allowable as names for
# functions or data types.

# Note at the bottom of the list is a dict containing references to the
# tuples, and also if you add a list don't forget to remove its default
# set of COMMON.

# Keywords that are adapter specific. Such as a list of "postgresql"
# or "mysql" keywords

# These are keywords that are common to all SQL dialects, and should
# never be used as a table or column. Even if you use one of these
# the cursor will throw an OperationalError for the SQL syntax.
COMMON = set((
    'SELECT', 'INSERT', 'DELETE', 'UPDATE', 'DROP', 'CREATE', 'ALTER',
    'WHERE', 'FROM', 'INNER', 'JOIN', 'AND', 'OR', 'LIKE', 'ON', 'IN',
    'SET', 'BY', 'GROUP', 'ORDER', 'LEFT', 'OUTER', 'IF', 'END', 'THEN',
    'LOOP', 'AS', 'ELSE', 'FOR', 'CASE', 'WHEN', 'MIN', 'MAX', 'DISTINCT',
))

POSTGRESQL = set((
    'FALSE', 'TRUE', 'ALL', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARRAY',
    'AS', 'ASC', 'ASYMMETRIC', 'AUTHORIZATION', 'BETWEEN', 'BIGINT',
    'BINARY', 'BIT', 'BOOLEAN', 'BOTH', 'CASE', 'CAST', 'CHAR', 'CHARACTER',
    'CHECK', 'COALESCE', 'COLLATE', 'COLUMN', 'CONSTRAINT', 'CREATE',
    'CROSS', 'CURRENT_CATALOG', 'CURRENT_DATE', 'CURRENT_ROLE',
    'CURRENT_SCHEMA', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER',
    'DEC', 'DECIMAL', 'DEFAULT', 'DEFERRABLE', 'DESC', 'DISTINCT', 'DO',
    'ELSE', 'END', 'EXCEPT', 'EXISTS', 'EXTRACT', 'FETCH', 'FLOAT', 'FOR',
    'FOREIGN', 'FREEZE', 'FROM', 'FULL', 'GRANT', 'GREATEST', 'GROUP',
    'HAVING', 'ILIKE', 'IN', 'INITIALLY', 'INNER', 'INOUT', 'INT',
    'INTEGER', 'INTERSECT', 'INTERVAL', 'INTO', 'IS', 'ISNULL', 'JOIN',
    'LEADING', 'LEAST', 'LEFT', 'LIKE', 'LIMIT', 'LOCALTIME',
    'LOCALTIMESTAMP', 'NATIONAL', 'NATURAL', 'NCHAR', 'NEW', 'NONE', 'NOT',
    'NOTNULL', 'NULL', 'NULLIF', 'NUMERIC', 'OFF', 'OFFSET', 'OLD', 'ON',
    'ONLY', 'OR', 'ORDER', 'OUT', 'OUTER', 'OVERLAPS', 'OVERLAY',
    'PLACING', 'POSITION', 'PRECISION', 'PRIMARY', 'REAL', 'REFERENCES',
    'RETURNING', 'RIGHT', 'ROW', 'SELECT', 'SESSION_USER', 'SETOF',
    'SIMILAR', 'SMALLINT', 'SOME', 'SUBSTRING', 'SYMMETRIC', 'TABLE',
    'THEN', 'TIME', 'TIMESTAMP', 'TO', 'TRAILING', 'TREAT', 'TRIM',
    'UNION', 'UNIQUE', 'USER', 'USING', 'VALUES', 'VARCHAR', 'VARIADIC',
    'VERBOSE', 'WHEN', 'WHERE', 'WITH', 'XMLATTRIBUTES', 'XMLCONCAT',
    'XMLELEMENT', 'XMLFOREST', 'XMLPARSE', 'XMLPI', 'XMLROOT',
    'XMLSERIALIZE',
))

POSTGRESQL_NONRESERVED = set((
    'A', 'ABORT', 'ABS', 'ABSENT', 'ABSOLUTE', 'ACCESS', 'ACCORDING',
    'ACTION', 'ADA', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE', 'ALIAS',
    'ALLOCATE', 'ALSO', 'ALTER', 'ALWAYS', 'ARE',
    'ARRAY_AGG', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT', 'AT', 'ATOMIC',
    'ATTRIBUTE', 'ATTRIBUTES', 'AVG', 'BACKWARD', 'BASE64', 'BEFORE',
    'BEGIN', 'BERNOULLI', 'BIT_LENGTH', 'BITVAR', 'BLOB', 'BOM', 'BREADTH',
    'BY', 'C', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY', 'CASCADE',
    'CASCADED', 'CATALOG', 'CATALOG_NAME', 'CEIL', 'CEILING', 'CHAIN',
    'CHAR_LENGTH', 'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG',
    'CHARACTER_SET_NAME', 'CHARACTER_SET_SCHEMA', 'CHARACTERISTICS',
    'CHARACTERS', 'CHECKED', 'CHECKPOINT', 'CLASS', 'CLASS_ORIGIN', 'CLOB',
    'CLOSE', 'CLUSTER', 'COBOL', 'COLLATION', 'COLLATION_CATALOG',
    'COLLATION_NAME', 'COLLATION_SCHEMA', 'COLLECT', 'COLUMN_NAME',
    'COLUMNS', 'COMMAND_FUNCTION', 'COMMAND_FUNCTION_CODE', 'COMMENT',
    'COMMIT', 'COMMITTED', 'COMPLETION', 'CONCURRENTLY', 'CONDITION',
    'CONDITION_NUMBER', 'CONFIGURATION', 'CONNECT', 'CONNECTION',
    'CONNECTION_NAME', 'CONSTRAINT_CATALOG', 'CONSTRAINT_NAME',
    'CONSTRAINT_SCHEMA', 'CONSTRAINTS', 'CONSTRUCTOR', 'CONTAINS',
    'CONTENT', 'CONTINUE', 'CONVERSION', 'CONVERT', 'COPY', 'CORR',
    'CORRESPONDING', 'COST', 'COUNT', 'COVAR_POP', 'COVAR_SAMP',
    'CREATEDB', 'CREATEROLE', 'CREATEUSER', 'CSV', 'CUBE', 'CUME_DIST',
    'CURRENT', 'CURRENT_DEFAULT_TRANSFORM_GROUP', 'CURRENT_PATH',
    'CURRENT_TRANSFORM_GROUP_FOR_TYPE', 'CURSOR', 'CURSOR_NAME', 'CYCLE',
    'DATA', 'DATABASE', 'DATE', 'DATETIME_INTERVAL_CODE',
    'DATETIME_INTERVAL_PRECISION', 'DAY', 'DEALLOCATE', 'DECLARE',
    'DEFAULTS', 'DEFERRED', 'DEFINED', 'DEFINER', 'DEGREE', 'DELETE',
    'DELIMITER', 'DELIMITERS', 'DENSE_RANK', 'DEPTH', 'DEREF', 'DERIVED',
    'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR', 'DETERMINISTIC',
    'DIAGNOSTICS', 'DICTIONARY', 'DISABLE', 'DISCARD', 'DISCONNECT',
    'DISPATCH', 'DOCUMENT', 'DOMAIN', 'DOUBLE', 'DROP', 'DYNAMIC',
    'DYNAMIC_FUNCTION', 'DYNAMIC_FUNCTION_CODE', 'EACH', 'ELEMENT',
    'EMPTY', 'ENABLE', 'ENCODING', 'ENCRYPTED', 'END-EXEC', 'ENUM',
    'EQUALS', 'ESCAPE', 'EVERY', 'EXCEPTION', 'EXCLUDE', 'EXCLUDING',
    'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING', 'EXP',
    'EXPLAIN', 'EXTERNAL', 'FAMILY', 'FILTER', 'FINAL', 'FIRST',
    'FIRST_VALUE', 'FLAG', 'FLOOR', 'FOLLOWING', 'FORCE', 'FORTRAN',
    'FORWARD', 'FOUND', 'FREE', 'FUNCTION', 'FUSION', 'G', 'GENERAL',
    'GENERATED', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANTED', 'GROUPING',
    'HANDLER', 'HEADER', 'HEX', 'HIERARCHY', 'HOLD', 'HOST', 'HOUR',
    # 'ID',
    'IDENTITY', 'IF', 'IGNORE', 'IMMEDIATE', 'IMMUTABLE',
    'IMPLEMENTATION', 'IMPLICIT', 'INCLUDING', 'INCREMENT', 'INDENT',
    'INDEX', 'INDEXES', 'INDICATOR', 'INFIX', 'INHERIT', 'INHERITS',
    'INITIALIZE', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTANCE',
    'INSTANTIABLE', 'INSTEAD', 'INTERSECTION', 'INVOKER', 'ISOLATION',
    'ITERATE', 'K', 'KEY', 'KEY_MEMBER', 'KEY_TYPE', 'LAG', 'LANCOMPILER',
    'LANGUAGE', 'LARGE', 'LAST', 'LAST_VALUE', 'LATERAL', 'LC_COLLATE',
    'LC_CTYPE', 'LEAD', 'LENGTH', 'LESS', 'LEVEL', 'LIKE_REGEX', 'LISTEN',
    'LN', 'LOAD', 'LOCAL', 'LOCATION', 'LOCATOR', 'LOCK', 'LOGIN',
    'LOWER', 'M', 'MAP', 'MAPPING', 'MATCH', 'MATCHED', 'MAX',
    'MAX_CARDINALITY', 'MAXVALUE', 'MEMBER', 'MERGE', 'MESSAGE_LENGTH',
    'MESSAGE_OCTET_LENGTH', 'MESSAGE_TEXT', 'METHOD', 'MIN', 'MINUTE',
    'MINVALUE', 'MOD', 'MODE', 'MODIFIES', 'MODIFY', 'MODULE', 'MONTH',
    'MORE', 'MOVE', 'MULTISET', 'MUMPS',
    # 'NAME',
    'NAMES', 'NAMESPACE', 'NCLOB', 'NESTING', 'NEXT', 'NFC', 'NFD',
    'NFKC', 'NFKD', 'NIL', 'NO', 'NOCREATEDB', 'NOCREATEROLE',
    'NOCREATEUSER', 'NOINHERIT', 'NOLOGIN', 'NORMALIZE', 'NORMALIZED',
    'NOSUPERUSER', 'NOTHING', 'NOTIFY', 'NOWAIT', 'NTH_VALUE', 'NTILE',
    'NULLABLE', 'NULLS', 'NUMBER', 'OBJECT', 'OCCURRENCES_REGEX',
    'OCTET_LENGTH', 'OCTETS', 'OF', 'OIDS', 'OPEN', 'OPERATION',
    'OPERATOR', 'OPTION', 'OPTIONS', 'ORDERING', 'ORDINALITY', 'OTHERS',
    'OUTPUT', 'OVER', 'OVERRIDING', 'OWNED', 'OWNER', 'P', 'PAD',
    'PARAMETER', 'PARAMETER_MODE', 'PARAMETER_NAME',
    'PARAMETER_ORDINAL_POSITION', 'PARAMETER_SPECIFIC_CATALOG',
    'PARAMETER_SPECIFIC_NAME', 'PARAMETER_SPECIFIC_SCHEMA', 'PARAMETERS',
    'PARSER', 'PARTIAL', 'PARTITION', 'PASCAL', 'PASSING',
    # 'PASSWORD',
    'PATH',
    'PERCENT_RANK', 'PERCENTILE_CONT', 'PERCENTILE_DISC', 'PLANS', 'PLI',
    'POSITION_REGEX', 'POSTFIX', 'POWER', 'PRECEDING', 'PREFIX',
    'PREORDER', 'PREPARE', 'PREPARED', 'PRESERVE', 'PRIOR', 'PRIVILEGES',
    'PROCEDURAL', 'PROCEDURE', 'PUBLIC', 'QUOTE', 'RANGE', 'RANK', 'READ',
    'READS', 'REASSIGN', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCING',
    'REGR_AVGX', 'REGR_AVGY', 'REGR_COUNT', 'REGR_INTERCEPT', 'REGR_R2',
    'REGR_SLOPE', 'REGR_SXX', 'REGR_SXY', 'REGR_SYY', 'REINDEX',
    'RELATIVE', 'RELEASE', 'RENAME', 'REPEATABLE', 'REPLACE', 'REPLICA',
    'RESET', 'RESPECT', 'RESTART', 'RESTRICT', 'RESULT', 'RETURN',
    'RETURNED_CARDINALITY', 'RETURNED_LENGTH', 'RETURNED_OCTET_LENGTH',
    'RETURNED_SQLSTATE', 'RETURNS', 'REVOKE',
    # 'ROLE',
    'ROLLBACK', 'ROLLUP', 'ROUTINE', 'ROUTINE_CATALOG', 'ROUTINE_NAME',
    'ROUTINE_SCHEMA', 'ROW_COUNT', 'ROW_NUMBER', 'ROWS', 'RULE',
    'SAVEPOINT', 'SCALE', 'SCHEMA', 'SCHEMA_NAME', 'SCOPE',
    'SCOPE_CATALOG', 'SCOPE_NAME', 'SCOPE_SCHEMA', 'SCROLL', 'SEARCH',
    'SECOND', 'SECTION', 'SECURITY', 'SELF', 'SENSITIVE', 'SEQUENCE',
    'SERIALIZABLE', 'SERVER', 'SERVER_NAME', 'SESSION', 'SET', 'SETS',
    'SHARE', 'SHOW', 'SIMPLE', 'SIZE', 'SOURCE', 'SPACE', 'SPECIFIC',
    'SPECIFIC_NAME', 'SPECIFICTYPE', 'SQL', 'SQLCODE', 'SQLERROR',
    'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNING', 'SQRT', 'STABLE',
    'STANDALONE', 'START', 'STATE', 'STATEMENT', 'STATIC', 'STATISTICS',
    'STDDEV_POP', 'STDDEV_SAMP', 'STDIN', 'STDOUT', 'STORAGE', 'STRICT',
    'STRIP', 'STRUCTURE', 'STYLE', 'SUBCLASS_ORIGIN', 'SUBLIST',
    'SUBMULTISET', 'SUBSTRING_REGEX', 'SUM', 'SUPERUSER', 'SYSID',
    'SYSTEM', 'SYSTEM_USER', 'T',
    # 'TABLE_NAME',
    'TABLESAMPLE', 'TABLESPACE', 'TEMP', 'TEMPLATE', 'TEMPORARY',
    'TERMINATE', 'TEXT', 'THAN', 'TIES', 'TIMEZONE_HOUR',
    'TIMEZONE_MINUTE', 'TOP_LEVEL_COUNT', 'TRANSACTION',
    'TRANSACTION_ACTIVE', 'TRANSACTIONS_COMMITTED',
    'TRANSACTIONS_ROLLED_BACK', 'TRANSFORM', 'TRANSFORMS', 'TRANSLATE',
    'TRANSLATE_REGEX', 'TRANSLATION', 'TRIGGER', 'TRIGGER_CATALOG',
    'TRIGGER_NAME', 'TRIGGER_SCHEMA', 'TRIM_ARRAY',
    'TRUNCATE', 'TRUSTED', 'TYPE', 'UESCAPE', 'UNBOUNDED', 'UNCOMMITTED',
    'UNDER', 'UNENCRYPTED', 'UNKNOWN', 'UNLISTEN', 'UNNAMED', 'UNNEST',
    'UNTIL', 'UNTYPED', 'UPDATE', 'UPPER', 'URI', 'USAGE',
    'USER_DEFINED_TYPE_CATALOG', 'USER_DEFINED_TYPE_CODE',
    'USER_DEFINED_TYPE_NAME', 'USER_DEFINED_TYPE_SCHEMA', 'VACUUM',
    'VALID', 'VALIDATOR', 'VALUE', 'VAR_POP', 'VAR_SAMP', 'VARBINARY',
    'VARIABLE', 'VARYING', 'VERSION', 'VIEW', 'VOLATILE', 'WHENEVER',
    'WHITESPACE', 'WIDTH_BUCKET', 'WINDOW', 'WITHIN', 'WITHOUT', 'WORK',
    'WRAPPER', 'WRITE', 'XML', 'XMLAGG', 'XMLBINARY', 'XMLCAST',
    'XMLCOMMENT', 'XMLDECLARATION', 'XMLDOCUMENT', 'XMLEXISTS',
    'XMLITERATE', 'XMLNAMESPACES', 'XMLQUERY', 'XMLSCHEMA', 'XMLTABLE',
    'XMLTEXT', 'XMLVALIDATE', 'YEAR', 'YES', 'ZONE',
))

# Thanks villas
FIREBIRD = set((
    'ABS', 'ACTIVE', 'ADMIN', 'AFTER', 'ASCENDING', 'AUTO', 'AUTODDL',
    'BASED', 'BASENAME', 'BASE_NAME', 'BEFORE', 'BIT_LENGTH', 'BLOB',
    'BLOBEDIT', 'BOOLEAN', 'BOTH', 'BUFFER', 'CACHE', 'CHAR_LENGTH',
    'CHARACTER_LENGTH', 'CHECK_POINT_LEN', 'CHECK_POINT_LENGTH', 'CLOSE',
    'COMMITTED', 'COMPILETIME', 'COMPUTED', 'CONDITIONAL', 'CONNECT',
    'CONTAINING', 'CROSS', 'CSTRING', 'CURRENT_CONNECTION',
    'CURRENT_ROLE', 'CURRENT_TRANSACTION', 'CURRENT_USER', 'DATABASE',
    'DB_KEY', 'DEBUG', 'DESCENDING', 'DISCONNECT', 'DISPLAY', 'DO',
    'ECHO', 'EDIT', 'ENTRY_POINT', 'EVENT', 'EXIT', 'EXTERN', 'FALSE',
    'FETCH', 'FILE', 'FILTER', 'FREE_IT', 'FUNCTION', 'GDSCODE',
    'GENERATOR', 'GEN_ID', 'GLOBAL', 'GROUP_COMMIT_WAIT',
    'GROUP_COMMIT_WAIT_TIME', 'HELP', 'IF', 'INACTIVE', 'INDEX', 'INIT',
    'INPUT_TYPE', 'INSENSITIVE', 'ISQL', 'LC_MESSAGES', 'LC_TYPE',
    'LEADING', 'LENGTH', 'LEV', 'LOGFILE', 'LOG_BUFFER_SIZE',
    'LOG_BUF_SIZE', 'LONG', 'LOWER', 'MANUAL', 'MAXIMUM',
    'MAXIMUM_SEGMENT', 'MAX_SEGMENT', 'MERGE', 'MESSAGE', 'MINIMUM',
    'MODULE_NAME', 'NOAUTO', 'NUM_LOG_BUFS', 'NUM_LOG_BUFFERS',
    'OCTET_LENGTH', 'OPEN', 'OUTPUT_TYPE', 'OVERFLOW', 'PAGE',
    'PAGELENGTH', 'PAGES', 'PAGE_SIZE', 'PARAMETER',
    # 'PASSWORD',
    'PLAN',
    'POST_EVENT', 'QUIT', 'RAW_PARTITIONS', 'RDB$DB_KEY',
    'RECORD_VERSION', 'RECREATE', 'RECURSIVE', 'RELEASE', 'RESERV',
    'RESERVING', 'RETAIN', 'RETURN', 'RETURNING_VALUES', 'RETURNS',
    # 'ROLE',
    'ROW_COUNT', 'ROWS', 'RUNTIME', 'SAVEPOINT', 'SECOND', 'SENSITIVE',
    'SHADOW', 'SHARED', 'SHELL', 'SHOW', 'SINGULAR', 'SNAPSHOT', 'SORT',
    'STABILITY', 'START', 'STARTING', 'STARTS', 'STATEMENT', 'STATIC',
    'STATISTICS', 'SUB_TYPE', 'SUSPEND', 'TERMINATOR', 'TRAILING',
    'TRIGGER', 'TRIM', 'TRUE', 'TYPE', 'UNCOMMITTED', 'UNKNOWN', 'USING',
    'VARIABLE', 'VERSION', 'WAIT', 'WEEKDAY', 'WHILE', 'YEARDAY',
))

# Duplicate literals below are harmless (set semantics) and kept verbatim
# from upstream.
FIREBIRD_NONRESERVED = set((
    'BACKUP', 'BLOCK', 'COALESCE', 'COLLATION', 'COMMENT', 'DELETING',
    'DIFFERENCE', 'IIF', 'INSERTING', 'LAST', 'LEAVE', 'LOCK', 'NEXT',
    'NULLIF', 'NULLS', 'RESTART', 'RETURNING', 'SCALAR_ARRAY', 'SEQUENCE',
    'STATEMENT', 'UPDATING', 'ABS', 'ACCENT', 'ACOS', 'ALWAYS',
    'ASCII_CHAR', 'ASCII_VAL', 'ASIN', 'ATAN', 'ATAN2', 'BACKUP',
    'BIN_AND', 'BIN_OR', 'BIN_SHL', 'BIN_SHR', 'BIN_XOR', 'BLOCK', 'CEIL',
    'CEILING', 'COLLATION', 'COMMENT', 'COS', 'COSH', 'COT', 'DATEADD',
    'DATEDIFF', 'DECODE', 'DIFFERENCE', 'EXP', 'FLOOR', 'GEN_UUID',
    'GENERATED', 'HASH', 'IIF', 'LIST', 'LN', 'LOG', 'LOG10', 'LPAD',
    'MATCHED', 'MATCHING', 'MAXVALUE', 'MILLISECOND', 'MINVALUE', 'MOD',
    'NEXT', 'OVERLAY', 'PAD', 'PI', 'PLACING', 'POWER', 'PRESERVE',
    'RAND', 'REPLACE', 'RESTART', 'RETURNING', 'REVERSE', 'ROUND', 'RPAD',
    'SCALAR_ARRAY', 'SEQUENCE', 'SIGN', 'SIN', 'SINH', 'SPACE', 'SQRT',
    'TAN', 'TANH', 'TEMPORARY', 'TRUNC', 'WEEK',
))

# Thanks Jonathan Lundell
MYSQL = set((
    'ACCESSIBLE', 'ADD', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'AS', 'ASC',
    'ASENSITIVE', 'BEFORE', 'BETWEEN', 'BIGINT', 'BINARY', 'BLOB', 'BOTH',
    'BY', 'CALL', 'CASCADE', 'CASE', 'CHANGE', 'CHAR', 'CHARACTER',
    'CHECK', 'COLLATE', 'COLUMN', 'CONDITION', 'CONSTRAINT', 'CONTINUE',
    'CONVERT', 'CREATE', 'CROSS', 'CURRENT_DATE', 'CURRENT_TIME',
    'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR', 'DATABASE',
    'DATABASES', 'DAY_HOUR',
    'DAY_MICROSECOND', 'DAY_MINUTE', 'DAY_SECOND', 'DEC', 'DECIMAL',
    'DECLARE', 'DEFAULT', 'DELAYED', 'DELETE', 'DESC', 'DESCRIBE',
    'DETERMINISTIC', 'DISTINCT', 'DISTINCTROW', 'DIV', 'DOUBLE', 'DROP',
    'DUAL', 'EACH', 'ELSE', 'ELSEIF', 'ENCLOSED', 'ESCAPED', 'EXISTS',
    'EXIT', 'EXPLAIN', 'FALSE', 'FETCH', 'FLOAT', 'FLOAT4', 'FLOAT8',
    'FOR', 'FORCE', 'FOREIGN', 'FROM', 'FULLTEXT', 'GRANT', 'GROUP',
    'HAVING', 'HIGH_PRIORITY', 'HOUR_MICROSECOND', 'HOUR_MINUTE',
    'HOUR_SECOND', 'IF', 'IGNORE', 'IGNORE_SERVER_IDS',
    'IGNORE_SERVER_IDS', 'IN', 'INDEX', 'INFILE', 'INNER', 'INOUT',
    'INSENSITIVE', 'INSERT', 'INT', 'INT1', 'INT2', 'INT3', 'INT4',
    'INT8', 'INTEGER', 'INTERVAL', 'INTO', 'IS', 'ITERATE', 'JOIN',
    'KEY', 'KEYS', 'KILL', 'LEADING', 'LEAVE', 'LEFT', 'LIKE', 'LIMIT',
    'LINEAR', 'LINES', 'LOAD', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCK',
    'LONG', 'LONGBLOB', 'LONGTEXT', 'LOOP', 'LOW_PRIORITY',
    'MASTER_HEARTBEAT_PERIOD', 'MASTER_HEARTBEAT_PERIOD',
    'MASTER_SSL_VERIFY_SERVER_CERT', 'MATCH', 'MAXVALUE', 'MAXVALUE',
    'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'MIDDLEINT',
    'MINUTE_MICROSECOND', 'MINUTE_SECOND', 'MOD', 'MODIFIES', 'NATURAL',
    'NO_WRITE_TO_BINLOG', 'NOT', 'NULL', 'NUMERIC', 'ON', 'OPTIMIZE',
    'OPTION', 'OPTIONALLY', 'OR', 'ORDER', 'OUT', 'OUTER', 'OUTFILE',
    'PRECISION', 'PRIMARY', 'PROCEDURE', 'PURGE', 'RANGE', 'READ',
    'READ_WRITE', 'READS', 'REAL', 'REFERENCES', 'REGEXP', 'RELEASE',
    'RENAME', 'REPEAT', 'REPLACE', 'REQUIRE', 'RESIGNAL', 'RESIGNAL',
    'RESTRICT', 'RETURN', 'REVOKE', 'RIGHT', 'RLIKE', 'SCHEMA',
    'SCHEMAS', 'SECOND_MICROSECOND', 'SELECT', 'SENSITIVE', 'SEPARATOR',
    'SET', 'SHOW', 'SIGNAL', 'SIGNAL', 'SMALLINT', 'SPATIAL', 'SPECIFIC',
    'SQL', 'SQL_BIG_RESULT', 'SQL_CALC_FOUND_ROWS', 'SQL_SMALL_RESULT',
    'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNING', 'SSL', 'STARTING',
    'STRAIGHT_JOIN', 'TABLE', 'TERMINATED', 'THEN', 'TINYBLOB',
    'TINYINT', 'TINYTEXT', 'TO', 'TRAILING', 'TRIGGER', 'TRUE', 'UNDO',
    'UNION', 'UNIQUE', 'UNLOCK', 'UNSIGNED', 'UPDATE', 'USAGE', 'USE',
    'USING',
    'UTC_DATE', 'UTC_TIME', 'UTC_TIMESTAMP', 'VALUES', 'VARBINARY',
    'VARCHAR', 'VARCHARACTER', 'VARYING', 'WHEN', 'WHERE', 'WHILE',
    'WITH', 'WRITE', 'XOR', 'YEAR_MONTH', 'ZEROFILL',
))

MSSQL = set((
    'ADD', 'ALL', 'ALTER', 'AND', 'ANY', 'AS', 'ASC', 'AUTHORIZATION',
    'BACKUP', 'BEGIN', 'BETWEEN', 'BREAK', 'BROWSE', 'BULK', 'BY',
    'CASCADE', 'CASE', 'CHECK', 'CHECKPOINT', 'CLOSE', 'CLUSTERED',
    'COALESCE', 'COLLATE', 'COLUMN', 'COMMIT', 'COMPUTE', 'CONSTRAINT',
    'CONTAINS', 'CONTAINSTABLE', 'CONTINUE', 'CONVERT', 'CREATE',
    'CROSS', 'CURRENT', 'CURRENT_DATE', 'CURRENT_TIME',
    'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR', 'DATABASE', 'DBCC',
    'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DELETE', 'DENY', 'DESC',
    'DISK', 'DISTINCT', 'DISTRIBUTED', 'DOUBLE', 'DROP', 'DUMMY',
    'DUMP', 'ELSE', 'END', 'ERRLVL', 'ESCAPE', 'EXCEPT', 'EXEC',
    'EXECUTE', 'EXISTS', 'EXIT', 'FETCH', 'FILE', 'FILLFACTOR', 'FOR',
    'FOREIGN', 'FREETEXT', 'FREETEXTTABLE', 'FROM', 'FULL', 'FUNCTION',
    'GOTO', 'GRANT', 'GROUP', 'HAVING', 'HOLDLOCK', 'IDENTITY',
    'IDENTITY_INSERT', 'IDENTITYCOL', 'IF', 'IN', 'INDEX', 'INNER',
    'INSERT', 'INTERSECT', 'INTO', 'IS', 'JOIN', 'KEY', 'KILL', 'LEFT',
    'LIKE', 'LINENO', 'LOAD',
    'NATIONAL ',  # note: trailing space preserved verbatim from upstream
    'NOCHECK', 'NONCLUSTERED', 'NOT', 'NULL', 'NULLIF', 'OF', 'OFF',
    'OFFSETS', 'ON', 'OPEN', 'OPENDATASOURCE', 'OPENQUERY',
    'OPENROWSET', 'OPENXML', 'OPTION', 'OR', 'ORDER', 'OUTER', 'OVER',
    'PERCENT', 'PLAN', 'PRECISION', 'PRIMARY', 'PRINT', 'PROC',
    'PROCEDURE', 'PUBLIC', 'RAISERROR', 'READ', 'READTEXT',
    'RECONFIGURE', 'REFERENCES', 'REPLICATION', 'RESTORE', 'RESTRICT',
    'RETURN', 'REVOKE', 'RIGHT', 'ROLLBACK', 'ROWCOUNT', 'ROWGUIDCOL',
    'RULE', 'SAVE', 'SCHEMA', 'SELECT', 'SESSION_USER', 'SET',
    'SETUSER', 'SHUTDOWN', 'SOME', 'STATISTICS', 'SYSTEM_USER', 'TABLE',
    'TEXTSIZE', 'THEN', 'TO', 'TOP', 'TRAN', 'TRANSACTION', 'TRIGGER',
    'TRUNCATE', 'TSEQUAL', 'UNION', 'UNIQUE', 'UPDATE', 'UPDATETEXT',
    'USE', 'USER', 'VALUES', 'VARYING', 'VIEW', 'WAITFOR', 'WHEN',
    'WHERE', 'WHILE', 'WITH', 'WRITETEXT',
))

ORACLE = set((
    'ACCESS', 'ADD', 'ALL', 'ALTER', 'AND', 'ANY', 'AS', 'ASC', 'AUDIT',
    'BETWEEN', 'BY', 'CHAR', 'CHECK', 'CLUSTER', 'COLUMN', 'COMMENT',
    'COMPRESS', 'CONNECT', 'CREATE', 'CURRENT', 'DATE', 'DECIMAL',
    'DEFAULT', 'DELETE', 'DESC', 'DISTINCT', 'DROP', 'ELSE', 'EXCLUSIVE',
    'EXISTS', 'FILE', 'FLOAT', 'FOR', 'FROM', 'GRANT', 'GROUP', 'HAVING',
    'IDENTIFIED', 'IMMEDIATE', 'IN', 'INCREMENT', 'INDEX', 'INITIAL',
    'INSERT', 'INTEGER', 'INTERSECT', 'INTO', 'IS', 'LEVEL', 'LIKE',
    'LOCK', 'LONG', 'MAXEXTENTS', 'MINUS', 'MLSLABEL', 'MODE', 'MODIFY',
    'NOAUDIT', 'NOCOMPRESS', 'NOT', 'NOWAIT', 'NULL', 'NUMBER', 'OF',
    'OFFLINE', 'ON', 'ONLINE', 'OPTION', 'OR', 'ORDER', 'PCTFREE',
    'PRIOR', 'PRIVILEGES', 'PUBLIC', 'RAW', 'RENAME', 'RESOURCE',
    'REVOKE', 'ROW', 'ROWID', 'ROWNUM', 'ROWS', 'SELECT', 'SESSION',
    'SET', 'SHARE', 'SIZE', 'SMALLINT', 'START', 'SUCCESSFUL', 'SYNONYM',
    'SYSDATE', 'TABLE', 'THEN', 'TO', 'TRIGGER', 'UID', 'UNION',
    'UNIQUE', 'UPDATE', 'USER', 'VALIDATE', 'VALUES', 'VARCHAR',
    'VARCHAR2', 'VIEW', 'WHENEVER', 'WHERE', 'WITH',
))

SQLITE = set((
    'ABORT', 'ACTION', 'ADD', 'AFTER', 'ALL', 'ALTER', 'ANALYZE', 'AND',
    'AS', 'ASC', 'ATTACH', 'AUTOINCREMENT', 'BEFORE', 'BEGIN', 'BETWEEN',
    'BY', 'CASCADE', 'CASE', 'CAST', 'CHECK', 'COLLATE', 'COLUMN',
    'COMMIT', 'CONFLICT', 'CONSTRAINT', 'CREATE', 'CROSS',
    'CURRENT_DATE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'DATABASE',
    'DEFAULT', 'DEFERRABLE', 'DEFERRED', 'DELETE', 'DESC', 'DETACH',
    'DISTINCT', 'DROP', 'EACH', 'ELSE', 'END', 'ESCAPE', 'EXCEPT',
    'EXCLUSIVE', 'EXISTS', 'EXPLAIN', 'FAIL', 'FOR', 'FOREIGN', 'FROM',
    'FULL', 'GLOB', 'GROUP', 'HAVING', 'IF', 'IGNORE', 'IMMEDIATE',
    'IN', 'INDEX', 'INDEXED', 'INITIALLY', 'INNER', 'INSERT', 'INSTEAD',
    'INTERSECT', 'INTO', 'IS', 'ISNULL', 'JOIN', 'KEY', 'LEFT', 'LIKE',
    'LIMIT', 'MATCH', 'NATURAL', 'NO', 'NOT', 'NOTNULL', 'NULL', 'OF',
    'OFFSET', 'ON', 'OR', 'ORDER', 'OUTER', 'PLAN', 'PRAGMA', 'PRIMARY',
    'QUERY', 'RAISE', 'REFERENCES', 'REGEXP', 'REINDEX', 'RELEASE',
    'RENAME',
    'REPLACE', 'RESTRICT', 'RIGHT', 'ROLLBACK', 'ROW', 'SAVEPOINT',
    'SELECT', 'SET', 'TABLE', 'TEMP', 'TEMPORARY', 'THEN', 'TO',
    'TRANSACTION', 'TRIGGER', 'UNION', 'UNIQUE', 'UPDATE', 'USING',
    'VACUUM', 'VALUES', 'VIEW', 'VIRTUAL', 'WHEN', 'WHERE',
))

# remove from here when you add a list.
JDBCSQLITE = SQLITE
DB2 = INFORMIX = INGRES = JDBCPOSTGRESQL = COMMON

# Adapter name (as used by the DAL) -> keyword set.
ADAPTERS = {
    'sqlite': SQLITE,
    'mysql': MYSQL,
    'postgres': POSTGRESQL,
    'postgres_nonreserved': POSTGRESQL_NONRESERVED,
    'oracle': ORACLE,
    'mssql': MSSQL,
    'mssql2': MSSQL,
    'db2': DB2,
    'informix': INFORMIX,
    'firebird': FIREBIRD,
    'firebird_embedded': FIREBIRD,
    'firebird_nonreserved': FIREBIRD_NONRESERVED,
    'ingres': INGRES,
    'ingresu': INGRES,
    'jdbc:sqlite': JDBCSQLITE,
    'jdbc:postgres': JDBCPOSTGRESQL,
    'common': COMMON,
}

# Union of every adapter's keywords.  set().union(*values) produces the
# same result as the previous reduce(lambda a, b: a.union(b), ...) on
# Python 2, and also runs on Python 3 where reduce is no longer a builtin.
ADAPTERS['all'] = set().union(*ADAPTERS.values())
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Interactive shell / script-runner for web2py applications.  NOTE: this
module uses Python 2 statement syntax (exec ... in, print >>, raw_input,
`except E, e`); it is not Python 3 compatible as written.
"""

import os
import sys
import code
import logging
import types
import re
import optparse
import glob

import fileutils
import settings
from utils import web2py_uuid
from compileapp import build_environment, read_pyc, run_models_in
from restricted import RestrictedError
from globals import Request, Response, Session
from storage import Storage
from admin import w2p_unpack

logger = logging.getLogger("web2py")


def exec_environment(
    pyfile='',
    request=None,
    response=None,
    session=None,
    ):
    """
    .. function:: gluon.shell.exec_environment([pyfile=''[, request=Request()
        [, response=Response[, session=Session()]]]])

    Environment builder and module loader.

    Builds a web2py environment and optionally executes a Python
    file into the environment.
    A Storage dictionary containing the resulting environment is returned.
    The working directory must be web2py root -- this is the web2py default.

    :param pyfile: optional path of a Python file to execute in the
        freshly built environment; a pre-compiled `<pyfile>c` is preferred
        when present.
    :param request/response/session: optional pre-built web2py objects;
        fresh ones are created when omitted.
    """

    # NOTE(review): `== None` kept as-is; `is None` would be the idiomatic
    # comparison here.
    if request==None: request = Request()
    if response==None: response = Response()
    if session==None: session = Session()

    if request.folder is None:
        # Derive the application folder from a path like
        # ".../applications/<appname>/..." when possible.
        mo = re.match(r'(|.*/)applications/(?P<appname>[^/]+)', pyfile)
        if mo:
            appname = mo.group('appname')
            request.folder = os.path.join('applications', appname)
        else:
            request.folder = ''
    env = build_environment(request, response, session, store_current=False)
    if pyfile:
        # Prefer the byte-compiled file when it exists.
        pycfile = pyfile + 'c'
        if os.path.isfile(pycfile):
            exec read_pyc(pycfile) in env
        else:
            execfile(pyfile, env)
    return Storage(env)


def env(
    a,
    import_models=False,
    c=None,
    f=None,
    dir='',
    extra_request={},
    ):
    """
    Return web2py execution environment for application (a), controller (c),
    function (f).
    If import_models is True the exec all application models into the
    environment.

    extra_request allows you to pass along any extra variables to the request
    object before your models get executed. This was mainly done to support
    web2py_utils.test_runner, however you can use it with any wrapper scripts
    that need access to the web2py environment.

    NOTE(review): `extra_request={}` is a mutable default; it is only
    iterated (never mutated) here, so it is harmless in practice.
    """

    request = Request()
    response = Response()
    session = Session()
    request.application = a

    # Populate the dummy environment with sensible defaults.

    if not dir:
        request.folder = os.path.join('applications', a)
    else:
        request.folder = dir
    request.controller = c or 'default'
    request.function = f or 'index'
    response.view = '%s/%s.html' % (request.controller,
                                    request.function)
    request.env.path_info = '/%s/%s/%s' % (a, c, f)
    request.env.http_host = '127.0.0.1:8000'
    request.env.remote_addr = '127.0.0.1'
    request.env.web2py_runtime_gae = settings.global_settings.web2py_runtime_gae

    for k,v in extra_request.items():
        request[k] = v

    # Monkey patch so credentials checks pass.

    def check_credentials(request, other_application='admin'):
        return True

    fileutils.check_credentials = check_credentials

    environment = build_environment(request, response, session)

    if import_models:
        try:
            run_models_in(environment)
        except RestrictedError, e:
            # Model code failed: report the wrapped traceback and abort.
            sys.stderr.write(e.traceback+'\n')
            sys.exit(1)

    environment['__name__'] = '__main__'
    return environment


def exec_pythonrc():
    """Execute the user's PYTHONSTARTUP file, if any, mirroring what the
    interactive interpreter does on startup."""
    pythonrc = os.environ.get('PYTHONSTARTUP')
    if pythonrc and os.path.isfile(pythonrc):
        try:
            execfile(pythonrc)
        except NameError:
            pass


def run(
    appname,
    plain=False,
    import_models=False,
    startfile=None,
    bpython=False
    ):
    """
    Start interactive shell or run Python script (startfile) in web2py
    controller environment. appname is formatted like:

    a      web2py application name
    a/c    exec the controller c into the application environment

    :param plain: skip IPython/bpython and use the plain python shell.
    :param import_models: execute the application's model files first
        (forced True when a controller is given).
    :param startfile: script to execute instead of starting a shell.
    :param bpython: prefer bpython over IPython when available.
    """

    (a, c, f) = parse_path_info(appname)
    errmsg = 'invalid application name: %s' % appname
    if not a:
        die(errmsg)
    adir = os.path.join('applications', a)
    if not os.path.exists(adir):
        # Offer to scaffold a brand-new application from welcome.w2p.
        if raw_input('application %s does not exist, create (y/n)?'
                     % a).lower() in ['y', 'yes']:
            os.mkdir(adir)
            w2p_unpack('welcome.w2p', adir)
            for subfolder in ['models','views','controllers', 'databases',
                              'modules','cron','errors','sessions',
                              'languages','static','private','uploads']:
                subpath = os.path.join(adir,subfolder)
                if not os.path.exists(subpath):
                    os.mkdir(subpath)
            db = os.path.join(adir,'models/db.py')
            if os.path.exists(db):
                # Replace the placeholder secret key with a real one.
                data = fileutils.read_file(db)
                data = data.replace('<your secret key>','sha512:'+web2py_uuid())
                fileutils.write_file(db, data)

    if c:
        import_models = True
    _env = env(a, c=c, import_models=import_models)
    if c:
        # Load the controller source, falling back to its compiled form.
        cfile = os.path.join('applications', a, 'controllers', c + '.py')
        if not os.path.isfile(cfile):
            # NOTE(review): f may be None here, yielding a filename like
            # "controllers_default_None.pyc" -- confirm against callers.
            cfile = os.path.join('applications', a, 'compiled',
                                 "controllers_%s_%s.pyc" % (c,f))
            if not os.path.isfile(cfile):
                die(errmsg)
            else:
                exec read_pyc(cfile) in _env
        else:
            execfile(cfile, _env)

    if f:
        # Call the requested controller function and print its result.
        exec ('print %s()' % f, _env)
    elif startfile:
        exec_pythonrc()
        try:
            execfile(startfile, _env)
        except RestrictedError, e:
            print e.traceback
    else:
        # Interactive mode: try bpython or IPython first unless --plain.
        if not plain:
            if bpython:
                try:
                    import bpython
                    bpython.embed(locals_=_env)
                    return
                except:
                    logger.warning(
                        'import bpython error; trying ipython...')
            else:
                try:
                    import IPython
                    # following 2 lines fix a problem with
                    # IPython; thanks Michael Toomim
                    if '__builtins__' in _env:
                        del _env['__builtins__']
                    shell = IPython.Shell.IPShell(argv=[],user_ns=_env)
                    shell.mainloop()
                    return
                except:
                    logger.warning(
                        'import IPython error; use default python shell')
        # Plain shell, with tab completion when readline is available.
        try:
            import readline
            import rlcompleter
        except ImportError:
            pass
        else:
            readline.set_completer(rlcompleter.Completer(_env).complete)
            readline.parse_and_bind('tab:complete')
        exec_pythonrc()
        code.interact(local=_env)


def parse_path_info(path_info):
    """
    Parse path info formatted like a/c/f where c and f are optional
    and a leading / accepted.
    Return tuple (a, c, f). If invalid path_info a is set to None.
    If c or f are omitted they are set to None.
    """
    mo = re.match(r'^/?(?P<a>\w+)(/(?P<c>\w+)(/(?P<f>\w+))?)?$',
                  path_info)
    if mo:
        return (mo.group('a'), mo.group('c'), mo.group('f'))
    else:
        return (None, None, None)


def die(msg):
    """Print *msg* to stderr and exit with status 1."""
    print >> sys.stderr, msg
    sys.exit(1)


def test(testpath, import_models=True, verbose=False):
    """
    Run doctests in web2py environment. testpath is formatted like:

    a      tests all controllers in application a
    a/c    tests controller c in application a
    a/c/f  test function f in controller c, application a

    Where a, c and f are application, controller and function names
    respectively. If the testpath is a file name the file is tested.
    If a controller is specified models are executed by default.
    """

    import doctest
    if os.path.isfile(testpath):
        # A concrete file was given; infer the application from its path.
        mo = re.match(r'(|.*/)applications/(?P<a>[^/]+)', testpath)
        if not mo:
            die('test file is not in application directory: %s'
                 % testpath)
        a = mo.group('a')
        c = f = None
        files = [testpath]
    else:
        (a, c, f) = parse_path_info(testpath)
        errmsg = 'invalid test path: %s' % testpath
        if not a:
            die(errmsg)
        cdir = os.path.join('applications', a, 'controllers')
        if not os.path.isdir(cdir):
            die(errmsg)
        if c:
            cfile = os.path.join(cdir, c + '.py')
            if not os.path.isfile(cfile):
                die(errmsg)
            files = [cfile]
        else:
            files = glob.glob(os.path.join(cdir, '*.py'))
    for testfile in files:
        globs = env(a, import_models)
        # Names present before the controller runs are environment
        # plumbing, not user code -- don't doctest them.
        ignores = globs.keys()
        execfile(testfile, globs)

        def doctest_object(name, obj):
            """doctest obj and enclosed methods and classes."""

            if type(obj) in (types.FunctionType, types.TypeType,
                             types.ClassType, types.MethodType,
                             types.UnboundMethodType):

                # Reload environment before each test.

                globs = env(a, c=c, f=f, import_models=import_models)
                execfile(testfile, globs)
                doctest.run_docstring_examples(obj, globs=globs,
                        name='%s: %s' % (os.path.basename(testfile),
                        name), verbose=verbose)
                if type(obj) in (types.TypeType, types.ClassType):
                    for attr_name in dir(obj):

                        # Execute . operator so decorators are executed.

                        o = eval('%s.%s' % (name, attr_name), globs)
                        doctest_object(attr_name, o)

        for (name, obj) in globs.items():
            if name not in ignores and (f is None or f == name):
                doctest_object(name, obj)


def get_usage():
    """Return the optparse usage string for execute_from_command_line."""
    usage = """
  %prog [options] pythonfile
"""
    return usage


def execute_from_command_line(argv=None):
    """Parse command-line options and launch run().

    NOTE(review): options.run and options.import_models are parsed but
    never passed to run() below -- confirm whether that is intentional.
    """
    if argv is None:
        argv = sys.argv

    parser = optparse.OptionParser(usage=get_usage())

    parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME',
                      help='run web2py in interactive shell or IPython(if installed) ' + \
                      'with specified appname')
    msg = 'run web2py in interactive shell or bpython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += '\n Use combined with --shell'
    parser.add_option(
        '-B',
        '--bpython',
        action='store_true',
        default=False,
        dest='bpython',
        help=msg,
        )
    parser.add_option(
        '-P',
        '--plain',
        action='store_true',
        default=False,
        dest='plain',
        help='only use plain python shell, should be used with --shell option',
        )
    parser.add_option(
        '-M',
        '--import_models',
        action='store_true',
        default=False,
        dest='import_models',
        help='auto import model files, default is False, ' + \
            ' should be used with --shell option',
        )
    parser.add_option(
        '-R',
        '--run',
        dest='run',
        metavar='PYTHON_FILE',
        default='',
        help='run PYTHON_FILE in web2py environment, ' + \
            'should be used with --shell option',
        )

    (options, args) = parser.parse_args(argv[1:])

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)

    if len(args) > 0:
        startfile = args[0]
    else:
        startfile = ''
    run(options.shell, options.plain, startfile=startfile,
        bpython=options.bpython)


if __name__ == '__main__':
    execute_from_command_line()
# Python
# -*- coding: utf-8 -*-

# This file is part of the Rocket Web Server
# Copyright (c) 2010 Timothy Farrell

# Import System Modules
import sys
import errno
import socket
import logging
import platform

# Define Constants
VERSION = '1.2.2'
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = 'Rocket %s' % VERSION
HTTP_SERVER_SOFTWARE = '%s Python/%s' % (SERVER_SOFTWARE, sys.version.split(' ')[0])
BUF_SIZE = 16384
SOCKET_TIMEOUT = 1 # in secs
THREAD_STOP_CHECK_INTERVAL = 1 # in secs, How often should threads check for a server stop message?
IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython
# socket errors that are expected while tearing down a connection and
# therefore safe to swallow on close.
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = dict(LISTEN_QUEUE_SIZE = DEFAULT_LISTEN_QUEUE_SIZE,
                MIN_THREADS = DEFAULT_MIN_THREADS,
                MAX_THREADS = DEFAULT_MAX_THREADS)

PY3K = sys.version_info[0] > 2

class NullHandler(logging.Handler):
    "A Logging handler to prevent library errors."
    def emit(self, record):
        pass

if PY3K:
    def b(val):
        """ Convert string/unicode/bytes literals into bytes.  This allows for
        the same code to run on Python 2.x and 3.x. """
        if isinstance(val, str):
            return val.encode()
        else:
            return val

    def u(val, encoding="us-ascii"):
        """ Convert bytes into string/unicode.  This allows for the
        same code to run on Python 2.x and 3.x. """
        if isinstance(val, bytes):
            return val.decode(encoding)
        else:
            return val
else:
    def b(val):
        """ Convert string/unicode/bytes literals into bytes.  This allows for
        the same code to run on Python 2.x and 3.x. """
        if isinstance(val, unicode):
            return val.encode()
        else:
            return val

    def u(val, encoding="us-ascii"):
        """ Convert bytes into string/unicode.  This allows for the
        same code to run on Python 2.x and 3.x. """
        if isinstance(val, str):
            return val.decode(encoding)
        else:
            return val

# Import Package Modules
# package imports removed in monolithic build

__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE',
           'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u',
           'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler']

# Monolithic build...end of module: rocket\__init__.py
# Monolithic build...start of module: rocket\connection.py

# Import System Modules
import sys
import time
import socket
try:
    import ssl
    has_ssl = True
except ImportError:
    has_ssl = False
# Import Package Modules
# package imports removed in monolithic build

class Connection(object):
    # Lightweight wrapper around an accepted socket: records peer/port
    # metadata, a start_time used for timeout bookkeeping by Monitor, and
    # exposes the underlying socket methods as bound attributes.
    __slots__ = [
        'setblocking',
        'sendall',
        'shutdown',
        'makefile',
        'fileno',
        'client_addr',
        'client_port',
        'server_port',
        'socket',
        'start_time',
        'ssl',
        'secure'
    ]

    def __init__(self, sock_tuple, port, secure=False):
        # sock_tuple is (socket, (client_addr, client_port)) as produced
        # by socket.accept(); port is the local server port.
        self.client_addr, self.client_port = sock_tuple[1]
        self.server_port = port
        self.socket = sock_tuple[0]
        self.start_time = time.time()
        # ssl: the socket actually negotiated SSL; secure: the listener
        # was configured for HTTPS. A mismatch is detected by Worker.
        self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
        self.secure = secure

        if IS_JYTHON:
            # In Jython we must set TCP_NODELAY here since it does not
            # inherit from the listening socket.
            # See: http://bugs.jython.org/issue1309
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        self.socket.settimeout(SOCKET_TIMEOUT)

        # Delegate common socket operations directly to the wrapped socket.
        self.sendall = self.socket.sendall
        self.shutdown = self.socket.shutdown
        self.fileno = self.socket.fileno
        self.makefile = self.socket.makefile
        self.setblocking = self.socket.setblocking

    def close(self):
        # Close the low-level _sock first (CPython 2 keeps the real socket
        # there); ignore EBADF since the fd may already be gone.
        if hasattr(self.socket, '_sock'):
            try:
                self.socket._sock.close()
            except socket.error:
                info = sys.exc_info()
                if info[1].errno != socket.EBADF:
                    raise info[1]
                else:
                    pass
        self.socket.close()

# Monolithic build...end of module: rocket\connection.py
# Monolithic build...start of module: rocket\listener.py

# Import System Modules
import os
import socket
import logging
import traceback
from threading import Thread
try:
    import ssl
    from ssl import SSLError
    has_ssl = True
except ImportError:
    has_ssl = False
    # Stand-in so 'except SSLError' remains valid without the ssl module.
    class SSLError(socket.error):
        pass
# Import Package Modules
# package imports removed in monolithic build

class Listener(Thread):
    """The Listener class is a class responsible for accepting connections
    and queuing them to be processed by a worker thread."""

    def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
        Thread.__init__(self, *args, **kwargs)

        # Instance variables
        self.active_queue = active_queue
        self.interface = interface
        self.addr = interface[0]
        self.port = interface[1]
        # HTTPS is requested via a 4-tuple interface whose last two entries
        # are existing key and certificate file paths.
        self.secure = len(interface) == 4 and \
                      os.path.exists(interface[2]) and \
                      os.path.exists(interface[3])
        self.ready = False

        # Error Log
        self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port)
        self.err_log.addHandler(NullHandler())

        # Build the socket
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if not listener:
            self.err_log.error("Failed to get socket.")
            return

        if self.secure:
            if not has_ssl:
                self.err_log.error("ssl module required to serve HTTPS.")
                return
            elif not os.path.exists(interface[2]):
                data = (interface[2], interface[0], interface[1])
                self.err_log.error("Cannot find key file "
                                   "'%s'. Cannot bind to %s:%s" % data)
                return
            elif not os.path.exists(interface[3]):
                data = (interface[3], interface[0], interface[1])
                self.err_log.error("Cannot find certificate file "
                                   "'%s'. Cannot bind to %s:%s" % data)
                return

        # Set socket options
        try:
            listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except:
            msg = "Cannot share socket. Using %s:%i exclusively."
            self.err_log.warning(msg % (self.addr, self.port))
        try:
            if not IS_JYTHON:
                listener.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        except:
            msg = "Cannot set TCP_NODELAY, things might run a little slower"
            self.err_log.warning(msg)
        try:
            listener.bind((self.addr, self.port))
        except:
            msg = "Socket %s:%i in use by other process and it won't share."
            self.err_log.error(msg % (self.addr, self.port))
        else:
            # We want socket operations to timeout periodically so we can
            # check if the server is shutting down
            listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
            # Listen for new connections allowing queue_size number of
            # connections to wait before rejecting a connection.
            listener.listen(queue_size)
            self.listener = listener
            self.ready = True

    def wrap_socket(self, sock):
        # Upgrade a freshly accepted socket to SSL using this listener's
        # key/certificate pair.
        try:
            sock = ssl.wrap_socket(sock,
                                   keyfile = self.interface[2],
                                   certfile = self.interface[3],
                                   server_side = True,
                                   ssl_version = ssl.PROTOCOL_SSLv23)
        except SSLError:
            # Generally this happens when an HTTP request is received on a
            # secure socket. We don't do anything because it will be detected
            # by Worker and dealt with appropriately.
            pass

        return sock

    def run(self):
        # Accept loop: push (sock, addr) tuples onto active_queue for the
        # worker pool; exits when self.ready is cleared by Rocket.stop().
        if not self.ready:
            self.err_log.warning('Listener started when not ready.')
            return

        if __debug__:
            self.err_log.debug('Entering main loop.')
        while True:
            try:
                sock, addr = self.listener.accept()

                if self.secure:
                    sock = self.wrap_socket(sock)

                self.active_queue.put(((sock, addr),
                                       self.interface[1],
                                       self.secure))

            except socket.timeout:
                # socket.timeout will be raised every THREAD_STOP_CHECK_INTERVAL
                # seconds.  When that happens, we check if it's time to die.
                if not self.ready:
                    if __debug__:
                        self.err_log.debug('Listener exiting.')
                    return
                else:
                    continue
            except:
                # Log and keep accepting; a single failed accept must not
                # take the listener down.
                self.err_log.error(str(traceback.format_exc()))

# Monolithic build...end of module: rocket\listener.py
# Monolithic build...start of module: rocket\main.py

# Import System Modules
import sys
import time
import socket
import logging
import traceback
try:
    from queue import Queue
except ImportError:
    from Queue import Queue
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket')
log.addHandler(NullHandler())

class Rocket(object):
    """The Rocket class is responsible for handling threads and accepting and
    dispatching connections."""

    def __init__(self,
                 interfaces = ('127.0.0.1', 8000),
                 method = 'wsgi',
                 app_info = None,
                 min_threads = None,
                 max_threads = None,
                 queue_size = None,
                 timeout = 600,
                 handle_signals = True):

        self.handle_signals = handle_signals
        # A single (addr, port[, key, cert]) tuple is normalized to a list.
        if not isinstance(interfaces, list):
            self.interfaces = [interfaces]
        else:
            self.interfaces = interfaces

        if min_threads is None:
            min_threads = DEFAULTS['MIN_THREADS']

        if max_threads is None:
            max_threads = DEFAULTS['MAX_THREADS']

        if not queue_size:
            if hasattr(socket, 'SOMAXCONN'):
                queue_size = socket.SOMAXCONN
            else:
                queue_size = DEFAULTS['LISTEN_QUEUE_SIZE']

        # No point queuing more connections than we can ever serve at once
        # (max_threads == 0 means unlimited).
        if max_threads and queue_size > max_threads:
            queue_size = max_threads

        if isinstance(app_info, dict):
            app_info['server_software'] = SERVER_SOFTWARE

        monitor_queue = Queue()
        active_queue = Queue()

        self._monitor = Monitor(monitor_queue, active_queue, timeout)
        self._threadpool = ThreadPool(get_method(method),
                                      app_info = app_info,
                                      active_queue=active_queue,
                                      monitor_queue = monitor_queue,
                                      min_threads=min_threads,
                                      max_threads=max_threads)

        # Build our socket listeners
        self.listeners = [Listener(i, queue_size, active_queue) for i in self.interfaces]
        # Drop listeners that failed to bind; iterate backwards because we
        # delete by index while walking the list.
        for ndx in range(len(self.listeners)-1, 0, -1):
            if not self.listeners[ndx].ready:
                del self.listeners[ndx]

        if not self.listeners:
            log.critical("No interfaces to listen on...closing.")
            sys.exit(1)

    def _sigterm(self, signum, frame):
        log.info('Received SIGTERM')
        self.stop()

    def _sighup(self, signum, frame):
        log.info('Received SIGHUP')
        self.restart()

    def start(self):
        log.info('Starting %s' % SERVER_SOFTWARE)

        # Set up our shutdown signals
        # NOTE: _sighup is bound to SIGUSR1 here, not SIGHUP.
        if self.handle_signals:
            try:
                import signal
                signal.signal(signal.SIGTERM, self._sigterm)
                signal.signal(signal.SIGUSR1, self._sighup)
            except:
                log.debug('This platform does not support signals.')

        # Start our worker threads
        self._threadpool.start()

        # Start our monitor thread
        self._monitor.setDaemon(True)
        self._monitor.start()

        # I know that EXPR and A or B is bad but I'm keeping it for Py2.4
        # compatibility.
        str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '')
        msg = 'Listening on sockets: '
        msg += ', '.join(['%s:%i%s' % str_extract(l) for l in self.listeners])
        log.info(msg)

        for l in self.listeners:
            l.start()

        tp = self._threadpool
        dynamic_resize = tp.dynamic_resize

        # Main loop: periodically let the pool grow/shrink until stopped.
        while not tp.stop_server:
            try:
                dynamic_resize()
                time.sleep(THREAD_STOP_CHECK_INTERVAL)
            except KeyboardInterrupt:
                # Capture a keyboard interrupt when running from a console
                break
            except:
                if not tp.stop_server:
                    log.error(str(traceback.format_exc()))
                    continue

        return self.stop()

    def stop(self, stoplogging = True):
        log.info("Stopping Server")

        # Stop listeners
        for l in self.listeners:
            l.ready = False
            if l.isAlive():
                l.join()

        # Stop Worker threads
        self._threadpool.stop()

        # Stop Monitor
        self._monitor.stop()
        if self._monitor.isAlive():
            self._monitor.join()

        if stoplogging:
            logging.shutdown()

    def restart(self):
        self.stop(False)
        self.start()

def CherryPyWSGIServer(bind_addr,
                       wsgi_app,
                       numthreads = 10,
                       server_name = None,
                       max = -1,
                       request_queue_size = 5,
                       timeout = 10,
                       shutdown_timeout = 5):
    """ A Cherrypy wsgiserver-compatible wrapper.
""" max_threads = max if max_threads < 0: max_threads = 0 return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app}, min_threads = numthreads, max_threads = max_threads, queue_size = request_queue_size, timeout = timeout) # Monolithic build...end of module: rocket\main.py # Monolithic build...start of module: rocket\monitor.py # Import System Modules import time import logging import select from threading import Thread # Import Package Modules # package imports removed in monolithic build class Monitor(Thread): # Monitor worker class. def __init__(self, monitor_queue, active_queue, timeout, *args, **kwargs): Thread.__init__(self, *args, **kwargs) # Instance Variables self.monitor_queue = monitor_queue self.active_queue = active_queue self.timeout = timeout self.connections = set() self.active = False def run(self): self.name = self.getName() self.log = logging.getLogger('Rocket.Monitor') self.log.addHandler(NullHandler()) self.active = True conn_list = list() list_changed = False if __debug__: self.log.debug('Entering monitor loop.') # Enter thread main loop while self.active: # Move the queued connections to the selection pool while not self.monitor_queue.empty() or not len(self.connections): if __debug__: self.log.debug('In "receive timed-out connections" loop.') c = self.monitor_queue.get() if c is None: # A non-client is a signal to die if __debug__: self.log.debug('Received a death threat.') return self.log.debug('Received a timed out connection.') if __debug__: assert(c not in self.connections) if IS_JYTHON: # Jython requires a socket to be in Non-blocking mode in # order to select on it. 
c.setblocking(False) if __debug__: self.log.debug('Adding connection to monitor list.') self.connections.add(c) list_changed = True # Wait on those connections self.log.debug('Blocking on connections') if list_changed: conn_list = list(self.connections) list_changed = False try: readable = select.select(conn_list, [], [], THREAD_STOP_CHECK_INTERVAL)[0] except: if self.active: raise else: break # If we have any readable connections, put them back for r in readable: if __debug__: self.log.debug('Restoring readable connection') if IS_JYTHON: # Jython requires a socket to be in Non-blocking mode in # order to select on it, but the rest of the code requires # that it be in blocking mode. r.setblocking(True) r.start_time = time.time() self.active_queue.put(r) self.connections.remove(r) list_changed = True # If we have any stale connections, kill them off. if self.timeout: now = time.time() stale = set() for c in self.connections: if (now - c.start_time) >= self.timeout: stale.add(c) for c in stale: if __debug__: # "EXPR and A or B" kept for Py2.4 compatibility data = (c.client_addr, c.server_port, c.ssl and '*' or '') self.log.debug('Flushing stale connection: %s:%i%s' % data) self.connections.remove(c) list_changed = True try: c.close() finally: del c def stop(self): self.active = False if __debug__: self.log.debug('Flushing waiting connections') for c in self.connections: try: c.close() finally: del c if __debug__: self.log.debug('Flushing queued connections') while not self.monitor_queue.empty(): c = self.monitor_queue.get() if c is None: continue try: c.close() finally: del c # Place a None sentry value to cause the monitor to die. 
self.monitor_queue.put(None) # Monolithic build...end of module: rocket\monitor.py # Monolithic build...start of module: rocket\threadpool.py # Import System Modules import logging # Import Package Modules # package imports removed in monolithic build # Setup Logging log = logging.getLogger('Rocket.Errors.ThreadPool') log.addHandler(NullHandler()) class ThreadPool: """The ThreadPool class is a container class for all the worker threads. It manages the number of actively running threads.""" def __init__(self, method, app_info, active_queue, monitor_queue, min_threads=DEFAULTS['MIN_THREADS'], max_threads=DEFAULTS['MAX_THREADS'], ): if __debug__: log.debug("Initializing ThreadPool.") self.check_for_dead_threads = 0 self.active_queue = active_queue self.worker_class = method self.min_threads = min_threads self.max_threads = max_threads self.monitor_queue = monitor_queue self.stop_server = False # TODO - Optimize this based on some real-world usage data self.grow_threshold = int(max_threads/10) + 2 if not isinstance(app_info, dict): app_info = dict() app_info.update(max_threads=max_threads, min_threads=min_threads) self.app_info = app_info self.threads = set() for x in range(min_threads): worker = self.worker_class(app_info, self.active_queue, self.monitor_queue) self.threads.add(worker) def start(self): self.stop_server = False if __debug__: log.debug("Starting threads.") for thread in self.threads: thread.setDaemon(True) thread.start() def stop(self): if __debug__: log.debug("Stopping threads.") self.stop_server = True # Prompt the threads to die for t in self.threads: self.active_queue.put(None) # Give them the gun for t in self.threads: t.kill() # Wait until they pull the trigger for t in self.threads: t.join() # Clean up the mess self.bring_out_your_dead() def bring_out_your_dead(self): # Remove dead threads from the pool dead_threads = [t for t in self.threads if not t.isAlive()] for t in dead_threads: if __debug__: log.debug("Removing dead thread: %s." 
                          % t.getName())
            try:
                # Py2.4 complains here so we put it in a try block
                self.threads.remove(t)
            except:
                pass

        self.check_for_dead_threads -= len(dead_threads)

    def grow(self, amount=None):
        # Add worker threads, capped so the pool never exceeds max_threads.
        if self.stop_server:
            return

        if not amount:
            amount = self.max_threads

        amount = min([amount, self.max_threads - len(self.threads)])

        if __debug__:
            log.debug("Growing by %i." % amount)

        for x in range(amount):
            worker = self.worker_class(self.app_info,
                                       self.active_queue,
                                       self.monitor_queue)

            worker.setDaemon(True)
            self.threads.add(worker)
            worker.start()

    def shrink(self, amount=1):
        # Ask 'amount' workers to exit by queueing None sentinels; the dead
        # threads are reaped later by bring_out_your_dead().
        if __debug__:
            log.debug("Shrinking by %i." % amount)

        self.check_for_dead_threads += amount

        for x in range(amount):
            self.active_queue.put(None)

    def dynamic_resize(self):
        # Called periodically from Rocket.start(): shrink when idle, grow
        # when the backlog exceeds grow_threshold (max_threads == 0 means
        # the pool may grow without bound).
        if (self.max_threads > self.min_threads or self.max_threads == 0):
            if self.check_for_dead_threads > 0:
                self.bring_out_your_dead()

            queueSize = self.active_queue.qsize()
            threadCount = len(self.threads)

            if __debug__:
                log.debug("Examining ThreadPool. %i threads and %i Q'd conxions"
                          % (threadCount, queueSize))

            if queueSize == 0 and threadCount > self.min_threads:
                self.shrink()
            elif queueSize > self.grow_threshold:
                self.grow(queueSize)

# Monolithic build...end of module: rocket\threadpool.py
# Monolithic build...start of module: rocket\worker.py

# Import System Modules
import re
import sys
import socket
import logging
import traceback
#from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
    from urllib import unquote
except ImportError:
    from urllib.parse import unquote
try:
    from io import StringIO
except ImportError:
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
try:
    from ssl import SSLError
except ImportError:
    class SSLError(socket.error):
        pass
# Import Package Modules
# package imports removed in monolithic build

# Define Constants
re_SLASH = re.compile('%2F', re.IGNORECASE)
# Verbose (re.X) regex that splits an HTTP/1.x request line into method,
# optional absolute-URI parts, path, query string and protocol.
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT)   # Request Method
\                                                            # (single space)
(
    (?P<scheme>[^:/]+)                                       # Scheme
    (://)  #
    (?P<host>[^/]+)                                          # Host
)? #
(?P<path>(\*|/[^ \?]*))                                      # Path
(\? (?P<query_string>[^ ]+))?                                # Query String
\                                                            # (single space)
(?P<protocol>HTTPS?/1\.[01])                                 # Protocol
$
""", re.X)
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
# Minimal canned HTTP response used for error replies.
RESPONSE = '''\
HTTP/1.1 %s
Content-Length: %i
Content-Type: %s

%s
'''
if IS_JYTHON:
    HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT'])

###
# The Headers and FileWrapper classes are ripped straight from the Python
# Standard Library. I've removed some docstrings and integrated my BUF_SIZE.
# See the Python License here: http://docs.python.org/license.html
###

# Regular expression that matches `special' characters in parameters, the
# existance of which force quoting of the parameter value.
import re
_tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')

def _formatparam(param, value=None, quote=1):
    """Convenience function to format and return a key=value pair.

    This will quote the value if needed or if quote is true.
""" if value is not None and len(value) > 0: if quote or _tspecials.search(value): value = value.replace('\\', '\\\\').replace('"', r'\"') return '%s="%s"' % (param, value) else: return '%s=%s' % (param, value) else: return param class Headers: def __init__(self,headers): if type(headers) is not type([]): raise TypeError("Headers must be a list of name/value tuples") self._headers = headers def __len__(self): return len(self._headers) def __setitem__(self, name, val): del self[name] self._headers.append((name, val)) def __delitem__(self,name): name = name.lower() self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name] def __getitem__(self,name): return self.get(name) def has_key(self, name): return self.get(name) is not None __contains__ = has_key def get_all(self, name): name = name.lower() return [kv[1] for kv in self._headers if kv[0].lower()==name] def get(self,name,default=None): name = name.lower() for k,v in self._headers: if k.lower()==name: return v return default def keys(self): return [k for k, v in self._headers] def values(self): return [v for k, v in self._headers] def items(self): return self._headers[:] def __repr__(self): return "Headers(%r)" % self._headers def __str__(self): return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['','']) def setdefault(self,name,value): result = self.get(name) if result is None: self._headers.append((name,value)) return value else: return result def add_header(self, _name, _value, **_params): parts = [] if _value is not None: parts.append(_value) for k, v in _params.items(): if v is None: parts.append(k.replace('_', '-')) else: parts.append(_formatparam(k.replace('_', '-'), v)) self._headers.append((_name, "; ".join(parts))) class FileWrapper: """Wrapper to convert file-like objects to iterables""" def __init__(self, filelike, blksize=BUF_SIZE): self.filelike = filelike self.blksize = blksize if hasattr(filelike,'close'): self.close = filelike.close def __getitem__(self,key): data = 
self.filelike.read(self.blksize) if data: return data raise IndexError def __iter__(self): return self def next(self): data = self.filelike.read(self.blksize) if data: return data raise StopIteration class Worker(Thread): """The Worker class is a base class responsible for receiving connections and (a subclass) will run an application to process the the connection """ def __init__(self, app_info, active_queue, monitor_queue, *args, **kwargs): Thread.__init__(self, *args, **kwargs) # Instance Variables self.app_info = app_info self.active_queue = active_queue self.monitor_queue = monitor_queue self.size = 0 self.status = "200 OK" self.closeConnection = True # Request Log self.req_log = logging.getLogger('Rocket.Requests') self.req_log.addHandler(NullHandler()) # Error Log self.err_log = logging.getLogger('Rocket.Errors.'+self.getName()) self.err_log.addHandler(NullHandler()) def _handleError(self, typ, val, tb): if typ == SSLError: if 'timed out' in val.args[0]: typ = SocketTimeout if typ == SocketTimeout: if __debug__: self.err_log.debug('Socket timed out') self.monitor_queue.put(self.conn) return True if typ == SocketClosed: self.closeConnection = True if __debug__: self.err_log.debug('Client closed socket') return False if typ == BadRequest: self.closeConnection = True if __debug__: self.err_log.debug('Client sent a bad request') return True if typ == socket.error: self.closeConnection = True if val.args[0] in IGNORE_ERRORS_ON_CLOSE: if __debug__: self.err_log.debug('Ignorable socket Error received...' 
                                       'closing connection.')
                return False
            else:
                self.status = "999 Utter Server Failure"
                tb_fmt = traceback.format_exception(typ, val, tb)
                self.err_log.error('Unhandled Error when serving '
                                   'connection:\n' + '\n'.join(tb_fmt))
                return False

        # Any other exception type: log it and attempt a 500 reply.
        self.closeConnection = True
        tb_fmt = traceback.format_exception(typ, val, tb)
        self.err_log.error('\n'.join(tb_fmt))
        self.send_response('500 Server Error')
        return False

    def run(self):
        if __debug__:
            self.err_log.debug('Entering main loop.')

        # Enter thread main loop
        while True:
            conn = self.active_queue.get()

            if not conn:
                # A non-client is a signal to die
                if __debug__:
                    self.err_log.debug('Received a death threat.')
                return conn

            # Fresh connections arrive as tuples from the Listener; ones
            # recycled by the Monitor are already Connection objects.
            if isinstance(conn, tuple):
                conn = Connection(*conn)

            self.conn = conn

            if conn.ssl != conn.secure:
                self.err_log.info('Received HTTP connection on HTTPS port.')
                self.send_response('400 Bad Request')
                self.closeConnection = True
                conn.close()
                continue
            else:
                if __debug__:
                    self.err_log.debug('Received a connection.')
                self.closeConnection = False

            # Enter connection serve loop
            while True:
                if __debug__:
                    self.err_log.debug('Serving a request')
                try:
                    self.run_app(conn)
                    log_info = dict(client_ip = conn.client_addr,
                                    time = datetime.now().strftime('%c'),
                                    status = self.status.split(' ')[0],
                                    size = self.size,
                                    request_line = self.request_line)
                    self.req_log.info(LOG_LINE % log_info)
                except:
                    exc = sys.exc_info()
                    handled = self._handleError(*exc)
                    if handled:
                        break
                    else:
                        if self.request_line:
                            log_info = dict(client_ip = conn.client_addr,
                                            time = datetime.now().strftime('%c'),
                                            status = self.status.split(' ')[0],
                                            size = self.size,
                                            request_line = self.request_line + ' - not stopping')
                            self.req_log.info(LOG_LINE % log_info)

                if self.closeConnection:
                    try:
                        conn.close()
                    except:
                        self.err_log.error(str(traceback.format_exc()))
                    break

    def run_app(self, conn):
        # Must be overridden with a method reads the request from the socket
        # and sends a response.
        self.closeConnection = True
        raise NotImplementedError('Overload this method!')

    def send_response(self, status):
        # Send a minimal plain-text response whose body is the status
        # reason phrase (used for error replies).
        stat_msg = status.split(' ', 1)[1]
        msg = RESPONSE % (status,
                          len(stat_msg),
                          'text/plain',
                          stat_msg)
        try:
            self.conn.sendall(b(msg))
        except socket.error:
            self.closeConnection = True
            self.err_log.error('Tried to send "%s" to client but received socket'
                               ' error' % status)

    def kill(self):
        # Force the current connection shut so a blocked worker wakes up.
        if self.isAlive() and hasattr(self, 'conn'):
            try:
                self.conn.shutdown(socket.SHUT_RDWR)
            except socket.error:
                info = sys.exc_info()
                if info[1].args[0] != socket.EBADF:
                    self.err_log.debug('Error on shutdown: '+str(info))

    def read_request_line(self, sock_file):
        # Read and parse the HTTP request line; returns a dict with method,
        # scheme, host, path, query_string and protocol keys.
        self.request_line = ''
        try:
            # Grab the request line
            d = sock_file.readline()
            if PY3K:
                d = d.decode('ISO-8859-1')

            if d == '\r\n':
                # Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
                if __debug__:
                    self.err_log.debug('Client sent newline')
                d = sock_file.readline()
                if PY3K:
                    d = d.decode('ISO-8859-1')
        except socket.timeout:
            raise SocketTimeout("Socket timed out before request.")

        d = d.strip()

        if not d:
            if __debug__:
                self.err_log.debug('Client did not send a recognizable request.')
            raise SocketClosed('Client closed socket.')

        self.request_line = d

        # NOTE: I've replaced the traditional method of procedurally breaking
        # apart the request line with a (rather unsightly) regular expression.
        # However, Java's regexp support sucks so bad that it actually takes
        # longer in Jython to process the regexp than procedurally. So I've
        # left the old code here for Jython's sake...for now.
        if IS_JYTHON:
            return self._read_request_line_jython(d)

        match = re_REQUEST_LINE.match(d)

        if not match:
            self.send_response('400 Bad Request')
            raise BadRequest

        req = match.groupdict()
        for k,v in req.items():
            if not v:
                req[k] = ""
            if k == 'path':
                # Unquote each path segment but preserve encoded slashes
                # (%2F) so they are not confused with path separators.
                req['path'] = r'%2F'.join([unquote(x) for x in re_SLASH.split(v)])

        return req

    def _read_request_line_jython(self, d):
        # Procedural request-line parser kept for Jython, where the regex
        # version is slower (see note in read_request_line).
        d = d.strip()
        try:
            method, uri, proto = d.split(' ')
            if not proto.startswith('HTTP') or \
               proto[-3:] not in ('1.0', '1.1') or \
               method not in HTTP_METHODS:
                self.send_response('400 Bad Request')
                raise BadRequest
        except ValueError:
            self.send_response('400 Bad Request')
            raise BadRequest

        req = dict(method=method, protocol = proto)
        scheme = ''
        host = ''
        if uri == '*' or uri.startswith('/'):
            path = uri
        elif '://' in uri:
            # Absolute-URI form: split scheme, host and path apart.
            scheme, rest = uri.split('://')
            host, path = rest.split('/', 1)
            path = '/' + path
        else:
            self.send_response('400 Bad Request')
            raise BadRequest

        query_string = ''
        if '?' in path:
            path, query_string = path.split('?', 1)

        path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)])

        req.update(path=path,
                   query_string=query_string,
                   scheme=scheme.lower(),
                   host=host)
        return req

    def read_headers(self, sock_file):
        # Read header lines until the blank CRLF line; returns a dict of
        # upper-cased names with '-' replaced by '_' (CGI style).
        headers = dict()

        l = sock_file.readline()

        lname = None
        lval = None
        while True:
            if PY3K:
                try:
                    l = str(l, 'ISO-8859-1')
                except UnicodeDecodeError:
                    self.err_log.warning('Client sent invalid header: ' + repr(l))

            if l == '\r\n':
                break

            if l[0] in ' \t' and lname:
                # Some headers take more than one line
                lval += ',' + l.strip()
            else:
                # HTTP header values are latin-1 encoded
                l = l.split(':', 1)
                # HTTP header names are us-ascii encoded
                lname = l[0].strip().upper().replace('-', '_')
                lval = l[-1].strip()
            headers[str(lname)] = str(lval)

            l = sock_file.readline()
        return headers

class SocketTimeout(Exception):
    "Exception for when a socket times out between requests."
    pass

class BadRequest(Exception):
    "Exception for when a client sends an incomprehensible request."
    pass

class SocketClosed(Exception):
    "Exception for when a socket is closed by the client."
    pass

class ChunkedReader(object):
    # File-like reader that decodes HTTP/1.1 'chunked' transfer-encoded
    # request bodies from the underlying socket file.
    def __init__(self, sock_file):
        self.stream = sock_file
        self.chunk_size = 0

    def _read_header(self):
        # Read the next chunk-size line (hex); 0 on malformed input or at
        # the terminating zero-length chunk.
        chunk_len = ""
        try:
            while "" == chunk_len:
                chunk_len = self.stream.readline().strip()
            return int(chunk_len, 16)
        except ValueError:
            return 0

    def read(self, size):
        # Read up to 'size' decoded bytes, spanning chunk boundaries;
        # carries leftover chunk capacity in self.chunk_size across calls.
        data = b('')
        chunk_size = self.chunk_size
        while size:
            if not chunk_size:
                chunk_size = self._read_header()

            if size < chunk_size:
                data += self.stream.read(size)
                chunk_size -= size
                break
            else:
                if not chunk_size:
                    break
                data += self.stream.read(chunk_size)
                size -= chunk_size
                chunk_size = 0

        self.chunk_size = chunk_size

        return data

    def readline(self):
        # Byte-at-a-time scan up to and including the next '\n'.
        data = b('')
        c = self.read(1)
        while c and c != b('\n'):
            data += c
            c = self.read(1)
        data += c
        return data

    def readlines(self):
        # NOTE(review): yields only the FIRST line, not all lines — confirm
        # whether callers rely on this before changing it.
        yield self.readline()

def get_method(method):
    # Map a method name (e.g. 'wsgi') to its Worker subclass.
    methods = dict(wsgi=WSGIWorker)
    return methods[method.lower()]

# Monolithic build...end of module: rocket\worker.py
# Monolithic build...start of module: rocket\methods\__init__.py
# Monolithic build...end of module: rocket\methods\__init__.py
# Monolithic build...start of module: rocket\methods\wsgi.py

# Import System Modules
import sys
import socket
#from wsgiref.headers import Headers
#from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build

if PY3K:
    from email.utils import formatdate
else:
    # Caps Utils for Py2.4 compatibility
    from email.Utils import formatdate

# Define Constants
NEWLINE = b('\r\n')
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
# Static portion of the WSGI environ shared by every request.
BASE_ENV = {'SERVER_NAME': SERVER_NAME,
            'SCRIPT_NAME': '',  # Direct call WSGI does not need a name
            'wsgi.errors': sys.stderr,
            'wsgi.version': (1, 0),
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'wsgi.file_wrapper': FileWrapper
            }

class WSGIWorker(Worker):
    def __init__(self, *args, **kwargs):
        """Builds some instance variables that will last the life of the
        thread."""
        Worker.__init__(self, *args, **kwargs)

        if isinstance(self.app_info, dict):
            # A pool capped at exactly one thread is effectively
            # single-threaded from the app's point of view.
            multithreaded = self.app_info.get('max_threads') != 1
        else:
            multithreaded = False

        self.base_environ = dict({'SERVER_SOFTWARE': self.app_info['server_software'],
                                  'wsgi.multithread': multithreaded,
                                  })
        self.base_environ.update(BASE_ENV)
        # Grab our application
        self.app = self.app_info.get('wsgi_app')

        if not hasattr(self.app, "__call__"):
            raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app))

    def build_environ(self, sock_file, conn):
        """ Build the execution environment. """
        # Grab the request line
        request = self.read_request_line(sock_file)

        # Copy the Base Environment
        environ = self.base_environ.copy()

        # Grab the headers
        for k, v in self.read_headers(sock_file).items():
            environ[str('HTTP_'+k)] = v

        # Add CGI Variables
        environ['REQUEST_METHOD'] = request['method']
        environ['PATH_INFO'] = request['path']
        environ['SERVER_PROTOCOL'] = request['protocol']
        environ['SERVER_PORT'] = str(conn.server_port)
        environ['REMOTE_PORT'] = str(conn.client_port)
        environ['REMOTE_ADDR'] = str(conn.client_addr)
        environ['QUERY_STRING'] = request['query_string']
        # Content-Length/Content-Type get un-prefixed CGI names per WSGI.
        if 'HTTP_CONTENT_LENGTH' in environ:
            environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
        if 'HTTP_CONTENT_TYPE' in environ:
            environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']

        # Save the request method for later
        self.request_method = environ['REQUEST_METHOD']

        # Add Dynamic WSGI Variables
        if conn.ssl:
            environ['wsgi.url_scheme'] = 'https'
            environ['HTTPS'] = 'on'
        else:
            environ['wsgi.url_scheme'] = 'http'
        if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
            environ['wsgi.input'] = ChunkedReader(sock_file)
        else:
            environ['wsgi.input'] = sock_file

        return environ

    def send_headers(self, data, sections):
        # Finalize and transmit the response headers; also decides the
        # framing (Content-Length vs. chunked) and keep-alive policy.
        h_set = self.header_set

        # Does the app want us to send output chunked?
        self.chunked = h_set.get('transfer-encoding', '').lower() == 'chunked'

        # Add a Date header if it's not there already
        if not 'date' in h_set:
            h_set['Date'] = formatdate(usegmt=True)

        # Add a Server header if it's not there already
        if not 'server' in h_set:
            h_set['Server'] = HTTP_SERVER_SOFTWARE

        if 'content-length' in h_set:
            self.size = int(h_set['content-length'])
        else:
            s = int(self.status.split(' ')[0])
            if s < 200 or s not in (204, 205, 304):
                if not self.chunked:
                    if sections == 1:
                        # Add a Content-Length header if it's not there already
                        h_set['Content-Length'] = str(len(data))
                        self.size = len(data)
                    else:
                        # If they sent us more than one section, we blow chunks
                        h_set['Transfer-Encoding'] = 'Chunked'
                        self.chunked = True
                        if __debug__:
                            self.err_log.debug('Adding header...'
                                               'Transfer-Encoding: Chunked')

        if 'connection' not in h_set:
            # If the application did not provide a connection header, fill it in
            client_conn = self.environ.get('HTTP_CONNECTION', '').lower()
            if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1':
                # HTTP = 1.1 defaults to keep-alive connections
                if client_conn:
                    h_set['Connection'] = client_conn
                else:
                    h_set['Connection'] = 'keep-alive'
            else:
                # HTTP < 1.1 supports keep-alive but it's quirky so we don't support it
                h_set['Connection'] = 'close'

        # Close our connection if we need to.
        self.closeConnection = h_set.get('connection', '').lower() == 'close'

        # Build our output headers
        header_data = HEADER_RESPONSE % (self.status, str(h_set))

        # Send the headers
        if __debug__:
            self.err_log.debug('Sending Headers: %s' % repr(header_data))
        self.conn.sendall(b(header_data))
        self.headers_sent = True

    def write_warning(self, data, sections=None):
        # Returned by start_response(); WSGI's legacy write() callable.
        self.err_log.warning('WSGI app called write method directly. This is '
                             'deprecated behavior. Please update your app.')
        return self.write(data, sections)

    def write(self, data, sections=None):
        """ Write the data to the output socket.
""" if self.error[0]: self.status = self.error[0] data = b(self.error[1]) if not self.headers_sent: self.send_headers(data, sections) if self.request_method != 'HEAD': try: if self.chunked: self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data))) else: self.conn.sendall(data) except socket.error: # But some clients will close the connection before that # resulting in a socket error. self.closeConnection = True def start_response(self, status, response_headers, exc_info=None): """ Store the HTTP status and headers to be sent when self.write is called. """ if exc_info: try: if self.headers_sent: # Re-raise original exception if headers sent # because this violates WSGI specification. raise finally: exc_info = None elif self.header_set: raise AssertionError("Headers already set!") if PY3K and not isinstance(status, str): self.status = str(status, 'ISO-8859-1') else: self.status = status # Make sure headers are bytes objects try: self.header_set = Headers(response_headers) except UnicodeDecodeError: self.error = ('500 Internal Server Error', 'HTTP Headers should be bytes') self.err_log.error('Received HTTP Headers from client that contain' ' invalid characters for Latin-1 encoding.') return self.write_warning def run_app(self, conn): self.size = 0 self.header_set = Headers([]) self.headers_sent = False self.error = (None, None) self.chunked = False sections = None output = None if __debug__: self.err_log.debug('Getting sock_file') # Build our file-like object sock_file = conn.makefile('rb',BUF_SIZE) try: # Read the headers and build our WSGI environment self.environ = environ = self.build_environ(sock_file, conn) # Handle 100 Continue if environ.get('HTTP_EXPECT', '') == '100-continue': res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n' conn.sendall(b(res)) # Send it to our WSGI application output = self.app(environ, self.start_response) if not hasattr(output, '__len__') and not hasattr(output, '__iter__'): self.error = ('500 Internal Server Error', 'WSGI 
applications must return a list or ' 'generator type.') if hasattr(output, '__len__'): sections = len(output) for data in output: # Don't send headers until body appears if data: self.write(data, sections) if self.chunked: # If chunked, send our final chunk length self.conn.sendall(b('0\r\n\r\n')) elif not self.headers_sent: # Send headers if the body was empty self.send_headers('', sections) # Don't capture exceptions here. The Worker class handles # them appropriately. finally: if __debug__: self.err_log.debug('Finally closing output and sock_file') if hasattr(output,'close'): output.close() sock_file.close() # Monolithic build...end of module: rocket\methods\wsgi.py # # the following code is not part of Rocket but was added in web2py for testing purposes # def demo_app(environ, start_response): global static_folder import os types = {'htm': 'text/html','html': 'text/html','gif': 'image/gif', 'jpg': 'image/jpeg','png': 'image/png','pdf': 'applications/pdf'} if static_folder: if not static_folder.startswith('/'): static_folder = os.path.join(os.getcwd(),static_folder) path = os.path.join(static_folder, environ['PATH_INFO'][1:] or 'index.html') type = types.get(path.split('.')[-1],'text') if os.path.exists(path): try: pathfile = open(path,'rb') try: data = pathfile.read() finally: pathfile.close() start_response('200 OK', [('Content-Type', type)]) except IOError: start_response('404 NOT FOUND', []) data = '404 NOT FOUND' else: start_response('500 INTERNAL SERVER ERROR', []) data = '500 INTERNAL SERVER ERROR' else: start_response('200 OK', [('Content-Type', 'text/html')]) data = '<html><body><h1>Hello from Rocket Web Server</h1></body></html>' return [data] def demo(): from optparse import OptionParser parser = OptionParser() parser.add_option("-i", "--ip", dest="ip",default="127.0.0.1", help="ip address of the network interface") parser.add_option("-p", "--port", dest="port",default="8000", help="post where to run web server") parser.add_option("-s", "--static", 
dest="static",default=None, help="folder containing static files") (options, args) = parser.parse_args() global static_folder static_folder = options.static print 'Rocket running on %s:%s' % (options.ip, options.port) r=Rocket((options.ip,int(options.port)),'wsgi', {'wsgi_app':demo_app}) r.start() if __name__=='__main__': demo()
Python
#!/bin/python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

# NOTE: Python 2 only — cPickle, thread, urllib2, Cookie and cStringIO are
# Python 2 standard-library modules.
import base64
import cPickle
import datetime
import thread
import logging
import sys
import os
import re
import time
import copy
import smtplib
import urllib
import urllib2
import Cookie
import cStringIO
from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string
from contenttype import contenttype
from storage import Storage, StorageList, Settings, Messages
from utils import web2py_uuid
from gluon import *
from fileutils import read_file
import serializers
import contrib.simplejson as simplejson

__all__ = ['Mail', 'Auth', 'Recaptcha', 'Crud', 'Service', 'PluginManager', 'fetch', 'geocode', 'prettydate']

logger = logging.getLogger("web2py")

# Sentinel callable; presumably used elsewhere in this module to detect
# "argument not passed" — confirm against the rest of the file.
DEFAULT = lambda: None

def callback(actions,form,tablename=None):
    """Run one callback (or a list of callbacks) on *form*.

    If *actions* is a dict and *tablename* is given, the callbacks are looked
    up under that table name first.  A single callable is wrapped in a list.
    """
    if actions:
        if tablename and isinstance(actions,dict):
            actions = actions.get(tablename, [])
        if not isinstance(actions,(list, tuple)):
            actions = [actions]
        [action(form) for action in actions]

def validators(*a):
    """Flatten the arguments (validators or lists/tuples of validators) into
    a single flat list."""
    b = []
    for item in a:
        if isinstance(item, (list, tuple)):
            b = b + list(item)
        else:
            b.append(item)
    return b

def call_or_redirect(f,*args):
    """redirect() to f(*args) if *f* is callable, otherwise redirect() to *f*
    itself (assumed to be a URL)."""
    if callable(f):
        redirect(f(*args))
    else:
        redirect(f)

class Mail(object):
    """
    Class for configuring and sending emails with alternative text / html
    body, multiple attachments and encryption support

    Works with SMTP and Google App Engine.
    """

    class Attachment(MIMEBase.MIMEBase):
        """
        Email attachment

        Arguments::

            payload: path to file or file-like object with read() method
            filename: name of the attachment stored in message; if set to
                      None, it will be fetched from payload path; file-like
                      object payload must have explicit filename specified
            content_id: id of the attachment; automatically contained within
                        < and >
            content_type: content type of the attachment; if set to None,
                          it will be fetched from filename using
                          gluon.contenttype module
            encoding: encoding of all strings passed to this function (except
                      attachment body)

        Content ID is used to identify attachments within the html body;
        in example, attached image with content ID 'photo' may be used in
        html message as a source of img tag <img src="cid:photo" />.

        Examples::

            #Create attachment from text file:
            attachment = Mail.Attachment('/path/to/file.txt')

            Content-Type: text/plain
            MIME-Version: 1.0
            Content-Disposition: attachment; filename="file.txt"
            Content-Transfer-Encoding: base64

            SOMEBASE64CONTENT=

            #Create attachment from image file with custom filename and cid:
            attachment = Mail.Attachment('/path/to/file.png',
                                         filename='photo.png',
                                         content_id='photo')

            Content-Type: image/png
            MIME-Version: 1.0
            Content-Disposition: attachment; filename="photo.png"
            Content-Id: <photo>
            Content-Transfer-Encoding: base64

            SOMEOTHERBASE64CONTENT=
        """

        def __init__(
            self,
            payload,
            filename=None,
            content_id=None,
            content_type=None,
            encoding='utf-8'):
            # A str payload is treated as a filesystem path; anything else
            # must be a file-like object with read().
            if isinstance(payload, str):
                if filename == None:
                    filename = os.path.basename(payload)
                payload = read_file(payload, 'rb')
            else:
                if filename == None:
                    raise Exception('Missing attachment name')
                payload = payload.read()
            filename = filename.encode(encoding)
            if content_type == None:
                content_type = contenttype(filename)
            # Originals are kept so the GAE send path can re-use them.
            self.my_filename = filename
            self.my_payload = payload
            MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1))
            self.set_payload(payload)
            self['Content-Disposition'] = 'attachment; filename="%s"' % filename
            if content_id != None:
                self['Content-Id'] = '<%s>' % content_id.encode(encoding)
            Encoders.encode_base64(self)

    def __init__(self, server=None, sender=None, login=None, tls=True):
        """
        Main Mail object

        Arguments::

            server: SMTP server address in address:port notation
            sender: sender email address
            login: sender login name and password in login:password notation
                   or None if no authentication is required
            tls: enables/disables encryption (True by default)

        In Google App Engine use::

            server='gae'

        For sake of backward compatibility all fields are optional and
        default to None, however, to be able to send emails at least server
        and sender must be specified. They are available under following
        fields:

            mail.settings.server
            mail.settings.sender
            mail.settings.login

        When server is 'logging', email is logged but not sent (debug mode)

        Optionally you can use PGP encryption or X509::

            mail.settings.cipher_type = None
            mail.settings.sign = True
            mail.settings.sign_passphrase = None
            mail.settings.encrypt = True
            mail.settings.x509_sign_keyfile = None
            mail.settings.x509_sign_certfile = None
            mail.settings.x509_crypt_certfiles = None

            cipher_type       : None
                                gpg - need a python-pyme package and gpgme lib
                                x509 - smime
            sign              : sign the message (True or False)
            sign_passphrase   : passphrase for key signing
            encrypt           : encrypt the message
                             ... x509 only ...
            x509_sign_keyfile : the signers private key filename (PEM format)
            x509_sign_certfile: the signers certificate filename (PEM format)
            x509_crypt_certfiles: the certificates file to encrypt the
                                  messages with can be a file name or a list
                                  of file names (PEM format)

        Examples::

            #Create Mail object with authentication data for remote server:
            mail = Mail('example.com:25', 'me@example.com', 'me:password')
        """
        settings = self.settings = Settings()
        settings.server = server
        settings.sender = sender
        settings.login = login
        settings.tls = tls
        settings.ssl = False
        settings.cipher_type = None
        settings.sign = True
        settings.sign_passphrase = None
        settings.encrypt = True
        settings.x509_sign_keyfile = None
        settings.x509_sign_certfile = None
        settings.x509_crypt_certfiles = None
        settings.debug = False
        settings.lock_keys = True
        self.result = {}
        self.error = None

    def send(
        self,
        to,
        subject='None',
        message='None',
        attachments=None,
        cc=None,
        bcc=None,
        reply_to=None,
        encoding='utf-8',
        ):
        """
        Sends an email using data specified in constructor

        Arguments::

            to: list or tuple of receiver addresses; will also accept single
                object
            subject: subject of the email
            message: email body text; depends on type of passed object:

                     if 2-list or 2-tuple is passed: first element will be
                     source of plain text while second of html text;

                     otherwise: object will be the only source of plain text
                     and html source will be set to None;

                     If text or html source is:

                     None: content part will be ignored,
                     string: content part will be set to it,
                     file-like object: content part will be fetched from
                                       it using it's read() method
            attachments: list or tuple of Mail.Attachment objects; will also
                         accept single object
            cc: list or tuple of carbon copy receiver addresses; will also
                accept single object
            bcc: list or tuple of blind carbon copy receiver addresses; will
                 also accept single object
            reply_to: address to which reply should be composed
            encoding: encoding of all strings passed to this method (including
                      message bodies)

        Examples::

            #Send plain text message to single address:
            mail.send('you@example.com',
                      'Message subject',
                      'Plain text body of the message')

            #Send html message to single address:
            mail.send('you@example.com',
                      'Message subject',
                      '<html>Plain text body of the message</html>')

            #Send text and html message to three addresses (two in cc):
            mail.send('you@example.com',
                      'Message subject',
                      ('Plain text body', '<html>html body</html>'),
                      cc=['other1@example.com', 'other2@example.com'])

            #Send html only message with image attachment available from
            the message by 'photo' content id:
            mail.send('you@example.com',
                      'Message subject',
                      (None, '<html><img src="cid:photo" /></html>'),
                      Mail.Attachment('/path/to/photo.jpg'
                                      content_id='photo'))

            #Send email with two attachments and no body text
            mail.send('you@example.com,
                      'Message subject',
                      None,
                      [Mail.Attachment('/path/to/fist.file'),
                       Mail.Attachment('/path/to/second.file')])

        Returns True on success, False on failure.

        Before return, method updates two object's fields:
        self.result: return value of smtplib.SMTP.sendmail() or GAE's
                     mail.send_mail() method
        self.error: Exception message or None if above was successful
        """

        # Wrap a header value in an email.Header when it contains non-ASCII
        # (or control) characters; otherwise pass it through unchanged.
        def encode_header(key):
            if [c for c in key if 32>ord(c) or ord(c)>127]:
                return Header.Header(key.encode('utf-8'),'utf-8')
            else:
                return key

        if not isinstance(self.settings.server, str):
            raise Exception('Server address not specified')
        if not isinstance(self.settings.sender, str):
            raise Exception('Sender address not specified')
        payload_in = MIMEMultipart.MIMEMultipart('mixed')
        if to:
            if not isinstance(to, (list,tuple)):
                to = [to]
        else:
            raise Exception('Target receiver address not specified')
        if cc:
            if not isinstance(cc, (list, tuple)):
                cc = [cc]
        if bcc:
            if not isinstance(bcc, (list, tuple)):
                bcc = [bcc]
        if message == None:
            text = html = None
        elif isinstance(message, (list, tuple)):
            text, html = message
        elif message.strip().startswith('<html') and message.strip().endswith('</html>'):
            # A bare html string doubles as the plain-text part on GAE only.
            text = self.settings.server=='gae' and message or None
            html = message
        else:
            text = message
            html = None
        if text != None or html != None:
            # text and html variants go in a multipart/alternative sub-part
            attachment = MIMEMultipart.MIMEMultipart('alternative')
            if text != None:
                if isinstance(text, basestring):
                    text = text.decode(encoding).encode('utf-8')
                else:
                    # file-like object
                    text = text.read().decode(encoding).encode('utf-8')
                attachment.attach(MIMEText.MIMEText(text,_charset='utf-8'))
            if html != None:
                if isinstance(html, basestring):
                    html = html.decode(encoding).encode('utf-8')
                else:
                    # file-like object
                    html = html.read().decode(encoding).encode('utf-8')
                attachment.attach(MIMEText.MIMEText(html, 'html',_charset='utf-8'))
            payload_in.attach(attachment)
        if attachments == None:
            pass
        elif isinstance(attachments, (list, tuple)):
            for attachment in attachments:
                payload_in.attach(attachment)
        else:
            payload_in.attach(attachments)

        #######################################################
        #                      CIPHER                         #
        #######################################################
        cipher_type = self.settings.cipher_type
        sign = self.settings.sign
        sign_passphrase = self.settings.sign_passphrase
        encrypt = self.settings.encrypt

        #######################################################
        #                       GPGME                         #
        #######################################################
        if cipher_type == 'gpg':
            if not sign and not encrypt:
                self.error="No sign and no encrypt is set but cipher type to gpg"
                return False

            # need a python-pyme package and gpgme lib
            from pyme import core, errors
            from pyme.constants.sig import mode

            ############################################
            #                   sign                   #
            ############################################
            if sign:
                import string
                core.check_version(None)
                # CRLF line endings are required for the signed payload
                pin=string.replace(payload_in.as_string(),'\n','\r\n')
                plain = core.Data(pin)
                sig = core.Data()
                c = core.Context()
                c.set_armor(1)
                c.signers_clear()
                # search for signing key for From:
                for sigkey in c.op_keylist_all(self.settings.sender, 1):
                    if sigkey.can_sign:
                        c.signers_add(sigkey)
                if not c.signers_enum(0):
                    self.error='No key for signing [%s]' % self.settings.sender
                    return False
                c.set_passphrase_cb(lambda x,y,z: sign_passphrase)
                try:
                    # make a signature
                    c.op_sign(plain,sig,mode.DETACH)
                    sig.seek(0,0)
                    # make it part of the email
                    payload=MIMEMultipart.MIMEMultipart('signed',
                                                        boundary=None,
                                                        _subparts=None,
                                                        **dict(micalg="pgp-sha1",
                                                               protocol="application/pgp-signature"))
                    # insert the origin payload
                    payload.attach(payload_in)
                    # insert the detached signature
                    p=MIMEBase.MIMEBase("application",'pgp-signature')
                    p.set_payload(sig.read())
                    payload.attach(p)
                    # it's just a trick to handle the no encryption case
                    payload_in=payload
                except errors.GPGMEError, ex:
                    self.error="GPG error: %s" % ex.getstring()
                    return False
            ############################################
            #                  encrypt                 #
            ############################################
            if encrypt:
                core.check_version(None)
                plain = core.Data(payload_in.as_string())
                cipher = core.Data()
                c = core.Context()
                c.set_armor(1)
                # collect the public keys for encryption
                recipients=[]
                rec=to[:]
                if cc:
                    rec.extend(cc)
                if bcc:
                    rec.extend(bcc)
                for addr in rec:
                    c.op_keylist_start(addr,0)
                    r = c.op_keylist_next()
                    if r == None:
                        self.error='No key for [%s]' % addr
                        return False
                    recipients.append(r)
                try:
                    # make the encryption
                    c.op_encrypt(recipients, 1, plain, cipher)
                    cipher.seek(0,0)
                    # make it a part of the email
                    payload=MIMEMultipart.MIMEMultipart('encrypted',
                                                        boundary=None,
                                                        _subparts=None,
                                                        **dict(protocol="application/pgp-encrypted"))
                    p=MIMEBase.MIMEBase("application",'pgp-encrypted')
                    p.set_payload("Version: 1\r\n")
                    payload.attach(p)
                    p=MIMEBase.MIMEBase("application",'octet-stream')
                    p.set_payload(cipher.read())
                    payload.attach(p)
                except errors.GPGMEError, ex:
                    self.error="GPG error: %s" % ex.getstring()
                    return False
        #######################################################
        #                       X.509                         #
        #######################################################
        elif cipher_type == 'x509':
            if not sign and not encrypt:
                self.error="No sign and no encrypt is set but cipher type to x509"
                return False
            x509_sign_keyfile=self.settings.x509_sign_keyfile
            if self.settings.x509_sign_certfile:
                x509_sign_certfile=self.settings.x509_sign_certfile
            else:
                # if there is no sign certfile we'll assume the
                # cert is in keyfile
                x509_sign_certfile=self.settings.x509_sign_keyfile
            # crypt certfiles could be a string or a list
            x509_crypt_certfiles=self.settings.x509_crypt_certfiles

            # need m2crypto
            from M2Crypto import BIO, SMIME, X509
            msg_bio = BIO.MemoryBuffer(payload_in.as_string())
            s = SMIME.SMIME()

            #   SIGN
            if sign:
                #key for signing
                try:
                    s.load_key(x509_sign_keyfile, x509_sign_certfile, callback=lambda x: sign_passphrase)
                    if encrypt:
                        p7 = s.sign(msg_bio)
                    else:
                        p7 = s.sign(msg_bio,flags=SMIME.PKCS7_DETACHED)
                    msg_bio = BIO.MemoryBuffer(payload_in.as_string()) # Recreate coz sign() has consumed it.
                except Exception,e:
                    self.error="Something went wrong on signing: <%s>" %str(e)
                    return False

            #   ENCRYPT
            if encrypt:
                try:
                    sk = X509.X509_Stack()
                    if not isinstance(x509_crypt_certfiles, (list, tuple)):
                        x509_crypt_certfiles = [x509_crypt_certfiles]

                    # make an encryption cert's stack
                    for x in x509_crypt_certfiles:
                        sk.push(X509.load_cert(x))
                    s.set_x509_stack(sk)

                    s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
                    tmp_bio = BIO.MemoryBuffer()
                    if sign:
                        s.write(tmp_bio, p7)
                    else:
                        tmp_bio.write(payload_in.as_string())
                    p7 = s.encrypt(tmp_bio)
                except Exception,e:
                    self.error="Something went wrong on encrypting: <%s>" %str(e)
                    return False

            # Final stage in sign and encryption
            out = BIO.MemoryBuffer()
            if encrypt:
                s.write(out, p7)
            else:
                if sign:
                    s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED)
                else:
                    out.write('\r\n')
                    out.write(payload_in.as_string())
            out.close()
            st=str(out.read())
            payload=message_from_string(st)
        else:
            # no cryptography process as usual
            payload=payload_in

        payload['From'] = encode_header(self.settings.sender.decode(encoding))
        origTo = to[:]
        if to:
            payload['To'] = encode_header(', '.join(to).decode(encoding))
        if reply_to:
            payload['Reply-To'] = encode_header(reply_to.decode(encoding))
        if cc:
            payload['Cc'] = encode_header(', '.join(cc).decode(encoding))
            # NOTE(review): `to` is mutated in place below so that cc/bcc
            # recipients are included in the SMTP envelope — confirm callers
            # do not rely on `to` being unchanged.
            to.extend(cc)
        if bcc:
            to.extend(bcc)
        payload['Subject'] = encode_header(subject.decode(encoding))
        payload['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
        result = {}
        try:
            # 'logging' server: debug mode — log the message instead of
            # sending it.
            if self.settings.server == 'logging':
                logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\n\n%s\n%s\n' % \
                                ('-'*40,self.settings.sender,
                                 ', '.join(to),text or html,'-'*40))
            elif self.settings.server == 'gae':
                # Google App Engine mail API; attachments are passed as
                # (filename, payload) pairs saved by Mail.Attachment.
                xcc = dict()
                if cc:
                    xcc['cc'] = cc
                if bcc:
                    xcc['bcc'] = bcc
                from google.appengine.api import mail
                attachments = attachments and [(a.my_filename,a.my_payload) for a in attachments]
                if attachments:
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, html=html,
                                            attachments=attachments, **xcc)
                elif html:
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, html=html, **xcc)
                else:
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, **xcc)
            else:
                # Plain SMTP (optionally over SSL and/or upgraded via STARTTLS)
                smtp_args = self.settings.server.split(':')
                if self.settings.ssl:
                    server = smtplib.SMTP_SSL(*smtp_args)
                else:
                    server = smtplib.SMTP(*smtp_args)
                if self.settings.tls and not self.settings.ssl:
                    server.ehlo()
                    server.starttls()
                    server.ehlo()
                if self.settings.login != None:
                    # login is stored as 'user:password'
                    server.login(*self.settings.login.split(':',1))
                result = server.sendmail(self.settings.sender, to, payload.as_string())
                server.quit()
        except Exception, e:
            # On any failure: record partial result and the exception, return
            # False (callers inspect self.error).
            logger.warn('Mail.send failure:%s' % e)
            self.result = result
            self.error = e
            return False
        self.result = result
        self.error = None
        return True

class Recaptcha(DIV):
    """DIV-based widget that renders a reCAPTCHA (v1) challenge and verifies
    the response server-side against VERIFY_SERVER."""

    API_SSL_SERVER = 'https://www.google.com/recaptcha/api'
    API_SERVER = 'http://www.google.com/recaptcha/api'
    VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify'

    def __init__(
        self,
        request,
        public_key='',
        private_key='',
        use_ssl=False,
        error=None,
        error_message='invalid',
        label = 'Verify:',
        options = ''
        ):
        self.remote_addr = request.env.remote_addr
        self.public_key = public_key
        self.private_key = private_key
        self.use_ssl = use_ssl
        self.error = error
        self.errors = Storage()
        self.error_message = error_message
        self.components = []
        self.attributes = {}
        self.label = label
        self.options = options
        self.comment = ''

    def _validate(self):
        """POST the user's challenge/response to Google's verify server and
        return True/False; on failure self.errors['captcha'] is set.

        NOTE(review): self.request_vars is not assigned anywhere in this
        class's visible code — presumably attached by the form/validation
        machinery before _validate() runs; confirm against the caller.
        """
        # for local testing:
        recaptcha_challenge_field = \
            self.request_vars.recaptcha_challenge_field
        recaptcha_response_field = \
            self.request_vars.recaptcha_response_field
        private_key = self.private_key
        remoteip = self.remote_addr
        if not (recaptcha_response_field and recaptcha_challenge_field and
                len(recaptcha_response_field) and len(recaptcha_challenge_field)):
            self.errors['captcha'] = self.error_message
            return False
        params = urllib.urlencode({
            'privatekey': private_key,
            'remoteip': remoteip,
            'challenge': recaptcha_challenge_field,
            'response': recaptcha_response_field,
            })
        request = urllib2.Request(
            url=self.VERIFY_SERVER,
            data=params,
            headers={'Content-type': 'application/x-www-form-urlencoded',
                     'User-agent': 'reCAPTCHA Python'})
        httpresp = urllib2.urlopen(request)
        # First response line is 'true' or 'false'
        return_values = httpresp.read().splitlines()
        httpresp.close()
        return_code = return_values[0]
        if return_code == 'true':
            # Success: scrub the captcha fields from the request vars
            del self.request_vars.recaptcha_challenge_field
            del self.request_vars.recaptcha_response_field
            self.request_vars.captcha = ''
            return True
        self.errors['captcha'] = self.error_message
        return False

    def xml(self):
        """Render the captcha markup (script + noscript iframe fallback),
        appending an error DIV when verification previously failed."""
        public_key = self.public_key
        use_ssl = self.use_ssl
        error_param = ''
        if self.error:
            error_param = '&error=%s' % self.error
        if use_ssl:
            server = self.API_SSL_SERVER
        else:
            server = self.API_SERVER
        captcha = DIV(
            SCRIPT("var RecaptchaOptions = {%s};" % self.options),
            SCRIPT(_type="text/javascript",
                   _src="%s/challenge?k=%s%s" % (server,public_key,error_param)),
            TAG.noscript(IFRAME(_src="%s/noscript?k=%s%s" % (server,public_key,error_param),
                                _height="300",_width="500",_frameborder="0"),
                         BR(),
                         INPUT(_type='hidden', _name='recaptcha_response_field',
                               _value='manual_challenge')),
            _id='recaptcha')
        if not self.errors.captcha:
            return XML(captcha).xml()
        else:
            captcha.append(DIV(self.errors['captcha'], _class='error'))
            return XML(captcha).xml()

# NOTE(review): this definition is cut at the chunk boundary; its body
# continues on the next source line.
def addrow(form,a,b,c,style,_id,position=-1):
    if style
== "divs": form[0].insert(position, DIV(DIV(LABEL(a),_class='w2p_fl'), DIV(b, _class='w2p_fw'), DIV(c, _class='w2p_fc'), _id = _id)) elif style == "table2cols": form[0].insert(position, TR(LABEL(a),'')) form[0].insert(position+1, TR(b, _colspan=2, _id = _id)) elif style == "ul": form[0].insert(position, LI(DIV(LABEL(a),_class='w2p_fl'), DIV(b, _class='w2p_fw'), DIV(c, _class='w2p_fc'), _id = _id)) else: form[0].insert(position, TR(LABEL(a),b,c,_id = _id)) class Auth(object): """ Class for authentication, authorization, role based access control. Includes: - registration and profile - login and logout - username and password retrieval - event logging - role creation and assignment - user defined group/role based permission Authentication Example:: from contrib.utils import * mail=Mail() mail.settings.server='smtp.gmail.com:587' mail.settings.sender='you@somewhere.com' mail.settings.login='username:password' auth=Auth(globals(), db) auth.settings.mailer=mail # auth.settings....=... auth.define_tables() def authentication(): return dict(form=auth()) exposes: - http://.../{application}/{controller}/authentication/login - http://.../{application}/{controller}/authentication/logout - http://.../{application}/{controller}/authentication/register - http://.../{application}/{controller}/authentication/verify_email - http://.../{application}/{controller}/authentication/retrieve_username - http://.../{application}/{controller}/authentication/retrieve_password - http://.../{application}/{controller}/authentication/reset_password - http://.../{application}/{controller}/authentication/profile - http://.../{application}/{controller}/authentication/change_password On registration a group with role=new_user.id is created and user is given membership of this group. You can create a group with:: group_id=auth.add_group('Manager', 'can access the manage action') auth.add_permission(group_id, 'access to manage') Here \"access to manage\" is just a user defined string. 
You can give access to a user:: auth.add_membership(group_id, user_id) If user id is omitted, the logged in user is assumed Then you can decorate any action:: @auth.requires_permission('access to manage') def manage(): return dict() You can restrict a permission to a specific table:: auth.add_permission(group_id, 'edit', db.sometable) @auth.requires_permission('edit', db.sometable) Or to a specific record:: auth.add_permission(group_id, 'edit', db.sometable, 45) @auth.requires_permission('edit', db.sometable, 45) If authorization is not granted calls:: auth.settings.on_failed_authorization Other options:: auth.settings.mailer=None auth.settings.expiration=3600 # seconds ... ### these are messages that can be customized ... """ def url(self, f=None, args=[], vars={}): return URL(c=self.settings.controller,f=f,args=args,vars=vars) def __init__(self, environment=None, db=None, controller='default', cas_provider = None): """ auth=Auth(globals(), db) - environment is there for legacy but unused (awful) - db has to be the database where to create tables for authentication """ ## next two lines for backward compatibility if not db and environment and isinstance(environment,DAL): db = environment self.db = db self.environment = current request = current.request session = current.session auth = session.auth if auth and auth.last_visit and auth.last_visit + \ datetime.timedelta(days=0, seconds=auth.expiration) > request.now: self.user = auth.user # this is a trick to speed up sessions if (request.now - auth.last_visit).seconds > (auth.expiration/10): auth.last_visit = request.now else: self.user = None session.auth = None settings = self.settings = Settings() # ## what happens after login? # ## what happens after registration? 
settings.hideerror = False settings.cas_domains = [request.env.http_host] settings.cas_provider = cas_provider settings.extra_fields = {} settings.actions_disabled = [] settings.reset_password_requires_verification = False settings.registration_requires_verification = False settings.registration_requires_approval = False settings.alternate_requires_registration = False settings.create_user_groups = True settings.controller = controller settings.login_url = self.url('user', args='login') settings.logged_url = self.url('user', args='profile') settings.download_url = self.url('download') settings.mailer = None settings.login_captcha = None settings.register_captcha = None settings.retrieve_username_captcha = None settings.retrieve_password_captcha = None settings.captcha = None settings.expiration = 3600 # one hour settings.long_expiration = 3600*30*24 # one month settings.remember_me_form = True settings.allow_basic_login = False settings.allow_basic_login_only = False settings.on_failed_authorization = \ self.url('user',args='not_authorized') settings.on_failed_authentication = lambda x: redirect(x) settings.formstyle = 'table3cols' settings.label_separator = ': ' # ## table names to be used settings.password_field = 'password' settings.table_user_name = 'auth_user' settings.table_group_name = 'auth_group' settings.table_membership_name = 'auth_membership' settings.table_permission_name = 'auth_permission' settings.table_event_name = 'auth_event' settings.table_cas_name = 'auth_cas' # ## if none, they will be created settings.table_user = None settings.table_group = None settings.table_membership = None settings.table_permission = None settings.table_event = None settings.table_cas = None # ## settings.showid = False # ## these should be functions or lambdas settings.login_next = self.url('index') settings.login_onvalidation = [] settings.login_onaccept = [] settings.login_methods = [self] settings.login_form = self settings.login_email_validate = True 
settings.login_userfield = None settings.logout_next = self.url('index') settings.logout_onlogout = None settings.register_next = self.url('index') settings.register_onvalidation = [] settings.register_onaccept = [] settings.register_fields = None settings.verify_email_next = self.url('user', args='login') settings.verify_email_onaccept = [] settings.profile_next = self.url('index') settings.profile_onvalidation = [] settings.profile_onaccept = [] settings.profile_fields = None settings.retrieve_username_next = self.url('index') settings.retrieve_password_next = self.url('index') settings.request_reset_password_next = self.url('user', args='login') settings.reset_password_next = self.url('user', args='login') settings.change_password_next = self.url('index') settings.change_password_onvalidation = [] settings.change_password_onaccept = [] settings.retrieve_password_onvalidation = [] settings.reset_password_onvalidation = [] settings.hmac_key = None settings.lock_keys = True # ## these are messages that can be customized messages = self.messages = Messages(current.T) messages.login_button = 'Login' messages.register_button = 'Register' messages.password_reset_button = 'Request reset password' messages.password_change_button = 'Change password' messages.profile_save_button = 'Save profile' messages.submit_button = 'Submit' messages.verify_password = 'Verify Password' messages.delete_label = 'Check to delete:' messages.function_disabled = 'Function disabled' messages.access_denied = 'Insufficient privileges' messages.registration_verifying = 'Registration needs verification' messages.registration_pending = 'Registration is pending approval' messages.login_disabled = 'Login disabled by administrator' messages.logged_in = 'Logged in' messages.email_sent = 'Email sent' messages.unable_to_send_email = 'Unable to send email' messages.email_verified = 'Email verified' messages.logged_out = 'Logged out' messages.registration_successful = 'Registration successful' 
messages.invalid_email = 'Invalid email' messages.unable_send_email = 'Unable to send email' messages.invalid_login = 'Invalid login' messages.invalid_user = 'Invalid user' messages.invalid_password = 'Invalid password' messages.is_empty = "Cannot be empty" messages.mismatched_password = "Password fields don't match" messages.verify_email = \ 'Click on the link http://...verify_email/%(key)s to verify your email' messages.verify_email_subject = 'Email verification' messages.username_sent = 'Your username was emailed to you' messages.new_password_sent = 'A new password was emailed to you' messages.password_changed = 'Password changed' messages.retrieve_username = 'Your username is: %(username)s' messages.retrieve_username_subject = 'Username retrieve' messages.retrieve_password = 'Your password is: %(password)s' messages.retrieve_password_subject = 'Password retrieve' messages.reset_password = \ 'Click on the link http://...reset_password/%(key)s to reset your password' messages.reset_password_subject = 'Password reset' messages.invalid_reset_password = 'Invalid reset password' messages.profile_updated = 'Profile updated' messages.new_password = 'New password' messages.old_password = 'Old password' messages.group_description = \ 'Group uniquely assigned to user %(id)s' messages.register_log = 'User %(id)s Registered' messages.login_log = 'User %(id)s Logged-in' messages.login_failed_log = None messages.logout_log = 'User %(id)s Logged-out' messages.profile_log = 'User %(id)s Profile updated' messages.verify_email_log = 'User %(id)s Verification email sent' messages.retrieve_username_log = 'User %(id)s Username retrieved' messages.retrieve_password_log = 'User %(id)s Password retrieved' messages.reset_password_log = 'User %(id)s Password reset' messages.change_password_log = 'User %(id)s Password changed' messages.add_group_log = 'Group %(group_id)s created' messages.del_group_log = 'Group %(group_id)s deleted' messages.add_membership_log = None 
messages.del_membership_log = None messages.has_membership_log = None messages.add_permission_log = None messages.del_permission_log = None messages.has_permission_log = None messages.impersonate_log = 'User %(id)s is impersonating %(other_id)s' messages.label_first_name = 'First name' messages.label_last_name = 'Last name' messages.label_username = 'Username' messages.label_email = 'E-mail' messages.label_password = 'Password' messages.label_registration_key = 'Registration key' messages.label_reset_password_key = 'Reset Password key' messages.label_registration_id = 'Registration identifier' messages.label_role = 'Role' messages.label_description = 'Description' messages.label_user_id = 'User ID' messages.label_group_id = 'Group ID' messages.label_name = 'Name' messages.label_table_name = 'Table name' messages.label_record_id = 'Record ID' messages.label_time_stamp = 'Timestamp' messages.label_client_ip = 'Client IP' messages.label_origin = 'Origin' messages.label_remember_me = "Remember me (for 30 days)" messages['T'] = current.T messages.verify_password_comment = 'please input your password again' messages.lock_keys = True # for "remember me" option response = current.response if auth and auth.remember: #when user wants to be logged in for longer response.cookies[response.session_id_name]["expires"] = \ auth.expiration def lazy_user (auth = self): return auth.user_id reference_user = 'reference %s' % settings.table_user_name def represent(id,record=None,s=settings): try: user = s.table_user(id) return '%(first_name)s %(last_name)s' % user except: return id self.signature = db.Table(self.db,'auth_signature', Field('is_active','boolean',default=True), Field('created_on','datetime', default=request.now, writable=False,readable=False), Field('created_by', reference_user, default=lazy_user,represent=represent, writable=False,readable=False, ), Field('modified_on','datetime', update=request.now,default=request.now, writable=False,readable=False), Field('modified_by', 
                              # (continuation of Auth.__init__: closes the
                              # Field('modified_by', ...) of the auth_signature
                              # table started on the previous lines)
                              reference_user,represent=represent,
                              default=lazy_user,update=lazy_user,
                              writable=False,readable=False))

    def _get_user_id(self):
        "accessor for auth.user_id"
        # truthy user record -> its id; otherwise None (not logged in)
        return self.user and self.user.id or None

    # read-only property: current user's id, or None when not logged in
    user_id = property(_get_user_id, doc="user.id or None")

    def _HTTP(self, *a, **b):
        """
        only used in lambda: self._HTTP(404)
        """
        # raising HTTP aborts the request with the given status code
        raise HTTP(*a, **b)

    def __call__(self):
        """
        usage:

        def authentication(): return dict(form=auth())

        Dispatches request.args[0] to the matching Auth action method.
        """

        request = current.request
        args = request.args
        if not args:
            # no action given: send the client to the login action
            redirect(self.url(args='login',vars=request.vars))
        elif args[0] in self.settings.actions_disabled:
            raise HTTP(404)
        if args[0] in ('login','logout','register','verify_email',
                       'retrieve_username','retrieve_password',
                       'reset_password','request_reset_password',
                       'change_password','profile','groups',
                       'impersonate','not_authorized'):
            # dispatch by name to the method of the same name
            return getattr(self,args[0])()
        elif args[0]=='cas' and not self.settings.cas_provider:
            # acting as a CAS *server* (only when no external provider is set)
            if args(1) == 'login':
                return self.cas_login(version=2)
            if args(1) == 'validate':
                return self.cas_validate(version=2)
            if args(1) == 'logout':
                return self.logout()
        else:
            raise HTTP(404)

    def navbar(self,prefix='Welcome',action=None):
        # builds the login/logout/profile navigation SPAN helper
        request = current.request
        T = current.T
        if isinstance(prefix,str):
            prefix = T(prefix)
        if not action:
            action=URL(request.application,request.controller,'user')
        if prefix:
            prefix = prefix.strip()+' '
        if self.user_id:
            # logged-in variant: logout / profile / change password links
            logout=A(T('logout'),_href=action+'/logout')
            profile=A(T('profile'),_href=action+'/profile')
            password=A(T('password'),_href=action+'/change_password')
            bar = SPAN(prefix,self.user.first_name,' [ ', logout, ']',
                       _class='auth_navbar')
            if not 'profile' in self.settings.actions_disabled:
                bar.insert(4, ' | ')
                bar.insert(5, profile)
            if not 'change_password' in self.settings.actions_disabled:
                bar.insert(-1, ' | ')
                bar.insert(-1, password)
        else:
            # anonymous variant: login / register / recovery links
            login=A(T('login'),_href=action+'/login')
            register=A(T('register'),_href=action+'/register')
            retrieve_username=A(T('forgot username?'),
                                _href=action+'/retrieve_username')
            lost_password=A(T('lost password?'),
                            _href=action+'/request_reset_password')
            bar = SPAN('[ ',login,' ]',_class='auth_navbar')
            if not 'register' in self.settings.actions_disabled:
                bar.insert(2, ' | ')
                bar.insert(3, register)
            if 'username' in self.settings.table_user.fields() and \
                    not 'retrieve_username' in self.settings.actions_disabled:
                bar.insert(-1, ' | ')
                bar.insert(-1, retrieve_username)
            if not 'request_reset_password' in self.settings.actions_disabled:
                bar.insert(-1, ' | ')
                bar.insert(-1, lost_password)
        return bar

    def __get_migrate(self, tablename, migrate=True):
        # maps the user-supplied `migrate` argument to a per-table value:
        # a string prefix becomes '<prefix><tablename>.table', False stays
        # False, anything else means "migrate with default table file".
        # NOTE(review): isinstance(migrate, str) would be the idiomatic
        # check instead of comparing type(...).__name__.
        if type(migrate).__name__ == 'str':
            return (migrate + tablename + '.table')
        elif migrate == False:
            return False
        else:
            return True

    def define_tables(self, username=False, migrate=True, fake_migrate=False):
        """
        to be called unless tables are defined manually

        usages::

            # defines all needed tables and table files
            # 'myprefix_auth_user.table', ...
            auth.define_tables(migrate='myprefix_')

            # defines all needed tables without migration/table files
            auth.define_tables(migrate=False)

        """
        db = self.db
        settings = self.settings
        # --- auth_user table (two layouts: with or without a username) ---
        if not settings.table_user_name in db.tables:
            passfield = settings.password_field
            if username or settings.cas_provider:
                table = db.define_table(
                    settings.table_user_name,
                    Field('first_name', length=128, default='',
                          label=self.messages.label_first_name),
                    Field('last_name', length=128, default='',
                          label=self.messages.label_last_name),
                    Field('username', length=128, default='',
                          label=self.messages.label_username),
                    Field('email', length=512, default='',
                          label=self.messages.label_email),
                    Field(passfield, 'password', length=512,
                          readable=False, label=self.messages.label_password),
                    Field('registration_key', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_registration_key),
                    Field('reset_password_key', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_reset_password_key),
                    Field('registration_id', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_registration_id),
                    *settings.extra_fields.get(settings.table_user_name,[]),
                    **dict(
                        migrate=self.__get_migrate(settings.table_user_name,
                                                   migrate),
                        fake_migrate=fake_migrate,
                        format='%(username)s'))
                # username must be word chars/dot/dash and unique
                table.username.requires = (IS_MATCH('[\w\.\-]+'),
                                           IS_NOT_IN_DB(db, table.username))
            else:
                table = db.define_table(
                    settings.table_user_name,
                    Field('first_name', length=128, default='',
                          label=self.messages.label_first_name),
                    Field('last_name', length=128, default='',
                          label=self.messages.label_last_name),
                    Field('email', length=512, default='',
                          label=self.messages.label_email),
                    Field(passfield, 'password', length=512,
                          readable=False, label=self.messages.label_password),
                    Field('registration_key', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_registration_key),
                    Field('reset_password_key', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_reset_password_key),
                    *settings.extra_fields.get(settings.table_user_name,[]),
                    **dict(
                        migrate=self.__get_migrate(settings.table_user_name,
                                                   migrate),
                        fake_migrate=fake_migrate,
                        format='%(first_name)s %(last_name)s (%(id)s)'))
            table.first_name.requires = \
                IS_NOT_EMPTY(error_message=self.messages.is_empty)
            table.last_name.requires = \
                IS_NOT_EMPTY(error_message=self.messages.is_empty)
            # passwords are hashed with CRYPT keyed by settings.hmac_key
            table[passfield].requires = [CRYPT(key=settings.hmac_key)]
            table.email.requires = \
                [IS_EMAIL(error_message=self.messages.invalid_email),
                 IS_NOT_IN_DB(db, table.email)]
            table.registration_key.default = ''
        settings.table_user = db[settings.table_user_name]
        # --- auth_group table ---
        if not settings.table_group_name in db.tables:
            table = db.define_table(
                settings.table_group_name,
                Field('role', length=512, default='',
                      label=self.messages.label_role),
                Field('description', 'text',
                      label=self.messages.label_description),
                *settings.extra_fields.get(settings.table_group_name,[]),
                **dict(
                    migrate=self.__get_migrate(
                        settings.table_group_name, migrate),
                    fake_migrate=fake_migrate,
                    format = '%(role)s (%(id)s)'))
            table.role.requires = IS_NOT_IN_DB(db, '%s.role' % settings.table_group_name)
            settings.table_group = db[settings.table_group_name]
        # --- auth_membership table (user <-> group) ---
        if not settings.table_membership_name in db.tables:
            table = db.define_table(
                settings.table_membership_name,
                Field('user_id', settings.table_user,
                      label=self.messages.label_user_id),
                Field('group_id', settings.table_group,
                      label=self.messages.label_group_id),
                *settings.extra_fields.get(settings.table_membership_name,[]),
                **dict(
                    migrate=self.__get_migrate(
                        settings.table_membership_name, migrate),
                    fake_migrate=fake_migrate))
            table.user_id.requires = IS_IN_DB(db, '%s.id' %
                    settings.table_user_name,
                    '%(first_name)s %(last_name)s (%(id)s)')
            table.group_id.requires = IS_IN_DB(db, '%s.id' %
                    settings.table_group_name,
                    '%(role)s (%(id)s)')
            settings.table_membership = db[settings.table_membership_name]
        # --- auth_permission table (group -> name/table/record) ---
        if not settings.table_permission_name in db.tables:
            table = db.define_table(
                settings.table_permission_name,
                Field('group_id', settings.table_group,
                      label=self.messages.label_group_id),
                Field('name', default='default', length=512,
                      label=self.messages.label_name),
                Field('table_name', length=512,
                      label=self.messages.label_table_name),
                Field('record_id', 'integer',default=0,
                      label=self.messages.label_record_id),
                *settings.extra_fields.get(settings.table_permission_name,[]),
                **dict(
                    migrate=self.__get_migrate(
                        settings.table_permission_name, migrate),
                    fake_migrate=fake_migrate))
            table.group_id.requires = IS_IN_DB(db, '%s.id' %
                    settings.table_group_name,
                    '%(role)s (%(id)s)')
            table.name.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty)
            table.table_name.requires = IS_EMPTY_OR(IS_IN_SET(self.db.tables))
            table.record_id.requires = IS_INT_IN_RANGE(0, 10 ** 9)
            settings.table_permission = db[settings.table_permission_name]
        # --- auth_event audit-log table ---
        if not settings.table_event_name in db.tables:
            table = db.define_table(
                settings.table_event_name,
                Field('time_stamp', 'datetime',
                      default=current.request.now,
                      label=self.messages.label_time_stamp),
                Field('client_ip',
                      default=current.request.client,
                      label=self.messages.label_client_ip),
                Field('user_id', settings.table_user, default=None,
                      label=self.messages.label_user_id),
                Field('origin', default='auth', length=512,
                      label=self.messages.label_origin),
                Field('description', 'text', default='',
                      label=self.messages.label_description),
                *settings.extra_fields.get(settings.table_event_name,[]),
                **dict(
                    migrate=self.__get_migrate(
                        settings.table_event_name, migrate),
                    fake_migrate=fake_migrate))
            table.user_id.requires = IS_IN_DB(db, '%s.id' %
                    settings.table_user_name,
                    '%(first_name)s %(last_name)s (%(id)s)')
            table.origin.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty)
            table.description.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty)
            settings.table_event = db[settings.table_event_name]
        now = current.request.now
        # --- auth_cas ticket table (only when this app serves CAS) ---
        if settings.cas_domains:
            if not settings.table_cas_name in db.tables:
                table = db.define_table(
                    settings.table_cas_name,
                    Field('user_id', settings.table_user, default=None,
                          label=self.messages.label_user_id),
                    Field('created_on','datetime',default=now),
                    Field('url',requires=IS_URL()),
                    Field('uuid'),
                    *settings.extra_fields.get(settings.table_cas_name,[]),
                    **dict(
                        # NOTE(review): this passes table_event_name, not
                        # table_cas_name, as the migrate filename for the CAS
                        # table -- looks like a copy/paste slip; confirm upstream.
                        migrate=self.__get_migrate(
                            settings.table_event_name, migrate),
                        fake_migrate=fake_migrate))
                table.user_id.requires = IS_IN_DB(db, '%s.id' % \
                    settings.table_user_name,
                    '%(first_name)s %(last_name)s (%(id)s)')
                settings.table_cas = db[settings.table_cas_name]
        # --- delegate login to an external CAS provider, if configured ---
        if settings.cas_provider:
            settings.actions_disabled = \
                ['profile','register','change_password','request_reset_password']
            from gluon.contrib.login_methods.cas_auth import CasAuth
            # map every readable user field out of the CAS attribute dict;
            # the n=name default binds each field name at lambda creation
            maps = dict((name,lambda v,n=name:v.get(n,None)) for name in \
                settings.table_user.fields if name!='id' \
                and settings.table_user[name].readable)
            maps['registration_id'] = \
                lambda v,p=settings.cas_provider:'%s/%s' % (p,v['user'])
            settings.login_form = CasAuth(
                casversion = 2,
                urlbase = settings.cas_provider,
                actions=['login','validate','logout'],
                maps=maps)

    def log_event(self, description, origin='auth'):
        """
        usage::

            auth.log_event(description='this happened', origin='auth')

        Writes one row into the auth_event audit table, attributing it to
        the current user when logged in.
        """
        if self.is_logged_in():
            user_id = self.user.id
        else:
            user_id = None # user unknown
        self.settings.table_event.insert(description=description,
                                         origin=origin, user_id=user_id)

    def get_or_create_user(self, keys):
        """
        Used for alternate login methods:
            If the user exists already then password is updated.
            If the user doesn't yet exist, then they are created.
        """
        table_user = self.settings.table_user
        # pick the lookup field in priority order:
        # registration_id (external id) > username > email
        if 'registration_id' in table_user.fields() and \
                'registration_id' in keys:
            username = 'registration_id'
        elif 'username' in table_user.fields():
            username = 'username'
        elif 'email' in table_user.fields():
            username = 'email'
        else:
            # Python 2 raise syntax -- this module targets Python 2
            raise SyntaxError, "user must have username or email"
        # NOTE(review): passfield is assigned but never used in this method
        passfield = self.settings.password_field
        user = self.db(table_user[username] == keys[username]).select().first()
        # external auth succeeded, so clear any pending registration key
        keys['registration_key']=''
        if user:
            user.update_record(**table_user._filter_fields(keys))
        else:
            if not 'first_name' in keys and 'first_name' in table_user.fields:
                # guarantee a non-empty first_name for display purposes
                keys['first_name'] = keys[username]
            user_id = table_user.insert(**table_user._filter_fields(keys))
            user = self.user = table_user[user_id]
            if self.settings.create_user_groups:
                # every new user gets a personal group and membership
                group_id = self.add_group("user_%s" % user_id)
                self.add_membership(group_id, user_id)
        return user

    def basic(self):
        # HTTP Basic authentication: decode "Basic base64(user:pass)" from
        # the Authorization header and delegate to login_bare
        if not self.settings.allow_basic_login:
            return False
        basic = current.request.env.http_authorization
        if not basic or not basic[:6].lower() == 'basic ':
            return False
        (username, password) = base64.b64decode(basic[6:]).split(':')
        return self.login_bare(username, password)

    def login_bare(self, username, password):
        """
        logins user

        Programmatic login (no form): validates the password, checks the
        account is active, and populates session.auth / self.user.
        Returns the user Storage on success, False otherwise.
        """
        request = current.request
        session = current.session
        table_user = self.settings.table_user
        # which field identifies the user: explicit setting > username > email
        if self.settings.login_userfield:
            userfield = self.settings.login_userfield
        elif 'username' in table_user.fields:
            userfield = 'username'
        else:
            userfield = 'email'
        passfield = self.settings.password_field
        user = self.db(table_user[userfield] == username).select().first()
        # run the candidate password through the field validators (CRYPT)
        # so it can be compared against the stored hash
        password = table_user[passfield].validate(password)[0]
        if user:
            # empty registration_key means the account is active
            if not user.registration_key and user[passfield] == password:
                user = Storage(table_user._filter_fields(user, id=True))
                session.auth = Storage(user=user, last_visit=request.now,
                                       expiration=self.settings.expiration,
                                       hmac_key = web2py_uuid())
                self.user = user
                return user
        return False

    def cas_login(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        version=2,
        ):
        # CAS *server* login endpoint: authenticates the user, then issues
        # (or reuses) a ticket and redirects back to the requesting service
        request, session = current.request, current.session
        db, table = self.db, self.settings.table_cas
        session._cas_service = request.vars.service or session._cas_service
        # only services on whitelisted domains may request tickets
        if not request.env.http_host in self.settings.cas_domains or \
                not session._cas_service:
            raise HTTP(403,'not authorized')
        def allow_access():
            # reuse an existing ticket row for this (service, user) pair,
            # otherwise mint a fresh uuid ticket
            row = table(url=session._cas_service,user_id=self.user.id)
            if row:
                row.update_record(created_on=request.now)
                uuid = row.uuid
            else:
                uuid = web2py_uuid()
                table.insert(url=session._cas_service,
                             user_id=self.user.id,
                             uuid=uuid, created_on=request.now)
            url = session._cas_service
            del session._cas_service
            redirect(url+"?ticket="+uuid)
        if self.is_logged_in():
            allow_access()
        # otherwise fall through to the normal login form; grant access
        # from its onaccept callback after a successful login
        def cas_onaccept(form, onaccept=onaccept):
            if onaccept!=DEFAULT: onaccept(form)
            allow_access()
        return self.login(next,onvalidation,cas_onaccept,log)

    def cas_validate(self,version=2):
        # CAS *server* validate endpoint: a service presents a ticket and
        # receives either a plain-text (v1) or XML (v2) response
        request = current.request
        db, table = self.db, self.settings.table_cas
        current.response.headers['Content-Type']='text'
        ticket = table(uuid=request.vars.ticket)
        url = request.env.path_info.rsplit('/',1)[0]
        if ticket: # and ticket.created_on>request.now-datetime.timedelta(60):
            user = self.settings.table_user(ticket.user_id)
            fullname = user.first_name+' '+user.last_name
            if version==1:
                raise HTTP(200,'yes\n%s:%s:%s'%(user.id,user.email,fullname))
            # assume version 2: success response carries every readable
            # user field as a cas:<field> element
            username = user.get('username',user.email)
            raise HTTP(200,'<?xml version="1.0" encoding="UTF-8"?>\n'+\
                    TAG['cas:serviceResponse'](
                    TAG['cas:authenticationSuccess'](
                        TAG['cas:user'](username),
                        *[TAG['cas:'+field.name](user[field.name]) \
                            for field in self.settings.table_user \
                            if field.readable]),
                    **{'_xmlns:cas':'http://www.yale.edu/tp/cas'}).xml())
        if version==1:
            raise HTTP(200,'no\n')
        # assume version 2: failure response
        raise HTTP(200,'<?xml version="1.0" encoding="UTF-8"?>\n'+\
                TAG['cas:serviceResponse'](
                    TAG['cas:authenticationFailure'](
                        'Ticket %s not recognized' % ticket,
                        _code='INVALID TICKET'),
                    **{'_xmlns:cas':'http://www.yale.edu/tp/cas'}).xml())

    def login(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a login form

        .. method:: Auth.login([next=DEFAULT [, onvalidation=DEFAULT
            [, onaccept=DEFAULT [, log=DEFAULT]]]])

        """
        table_user = self.settings.table_user
        # which field identifies the user: explicit setting > username > email
        if self.settings.login_userfield:
            username = self.settings.login_userfield
        elif 'username' in table_user.fields:
            username = 'username'
        else:
            username = 'email'
        # temporarily relax the login field's validators: usernames only
        # need to be non-empty; emails may optionally be format-checked
        if 'username' in table_user.fields or not self.settings.login_email_validate:
            tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty)
        else:
            tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email)
        old_requires = table_user[username].requires
        table_user[username].requires = tmpvalidator
        request = current.request
        response = current.response
        session = current.session
        passfield = self.settings.password_field
        # resolve DEFAULT arguments from request vars / settings
        if next == DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or self.settings.login_next
        if onvalidation == DEFAULT:
            onvalidation = self.settings.login_onvalidation
        if onaccept == DEFAULT:
            onaccept = self.settings.login_onaccept
        if log == DEFAULT:
            log = self.messages.login_log

        user = None # default
        # do we use our own login form, or from a central source?
        if self.settings.login_form == self:
            # local login form
            form = SQLFORM(
                table_user,
                fields=[username, passfield],
                hidden=dict(_next=next),
                showid=self.settings.showid,
                submit_button=self.messages.login_button,
                delete_label=self.messages.delete_label,
                formstyle=self.settings.formstyle,
                separator=self.settings.label_separator
                )
            if self.settings.remember_me_form:
                ## adds a new input checkbox "remember me for longer"
                addrow(form,XML("&nbsp;"),
                       DIV(XML("&nbsp;"),
                           INPUT(_type='checkbox',
                                 _class='checkbox',
                                 _id="auth_user_remember",
                                 _name="remember",
                                 ),
                           XML("&nbsp;&nbsp;"),
                           LABEL(
                               self.messages.label_remember_me,
                               _for="auth_user_remember",
                               )),"",
                       self.settings.formstyle,
                       'auth_user_remember__row')
            # per-action captcha, falling back to the global one unless
            # login_captcha was explicitly set to False
            captcha = self.settings.login_captcha or \
                (self.settings.login_captcha!=False and self.settings.captcha)
            if captcha:
                addrow(form, captcha.label, captcha, captcha.comment,
                       self.settings.formstyle,'captcha__row')
            accepted_form = False
            if form.accepts(request, session,
                            formname='login', dbio=False,
                            onvalidation=onvalidation,
                            hideerror=self.settings.hideerror):
                accepted_form = True
                # check for username in db
                user = self.db(table_user[username] == form.vars[username]).select().first()
                if user:
                    # user in db, check if registration pending or disabled
                    temp_user = user
                    if temp_user.registration_key == 'pending':
                        response.flash = self.messages.registration_pending
                        return form
                    elif temp_user.registration_key in ('disabled','blocked'):
                        response.flash = self.messages.login_disabled
                        return form
                    elif temp_user.registration_key!=None and \
                            temp_user.registration_key.strip():
                        # any other non-blank key means email not yet verified
                        response.flash = \
                            self.messages.registration_verifying
                        return form
                    # try alternate logins 1st as these have the
                    # current version of the password
                    user = None
                    for login_method in self.settings.login_methods:
                        if login_method != self and \
                                login_method(request.vars[username],
                                             request.vars[passfield]):
                            if not self in self.settings.login_methods:
                                # do not store password in db
                                form.vars[passfield] = None
                            user = self.get_or_create_user(form.vars)
                            break
                    if not user:
                        # alternates have failed, maybe because service inaccessible
                        if self.settings.login_methods[0] == self:
                            # try logging in locally using cached credentials
                            if temp_user[passfield] == form.vars.get(passfield, ''):
                                # success
                                user = temp_user
                else:
                    # user not in db
                    if not self.settings.alternate_requires_registration:
                        # we're allowed to auto-register users from external systems
                        for login_method in self.settings.login_methods:
                            if login_method != self and \
                                    login_method(request.vars[username],
                                                 request.vars[passfield]):
                                if not self in self.settings.login_methods:
                                    # do not store password in db
                                    form.vars[passfield] = None
                                user = self.get_or_create_user(form.vars)
                                break
                if not user:
                    if self.settings.login_failed_log:
                        self.log_event(self.settings.login_failed_log % request.post_vars)
                    # invalid login
                    session.flash = self.messages.invalid_login
                    redirect(self.url(args=request.args,vars=request.get_vars))
        else:
            # use a central authentication server
            cas = self.settings.login_form
            cas_user = cas.get_user()
            if cas_user:
                cas_user[passfield] = None
                user = self.get_or_create_user(table_user._filter_fields(cas_user))
            elif hasattr(cas,'login_form'):
                return cas.login_form()
            else:
                # we need to pass through login again before going on
                next = self.url('user',args='login',vars=dict(_next=next))
                redirect(cas.login_url(next))

        # process authenticated users
        if user:
            user = Storage(table_user._filter_fields(user, id=True))
            if log:
                self.log_event(log % user)
            # process authenticated users
            # user wants to be logged in for longer
            # NOTE(review): dict.has_key is Python 2 only
            session.auth = Storage(
                user = user,
                last_visit = request.now,
                expiration = self.settings.long_expiration,
                remember = request.vars.has_key("remember"),
                hmac_key = web2py_uuid()
                )
            self.user = user
            session.flash = self.messages.logged_in
        # how to continue
        if self.settings.login_form == self:
            if accepted_form:
                callback(onaccept,form)
                if isinstance(next, (list, tuple)):
                    # fix issue with 2.6
                    next = next[0]
                if next and not next[0] == '/' and next[:4] != 'http':
                    next = self.url(next.replace('[id]', str(form.vars.id)))
                redirect(next)
            # restore the validators relaxed at the top of login()
            table_user[username].requires = old_requires
            return form
        elif user:
            callback(onaccept,None)
            redirect(next)

    def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT):
        """
        logout and redirects to login

        .. method:: Auth.logout ([next=DEFAULT[, onlogout=DEFAULT[,
            log=DEFAULT]]])

        """
        if next == DEFAULT:
            next = self.settings.logout_next
        if onlogout == DEFAULT:
            onlogout = self.settings.logout_onlogout
        if onlogout:
            onlogout(self.user)
        if log == DEFAULT:
            log = self.messages.logout_log
        if log and self.user:
            self.log_event(log % self.user)
        # when an external CAS provider handled login, also log out there
        if self.settings.login_form != self:
            cas = self.settings.login_form
            cas_user = cas.get_user()
            if cas_user:
                next = cas.logout_url(next)
        current.session.auth = None
        current.session.flash = self.messages.logged_out
        if next:
            redirect(next)

    def register(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a registration form

        .. method:: Auth.register([next=DEFAULT [, onvalidation=DEFAULT
            [, onaccept=DEFAULT [, log=DEFAULT]]]])

        """
        table_user = self.settings.table_user
        request = current.request
        response = current.response
        session = current.session
        # already logged-in users go to their profile instead
        if self.is_logged_in():
            redirect(self.settings.logged_url)
        if next == DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or self.settings.register_next
        if onvalidation == DEFAULT:
            onvalidation = self.settings.register_onvalidation
        if onaccept == DEFAULT:
            onaccept = self.settings.register_onaccept
        if log == DEFAULT:
            log = self.messages.register_log
        passfield = self.settings.password_field
        formstyle = self.settings.formstyle
        form = SQLFORM(table_user,
                       fields = self.settings.register_fields,
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.register_button,
                       delete_label=self.messages.delete_label,
                       formstyle=formstyle,
                       separator=self.settings.label_separator
                       )
        # find the password row so a verification field can be inserted
        # immediately after it
        for i, row in enumerate(form[0].components):
            item = row.element('input',_name=passfield)
            if item:
                # second password input that must match the first
                form.custom.widget.password_two = \
                    INPUT(_name="password_two", _type="password",
                          requires=IS_EXPR('value==%s' % \
                          repr(request.vars.get(passfield, None)),
                          error_message=self.messages.mismatched_password))
                addrow(form, self.messages.verify_password + ':',
                       form.custom.widget.password_two,
                       self.messages.verify_password_comment,
                       formstyle,
                       '%s_%s__row' % (table_user, 'password_two'),
                       position=i+1)
                break
        captcha = self.settings.register_captcha or self.settings.captcha
        if captcha:
            addrow(form, captcha.label, captcha, captcha.comment,
                   self.settings.formstyle, 'captcha__row')

        # the registration key doubles as the email-verification token
        table_user.registration_key.default = key = web2py_uuid()
        if form.accepts(request, session, formname='register',
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            description = self.messages.group_description % form.vars
            if self.settings.create_user_groups:
                # every new user gets a personal group and membership
                group_id = self.add_group("user_%s" % form.vars.id, description)
                self.add_membership(group_id, form.vars.id)
            if self.settings.registration_requires_verification:
                # registration only stands if the verification email goes out
                if not self.settings.mailer or \
                   not self.settings.mailer.send(to=form.vars.email,
                        subject=self.messages.verify_email_subject,
                        message=self.messages.verify_email % dict(key=key)):
                    self.db.rollback()
                    response.flash = self.messages.unable_send_email
                    return form
                session.flash = self.messages.email_sent
            elif self.settings.registration_requires_approval:
                table_user[form.vars.id] = dict(registration_key='pending')
                session.flash = self.messages.registration_pending
            else:
                # no verification/approval required: activate and log in
                table_user[form.vars.id] = dict(registration_key='')
                session.flash = self.messages.registration_successful
                table_user = self.settings.table_user
                if 'username' in table_user.fields:
                    username = 'username'
                else:
                    username = 'email'
                user = self.db(table_user[username] == form.vars[username]).select().first()
                user = Storage(table_user._filter_fields(user, id=True))
                session.auth = Storage(user=user, last_visit=request.now,
                                       expiration=self.settings.expiration,
                                       hmac_key = web2py_uuid())
                self.user = user
                session.flash = self.messages.logged_in
            if log:
                self.log_event(log % form.vars)
            callback(onaccept,form)
            if not next:
                next = self.url(args = request.args)
            elif isinstance(next, (list, tuple)): ### fix issue with 2.6
                next = next[0]
            elif next and not next[0] == '/' and next[:4] != 'http':
                next = self.url(next.replace('[id]', str(form.vars.id)))
            redirect(next)
        return form

    def is_logged_in(self):
        """
        checks if the user is logged in and returns True/False.
        if so user is in auth.user as well as in session.auth.user
        """
        if self.user:
            return True
        return False

    def verify_email(
        self,
        next=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        action user to verify the registration email, XXXXXXXXXXXXXXXX

        .. method:: Auth.verify_email([next=DEFAULT [, onvalidation=DEFAULT
            [, onaccept=DEFAULT [, log=DEFAULT]]]])

        """
        # the verification key is the last URL argument
        key = current.request.args[-1]
        table_user = self.settings.table_user
        user = self.db(table_user.registration_key == key).select().first()
        if not user:
            redirect(self.settings.login_url)
        if self.settings.registration_requires_approval:
            # verified but still awaiting admin approval
            user.update_record(registration_key = 'pending')
            current.session.flash = self.messages.registration_pending
        else:
            # blank key activates the account
            user.update_record(registration_key = '')
            current.session.flash = self.messages.email_verified
        if log == DEFAULT:
            log = self.messages.verify_email_log
        if next == DEFAULT:
            next = self.settings.verify_email_next
        if onaccept == DEFAULT:
            onaccept = self.settings.verify_email_onaccept
        if log:
            self.log_event(log % user)
        callback(onaccept,user)
        redirect(next)

    def retrieve_username(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form to retrieve the user username
        (only if there is a username field)

        .. method:: Auth.retrieve_username([next=DEFAULT
            [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]])

        """
        table_user = self.settings.table_user
        # action only makes sense when usernames exist
        if not 'username' in table_user.fields:
            raise HTTP(404)
        request = current.request
        response = current.response
        session = current.session
        captcha = self.settings.retrieve_username_captcha or \
                (self.settings.retrieve_username_captcha!=False and self.settings.captcha)
        # a working mailer is required to deliver the username
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next == DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or self.settings.retrieve_username_next
        if onvalidation == DEFAULT:
            onvalidation = self.settings.retrieve_username_onvalidation
        if onaccept == DEFAULT:
            onaccept = self.settings.retrieve_username_onaccept
        if log == DEFAULT:
            log = self.messages.retrieve_username_log
        # temporarily require the email to exist in the user table
        old_requires = table_user.email.requires
        table_user.email.requires = \
            [IS_IN_DB(self.db, table_user.email,
                      error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.submit_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if captcha:
            addrow(form, captcha.label, captcha,
                   captcha.comment,self.settings.formstyle, 'captcha__row')

        if form.accepts(request, session,
                        formname='retrieve_username', dbio=False,
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            user = self.db(table_user.email == form.vars.email).select().first()
            if not user:
                current.session.flash = \
                    self.messages.invalid_email
                redirect(self.url(args=request.args))
            username = user.username
            self.settings.mailer.send(to=form.vars.email,
                    subject=self.messages.retrieve_username_subject,
                    message=self.messages.retrieve_username \
                     % dict(username=username))
            session.flash = self.messages.email_sent
            if log:
                self.log_event(log % user)
            callback(onaccept,form)
            if not next:
                next = self.url(args = request.args)
            elif isinstance(next, (list, tuple)): ### fix issue with 2.6
                next = next[0]
            elif next and not next[0] == '/' and next[:4] != 'http':
                next = self.url(next.replace('[id]', str(form.vars.id)))
            redirect(next)
        # restore original email validators
        table_user.email.requires = old_requires
        return form

    def random_password(self):
        # builds a 12-character password: 3 rounds of one lowercase, one
        # uppercase, one digit, one special char, then shuffles the result.
        # NOTE(review): string.lowercase/string.uppercase and the `random`
        # module are Python 2 / non-cryptographic; `secrets` with
        # string.ascii_* would be preferred on Python 3.
        import string
        import random
        password = ''
        specials=r'!#$*'
        for i in range(0,3):
            password += random.choice(string.lowercase)
            password += random.choice(string.uppercase)
            password += random.choice(string.digits)
            password += random.choice(specials)
        return ''.join(random.sample(password,len(password)))

    def reset_password_deprecated(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form to reset the user password (deprecated)

        .. method:: Auth.reset_password_deprecated([next=DEFAULT
            [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]])

        Old flow: emails the user a newly generated plain-text password
        (superseded by the key-based reset_password flow below).
        """
        table_user = self.settings.table_user
        request = current.request
        response = current.response
        session = current.session
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next == DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or self.settings.retrieve_password_next
        if onvalidation == DEFAULT:
            onvalidation = self.settings.retrieve_password_onvalidation
        if onaccept == DEFAULT:
            onaccept = self.settings.retrieve_password_onaccept
        if log == DEFAULT:
            log = self.messages.retrieve_password_log
        # temporarily require the email to exist in the user table
        old_requires = table_user.email.requires
        table_user.email.requires = \
            [IS_IN_DB(self.db, table_user.email,
                      error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.submit_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if form.accepts(request, session,
                        formname='retrieve_password', dbio=False,
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            user = self.db(table_user.email == form.vars.email).select().first()
            if not user:
                current.session.flash = \
                    self.messages.invalid_email
                redirect(self.url(args=request.args))
            elif user.registration_key in ('pending','disabled','blocked'):
                current.session.flash = \
                    self.messages.registration_pending
                redirect(self.url(args=request.args))
            # generate a new password, hash it via the field validators,
            # store it, and email the plain text to the user
            password = self.random_password()
            passfield = self.settings.password_field
            d = {passfield: table_user[passfield].validate(password)[0],
                 'registration_key': ''}
            user.update_record(**d)
            if self.settings.mailer and \
               self.settings.mailer.send(to=form.vars.email,
                        subject=self.messages.retrieve_password_subject,
                        message=self.messages.retrieve_password \
                        % dict(password=password)):
                session.flash = self.messages.email_sent
            else:
                session.flash = self.messages.unable_to_send_email
            if log:
                self.log_event(log % user)
            callback(onaccept,form)
            if not next:
                next = self.url(args = request.args)
            elif isinstance(next, (list, tuple)): ### fix issue with 2.6
                next = next[0]
            elif next and not next[0] == '/' and next[:4] != 'http':
                next = self.url(next.replace('[id]', str(form.vars.id)))
            redirect(next)
        # restore original email validators
        table_user.email.requires = old_requires
        return form

    def reset_password(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form to reset the user password

        .. method:: Auth.reset_password([next=DEFAULT
            [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]])

        """
        table_user = self.settings.table_user
        request = current.request
        # response = current.response
        session = current.session
        if next == DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or self.settings.reset_password_next
        try:
            # key format: '<unix-timestamp>-<uuid>'; reject keys older
            # than 24 hours or not matching any user record
            key = request.vars.key or request.args[-1]
            t0 = int(key.split('-')[0])
            if time.time()-t0 > 60*60*24: raise Exception
            user = self.db(table_user.reset_password_key == key).select().first()
            if not user: raise Exception
        except Exception:
            session.flash = self.messages.invalid_reset_password
            redirect(next)
        passfield = self.settings.password_field
        form = SQLFORM.factory(
            Field('new_password', 'password',
                  label=self.messages.new_password,
                  requires=self.settings.table_user[passfield].requires),
            Field('new_password2', 'password',
                  label=self.messages.verify_password,
                  requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
                                    self.messages.mismatched_password)]),
            submit_button=self.messages.password_reset_button,
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
        )
        if form.accepts(request,session,hideerror=self.settings.hideerror):
            # store the new (validator-hashed) password, activate the
            # account, and invalidate the reset key
            user.update_record(**{passfield:form.vars.new_password,
                                  'registration_key':'',
                                  'reset_password_key':''})
            session.flash = self.messages.password_changed
            redirect(next)
        return form

    def request_reset_password(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a form to reset the user password

        ..
method:: Auth.reset_password([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.settings.table_user request = current.request response = current.response session = current.session captcha = self.settings.retrieve_password_captcha or \ (self.settings.retrieve_password_captcha!=False and self.settings.captcha) if next == DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.request_reset_password_next if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if onvalidation == DEFAULT: onvalidation = self.settings.reset_password_onvalidation if onaccept == DEFAULT: onaccept = self.settings.reset_password_onaccept if log == DEFAULT: log = self.messages.reset_password_log # old_requires = table_user.email.requires <<< perhaps should be restored table_user.email.requires = [ IS_EMAIL(error_message=self.messages.invalid_email), IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] form = SQLFORM(table_user, fields=['email'], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.password_reset_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle,'captcha__row') if form.accepts(request, session, formname='reset_password', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): user = self.db(table_user.email == form.vars.email).select().first() if not user: session.flash = self.messages.invalid_email redirect(self.url(args=request.args)) elif user.registration_key in ('pending','disabled','blocked'): session.flash = self.messages.registration_pending redirect(self.url(args=request.args)) reset_password_key = str(int(time.time()))+'-' + web2py_uuid() if self.settings.mailer.send(to=form.vars.email, 
                                  subject=self.messages.reset_password_subject,
                                  message=self.messages.reset_password % \
                                  dict(key=reset_password_key)):
                session.flash = self.messages.email_sent
                # store the key only after the mail actually went out, so a
                # failed send never leaves a live reset key on the record
                user.update_record(reset_password_key=reset_password_key)
            else:
                session.flash = self.messages.unable_to_send_email
            if log:
                self.log_event(log % user)
            callback(onaccept,form)
            # normalize the post-accept redirect target
            if not next:
                next = self.url(args = request.args)
            elif isinstance(next, (list, tuple)): ### fix issue with 2.6
                next = next[0]
            elif next and not next[0] == '/' and next[:4] != 'http':
                next = self.url(next.replace('[id]', str(form.vars.id)))
            redirect(next)
        # old_requires = table_user.email.requires
        return form

    def retrieve_password(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """
        returns a password-reset form; dispatches to the e-mail-verified
        reset flow or to the deprecated "mail a new random password" flow
        depending on settings.reset_password_requires_verification
        """
        if self.settings.reset_password_requires_verification:
            return self.request_reset_password(next,onvalidation,onaccept,log)
        else:
            return self.reset_password_deprecated(next,onvalidation,onaccept,log)

    def change_password(
        self,
        next=DEFAULT,
        onvalidation=DEFAULT,
        onaccept=DEFAULT,
        log=DEFAULT,
        ):
        """ returns a form that lets the user change password ..
method:: Auth.change_password([next=DEFAULT[, onvalidation=DEFAULT[, onaccept=DEFAULT[, log=DEFAULT]]]]) """ if not self.is_logged_in(): redirect(self.settings.login_url) db = self.db table_user = self.settings.table_user usern = self.settings.table_user_name s = db(table_user.id == self.user.id) request = current.request session = current.session if next == DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.change_password_next if onvalidation == DEFAULT: onvalidation = self.settings.change_password_onvalidation if onaccept == DEFAULT: onaccept = self.settings.change_password_onaccept if log == DEFAULT: log = self.messages.change_password_log passfield = self.settings.password_field form = SQLFORM.factory( Field('old_password', 'password', label=self.messages.old_password, requires=validators( table_user[passfield].requires, IS_IN_DB(s, '%s.%s' % (usern, passfield), error_message=self.messages.invalid_password))), Field('new_password', 'password', label=self.messages.new_password, requires=table_user[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR('value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button=self.messages.password_change_button, formstyle = self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, formname='change_password', onvalidation=onvalidation, hideerror=self.settings.hideerror): d = {passfield: form.vars.new_password} s.update(**d) session.flash = self.messages.password_changed if log: self.log_event(log % self.user) callback(onaccept,form) if not next: next = self.url(args=request.args) elif isinstance(next, (list, tuple)): ### fix issue with 2.6 next = next[0] elif next and not next[0] == '/' and next[:4] != 'http': next = self.url(next.replace('[id]', str(form.vars.id))) redirect(next) return form def profile( self, next=DEFAULT, onvalidation=DEFAULT, 
onaccept=DEFAULT, log=DEFAULT, ): """ returns a form that lets the user change his/her profile .. method:: Auth.profile([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.settings.table_user if not self.is_logged_in(): redirect(self.settings.login_url) passfield = self.settings.password_field self.settings.table_user[passfield].writable = False request = current.request session = current.session if next == DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.profile_next if onvalidation == DEFAULT: onvalidation = self.settings.profile_onvalidation if onaccept == DEFAULT: onaccept = self.settings.profile_onaccept if log == DEFAULT: log = self.messages.profile_log form = SQLFORM( table_user, self.user.id, fields = self.settings.profile_fields, hidden = dict(_next=next), showid = self.settings.showid, submit_button = self.messages.profile_save_button, delete_label = self.messages.delete_label, upload = self.settings.download_url, formstyle = self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, formname='profile', onvalidation=onvalidation,hideerror=self.settings.hideerror): self.user.update(table_user._filter_fields(form.vars)) session.flash = self.messages.profile_updated if log: self.log_event(log % self.user) callback(onaccept,form) if not next: next = self.url(args=request.args) elif isinstance(next, (list, tuple)): ### fix issue with 2.6 next = next[0] elif next and not next[0] == '/' and next[:4] != 'http': next = self.url(next.replace('[id]', str(form.vars.id))) redirect(next) return form def is_impersonating(self): return current.session.auth.impersonator def impersonate(self, user_id=DEFAULT): """ usage: POST TO http://..../impersonate request.post_vars.user_id=<id> set request.post_vars.user_id to 0 to restore original user. 
requires impersonator is logged in and has_permission('impersonate', 'auth_user', user_id) """ request = current.request session = current.session auth = session.auth if not self.is_logged_in(): raise HTTP(401, "Not Authorized") current_id = auth.user.id requested_id = user_id if user_id == DEFAULT: user_id = current.request.post_vars.user_id if user_id and user_id != self.user.id and user_id != '0': if not self.has_permission('impersonate', self.settings.table_user_name, user_id): raise HTTP(403, "Forbidden") user = self.settings.table_user(user_id) if not user: raise HTTP(401, "Not Authorized") auth.impersonator = cPickle.dumps(session) auth.user.update( self.settings.table_user._filter_fields(user, True)) self.user = auth.user if self.settings.login_onaccept: form = Storage(dict(vars=self.user)) self.settings.login_onaccept(form) log = self.messages.impersonate_log if log: self.log_event(log % dict(id=current_id,other_id=auth.user.id)) elif user_id in (0, '0') and self.is_impersonating(): session.clear() session.update(cPickle.loads(auth.impersonator)) self.user = session.auth.user if requested_id == DEFAULT and not request.post_vars: return SQLFORM.factory(Field('user_id','integer')) return self.user def groups(self): """ displays the groups and their roles for the logged in user """ if not self.is_logged_in(): redirect(self.settings.login_url) memberships = self.db(self.settings.table_membership.user_id == self.user.id).select() table = TABLE() for membership in memberships: groups = self.db(self.settings.table_group.id == membership.group_id).select() if groups: group = groups[0] table.append(TR(H3(group.role, '(%s)' % group.id))) table.append(TR(P(group.description))) if not memberships: return None return table def not_authorized(self): """ you can change the view for this page to make it look as you like """ return 'ACCESS DENIED' def requires(self, condition): """ decorator that prevents access to action if not logged in """ def decorator(action): def 
            f(*a, **b):
                # basic-auth-only mode: reject anything that did not
                # authenticate via HTTP basic
                if self.settings.allow_basic_login_only and not self.basic():
                    if current.request.is_restful:
                        raise HTTP(403,"Not authorized")
                    return call_or_redirect(self.settings.on_failed_authorization)
                if not condition:
                    if current.request.is_restful:
                        raise HTTP(403,"Not authorized")
                    if not self.basic() and not self.is_logged_in():
                        # not authenticated: bounce to login, preserving
                        # the requested URL in _next
                        request = current.request
                        next = URL(r=request,args=request.args,
                                   vars=request.get_vars)
                        current.session.flash = current.response.flash
                        return call_or_redirect(
                            self.settings.on_failed_authentication,
                            self.settings.login_url + '?_next='+urllib.quote(next))
                    else:
                        # authenticated but condition failed: authorization error
                        current.session.flash = self.messages.access_denied
                        return call_or_redirect(self.settings.on_failed_authorization)
                return action(*a, **b)
            # make the wrapper look like the wrapped action
            f.__doc__ = action.__doc__
            f.__name__ = action.__name__
            f.__dict__.update(action.__dict__)
            return f
        return decorator

    def requires_login(self):
        """
        decorator that prevents access to action if not logged in
        """
        def decorator(action):
            def f(*a, **b):
                if self.settings.allow_basic_login_only and not self.basic():
                    if current.request.is_restful:
                        raise HTTP(403,"Not authorized")
                    return call_or_redirect(self.settings.on_failed_authorization)
                if not self.basic() and not self.is_logged_in():
                    if current.request.is_restful:
                        raise HTTP(403,"Not authorized")
                    # redirect to login, preserving the requested URL
                    request = current.request
                    next = URL(r=request,args=request.args,
                               vars=request.get_vars)
                    current.session.flash = current.response.flash
                    return call_or_redirect(
                        self.settings.on_failed_authentication,
                        self.settings.login_url + '?_next='+urllib.quote(next)
                        )
                return action(*a, **b)
            f.__doc__ = action.__doc__
            f.__name__ = action.__name__
            f.__dict__.update(action.__dict__)
            return f
        return decorator

    def requires_membership(self, role=None, group_id=None):
        """
        decorator that prevents access to action if not logged in or
        if user logged in is not a member of group_id.
        If role is provided instead of group_id then the
        group_id is calculated.
""" def decorator(action): def f(*a, **b): if self.settings.allow_basic_login_only and not self.basic(): if current.request.is_restful: raise HTTP(403,"Not authorized") return call_or_redirect(self.settings.on_failed_authorization) if not self.basic() and not self.is_logged_in(): if current.request.is_restful: raise HTTP(403,"Not authorized") request = current.request next = URL(r=request,args=request.args, vars=request.get_vars) current.session.flash = current.response.flash return call_or_redirect( self.settings.on_failed_authentication, self.settings.login_url + '?_next='+urllib.quote(next) ) if not self.has_membership(group_id=group_id, role=role): current.session.flash = self.messages.access_denied return call_or_redirect(self.settings.on_failed_authorization) return action(*a, **b) f.__doc__ = action.__doc__ f.__name__ = action.__name__ f.__dict__.update(action.__dict__) return f return decorator def requires_permission( self, name, table_name='', record_id=0, ): """ decorator that prevents access to action if not logged in or if user logged in is not a member of any group (role) that has 'name' access to 'table_name', 'record_id'. 
""" def decorator(action): def f(*a, **b): if self.settings.allow_basic_login_only and not self.basic(): if current.request.is_restful: raise HTTP(403,"Not authorized") return call_or_redirect(self.settings.on_failed_authorization) if not self.basic() and not self.is_logged_in(): if current.request.is_restful: raise HTTP(403,"Not authorized") request = current.request next = URL(r=request,args=request.args, vars=request.get_vars) current.session.flash = current.response.flash return call_or_redirect( self.settings.on_failed_authentication, self.settings.login_url + '?_next='+urllib.quote(next) ) if not self.has_permission(name, table_name, record_id): current.session.flash = self.messages.access_denied return call_or_redirect(self.settings.on_failed_authorization) return action(*a, **b) f.__doc__ = action.__doc__ f.__name__ = action.__name__ f.__dict__.update(action.__dict__) return f return decorator def requires_signature(self): """ decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. 
""" def decorator(action): def f(*a, **b): if self.settings.allow_basic_login_only and not self.basic(): if current.request.is_restful: raise HTTP(403,"Not authorized") return call_or_redirect(self.settings.on_failed_authorization) if not self.basic() and not self.is_logged_in(): if current.request.is_restful: raise HTTP(403,"Not authorized") request = current.request next = URL(r=request,args=request.args, vars=request.get_vars) current.session.flash = current.response.flash return call_or_redirect( self.settings.on_failed_authentication, self.settings.login_url + '?_next='+urllib.quote(next) ) if not URL.verify(current.request,user_signature=True): current.session.flash = self.messages.access_denied return call_or_redirect(self.settings.on_failed_authorization) return action(*a, **b) f.__doc__ = action.__doc__ f.__name__ = action.__name__ f.__dict__.update(action.__dict__) return f return decorator def add_group(self, role, description=''): """ creates a group associated to a role """ group_id = self.settings.table_group.insert(role=role, description=description) log = self.messages.add_group_log if log: self.log_event(log % dict(group_id=group_id, role=role)) return group_id def del_group(self, group_id): """ deletes a group """ self.db(self.settings.table_group.id == group_id).delete() self.db(self.settings.table_membership.group_id == group_id).delete() self.db(self.settings.table_permission.group_id == group_id).delete() log = self.messages.del_group_log if log: self.log_event(log % dict(group_id=group_id)) def id_group(self, role): """ returns the group_id of the group specified by the role """ rows = self.db(self.settings.table_group.role == role).select() if not rows: return None return rows[0].id def user_group(self, user_id = None): """ returns the group_id of the group uniquely associated to this user i.e. 
role=user:[user_id] """ if not user_id and self.user: user_id = self.user.id role = 'user_%s' % user_id return self.id_group(role) def has_membership(self, group_id=None, user_id=None, role=None): """ checks if user is member of group_id or role """ group_id = group_id or self.id_group(role) try: group_id = int(group_id) except: group_id = self.id_group(group_id) # interpret group_id as a role if not user_id and self.user: user_id = self.user.id membership = self.settings.table_membership if self.db((membership.user_id == user_id) & (membership.group_id == group_id)).select(): r = True else: r = False log = self.messages.has_membership_log if log: self.log_event(log % dict(user_id=user_id, group_id=group_id, check=r)) return r def add_membership(self, group_id=None, user_id=None, role=None): """ gives user_id membership of group_id or role if user_id==None than user_id is that of current logged in user """ group_id = group_id or self.id_group(role) try: group_id = int(group_id) except: group_id = self.id_group(group_id) # interpret group_id as a role if not user_id and self.user: user_id = self.user.id membership = self.settings.table_membership record = membership(user_id = user_id,group_id = group_id) if record: return record.id else: id = membership.insert(group_id=group_id, user_id=user_id) log = self.messages.add_membership_log if log: self.log_event(log % dict(user_id=user_id, group_id=group_id)) return id def del_membership(self, group_id, user_id=None, role=None): """ revokes membership from group_id to user_id if user_id==None than user_id is that of current logged in user """ group_id = group_id or self.id_group(role) if not user_id and self.user: user_id = self.user.id membership = self.settings.table_membership log = self.messages.del_membership_log if log: self.log_event(log % dict(user_id=user_id, group_id=group_id)) return self.db(membership.user_id == user_id)(membership.group_id == group_id).delete() def has_permission( self, name='any', 
        table_name='',
        record_id=0,
        user_id=None,
        group_id=None,
        ):
        """
        checks if user_id or current logged in user is member of
        a group that has 'name' permission on 'table_name' and 'record_id'
        if group_id is passed, it checks whether the group has the permission
        """
        if not user_id and not group_id and self.user:
            user_id = self.user.id
        if user_id:
            # collect every group the user belongs to
            membership = self.settings.table_membership
            rows = self.db(membership.user_id
                           == user_id).select(membership.group_id)
            groups = set([row.group_id for row in rows])
            if group_id and not group_id in groups:
                return False
        else:
            groups = set([group_id])
        permission = self.settings.table_permission
        rows = self.db(permission.name == name)(
            permission.table_name == str(table_name))(
            permission.record_id == record_id).select(permission.group_id)
        groups_required = set([row.group_id for row in rows])
        # a permission stored with record_id==0 acts as a table-wide grant
        if record_id:
            rows = self.db(permission.name == name)(
                permission.table_name == str(table_name))(
                permission.record_id == 0).select(permission.group_id)
            groups_required = groups_required.union(
                set([row.group_id for row in rows]))
        if groups.intersection(groups_required):
            r = True
        else:
            r = False
        log = self.messages.has_permission_log
        if log and user_id:
            self.log_event(log % dict(user_id=user_id, name=name,
                                      table_name=table_name,
                                      record_id=record_id))
        return r

    def add_permission(
        self,
        group_id,
        name='any',
        table_name='',
        record_id=0,
        ):
        """
        gives group_id 'name' access to 'table_name' and 'record_id'
        """
        permission = self.settings.table_permission
        # group_id==0 means "the personal group of the current user"
        if group_id == 0:
            group_id = self.user_group()
        id = permission.insert(group_id=group_id, name=name,
                               table_name=str(table_name),
                               record_id=long(record_id))
        log = self.messages.add_permission_log
        if log:
            self.log_event(log % dict(permission_id=id, group_id=group_id,
                                      name=name, table_name=table_name,
                                      record_id=record_id))
        return id

    def del_permission(
        self,
        group_id,
        name='any',
        table_name='',
        record_id=0,
        ):
        """
        revokes group_id 'name' access to 'table_name' and 'record_id'
        """
        permission =
        self.settings.table_permission
        log = self.messages.del_permission_log
        if log:
            self.log_event(log % dict(group_id=group_id, name=name,
                                      table_name=table_name,
                                      record_id=record_id))
        return self.db(permission.group_id == group_id)(
            permission.name == name)(
            permission.table_name == str(table_name))(
            permission.record_id == long(record_id)).delete()

    def accessible_query(self, name, table, user_id=None):
        """
        returns a query with all accessible records for user_id or
        the current logged in user
        this method does not work on GAE because uses JOIN and IN

        example::

           db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL)

        """
        if not user_id:
            user_id = self.user.id
        # a blanket permission on record_id==0 grants access to all records
        if self.has_permission(name, table, 0, user_id):
            return table.id > 0
        db = self.db
        membership = self.settings.table_membership
        permission = self.settings.table_permission
        # records whose id appears among the user's per-record grants
        return table.id.belongs(db(membership.user_id == user_id)\
                                  (membership.group_id == permission.group_id)\
                                  (permission.name == name)\
                                  (permission.table_name == table)\
                                  ._select(permission.record_id))


class Crud(object):

    def url(self, f=None, args=[], vars={}):
        """ this should point to the controller that exposes
        download and crud
        """
        # NOTE(review): mutable defaults (args=[], vars={}) are shared across
        # calls; safe only as long as nothing mutates them — confirm
        return URL(c=self.settings.controller,f=f,args=args,vars=vars)

    def __init__(self, environment, db=None, controller='default'):
        # accept the DAL either as `db` or (legacy) as first positional arg
        self.db = db
        if not db and environment and isinstance(environment,DAL):
            self.db = environment
        elif not db:
            raise SyntaxError, "must pass db as first or second argument"
        self.environment = current
        settings = self.settings = Settings()
        settings.auth = None
        settings.logger = None
        settings.create_next = None
        settings.update_next = None
        settings.controller = controller
        settings.delete_next = self.url()
        settings.download_url = self.url('download')
        settings.create_onvalidation = StorageList()
        settings.update_onvalidation = StorageList()
        settings.delete_onvalidation = StorageList()
        settings.create_onaccept = StorageList()
        settings.update_onaccept = StorageList()
        settings.update_ondelete =
StorageList() settings.delete_onaccept = StorageList() settings.update_deletable = True settings.showid = False settings.keepvalues = False settings.create_captcha = None settings.update_captcha = None settings.captcha = None settings.formstyle = 'table3cols' settings.label_separator = ': ' settings.hideerror = False settings.detect_record_change = True settings.hmac_key = None settings.lock_keys = True messages = self.messages = Messages(current.T) messages.submit_button = 'Submit' messages.delete_label = 'Check to delete:' messages.record_created = 'Record Created' messages.record_updated = 'Record Updated' messages.record_deleted = 'Record Deleted' messages.update_log = 'Record %(id)s updated' messages.create_log = 'Record %(id)s created' messages.read_log = 'Record %(id)s read' messages.delete_log = 'Record %(id)s deleted' messages.lock_keys = True def __call__(self): args = current.request.args if len(args) < 1: raise HTTP(404) elif args[0] == 'tables': return self.tables() elif len(args) > 1 and not args(1) in self.db.tables: raise HTTP(404) table = self.db[args(1)] if args[0] == 'create': return self.create(table) elif args[0] == 'select': return self.select(table,linkto=self.url(args='read')) elif args[0] == 'search': form, rows = self.search(table,linkto=self.url(args='read')) return DIV(form,SQLTABLE(rows)) elif args[0] == 'read': return self.read(table, args(2)) elif args[0] == 'update': return self.update(table, args(2)) elif args[0] == 'delete': return self.delete(table, args(2)) else: raise HTTP(404) def log_event(self, message): if self.settings.logger: self.settings.logger.log_event(message, 'crud') def has_permission(self, name, table, record=0): if not self.settings.auth: return True try: record_id = record.id except: record_id = record return self.settings.auth.has_permission(name, str(table), record_id) def tables(self): return TABLE(*[TR(A(name, _href=self.url(args=('select',name)))) \ for name in self.db.tables]) @staticmethod def 
archive(form,archive_table=None,current_record='current_record'): """ If you have a table (db.mytable) that needs full revision history you can just do:: form=crud.update(db.mytable,myrecord,onaccept=crud.archive) crud.archive will define a new table "mytable_archive" and store the previous record in the newly created table including a reference to the current record. If you want to access such table you need to define it yourself in a model:: db.define_table('mytable_archive', Field('current_record',db.mytable), db.mytable) Notice such table includes all fields of db.mytable plus one: current_record. crud.archive does not timestamp the stored record unless your original table has a fields like:: db.define_table(..., Field('saved_on','datetime', default=request.now,update=request.now,writable=False), Field('saved_by',auth.user, default=auth.user_id,update=auth.user_id,writable=False), there is nothing special about these fields since they are filled before the record is archived. If you want to change the archive table name and the name of the reference field you can do, for example:: db.define_table('myhistory', Field('parent_record',db.mytable), db.mytable) and use it as:: form=crud.update(db.mytable,myrecord, onaccept=lambda form:crud.archive(form, archive_table=db.myhistory, current_record='parent_record')) """ old_record = form.record if not old_record: return None table = form.table if not archive_table: archive_table_name = '%s_archive' % table if archive_table_name in table._db: archive_table = table._db[archive_table_name] else: archive_table = table._db.define_table(archive_table_name, Field(current_record,table), table) new_record = {current_record:old_record.id} for fieldname in archive_table.fields: if not fieldname in ['id',current_record] and fieldname in old_record: new_record[fieldname]=old_record[fieldname] id = archive_table.insert(**new_record) return id def update( self, table, record, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, 
ondelete=DEFAULT, log=DEFAULT, message=DEFAULT, deletable=DEFAULT, formname=DEFAULT, ): """ .. method:: Crud.update(table, record, [next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT [, message=DEFAULT[, deletable=DEFAULT]]]]]]) """ if not (isinstance(table, self.db.Table) or table in self.db.tables) \ or (isinstance(record, str) and not str(record).isdigit()): raise HTTP(404) if not isinstance(table, self.db.Table): table = self.db[table] try: record_id = record.id except: record_id = record or 0 if record_id and not self.has_permission('update', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) if not record_id \ and not self.has_permission('create', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) request = current.request response = current.response session = current.session if request.extension == 'json' and request.vars.json: request.vars.update(simplejson.loads(request.vars.json)) if next == DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.update_next if onvalidation == DEFAULT: onvalidation = self.settings.update_onvalidation if onaccept == DEFAULT: onaccept = self.settings.update_onaccept if ondelete == DEFAULT: ondelete = self.settings.update_ondelete if log == DEFAULT: log = self.messages.update_log if deletable == DEFAULT: deletable = self.settings.update_deletable if message == DEFAULT: message = self.messages.record_updated form = SQLFORM( table, record, hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, deletable=deletable, upload=self.settings.download_url, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) self.accepted = False self.deleted = False captcha = self.settings.update_captcha or \ self.settings.captcha if record and captcha: addrow(form, captcha.label, captcha, captcha.comment, 
self.settings.formstyle,'captcha__row') captcha = self.settings.create_captcha or \ self.settings.captcha if not record and captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle,'captcha__row') if not request.extension in ('html','load'): (_session, _formname) = (None, None) else: (_session, _formname) = \ (session, '%s/%s' % (table._tablename, form.record_id)) if formname!=DEFAULT: _formname = formname keepvalues = self.settings.keepvalues if request.vars.delete_this_record: keepvalues = False if isinstance(onvalidation,StorageList): onvalidation=onvalidation.get(table._tablename, []) if form.accepts(request, _session, formname=_formname, onvalidation=onvalidation, keepvalues=keepvalues, hideerror=self.settings.hideerror, detect_record_change = self.settings.detect_record_change): self.accepted = True response.flash = message if log: self.log_event(log % form.vars) if request.vars.delete_this_record: self.deleted = True message = self.messages.record_deleted callback(ondelete,form,table._tablename) response.flash = message callback(onaccept,form,table._tablename) if not request.extension in ('html','load'): raise HTTP(200, 'RECORD CREATED/UPDATED') if isinstance(next, (list, tuple)): ### fix issue with 2.6 next = next[0] if next: # Only redirect when explicit if next[0] != '/' and next[:4] != 'http': next = URL(r=request, f=next.replace('[id]', str(form.vars.id))) session.flash = response.flash redirect(next) elif not request.extension in ('html','load'): raise HTTP(401) return form def create( self, table, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, message=DEFAULT, formname=DEFAULT, ): """ .. 
            method:: Crud.create(table, [next=DEFAULT
            [, onvalidation=DEFAULT
            [, onaccept=DEFAULT
            [, log=DEFAULT[, message=DEFAULT]]]]])

        """
        # resolve per-call defaults from the Crud settings/messages
        if next == DEFAULT:
            next = self.settings.create_next
        if onvalidation == DEFAULT:
            onvalidation = self.settings.create_onvalidation
        if onaccept == DEFAULT:
            onaccept = self.settings.create_onaccept
        if log == DEFAULT:
            log = self.messages.create_log
        if message == DEFAULT:
            message = self.messages.record_created
        # create is update() with record=None and deletion disabled
        return self.update(
            table,
            None,
            next=next,
            onvalidation=onvalidation,
            onaccept=onaccept,
            log=log,
            message=message,
            deletable=False,
            formname=formname,
            )

    def read(self, table, record):
        """
        returns a read-only SQLFORM for `record` of `table`, honoring the
        'read' permission; non-HTML requests get the filtered record dict
        """
        # reject unknown tables and non-numeric record ids
        if not (isinstance(table, self.db.Table) or table in self.db.tables) \
                or (isinstance(record, str) and not str(record).isdigit()):
            raise HTTP(404)
        if not isinstance(table, self.db.Table):
            table = self.db[table]
        if not self.has_permission('read', table, record):
            redirect(self.settings.auth.settings.on_failed_authorization)
        form = SQLFORM(
            table,
            record,
            readonly=True,
            comments=False,
            upload=self.settings.download_url,
            showid=self.settings.showid,
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
            )
        # json/xml/... requests get the raw record instead of a form
        if not current.request.extension in ('html','load'):
            return table._filter_fields(form.record, id=True)
        return form

    def delete(
        self,
        table,
        record_id,
        next=DEFAULT,
        message=DEFAULT,
        ):
        """
        ..
method:: Crud.delete(table, record_id, [next=DEFAULT [, message=DEFAULT]]) """ if not (isinstance(table, self.db.Table) or table in self.db.tables) \ or not str(record_id).isdigit(): raise HTTP(404) if not isinstance(table, self.db.Table): table = self.db[table] if not self.has_permission('delete', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) request = current.request session = current.session if next == DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.delete_next if message == DEFAULT: message = self.messages.record_deleted record = table[record_id] if record: callback(self.settings.delete_onvalidation,record) del table[record_id] callback(self.settings.delete_onaccept,record,table._tablename) session.flash = message if next: # Only redirect when explicit redirect(next) def rows( self, table, query=None, fields=None, orderby=None, limitby=None, ): request = current.request if not (isinstance(table, self.db.Table) or table in self.db.tables): raise HTTP(404) if not self.has_permission('select', table): redirect(self.settings.auth.settings.on_failed_authorization) #if record_id and not self.has_permission('select', table): # redirect(self.settings.auth.settings.on_failed_authorization) if not isinstance(table, self.db.Table): table = self.db[table] if not query: query = table.id > 0 if not fields: fields = [field for field in table if field.readable] rows = self.db(query).select(*fields,**dict(orderby=orderby, limitby=limitby)) return rows def select( self, table, query=None, fields=None, orderby=None, limitby=None, headers={}, **attr ): rows = self.rows(table,query,fields,orderby,limitby) if not rows: return None # Nicer than an empty table. 
if not 'upload' in attr: attr['upload'] = self.url('download') if not current.request.extension in ('html','load'): return rows.as_list() if not headers: if isinstance(table,str): table = self.db[table] headers = dict((str(k),k.label) for k in table) return SQLTABLE(rows,headers=headers,**attr) def get_format(self, field): rtable = field._db[field.type[10:]] format = rtable.get('_format', None) if format and isinstance(format, str): return format[2:-2] return field.name def get_query(self, field, op, value, refsearch=False): try: if refsearch: format = self.get_format(field) if op == 'equals': if not refsearch: return field == value else: return lambda row: row[field.name][format] == value elif op == 'not equal': if not refsearch: return field != value else: return lambda row: row[field.name][format] != value elif op == 'greater than': if not refsearch: return field > value else: return lambda row: row[field.name][format] > value elif op == 'less than': if not refsearch: return field < value else: return lambda row: row[field.name][format] < value elif op == 'starts with': if not refsearch: return field.like(value+'%') else: return lambda row: str(row[field.name][format]).startswith(value) elif op == 'ends with': if not refsearch: return field.like('%'+value) else: return lambda row: str(row[field.name][format]).endswith(value) elif op == 'contains': if not refsearch: return field.like('%'+value+'%') else: return lambda row: value in row[field.name][format] except: return None def search(self, *tables, **args): """ Creates a search form and its results for a table Example usage: form, results = crud.search(db.test, queries = ['equals', 'not equal', 'contains'], query_labels={'equals':'Equals', 'not equal':'Not equal'}, fields = ['id','children'], field_labels = {'id':'ID','children':'Children'}, zero='Please choose', query = (db.test.id > 0)&(db.test.id != 3) ) """ table = tables[0] fields = args.get('fields', table.fields) request = current.request db = self.db if 
not (isinstance(table, db.Table) or table in db.tables): raise HTTP(404) attributes = {} for key in ('orderby','groupby','left','distinct','limitby','cache'): if key in args: attributes[key]=args[key] tbl = TABLE() selected = []; refsearch = []; results = [] ops = args.get('queries', []) zero = args.get('zero', '') if not ops: ops = ['equals', 'not equal', 'greater than', 'less than', 'starts with', 'ends with', 'contains'] ops.insert(0,zero) query_labels = args.get('query_labels', {}) query = args.get('query',table.id > 0) field_labels = args.get('field_labels',{}) for field in fields: field = table[field] if not field.readable: continue fieldname = field.name chkval = request.vars.get('chk' + fieldname, None) txtval = request.vars.get('txt' + fieldname, None) opval = request.vars.get('op' + fieldname, None) row = TR(TD(INPUT(_type = "checkbox", _name = "chk" + fieldname, _disabled = (field.type == 'id'), value = (field.type == 'id' or chkval == 'on'))), TD(field_labels.get(fieldname,field.label)), TD(SELECT([OPTION(query_labels.get(op,op), _value=op) for op in ops], _name = "op" + fieldname, value = opval)), TD(INPUT(_type = "text", _name = "txt" + fieldname, _value = txtval, _id='txt' + fieldname, _class = str(field.type)))) tbl.append(row) if request.post_vars and (chkval or field.type=='id'): if txtval and opval != '': if field.type[0:10] == 'reference ': refsearch.append(self.get_query(field, opval, txtval, refsearch=True)) else: value, error = field.validate(txtval) if not error: ### TODO deal with 'starts with', 'ends with', 'contains' on GAE query &= self.get_query(field, opval, value) else: row[3].append(DIV(error,_class='error')) selected.append(field) form = FORM(tbl,INPUT(_type="submit")) if selected: try: results = db(query).select(*selected,**attributes) for r in refsearch: results = results.find(r) except: # hmmm, we should do better here results = None return form, results urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor())) 
def fetch(url, data=None, headers={}, cookie=Cookie.SimpleCookie(), user_agent='Mozilla/5.0'): if data != None: data = urllib.urlencode(data) if user_agent: headers['User-agent'] = user_agent headers['Cookie'] = ' '.join(['%s=%s;'%(c.key,c.value) for c in cookie.values()]) try: from google.appengine.api import urlfetch except ImportError: req = urllib2.Request(url, data, headers) html = urllib2.urlopen(req).read() else: method = ((data==None) and urlfetch.GET) or urlfetch.POST while url is not None: response = urlfetch.fetch(url=url, payload=data, method=method, headers=headers, allow_truncated=False,follow_redirects=False, deadline=10) # next request will be a get, so no need to send the data again data = None method = urlfetch.GET # load cookies from the response cookie.load(response.headers.get('set-cookie', '')) url = response.headers.get('location') html = response.content return html regex_geocode = \ re.compile('\<coordinates\>(?P<la>[^,]*),(?P<lo>[^,]*).*?\</coordinates\>') def geocode(address): try: a = urllib.quote(address) txt = fetch('http://maps.google.com/maps/geo?q=%s&output=xml' % a) item = regex_geocode.search(txt) (la, lo) = (float(item.group('la')), float(item.group('lo'))) return (la, lo) except: return (0.0, 0.0) def universal_caller(f, *a, **b): c = f.func_code.co_argcount n = f.func_code.co_varnames[:c] defaults = f.func_defaults or [] pos_args = n[0:-len(defaults)] named_args = n[-len(defaults):] arg_dict = {} # Fill the arg_dict with name and value for the submitted, positional values for pos_index, pos_val in enumerate(a[:c]): arg_dict[n[pos_index]] = pos_val # n[pos_index] is the name of the argument # There might be pos_args left, that are sent as named_values. Gather them as well. # If a argument already is populated with values we simply replaces them. for arg_name in pos_args[len(arg_dict):]: if b.has_key(arg_name): arg_dict[arg_name] = b[arg_name] if len(arg_dict) >= len(pos_args): # All the positional arguments is found. 
The function may now be called. # However, we need to update the arg_dict with the values from the named arguments as well. for arg_name in named_args: if b.has_key(arg_name): arg_dict[arg_name] = b[arg_name] return f(**arg_dict) # Raise an error, the function cannot be called. raise HTTP(404, "Object does not exist") class Service(object): def __init__(self, environment=None): self.run_procedures = {} self.csv_procedures = {} self.xml_procedures = {} self.rss_procedures = {} self.json_procedures = {} self.jsonrpc_procedures = {} self.xmlrpc_procedures = {} self.amfrpc_procedures = {} self.amfrpc3_procedures = {} self.soap_procedures = {} def run(self, f): """ example:: service = Service(globals()) @service.run def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/run/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def csv(self, f): """ example:: service = Service(globals()) @service.csv def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/csv/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def xml(self, f): """ example:: service = Service(globals()) @service.xml def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/xml/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def rss(self, f): """ example:: service = Service(globals()) @service.rss def myfunction(): return dict(title=..., link=..., description=..., created_on=..., entries=[dict(title=..., link=..., description=..., created_on=...]) def call(): return service() Then call it with:: wget http://..../app/default/call/rss/myfunction """ self.rss_procedures[f.__name__] = f return f def json(self, f): """ example:: service = Service(globals()) @service.json def myfunction(a, b): return [{a: b}] def call(): return service() Then call it with:: wget 
http://..../app/default/call/json/myfunction?a=hello&b=world """ self.json_procedures[f.__name__] = f return f def jsonrpc(self, f): """ example:: service = Service(globals()) @service.jsonrpc def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world """ self.jsonrpc_procedures[f.__name__] = f return f def xmlrpc(self, f): """ example:: service = Service(globals()) @service.xmlrpc def myfunction(a, b): return a + b def call(): return service() The call it with:: wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world """ self.xmlrpc_procedures[f.__name__] = f return f def amfrpc(self, f): """ example:: service = Service(globals()) @service.amfrpc def myfunction(a, b): return a + b def call(): return service() The call it with:: wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world """ self.amfrpc_procedures[f.__name__] = f return f def amfrpc3(self, domain='default'): """ example:: service = Service(globals()) @service.amfrpc3('domain') def myfunction(a, b): return a + b def call(): return service() The call it with:: wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world """ if not isinstance(domain, str): raise SyntaxError, "AMF3 requires a domain for function" def _amfrpc3(f): if domain: self.amfrpc3_procedures[domain+'.'+f.__name__] = f else: self.amfrpc3_procedures[f.__name__] = f return f return _amfrpc3 def soap(self, name=None, returns=None, args=None,doc=None): """ example:: service = Service(globals()) @service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,}) def myfunction(a, b): return a + b def call(): return service() The call it with:: from gluon.contrib.pysimplesoap.client import SoapClient client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL") response = client.MyFunction(a=1,b=2) return response['result'] Exposes online generated documentation and xml example messages at: - 
http://..../app/default/call/soap """ def _soap(f): self.soap_procedures[name or f.__name__] = f, returns, args, doc return f return _soap def serve_run(self, args=None): request = current.request if not args: args = request.args if args and args[0] in self.run_procedures: return str(universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars))) self.error() def serve_csv(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'text/x-csv' if not args: args = request.args def none_exception(value): if isinstance(value, unicode): return value.encode('utf8') if hasattr(value, 'isoformat'): return value.isoformat()[:19].replace('T', ' ') if value == None: return '<NULL>' return value if args and args[0] in self.run_procedures: r = universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars)) s = cStringIO.StringIO() if hasattr(r, 'export_to_csv_file'): r.export_to_csv_file(s) elif r and isinstance(r[0], (dict, Storage)): import csv writer = csv.writer(s) writer.writerow(r[0].keys()) for line in r: writer.writerow([none_exception(v) \ for v in line.values()]) else: import csv writer = csv.writer(s) for line in r: writer.writerow(line) return s.getvalue() self.error() def serve_xml(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'text/xml' if not args: args = request.args if args and args[0] in self.run_procedures: s = universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars)) if hasattr(s, 'as_list'): s = s.as_list() return serializers.xml(s) self.error() def serve_rss(self, args=None): request = current.request response = current.response if not args: args = request.args if args and args[0] in self.rss_procedures: feed = universal_caller(self.rss_procedures[args[0]], *args[1:], **dict(request.vars)) else: self.error() response.headers['Content-Type'] = 'application/rss+xml' return serializers.rss(feed) 
def serve_json(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'text/x-json' if not args: args = request.args d = dict(request.vars) if args and args[0] in self.json_procedures: s = universal_caller(self.json_procedures[args[0]],*args[1:],**d) if hasattr(s, 'as_list'): s = s.as_list() return response.json(s) self.error() class JsonRpcException(Exception): def __init__(self,code,info): self.code,self.info = code,info def serve_jsonrpc(self): import contrib.simplejson as simplejson def return_response(id, result): return serializers.json({'version': '1.1', 'id': id, 'result': result, 'error': None}) def return_error(id, code, message): return serializers.json({'id': id, 'version': '1.1', 'error': {'name': 'JSONRPCError', 'code': code, 'message': message} }) request = current.request methods = self.jsonrpc_procedures data = simplejson.loads(request.body.read()) id, method, params = data['id'], data['method'], data.get('params','') if not method in methods: return return_error(id, 100, 'method "%s" does not exist' % method) try: s = methods[method](*params) if hasattr(s, 'as_list'): s = s.as_list() return return_response(id, s) except Service.JsonRpcException, e: return return_error(id, e.code, e.info) except BaseException: etype, eval, etb = sys.exc_info() return return_error(id, 100, '%s: %s' % (etype.__name__, eval)) except: etype, eval, etb = sys.exc_info() return return_error(id, 100, 'Exception %s: %s' % (etype, eval)) def serve_xmlrpc(self): request = current.request response = current.response services = self.xmlrpc_procedures.values() return response.xmlrpc(request, services) def serve_amfrpc(self, version=0): try: import pyamf import pyamf.remoting.gateway except: return "pyamf not installed or not in Python sys.path" request = current.request response = current.response if version == 3: services = self.amfrpc3_procedures base_gateway = pyamf.remoting.gateway.BaseGateway(services) pyamf_request = 
pyamf.remoting.decode(request.body) else: services = self.amfrpc_procedures base_gateway = pyamf.remoting.gateway.BaseGateway(services) context = pyamf.get_context(pyamf.AMF0) pyamf_request = pyamf.remoting.decode(request.body, context) pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion) for name, message in pyamf_request: pyamf_response[name] = base_gateway.getProcessor(message)(message) response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE if version==3: return pyamf.remoting.encode(pyamf_response).getvalue() else: return pyamf.remoting.encode(pyamf_response, context).getvalue() def serve_soap(self, version="1.1"): try: from contrib.pysimplesoap.server import SoapDispatcher except: return "pysimplesoap not installed in contrib" request = current.request response = current.response procedures = self.soap_procedures location = "%s://%s%s" % ( request.env.wsgi_url_scheme, request.env.http_host, URL(r=request,f="call/soap",vars={})) namespace = 'namespace' in response and response.namespace or location documentation = response.description or '' dispatcher = SoapDispatcher( name = response.title, location = location, action = location, # SOAPAction namespace = namespace, prefix='pys', documentation = documentation, ns = True) for method, (function, returns, args, doc) in procedures.items(): dispatcher.register_function(method, function, returns, args, doc) if request.env.request_method == 'POST': # Process normal Soap Operation response.headers['Content-Type'] = 'text/xml' return dispatcher.dispatch(request.body.read()) elif 'WSDL' in request.vars: # Return Web Service Description response.headers['Content-Type'] = 'text/xml' return dispatcher.wsdl() elif 'op' in request.vars: # Return method help webpage response.headers['Content-Type'] = 'text/html' method = request.vars['op'] sample_req_xml, sample_res_xml, doc = dispatcher.help(method) body = [H1("Welcome to Web2Py SOAP webservice gateway"), A("See all webservice operations", 
_href=URL(r=request,f="call/soap",vars={})), H2(method), P(doc), UL(LI("Location: %s" % dispatcher.location), LI("Namespace: %s" % dispatcher.namespace), LI("SoapAction: %s" % dispatcher.action), ), H3("Sample SOAP XML Request Message:"), CODE(sample_req_xml,language="xml"), H3("Sample SOAP XML Response Message:"), CODE(sample_res_xml,language="xml"), ] return {'body': body} else: # Return general help and method list webpage response.headers['Content-Type'] = 'text/html' body = [H1("Welcome to Web2Py SOAP webservice gateway"), P(response.description), P("The following operations are available"), A("See WSDL for webservice description", _href=URL(r=request,f="call/soap",vars={"WSDL":None})), UL([LI(A("%s: %s" % (method, doc or ''), _href=URL(r=request,f="call/soap",vars={'op': method}))) for method, doc in dispatcher.list_methods()]), ] return {'body': body} def __call__(self): """ register services with: service = Service(globals()) @service.run @service.rss @service.json @service.jsonrpc @service.xmlrpc @service.jsonrpc @service.amfrpc @service.amfrpc3('domain') @service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,}) expose services with def call(): return service() call services with http://..../app/default/call/run?[parameters] http://..../app/default/call/rss?[parameters] http://..../app/default/call/json?[parameters] http://..../app/default/call/jsonrpc http://..../app/default/call/xmlrpc http://..../app/default/call/amfrpc http://..../app/default/call/amfrpc3 http://..../app/default/call/soap """ request = current.request if len(request.args) < 1: raise HTTP(404, "Not Found") arg0 = request.args(0) if arg0 == 'run': return self.serve_run(request.args[1:]) elif arg0 == 'rss': return self.serve_rss(request.args[1:]) elif arg0 == 'csv': return self.serve_csv(request.args[1:]) elif arg0 == 'xml': return self.serve_xml(request.args[1:]) elif arg0 == 'json': return self.serve_json(request.args[1:]) elif arg0 == 'jsonrpc': return 
self.serve_jsonrpc() elif arg0 == 'xmlrpc': return self.serve_xmlrpc() elif arg0 == 'amfrpc': return self.serve_amfrpc() elif arg0 == 'amfrpc3': return self.serve_amfrpc(3) elif arg0 == 'soap': return self.serve_soap() else: self.error() def error(self): raise HTTP(404, "Object does not exist") def completion(callback): """ Executes a task on completion of the called action. For example: from gluon.tools import completion @completion(lambda d: logging.info(repr(d))) def index(): return dict(message='hello') It logs the output of the function every time input is called. The argument of completion is executed in a new thread. """ def _completion(f): def __completion(*a,**b): d = None try: d = f(*a,**b) return d finally: thread.start_new_thread(callback,(d,)) return __completion return _completion def prettydate(d,T=lambda x:x): try: dt = datetime.datetime.now() - d except: return '' if dt.days >= 2*365: return T('%d years ago') % int(dt.days / 365) elif dt.days >= 365: return T('1 year ago') elif dt.days >= 60: return T('%d months ago') % int(dt.days / 30) elif dt.days > 21: return T('1 month ago') elif dt.days >= 14: return T('%d weeks ago') % int(dt.days / 7) elif dt.days >= 7: return T('1 week ago') elif dt.days > 1: return T('%d days ago') % dt.days elif dt.days == 1: return T('1 day ago') elif dt.seconds >= 2*60*60: return T('%d hours ago') % int(dt.seconds / 3600) elif dt.seconds >= 60*60: return T('1 hour ago') elif dt.seconds >= 2*60: return T('%d minutes ago') % int(dt.seconds / 60) elif dt.seconds >= 60: return T('1 minute ago') elif dt.seconds > 1: return T('%d seconds ago') % dt.seconds elif dt.seconds == 1: return T('1 second ago') else: return T('now') def test_thread_separation(): def f(): c=PluginManager() lock1.acquire() lock2.acquire() c.x=7 lock1.release() lock2.release() lock1=thread.allocate_lock() lock2=thread.allocate_lock() lock1.acquire() thread.start_new_thread(f,()) a=PluginManager() a.x=5 lock1.release() lock2.acquire() return a.x class 
PluginManager(object): """ Plugin Manager is similar to a storage object but it is a single level singleton this means that multiple instances within the same thread share the same attributes Its constructor is also special. The first argument is the name of the plugin you are defining. The named arguments are parameters needed by the plugin with default values. If the parameters were previous defined, the old values are used. For example: ### in some general configuration file: >>> plugins = PluginManager() >>> plugins.me.param1=3 ### within the plugin model >>> _ = PluginManager('me',param1=5,param2=6,param3=7) ### where the plugin is used >>> print plugins.me.param1 3 >>> print plugins.me.param2 6 >>> plugins.me.param3 = 8 >>> print plugins.me.param3 8 Here are some tests: >>> a=PluginManager() >>> a.x=6 >>> b=PluginManager('check') >>> print b.x 6 >>> b=PluginManager() # reset settings >>> print b.x <Storage {}> >>> b.x=7 >>> print a.x 7 >>> a.y.z=8 >>> print b.y.z 8 >>> test_thread_separation() 5 >>> plugins=PluginManager('me',db='mydb') >>> print plugins.me.db mydb >>> print 'me' in plugins True >>> print plugins.me.installed True """ instances = {} def __new__(cls,*a,**b): id = thread.get_ident() lock = thread.allocate_lock() try: lock.acquire() try: return cls.instances[id] except KeyError: instance = object.__new__(cls,*a,**b) cls.instances[id] = instance return instance finally: lock.release() def __init__(self,plugin=None,**defaults): if not plugin: self.__dict__.clear() settings = self.__getattr__(plugin) settings.installed = True [settings.update({key:value}) for key,value in defaults.items() if not key in settings] def __getattr__(self, key): if not key in self.__dict__: self.__dict__[key] = Storage() return self.__dict__[key] def keys(self): return self.__dict__.keys() def __contains__(self,key): return key in self.__dict__ if __name__ == '__main__': import doctest doctest.testmod()
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Functions required to execute app components
============================================

FOR INTERNAL USE ONLY
"""

import os
import stat
import thread

from fileutils import read_file

# module-level cache: key -> (mtime_at_cache_time, cached_data)
cfs = {}
# guards all reads/writes of `cfs` across threads
cfs_lock = thread.allocate_lock()


def getcfs(key, filename, filter=None):
    """
    Cache the *filtered* content of `filename` under `key`, invalidating
    the entry whenever the file's modification time changes.

    :param key: the cache key
    :param filename: the file whose (filtered) content is cached
    :param filter: optional zero-argument callable producing the value to
        cache; when omitted, the raw file content is cached instead.
        Typically `filename` is a .py file and `filter` bytecode-compiles
        it, so the compiled form is what gets cached. (Default = None)

    Used on Google App Engine, where .pyc files cannot be written to disk.
    """
    mtime = os.stat(filename)[stat.ST_MTIME]
    cfs_lock.acquire()
    entry = cfs.get(key, None)
    cfs_lock.release()
    # cache hit only if the file has not been modified since it was cached
    if entry and entry[0] == mtime:
        return entry[1]
    # NOTE: computed outside the lock; concurrent misses may recompute,
    # which is harmless for a pure cache
    data = filter() if filter else read_file(filename)
    cfs_lock.acquire()
    cfs[key] = (mtime, data)
    cfs_lock.release()
    return data
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) CONTENT_TYPE dictionary created against freedesktop.org' shared mime info database version 0.70. """ __all__ = ['contenttype'] CONTENT_TYPE = { '.load': 'text/html', '.123': 'application/vnd.lotus-1-2-3', '.3ds': 'image/x-3ds', '.3g2': 'video/3gpp', '.3ga': 'video/3gpp', '.3gp': 'video/3gpp', '.3gpp': 'video/3gpp', '.602': 'application/x-t602', '.669': 'audio/x-mod', '.7z': 'application/x-7z-compressed', '.a': 'application/x-archive', '.aac': 'audio/mp4', '.abw': 'application/x-abiword', '.abw.crashed': 'application/x-abiword', '.abw.gz': 'application/x-abiword', '.ac3': 'audio/ac3', '.ace': 'application/x-ace', '.adb': 'text/x-adasrc', '.ads': 'text/x-adasrc', '.afm': 'application/x-font-afm', '.ag': 'image/x-applix-graphics', '.ai': 'application/illustrator', '.aif': 'audio/x-aiff', '.aifc': 'audio/x-aiff', '.aiff': 'audio/x-aiff', '.al': 'application/x-perl', '.alz': 'application/x-alz', '.amr': 'audio/amr', '.ani': 'application/x-navi-animation', '.anim[1-9j]': 'video/x-anim', '.anx': 'application/annodex', '.ape': 'audio/x-ape', '.arj': 'application/x-arj', '.arw': 'image/x-sony-arw', '.as': 'application/x-applix-spreadsheet', '.asc': 'text/plain', '.asf': 'video/x-ms-asf', '.asp': 'application/x-asp', '.ass': 'text/x-ssa', '.asx': 'audio/x-ms-asx', '.atom': 'application/atom+xml', '.au': 'audio/basic', '.avi': 'video/x-msvideo', '.aw': 'application/x-applix-word', '.awb': 'audio/amr-wb', '.awk': 'application/x-awk', '.axa': 'audio/annodex', '.axv': 'video/annodex', '.bak': 'application/x-trash', '.bcpio': 'application/x-bcpio', '.bdf': 'application/x-font-bdf', '.bib': 'text/x-bibtex', '.bin': 'application/octet-stream', '.blend': 'application/x-blender', '.blender': 'application/x-blender', '.bmp': 'image/bmp', '.bz': 'application/x-bzip', '.bz2': 
'application/x-bzip', '.c': 'text/x-csrc', '.c++': 'text/x-c++src', '.cab': 'application/vnd.ms-cab-compressed', '.cb7': 'application/x-cb7', '.cbr': 'application/x-cbr', '.cbt': 'application/x-cbt', '.cbz': 'application/x-cbz', '.cc': 'text/x-c++src', '.cdf': 'application/x-netcdf', '.cdr': 'application/vnd.corel-draw', '.cer': 'application/x-x509-ca-cert', '.cert': 'application/x-x509-ca-cert', '.cgm': 'image/cgm', '.chm': 'application/x-chm', '.chrt': 'application/x-kchart', '.class': 'application/x-java', '.cls': 'text/x-tex', '.cmake': 'text/x-cmake', '.cpio': 'application/x-cpio', '.cpio.gz': 'application/x-cpio-compressed', '.cpp': 'text/x-c++src', '.cr2': 'image/x-canon-cr2', '.crt': 'application/x-x509-ca-cert', '.crw': 'image/x-canon-crw', '.cs': 'text/x-csharp', '.csh': 'application/x-csh', '.css': 'text/css', '.cssl': 'text/css', '.csv': 'text/csv', '.cue': 'application/x-cue', '.cur': 'image/x-win-bitmap', '.cxx': 'text/x-c++src', '.d': 'text/x-dsrc', '.dar': 'application/x-dar', '.dbf': 'application/x-dbf', '.dc': 'application/x-dc-rom', '.dcl': 'text/x-dcl', '.dcm': 'application/dicom', '.dcr': 'image/x-kodak-dcr', '.dds': 'image/x-dds', '.deb': 'application/x-deb', '.der': 'application/x-x509-ca-cert', '.desktop': 'application/x-desktop', '.dia': 'application/x-dia-diagram', '.diff': 'text/x-patch', '.divx': 'video/x-msvideo', '.djv': 'image/vnd.djvu', '.djvu': 'image/vnd.djvu', '.dng': 'image/x-adobe-dng', '.doc': 'application/msword', '.docbook': 'application/docbook+xml', '.docm': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', '.dot': 'text/vnd.graphviz', '.dsl': 'text/x-dsl', '.dtd': 'application/xml-dtd', '.dtx': 'text/x-tex', '.dv': 'video/dv', '.dvi': 'application/x-dvi', '.dvi.bz2': 'application/x-bzdvi', '.dvi.gz': 'application/x-gzdvi', '.dwg': 'image/vnd.dwg', '.dxf': 'image/vnd.dxf', '.e': 'text/x-eiffel', '.egon': 
'application/x-egon', '.eif': 'text/x-eiffel', '.el': 'text/x-emacs-lisp', '.emf': 'image/x-emf', '.emp': 'application/vnd.emusic-emusic_package', '.ent': 'application/xml-external-parsed-entity', '.eps': 'image/x-eps', '.eps.bz2': 'image/x-bzeps', '.eps.gz': 'image/x-gzeps', '.epsf': 'image/x-eps', '.epsf.bz2': 'image/x-bzeps', '.epsf.gz': 'image/x-gzeps', '.epsi': 'image/x-eps', '.epsi.bz2': 'image/x-bzeps', '.epsi.gz': 'image/x-gzeps', '.epub': 'application/epub+zip', '.erl': 'text/x-erlang', '.es': 'application/ecmascript', '.etheme': 'application/x-e-theme', '.etx': 'text/x-setext', '.exe': 'application/x-ms-dos-executable', '.exr': 'image/x-exr', '.ez': 'application/andrew-inset', '.f': 'text/x-fortran', '.f90': 'text/x-fortran', '.f95': 'text/x-fortran', '.fb2': 'application/x-fictionbook+xml', '.fig': 'image/x-xfig', '.fits': 'image/fits', '.fl': 'application/x-fluid', '.flac': 'audio/x-flac', '.flc': 'video/x-flic', '.fli': 'video/x-flic', '.flv': 'video/x-flv', '.flw': 'application/x-kivio', '.fo': 'text/x-xslfo', '.for': 'text/x-fortran', '.g3': 'image/fax-g3', '.gb': 'application/x-gameboy-rom', '.gba': 'application/x-gba-rom', '.gcrd': 'text/directory', '.ged': 'application/x-gedcom', '.gedcom': 'application/x-gedcom', '.gen': 'application/x-genesis-rom', '.gf': 'application/x-tex-gf', '.gg': 'application/x-sms-rom', '.gif': 'image/gif', '.glade': 'application/x-glade', '.gmo': 'application/x-gettext-translation', '.gnc': 'application/x-gnucash', '.gnd': 'application/gnunet-directory', '.gnucash': 'application/x-gnucash', '.gnumeric': 'application/x-gnumeric', '.gnuplot': 'application/x-gnuplot', '.gp': 'application/x-gnuplot', '.gpg': 'application/pgp-encrypted', '.gplt': 'application/x-gnuplot', '.gra': 'application/x-graphite', '.gsf': 'application/x-font-type1', '.gsm': 'audio/x-gsm', '.gtar': 'application/x-tar', '.gv': 'text/vnd.graphviz', '.gvp': 'text/x-google-video-pointer', '.gz': 'application/x-gzip', '.h': 'text/x-chdr', '.h++': 
'text/x-c++hdr', '.hdf': 'application/x-hdf', '.hh': 'text/x-c++hdr', '.hp': 'text/x-c++hdr', '.hpgl': 'application/vnd.hp-hpgl', '.hpp': 'text/x-c++hdr', '.hs': 'text/x-haskell', '.htm': 'text/html', '.html': 'text/html', '.hwp': 'application/x-hwp', '.hwt': 'application/x-hwt', '.hxx': 'text/x-c++hdr', '.ica': 'application/x-ica', '.icb': 'image/x-tga', '.icns': 'image/x-icns', '.ico': 'image/vnd.microsoft.icon', '.ics': 'text/calendar', '.idl': 'text/x-idl', '.ief': 'image/ief', '.iff': 'image/x-iff', '.ilbm': 'image/x-ilbm', '.ime': 'text/x-imelody', '.imy': 'text/x-imelody', '.ins': 'text/x-tex', '.iptables': 'text/x-iptables', '.iso': 'application/x-cd-image', '.iso9660': 'application/x-cd-image', '.it': 'audio/x-it', '.j2k': 'image/jp2', '.jad': 'text/vnd.sun.j2me.app-descriptor', '.jar': 'application/x-java-archive', '.java': 'text/x-java', '.jng': 'image/x-jng', '.jnlp': 'application/x-java-jnlp-file', '.jp2': 'image/jp2', '.jpc': 'image/jp2', '.jpe': 'image/jpeg', '.jpeg': 'image/jpeg', '.jpf': 'image/jp2', '.jpg': 'image/jpeg', '.jpr': 'application/x-jbuilder-project', '.jpx': 'image/jp2', '.js': 'application/javascript', '.json': 'application/json', '.k25': 'image/x-kodak-k25', '.kar': 'audio/midi', '.karbon': 'application/x-karbon', '.kdc': 'image/x-kodak-kdc', '.kdelnk': 'application/x-desktop', '.kexi': 'application/x-kexiproject-sqlite3', '.kexic': 'application/x-kexi-connectiondata', '.kexis': 'application/x-kexiproject-shortcut', '.kfo': 'application/x-kformula', '.kil': 'application/x-killustrator', '.kino': 'application/smil', '.kml': 'application/vnd.google-earth.kml+xml', '.kmz': 'application/vnd.google-earth.kmz', '.kon': 'application/x-kontour', '.kpm': 'application/x-kpovmodeler', '.kpr': 'application/x-kpresenter', '.kpt': 'application/x-kpresenter', '.kra': 'application/x-krita', '.ksp': 'application/x-kspread', '.kud': 'application/x-kugar', '.kwd': 'application/x-kword', '.kwt': 'application/x-kword', '.la': 
'application/x-shared-library-la', '.latex': 'text/x-tex', '.ldif': 'text/x-ldif', '.lha': 'application/x-lha', '.lhs': 'text/x-literate-haskell', '.lhz': 'application/x-lhz', '.log': 'text/x-log', '.ltx': 'text/x-tex', '.lua': 'text/x-lua', '.lwo': 'image/x-lwo', '.lwob': 'image/x-lwo', '.lws': 'image/x-lws', '.ly': 'text/x-lilypond', '.lyx': 'application/x-lyx', '.lz': 'application/x-lzip', '.lzh': 'application/x-lha', '.lzma': 'application/x-lzma', '.lzo': 'application/x-lzop', '.m': 'text/x-matlab', '.m15': 'audio/x-mod', '.m2t': 'video/mpeg', '.m3u': 'audio/x-mpegurl', '.m3u8': 'audio/x-mpegurl', '.m4': 'application/x-m4', '.m4a': 'audio/mp4', '.m4b': 'audio/x-m4b', '.m4v': 'video/mp4', '.mab': 'application/x-markaby', '.man': 'application/x-troff-man', '.mbox': 'application/mbox', '.md': 'application/x-genesis-rom', '.mdb': 'application/vnd.ms-access', '.mdi': 'image/vnd.ms-modi', '.me': 'text/x-troff-me', '.med': 'audio/x-mod', '.metalink': 'application/metalink+xml', '.mgp': 'application/x-magicpoint', '.mid': 'audio/midi', '.midi': 'audio/midi', '.mif': 'application/x-mif', '.minipsf': 'audio/x-minipsf', '.mka': 'audio/x-matroska', '.mkv': 'video/x-matroska', '.ml': 'text/x-ocaml', '.mli': 'text/x-ocaml', '.mm': 'text/x-troff-mm', '.mmf': 'application/x-smaf', '.mml': 'text/mathml', '.mng': 'video/x-mng', '.mo': 'application/x-gettext-translation', '.mo3': 'audio/x-mo3', '.moc': 'text/x-moc', '.mod': 'audio/x-mod', '.mof': 'text/x-mof', '.moov': 'video/quicktime', '.mov': 'video/quicktime', '.movie': 'video/x-sgi-movie', '.mp+': 'audio/x-musepack', '.mp2': 'video/mpeg', '.mp3': 'audio/mpeg', '.mp4': 'video/mp4', '.mpc': 'audio/x-musepack', '.mpe': 'video/mpeg', '.mpeg': 'video/mpeg', '.mpg': 'video/mpeg', '.mpga': 'audio/mpeg', '.mpp': 'audio/x-musepack', '.mrl': 'text/x-mrml', '.mrml': 'text/x-mrml', '.mrw': 'image/x-minolta-mrw', '.ms': 'text/x-troff-ms', '.msi': 'application/x-msi', '.msod': 'image/x-msod', '.msx': 'application/x-msx-rom', '.mtm': 
'audio/x-mod', '.mup': 'text/x-mup', '.mxf': 'application/mxf', '.n64': 'application/x-n64-rom', '.nb': 'application/mathematica', '.nc': 'application/x-netcdf', '.nds': 'application/x-nintendo-ds-rom', '.nef': 'image/x-nikon-nef', '.nes': 'application/x-nes-rom', '.nfo': 'text/x-nfo', '.not': 'text/x-mup', '.nsc': 'application/x-netshow-channel', '.nsv': 'video/x-nsv', '.o': 'application/x-object', '.obj': 'application/x-tgif', '.ocl': 'text/x-ocl', '.oda': 'application/oda', '.odb': 'application/vnd.oasis.opendocument.database', '.odc': 'application/vnd.oasis.opendocument.chart', '.odf': 'application/vnd.oasis.opendocument.formula', '.odg': 'application/vnd.oasis.opendocument.graphics', '.odi': 'application/vnd.oasis.opendocument.image', '.odm': 'application/vnd.oasis.opendocument.text-master', '.odp': 'application/vnd.oasis.opendocument.presentation', '.ods': 'application/vnd.oasis.opendocument.spreadsheet', '.odt': 'application/vnd.oasis.opendocument.text', '.oga': 'audio/ogg', '.ogg': 'video/x-theora+ogg', '.ogm': 'video/x-ogm+ogg', '.ogv': 'video/ogg', '.ogx': 'application/ogg', '.old': 'application/x-trash', '.oleo': 'application/x-oleo', '.opml': 'text/x-opml+xml', '.ora': 'image/openraster', '.orf': 'image/x-olympus-orf', '.otc': 'application/vnd.oasis.opendocument.chart-template', '.otf': 'application/x-font-otf', '.otg': 'application/vnd.oasis.opendocument.graphics-template', '.oth': 'application/vnd.oasis.opendocument.text-web', '.otp': 'application/vnd.oasis.opendocument.presentation-template', '.ots': 'application/vnd.oasis.opendocument.spreadsheet-template', '.ott': 'application/vnd.oasis.opendocument.text-template', '.owl': 'application/rdf+xml', '.oxt': 'application/vnd.openofficeorg.extension', '.p': 'text/x-pascal', '.p10': 'application/pkcs10', '.p12': 'application/x-pkcs12', '.p7b': 'application/x-pkcs7-certificates', '.p7s': 'application/pkcs7-signature', '.pack': 'application/x-java-pack200', '.pak': 'application/x-pak', '.par2': 
'application/x-par2', '.pas': 'text/x-pascal', '.patch': 'text/x-patch', '.pbm': 'image/x-portable-bitmap', '.pcd': 'image/x-photo-cd', '.pcf': 'application/x-cisco-vpn-settings', '.pcf.gz': 'application/x-font-pcf', '.pcf.z': 'application/x-font-pcf', '.pcl': 'application/vnd.hp-pcl', '.pcx': 'image/x-pcx', '.pdb': 'chemical/x-pdb', '.pdc': 'application/x-aportisdoc', '.pdf': 'application/pdf', '.pdf.bz2': 'application/x-bzpdf', '.pdf.gz': 'application/x-gzpdf', '.pef': 'image/x-pentax-pef', '.pem': 'application/x-x509-ca-cert', '.perl': 'application/x-perl', '.pfa': 'application/x-font-type1', '.pfb': 'application/x-font-type1', '.pfx': 'application/x-pkcs12', '.pgm': 'image/x-portable-graymap', '.pgn': 'application/x-chess-pgn', '.pgp': 'application/pgp-encrypted', '.php': 'application/x-php', '.php3': 'application/x-php', '.php4': 'application/x-php', '.pict': 'image/x-pict', '.pict1': 'image/x-pict', '.pict2': 'image/x-pict', '.pickle': 'application/python-pickle', '.pk': 'application/x-tex-pk', '.pkipath': 'application/pkix-pkipath', '.pkr': 'application/pgp-keys', '.pl': 'application/x-perl', '.pla': 'audio/x-iriver-pla', '.pln': 'application/x-planperfect', '.pls': 'audio/x-scpls', '.pm': 'application/x-perl', '.png': 'image/png', '.pnm': 'image/x-portable-anymap', '.pntg': 'image/x-macpaint', '.po': 'text/x-gettext-translation', '.por': 'application/x-spss-por', '.pot': 'text/x-gettext-translation-template', '.ppm': 'image/x-portable-pixmap', '.pps': 'application/vnd.ms-powerpoint', '.ppt': 'application/vnd.ms-powerpoint', '.pptm': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', '.ppz': 'application/vnd.ms-powerpoint', '.prc': 'application/x-palm-database', '.ps': 'application/postscript', '.ps.bz2': 'application/x-bzpostscript', '.ps.gz': 'application/x-gzpostscript', '.psd': 'image/vnd.adobe.photoshop', '.psf': 'audio/x-psf', '.psf.gz': 
'application/x-gz-font-linux-psf', '.psflib': 'audio/x-psflib', '.psid': 'audio/prs.sid', '.psw': 'application/x-pocket-word', '.pw': 'application/x-pw', '.py': 'text/x-python', '.pyc': 'application/x-python-bytecode', '.pyo': 'application/x-python-bytecode', '.qif': 'image/x-quicktime', '.qt': 'video/quicktime', '.qtif': 'image/x-quicktime', '.qtl': 'application/x-quicktime-media-link', '.qtvr': 'video/quicktime', '.ra': 'audio/vnd.rn-realaudio', '.raf': 'image/x-fuji-raf', '.ram': 'application/ram', '.rar': 'application/x-rar', '.ras': 'image/x-cmu-raster', '.raw': 'image/x-panasonic-raw', '.rax': 'audio/vnd.rn-realaudio', '.rb': 'application/x-ruby', '.rdf': 'application/rdf+xml', '.rdfs': 'application/rdf+xml', '.reg': 'text/x-ms-regedit', '.rej': 'application/x-reject', '.rgb': 'image/x-rgb', '.rle': 'image/rle', '.rm': 'application/vnd.rn-realmedia', '.rmj': 'application/vnd.rn-realmedia', '.rmm': 'application/vnd.rn-realmedia', '.rms': 'application/vnd.rn-realmedia', '.rmvb': 'application/vnd.rn-realmedia', '.rmx': 'application/vnd.rn-realmedia', '.roff': 'text/troff', '.rp': 'image/vnd.rn-realpix', '.rpm': 'application/x-rpm', '.rss': 'application/rss+xml', '.rt': 'text/vnd.rn-realtext', '.rtf': 'application/rtf', '.rtx': 'text/richtext', '.rv': 'video/vnd.rn-realvideo', '.rvx': 'video/vnd.rn-realvideo', '.s3m': 'audio/x-s3m', '.sam': 'application/x-amipro', '.sami': 'application/x-sami', '.sav': 'application/x-spss-sav', '.scm': 'text/x-scheme', '.sda': 'application/vnd.stardivision.draw', '.sdc': 'application/vnd.stardivision.calc', '.sdd': 'application/vnd.stardivision.impress', '.sdp': 'application/sdp', '.sds': 'application/vnd.stardivision.chart', '.sdw': 'application/vnd.stardivision.writer', '.sgf': 'application/x-go-sgf', '.sgi': 'image/x-sgi', '.sgl': 'application/vnd.stardivision.writer', '.sgm': 'text/sgml', '.sgml': 'text/sgml', '.sh': 'application/x-shellscript', '.shar': 'application/x-shar', '.shn': 'application/x-shorten', '.siag': 
'application/x-siag', '.sid': 'audio/prs.sid', '.sik': 'application/x-trash', '.sis': 'application/vnd.symbian.install', '.sisx': 'x-epoc/x-sisx-app', '.sit': 'application/x-stuffit', '.siv': 'application/sieve', '.sk': 'image/x-skencil', '.sk1': 'image/x-skencil', '.skr': 'application/pgp-keys', '.slk': 'text/spreadsheet', '.smaf': 'application/x-smaf', '.smc': 'application/x-snes-rom', '.smd': 'application/vnd.stardivision.mail', '.smf': 'application/vnd.stardivision.math', '.smi': 'application/x-sami', '.smil': 'application/smil', '.sml': 'application/smil', '.sms': 'application/x-sms-rom', '.snd': 'audio/basic', '.so': 'application/x-sharedlib', '.spc': 'application/x-pkcs7-certificates', '.spd': 'application/x-font-speedo', '.spec': 'text/x-rpm-spec', '.spl': 'application/x-shockwave-flash', '.spx': 'audio/x-speex', '.sql': 'text/x-sql', '.sr2': 'image/x-sony-sr2', '.src': 'application/x-wais-source', '.srf': 'image/x-sony-srf', '.srt': 'application/x-subrip', '.ssa': 'text/x-ssa', '.stc': 'application/vnd.sun.xml.calc.template', '.std': 'application/vnd.sun.xml.draw.template', '.sti': 'application/vnd.sun.xml.impress.template', '.stm': 'audio/x-stm', '.stw': 'application/vnd.sun.xml.writer.template', '.sty': 'text/x-tex', '.sub': 'text/x-subviewer', '.sun': 'image/x-sun-raster', '.sv4cpio': 'application/x-sv4cpio', '.sv4crc': 'application/x-sv4crc', '.svg': 'image/svg+xml', '.svgz': 'image/svg+xml-compressed', '.swf': 'application/x-shockwave-flash', '.sxc': 'application/vnd.sun.xml.calc', '.sxd': 'application/vnd.sun.xml.draw', '.sxg': 'application/vnd.sun.xml.writer.global', '.sxi': 'application/vnd.sun.xml.impress', '.sxm': 'application/vnd.sun.xml.math', '.sxw': 'application/vnd.sun.xml.writer', '.sylk': 'text/spreadsheet', '.t': 'text/troff', '.t2t': 'text/x-txt2tags', '.tar': 'application/x-tar', '.tar.bz': 'application/x-bzip-compressed-tar', '.tar.bz2': 'application/x-bzip-compressed-tar', '.tar.gz': 'application/x-compressed-tar', '.tar.lzma': 
'application/x-lzma-compressed-tar', '.tar.lzo': 'application/x-tzo', '.tar.xz': 'application/x-xz-compressed-tar', '.tar.z': 'application/x-tarz', '.tbz': 'application/x-bzip-compressed-tar', '.tbz2': 'application/x-bzip-compressed-tar', '.tcl': 'text/x-tcl', '.tex': 'text/x-tex', '.texi': 'text/x-texinfo', '.texinfo': 'text/x-texinfo', '.tga': 'image/x-tga', '.tgz': 'application/x-compressed-tar', '.theme': 'application/x-theme', '.themepack': 'application/x-windows-themepack', '.tif': 'image/tiff', '.tiff': 'image/tiff', '.tk': 'text/x-tcl', '.tlz': 'application/x-lzma-compressed-tar', '.tnef': 'application/vnd.ms-tnef', '.tnf': 'application/vnd.ms-tnef', '.toc': 'application/x-cdrdao-toc', '.torrent': 'application/x-bittorrent', '.tpic': 'image/x-tga', '.tr': 'text/troff', '.ts': 'application/x-linguist', '.tsv': 'text/tab-separated-values', '.tta': 'audio/x-tta', '.ttc': 'application/x-font-ttf', '.ttf': 'application/x-font-ttf', '.ttx': 'application/x-font-ttx', '.txt': 'text/plain', '.txz': 'application/x-xz-compressed-tar', '.tzo': 'application/x-tzo', '.ufraw': 'application/x-ufraw', '.ui': 'application/x-designer', '.uil': 'text/x-uil', '.ult': 'audio/x-mod', '.uni': 'audio/x-mod', '.uri': 'text/x-uri', '.url': 'text/x-uri', '.ustar': 'application/x-ustar', '.vala': 'text/x-vala', '.vapi': 'text/x-vala', '.vcf': 'text/directory', '.vcs': 'text/calendar', '.vct': 'text/directory', '.vda': 'image/x-tga', '.vhd': 'text/x-vhdl', '.vhdl': 'text/x-vhdl', '.viv': 'video/vivo', '.vivo': 'video/vivo', '.vlc': 'audio/x-mpegurl', '.vob': 'video/mpeg', '.voc': 'audio/x-voc', '.vor': 'application/vnd.stardivision.writer', '.vst': 'image/x-tga', '.wav': 'audio/x-wav', '.wax': 'audio/x-ms-asx', '.wb1': 'application/x-quattropro', '.wb2': 'application/x-quattropro', '.wb3': 'application/x-quattropro', '.wbmp': 'image/vnd.wap.wbmp', '.wcm': 'application/vnd.ms-works', '.wdb': 'application/vnd.ms-works', '.wk1': 'application/vnd.lotus-1-2-3', '.wk3': 
'application/vnd.lotus-1-2-3', '.wk4': 'application/vnd.lotus-1-2-3', '.wks': 'application/vnd.ms-works', '.wma': 'audio/x-ms-wma', '.wmf': 'image/x-wmf', '.wml': 'text/vnd.wap.wml', '.wmls': 'text/vnd.wap.wmlscript', '.wmv': 'video/x-ms-wmv', '.wmx': 'audio/x-ms-asx', '.wp': 'application/vnd.wordperfect', '.wp4': 'application/vnd.wordperfect', '.wp5': 'application/vnd.wordperfect', '.wp6': 'application/vnd.wordperfect', '.wpd': 'application/vnd.wordperfect', '.wpg': 'application/x-wpg', '.wpl': 'application/vnd.ms-wpl', '.wpp': 'application/vnd.wordperfect', '.wps': 'application/vnd.ms-works', '.wri': 'application/x-mswrite', '.wrl': 'model/vrml', '.wv': 'audio/x-wavpack', '.wvc': 'audio/x-wavpack-correction', '.wvp': 'audio/x-wavpack', '.wvx': 'audio/x-ms-asx', '.x3f': 'image/x-sigma-x3f', '.xac': 'application/x-gnucash', '.xbel': 'application/x-xbel', '.xbl': 'application/xml', '.xbm': 'image/x-xbitmap', '.xcf': 'image/x-xcf', '.xcf.bz2': 'image/x-compressed-xcf', '.xcf.gz': 'image/x-compressed-xcf', '.xhtml': 'application/xhtml+xml', '.xi': 'audio/x-xi', '.xla': 'application/vnd.ms-excel', '.xlc': 'application/vnd.ms-excel', '.xld': 'application/vnd.ms-excel', '.xlf': 'application/x-xliff', '.xliff': 'application/x-xliff', '.xll': 'application/vnd.ms-excel', '.xlm': 'application/vnd.ms-excel', '.xls': 'application/vnd.ms-excel', '.xlsm': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', '.xlt': 'application/vnd.ms-excel', '.xlw': 'application/vnd.ms-excel', '.xm': 'audio/x-xm', '.xmf': 'audio/x-xmf', '.xmi': 'text/x-xmi', '.xml': 'application/xml', '.xpm': 'image/x-xpixmap', '.xps': 'application/vnd.ms-xpsdocument', '.xsl': 'application/xml', '.xslfo': 'text/x-xslfo', '.xslt': 'application/xml', '.xspf': 'application/xspf+xml', '.xul': 'application/vnd.mozilla.xul+xml', '.xwd': 'image/x-xwindowdump', '.xyz': 'chemical/x-pdb', '.xz': 'application/x-xz', '.w2p': 
'application/w2p', '.z': 'application/x-compress', '.zabw': 'application/x-abiword', '.zip': 'application/zip', '.zoo': 'application/x-zoo', } def contenttype(filename, default='text/plain'): """ Returns the Content-Type string matching extension of the given filename. """ i = filename.rfind('.') if i>=0: default = CONTENT_TYPE.get(filename[i:].lower(),default) j = filename.rfind('.', 0, i) if j>=0: default = CONTENT_TYPE.get(filename[j:].lower(),default) if default.startswith('text/'): default += '; charset=utf-8' return default
# ---- Python (extraction artifact: boundary between concatenated source files) ----
#!/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

gluon.rewrite parses incoming URLs and formats outgoing URLs for gluon.html.URL.

In addition, it rewrites both incoming and outgoing URLs based on the
(optional) user-supplied routes.py, which also allows for rewriting of
certain error messages.

routes.py supports two styles of URL rewriting, depending on whether
'routers' is defined. Refer to router.example.py and routes.example.py
for additional documentation.
"""

import os
import re
import logging
import traceback
import threading
import urllib

from storage import Storage, List
from http import HTTP
from fileutils import abspath, read_file
from settings import global_settings

logger = logging.getLogger('web2py.rewrite')

thread = threading.local()  # thread-local storage for routing parameters


def _router_default():
    """
    Return a new copy of the default base router.

    A fresh Storage is built on every call so that callers may mutate
    their copy without affecting other routers.
    """
    router = Storage(
        default_application = 'init',
        applications = 'ALL',
        default_controller = 'default',
        controllers = 'DEFAULT',
        default_function = 'index',
        functions = None,
        default_language = None,
        languages = None,
        root_static = ['favicon.ico', 'robots.txt'],
        domains = None,
        exclusive_domain = False,
        map_hyphen = False,
        acfe_match = r'\w+$',               # legal app/ctlr/fcn/ext
        file_match = r'(\w+[-=./]?)+$',     # legal file (path) name
        args_match = r'([\w@ -]+[=.]?)*$',  # legal arg in args
    )
    return router


def _params_default(app=None):
    """
    Return a new copy of the default (regex-style) rewrite parameters.

    :param app: application name, or None for the BASE parameter set.
    """
    p = Storage()
    p.name = app or "BASE"
    p.default_application = app or "init"
    p.default_controller = "default"
    p.default_function = "index"
    p.routes_app = []
    p.routes_in = []
    p.routes_out = []
    p.routes_onerror = []
    p.routes_apps_raw = []
    p.error_handler = None
    p.error_message = '<html><body><h1>%s</h1></body></html>'
    # the trailing junk text works around an IE quirk with short error pages
    p.error_message_ticket = \
        '<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body><!-- this is junk text else IE does not display the page: '+('x'*512)+' //--></html>'
    p.routers = None
    return p


params_apps = dict()
params = _params_default(app=None)  # regex rewrite parameters
thread.routes = params  # default to base regex rewrite parameters
routers = None

# keys legal in any router; see router.example.py
ROUTER_KEYS = set(('default_application', 'applications',
                   'default_controller', 'controllers',
                   'default_function', 'functions',
                   'default_language', 'languages',
                   'domain', 'domains', 'root_static', 'path_prefix',
                   'exclusive_domain', 'map_hyphen', 'map_static',
                   'acfe_match', 'file_match', 'args_match'))

# keys legal only in the BASE router
ROUTER_BASE_KEYS = set(('applications', 'default_application',
                        'domains', 'path_prefix'))

#  The external interface to rewrite consists of:
#
#  load: load routing configuration file(s)
#  url_in: parse and rewrite incoming URL
#  url_out: assemble and rewrite outgoing URL
#
#  thread.routes.default_application
#  thread.routes.error_message
#  thread.routes.error_message_ticket
#  thread.routes.try_redirect_on_error
#  thread.routes.error_handler
#
#  filter_url: helper for doctest & unittest
#  filter_err: helper for doctest & unittest
#  regex_filter_out: doctest


def url_in(request, environ):
    """
    Parse and rewrite the incoming URL.

    Dispatches to the router-based mapper when a 'routers' configuration
    was loaded, otherwise to the regex-based rewriter.
    """
    if routers:
        return map_url_in(request, environ)
    return regex_url_in(request, environ)


def url_out(request, env, application, controller,
            function, args, other, scheme, host, port):
    """
    Assemble and rewrite an outgoing URL.

    :param other: extension/args/query tail appended to /a/c/f.
    :param scheme: string scheme (eg 'http', 'https', 'ws', 'wss'),
        True (use the request scheme), or None.
    :param host: string host, True (use request.env.http_host), or None.
    :param port: port number, or None.
    """
    if routers:
        acf = map_url_out(request, env, application, controller,
                          function, args, other, scheme, host, port)
        url = '%s%s' % (acf, other)
    else:
        url = '/%s/%s/%s%s' % (application, controller, function, other)
        url = regex_filter_out(url, env)
    #
    #  fill in scheme and host if absolute URL is requested
    #  scheme can be a string, eg 'http', 'https', 'ws', 'wss'
    #
    if scheme or port is not None:
        if host is None:  # scheme or port implies host
            host = True
    if not scheme or scheme is True:
        if request and request.env:
            scheme = request.env.get('WSGI_URL_SCHEME', 'http').lower()
        else:
            scheme = 'http'  # some reasonable default in case we need it
    if host is not None:
        if host is True:
            host = request.env.http_host
    if host:
        if port is None:
            port = ''
        else:
            port = ':%s' % port
        url = '%s://%s%s%s' % (scheme, host, port, url)
    return url


def try_rewrite_on_error(http_response, request, environ, ticket=None):
    """
    Called from main.wsgibase to rewrite the http response.

    Returns (http_response, environ) unchanged when no routes_onerror
    entry matches; returns (None, environ) after rewriting PATH_INFO /
    QUERY_STRING so wsgibase can re-dispatch; returns a 303 HTTP object
    when the onerror target is an absolute http(s) URL.
    """
    # NOTE(review): uses status >= 399 here but > 399 in
    # try_redirect_on_error/filter_err — confirm the boundary is intended.
    status = int(str(http_response.status).split()[0])
    if status>=399 and thread.routes.routes_onerror:
        keys=set(('%s/%s' % (request.application, status),
                  '%s/*' % (request.application),
                  '*/%s' % (status),
                  '*/*'))
        for (key,uri) in thread.routes.routes_onerror:
            if key in keys:
                if uri == '!':
                    # do nothing!
                    return http_response, environ
                elif '?' in uri:
                    path_info, query_string = uri.split('?',1)
                    query_string += '&'
                else:
                    path_info, query_string = uri, ''
                query_string += \
                    'code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \
                    (status,ticket,request.env.request_uri,request.url)
                if uri.startswith('http://') or uri.startswith('https://'):
                    # make up a response
                    url = path_info+'?'+query_string
                    message = 'You are being redirected <a href="%s">here</a>'
                    return HTTP(303, message % url, Location=url), environ
                elif path_info!=environ['PATH_INFO']:
                    # rewrite request, call wsgibase recursively, avoid loop
                    environ['PATH_INFO'] = path_info
                    environ['QUERY_STRING'] = query_string
                    return None, environ
    # do nothing!
    return http_response, environ


def try_redirect_on_error(http_object, request, ticket=None):
    """
    Called from main.wsgibase to rewrite the http response.

    Older redirect-style variant of try_rewrite_on_error: a matching
    routes_onerror entry yields a 303 redirect carrying code/ticket info
    in the query string; '!' suppresses the redirect.
    """
    status = int(str(http_object.status).split()[0])
    if status>399 and thread.routes.routes_onerror:
        keys=set(('%s/%s' % (request.application, status),
                  '%s/*' % (request.application),
                  '*/%s' % (status),
                  '*/*'))
        for (key,redir) in thread.routes.routes_onerror:
            if key in keys:
                if redir == '!':
                    break
                elif '?' in redir:
                    url = '%s&code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \
                        (redir,status,ticket,request.env.request_uri,request.url)
                else:
                    url = '%s?code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \
                        (redir,status,ticket,request.env.request_uri,request.url)
                return HTTP(303,
                            'You are being redirected <a href="%s">here</a>' % url,
                            Location=url)
    return http_object


def load(routes='routes.py', app=None, data=None, rdict=None):
    """
    load: read (if file) and parse routes
    store results in params
    (called from main.py at web2py initialization time)
    If data is present, it's used instead of the routes.py contents.
    If rdict is present, it must be a dict to be used for routers (unit test)
    """
    global params
    global routers
    if app is None:
        # reinitialize
        global params_apps
        params_apps = dict()
        params = _params_default(app=None)  # regex rewrite parameters
        thread.routes = params  # default to base regex rewrite parameters
        routers = None

    if isinstance(rdict, dict):
        symbols = dict(routers=rdict)
        path = 'rdict'
    else:
        if data is not None:
            path = 'routes'
        else:
            if app is None:
                path = abspath(routes)
            else:
                path = abspath('applications', app, routes)
            if not os.path.exists(path):
                return
            data = read_file(path).replace('\r\n','\n')

        symbols = {}
        try:
            # execute routes.py (or the supplied data) to harvest its symbols
            exec (data + '\n') in symbols
        except SyntaxError, e:
            logger.error(
                '%s has a syntax error and will not be loaded\n' % path
                + traceback.format_exc())
            raise e

    p = _params_default(app)

    # regex-style patterns are precompiled via compile_regex()
    for sym in ('routes_app', 'routes_in', 'routes_out'):
        if sym in symbols:
            for (k, v) in symbols[sym]:
                p[sym].append(compile_regex(k, v))
    # plain-valued settings are copied as-is
    for sym in ('routes_onerror', 'routes_apps_raw',
                'error_handler','error_message', 'error_message_ticket',
                'default_application','default_controller', 'default_function'):
        if sym in symbols:
            p[sym] = symbols[sym]
    if 'routers' in symbols:
        p.routers = Storage(symbols['routers'])
        for key in p.routers:
            if isinstance(p.routers[key], dict):
                p.routers[key] = Storage(p.routers[key])

    if app is None:
        params = p  # install base rewrite parameters
        thread.routes = params  # install default as current routes
        #
        #  create the BASE router if routers in use
        #
        routers = params.routers  # establish routers if present
        if isinstance(routers, dict):
            routers = Storage(routers)
        if routers is not None:
            router = _router_default()
            if routers.BASE:
                router.update(routers.BASE)
            routers.BASE = router

        #  scan each app in applications/
        #    create a router, if routers are in use
        #    parse the app-specific routes.py if present
        #
        all_apps = []
        for appname in [app for app in os.listdir(abspath('applications'))
                        if not app.startswith('.')]:
            if os.path.isdir(abspath('applications', appname)) and \
               os.path.isdir(abspath('applications', appname, 'controllers')):
                all_apps.append(appname)
                if routers:
                    router = Storage(routers.BASE)  # new copy
                    if appname in routers:
                        for key in routers[appname].keys():
                            if key in ROUTER_BASE_KEYS:
                                raise SyntaxError, "BASE-only key '%s' in router '%s'" % (key, appname)
                        router.update(routers[appname])
                    routers[appname] = router
                if os.path.exists(abspath('applications', appname, routes)):
                    # recursive call parses the app-specific routes.py
                    load(routes, appname)

        if routers:
            load_routers(all_apps)

    else:  # app
        params_apps[app] = p
        if routers and p.routers:
            if app in p.routers:
                routers[app].update(p.routers[app])

    logger.debug('URL rewrite is on. configuration in %s' % path)


regex_at = re.compile(r'(?<!\\)\$[a-zA-Z]\w*')
regex_anything = re.compile(r'(?<!\\)\$anything')


def compile_regex(k, v):
    """
    Preprocess and compile the regular expressions in routes_app/in/out

    The resulting regex will match a pattern of the form:

        [remote address]:[protocol]://[host]:[method] [path]

    We allow abbreviated regexes on input; here we try to complete them.
    """
    k0 = k  # original k for error reporting
    # bracket regex in ^...$ if not already done
    if not k[0] == '^':
        k = '^%s' % k
    if not k[-1] == '$':
        k = '%s$' % k
    # if there are no :-separated parts, prepend a catch-all for the IP address
    if k.find(':') < 0:
        # k = '^.*?:%s' % k[1:]
        k = '^.*?:https?://[^:/]+:[a-z]+ %s' % k[1:]
    # if there's no ://, provide a catch-all for the protocol, host & method
    if k.find('://') < 0:
        i = k.find(':/')
        if i < 0:
            raise SyntaxError, "routes pattern syntax error: path needs leading '/' [%s]" % k0
        k = r'%s:https?://[^:/]+:[a-z]+ %s' % (k[:i], k[i+1:])
    # $anything -> ?P<anything>.*
    for item in regex_anything.findall(k):
        k = k.replace(item, '(?P<anything>.*)')
    # $a (etc) -> ?P<a>\w+
    for item in regex_at.findall(k):
        k = k.replace(item, r'(?P<%s>\w+)' % item[1:])
    # same for replacement pattern, but with \g
    for item in regex_at.findall(v):
        v = v.replace(item, r'\g<%s>' % item[1:])
    return (re.compile(k, re.DOTALL), v)


def load_routers(all_apps):
    """
    Load-time post-processing of routers.

    Validates keys, normalizes controllers/functions/languages to sets,
    auto-discovers controllers for 'DEFAULT', and rewrites BASE.domains
    into (domain, port) -> (application, controller) tuples.
    """
    for app in routers.keys():
        # initialize apps with routers that aren't present, on behalf of unit tests
        if app not in all_apps:
            all_apps.append(app)
            # NOTE(review): merge-with-BASE reconstructed as nested here,
            # since apps scanned by load() were already merged — confirm.
            router = Storage(routers.BASE)  # new copy
            if app != 'BASE':
                for key in routers[app].keys():
                    if key in ROUTER_BASE_KEYS:
                        raise SyntaxError, "BASE-only key '%s' in router '%s'" % (key, app)
                router.update(routers[app])
            routers[app] = router
        router = routers[app]
        for key in router.keys():
            if key not in ROUTER_KEYS:
                raise SyntaxError, "unknown key '%s' in router '%s'" % (key, app)
        if not router.controllers:
            router.controllers = set()
        elif not isinstance(router.controllers, str):
            router.controllers = set(router.controllers)
        if router.functions:
            router.functions = set(router.functions)
        else:
            router.functions = set()
        if router.languages:
            router.languages = set(router.languages)
        else:
            router.languages = set()
        if app != 'BASE':
            # BASE-only keys are stripped from app routers
            for base_only in ROUTER_BASE_KEYS:
                router.pop(base_only, None)
            if 'domain' in router:
                routers.BASE.domains[router.domain] = app
        if isinstance(router.controllers, str) and router.controllers == 'DEFAULT':
            # 'DEFAULT' means: discover controllers from the filesystem
            router.controllers = set()
            if os.path.isdir(abspath('applications', app)):
                cpath = abspath('applications', app, 'controllers')
                for cname in os.listdir(cpath):
                    if os.path.isfile(abspath(cpath, cname)) and cname.endswith('.py'):
                        router.controllers.add(cname[:-3])
        if router.controllers:
            router.controllers.add('static')
            router.controllers.add(router.default_controller)
        if router.functions:
            router.functions.add(router.default_function)

    if isinstance(routers.BASE.applications, str) and routers.BASE.applications == 'ALL':
        routers.BASE.applications = list(all_apps)
    if routers.BASE.applications:
        routers.BASE.applications = set(routers.BASE.applications)
    else:
        routers.BASE.applications = set()

    for app in routers.keys():
        # set router name
        router = routers[app]
        router.name = app
        # compile URL validation patterns
        router._acfe_match = re.compile(router.acfe_match)
        router._file_match = re.compile(router.file_match)
        if router.args_match:
            router._args_match = re.compile(router.args_match)
        # convert path_prefix to a list of path elements
        if router.path_prefix:
            if isinstance(router.path_prefix, str):
                router.path_prefix = router.path_prefix.strip('/').split('/')

    #  rewrite BASE.domains as tuples
    #
    #      key:    'domain[:port]' -> (domain, port)
    #      value:  'application[/controller] -> (application, controller)
    #      (port and controller may be None)
    #
    domains = dict()
    if routers.BASE.domains:
        for (domain, app) in [(d.strip(':'), a.strip('/'))
                              for (d, a) in routers.BASE.domains.items()]:
            port = None
            if ':' in domain:
                (domain, port) = domain.split(':')
            ctlr = None
            if '/' in app:
                (app, ctlr) = app.split('/')
            if app not in all_apps and app not in routers:
                raise SyntaxError, "unknown app '%s' in domains" % app
            domains[(domain, port)] = (app, ctlr)
    routers.BASE.domains = domains


def regex_uri(e, regexes, tag, default=None):
    """
    Filter the incoming URI against a list of (regex, value) pairs.

    Builds the canonical key
        remote_addr:scheme://host:method path
    and returns the first substitution that matches, else *default*.
    """
    path = e['PATH_INFO']
    host = e.get('HTTP_HOST', 'localhost').lower()
    i = host.find(':')
    if i > 0:
        host = host[:i]  # strip port, if any
    key = '%s:%s://%s:%s %s' % \
        (e.get('REMOTE_ADDR','localhost'),
         e.get('WSGI_URL_SCHEME', 'http').lower(), host,
         e.get('REQUEST_METHOD', 'get').lower(), path)
    for (regex, value) in regexes:
        if regex.match(key):
            rewritten = regex.sub(value, key)
            logger.debug('%s: [%s] [%s] -> %s' % (tag, key, value, rewritten))
            return rewritten
    logger.debug('%s: [%s] -> %s (not rewritten)' % (tag, key, default))
    return default


def regex_select(env=None, app=None, request=None):
    """
    select a set of regex rewrite params for the current request
    """
    if app:
        thread.routes = params_apps.get(app, params)
    elif env and params.routes_app:
        if routers:
            map_url_in(request, env, app=True)
        else:
            app = regex_uri(env, params.routes_app, "routes_app")
            thread.routes = params_apps.get(app, params)
    else:
        thread.routes = params  # default to base rewrite parameters
    logger.debug("select routing parameters: %s" % thread.routes.name)
    return app  # for doctest


def regex_filter_in(e):
    """
    Regex rewrite the incoming URL (PATH_INFO/QUERY_STRING in *e*);
    preserves the original URI in WEB2PY_ORIGINAL_URI.
    """
    query = e.get('QUERY_STRING', None)
    e['WEB2PY_ORIGINAL_URI'] = e['PATH_INFO'] + (query and ('?' + query) or '')
    if thread.routes.routes_in:
        path = regex_uri(e, thread.routes.routes_in, "routes_in", e['PATH_INFO'])
        items = path.split('?', 1)
        e['PATH_INFO'] = items[0]
        if len(items) > 1:
            # a query string injected by the rewrite precedes the original one
            if query:
                query = items[1] + '&' + query
            else:
                query = items[1]
            e['QUERY_STRING'] = query
    e['REQUEST_URI'] = e['PATH_INFO'] + (query and ('?' + query) or '')
    return e


# pattern to replace spaces with underscore in URL
# also the html escaped variants '+' and '%20' are covered
regex_space = re.compile('(\+|\s|%20)+')

# pattern to find valid paths in url /application/controller/...
# this could be:
#   for static pages:
#      /<b:application>/static/<x:file>
#   for dynamic pages:
#      /<a:application>[/<c:controller>[/<f:function>[.<e:ext>][/<s:args>]]]
# application, controller, function and ext may only contain [a-zA-Z0-9_]
# file and args may also contain '-', '=', '.' and '/'
# apps in routes_apps_raw must parse raw_args into args

regex_static = re.compile(r'''
     (^                              # static pages
         /(?P<b> \w+)                # b=app
         /static                     # /b/static
         /(?P<x> (\w[\-\=\./]?)* )   # x=file
     $)
     ''', re.X)

regex_url = re.compile(r'''
     (^(                                  # (/a/c/f.e/s)
         /(?P<a> [\w\s+]+ )               # /a=app
         (                                # (/c.f.e/s)
             /(?P<c> [\w\s+]+ )           # /a/c=controller
             (                            # (/f.e/s)
                 /(?P<f> [\w\s+]+ )       # /a/c/f=function
                 (                        # (.e)
                     \.(?P<e> [\w\s+]+ )  # /a/c/f.e=extension
                 )?
                 (                        # (/s)
                     /(?P<r>              # /a/c/f.e/r=raw_args
                     .*
                     )
                 )?
             )?
         )?
     )?
     /?$)
     ''', re.X)

regex_args = re.compile(r'''
     (^
         (?P<s>
             ( [\w@/-][=.]? )*            # s=args
         )?
     /?$)                                 # trailing slash
     ''', re.X)


def regex_url_in(request, environ):
    """
    Rewrite and parse the incoming URL.

    Returns (static_file, environ) when the request maps to a static
    file, else (None, environ) after filling request.application /
    controller / function / extension / args. Raises HTTP(400) on an
    invalid path.
    """
    # ##################################################
    # select application
    # rewrite URL if routes_in is defined
    # update request.env
    # ##################################################

    regex_select(env=environ, request=request)
    if thread.routes.routes_in:
        environ = regex_filter_in(environ)
    for (key, value) in environ.items():
        request.env[key.lower().replace('.', '_')] = value
    path = request.env.path_info.replace('\\', '/')

    # ##################################################
    # serve if a static file
    # ##################################################

    match = regex_static.match(regex_space.sub('_', path))
    if match and match.group('x'):
        static_file = os.path.join(request.env.applications_parent,
                                   'applications', match.group('b'),
                                   'static', match.group('x'))
        return (static_file, environ)

    # ##################################################
    # parse application, controller and function
    # ##################################################

    path = re.sub('%20', ' ', path)
    match = regex_url.match(path)
    if not match or match.group('c') == 'static':
        raise HTTP(400,
                   thread.routes.error_message % 'invalid request',
                   web2py_error='invalid path')

    request.application = \
        regex_space.sub('_', match.group('a') or thread.routes.default_application)
    request.controller = \
        regex_space.sub('_', match.group('c') or thread.routes.default_controller)
    request.function = \
        regex_space.sub('_', match.group('f') or thread.routes.default_function)
    group_e = match.group('e')
    request.raw_extension = group_e and regex_space.sub('_', group_e) or None
    request.extension = request.raw_extension or 'html'
    request.raw_args = match.group('r')
    request.args = List([])
    if request.application in thread.routes.routes_apps_raw:
        # application is responsible for parsing args
        request.args = None
    elif request.raw_args:
        match = regex_args.match(request.raw_args.replace(' ', '_'))
        if match:
            group_s = match.group('s')
            request.args = \
                List((group_s and group_s.split('/')) or [])
            if request.args and request.args[-1] == '':
                request.args.pop()  # adjust for trailing empty arg
        else:
            raise HTTP(400,
                       thread.routes.error_message % 'invalid request',
                       web2py_error='invalid path (args)')
    return (None, environ)


def regex_filter_out(url, e=None):
    """
    Regex rewrite the outgoing URL using routes_out.

    *e* is a request.env-style dict with lowercase keys; without it a
    ':http://localhost:get' placeholder key is used.
    """
    if not hasattr(thread, 'routes'):
        regex_select()  # ensure thread.routes is set (for application threads)
    if routers:
        return url  # already filtered
    if thread.routes.routes_out:
        items = url.split('?', 1)
        if e:
            host = e.get('http_host', 'localhost').lower()
            i = host.find(':')
            if i > 0:
                host = host[:i]
            items[0] = '%s:%s://%s:%s %s' % \
                (e.get('remote_addr', ''),
                 e.get('wsgi_url_scheme', 'http').lower(), host,
                 e.get('request_method', 'get').lower(), items[0])
        else:
            items[0] = ':http://localhost:get %s' % items[0]
        for (regex, value) in thread.routes.routes_out:
            if regex.match(items[0]):
                rewritten = '?'.join([regex.sub(value, items[0])] + items[1:])
                logger.debug('routes_out: [%s] -> %s' % (url, rewritten))
                return rewritten
    logger.debug('routes_out: [%s] not rewritten' % url)
    return url


def filter_url(url, method='get', remote='0.0.0.0', out=False, app=False,
               lang=None, domain=(None,None), env=False,
               scheme=None, host=None, port=None):
    """
    doctest/unittest interface to regex_filter_in() and regex_filter_out()

    Simulates a server environment from *url* and exercises either the
    inbound (default), outbound (out=True), or app-selection (app=True)
    rewrite path.
    """
    regex_url = re.compile(r'^(?P<scheme>http|https|HTTP|HTTPS)\://(?P<host>[^/]*)(?P<uri>.*)')
    match = regex_url.match(url)
    urlscheme = match.group('scheme').lower()
    urlhost = match.group('host').lower()
    uri = match.group('uri')
    k = uri.find('?')
    if k < 0:
        k = len(uri)
    (path_info, query_string) = (uri[:k], uri[k+1:])
    path_info = urllib.unquote(path_info)   # simulate server
    e = {
         'REMOTE_ADDR': remote,
         'REQUEST_METHOD': method,
         'WSGI_URL_SCHEME': urlscheme,
         'HTTP_HOST': urlhost,
         'REQUEST_URI': uri,
         'PATH_INFO': path_info,
         'QUERY_STRING': query_string,
         #for filter_out request.env use lowercase
         'remote_addr': remote,
         'request_method': method,
         'wsgi_url_scheme': urlscheme,
         'http_host': urlhost
    }
    request = Storage()
    e["applications_parent"] = global_settings.applications_parent
    request.env = Storage(e)
    request.uri_language = lang

    #  determine application only
    #
    if app:
        if routers:
            return map_url_in(request, e, app=True)
        return regex_select(e)

    #  rewrite outbound URL
    #
    if out:
        (request.env.domain_application, request.env.domain_controller) = domain
        items = path_info.lstrip('/').split('/')
        if items[-1] == '':
            items.pop()  # adjust trailing empty args
        assert len(items) >= 3, "at least /a/c/f is required"
        a = items.pop(0)
        c = items.pop(0)
        f = items.pop(0)
        if not routers:
            return regex_filter_out(uri, e)
        acf = map_url_out(request, None, a, c, f, items, None, scheme, host, port)
        if items:
            url = '%s/%s' % (acf, '/'.join(items))
            if items[-1] == '':
                url += '/'
        else:
            url = acf
        if query_string:
            url += '?' + query_string
        return url

    #  rewrite inbound URL
    #
    (static, e) = url_in(request, e)
    if static:
        return static
    result = "/%s/%s/%s" % (request.application, request.controller, request.function)
    if request.extension and request.extension != 'html':
        result += ".%s" % request.extension
    if request.args:
        result += " %s" % request.args
    if e['QUERY_STRING']:
        result += " ?%s" % e['QUERY_STRING']
    if request.uri_language:
        result += " (%s)" % request.uri_language
    if env:
        return request.env
    return result


def filter_err(status, application='app', ticket='tkt'):
    """
    doctest/unittest interface to routes_onerror

    Returns the redirect URL a matching routes_onerror entry would
    produce, or *status* unchanged when no action applies.
    """
    if status > 399 and thread.routes.routes_onerror:
        keys = set(('%s/%s' % (application, status),
                    '%s/*' % (application),
                    '*/%s' % (status),
                    '*/*'))
        for (key,redir) in thread.routes.routes_onerror:
            if key in keys:
                if redir == '!':
                    break
                elif '?' in redir:
                    url = redir + '&' + 'code=%s&ticket=%s' % (status,ticket)
                else:
                    url = redir + '?' + 'code=%s&ticket=%s' % (status,ticket)
                return url  # redirection
    return status  # no action

#  router support
#
class MapUrlIn(object):
    "logic for mapping incoming URLs"

    def __init__(self, request=None, env=None):
        "initialize a map-in object"
        self.request = request
        self.env = env

        self.router = None
        self.application = None
        self.language = None
        self.controller = None
        self.function = None
        self.extension = 'html'

        self.controllers = set()
        self.functions = set()
        self.languages = set()
        self.default_language = None
        self.map_hyphen = False
        self.exclusive_domain = False

        path = self.env['PATH_INFO']
        self.query = self.env.get('QUERY_STRING', None)
        path = path.lstrip('/')
        self.env['PATH_INFO'] = '/' + path
        self.env['WEB2PY_ORIGINAL_URI'] = self.env['PATH_INFO'] + (self.query and ('?
+ self.query) or '') # to handle empty args, strip exactly one trailing slash, if present # .../arg1// represents one trailing empty arg # if path.endswith('/'): path = path[:-1] self.args = List(path and path.split('/') or []) # see http://www.python.org/dev/peps/pep-3333/#url-reconstruction for URL composition self.remote_addr = self.env.get('REMOTE_ADDR','localhost') self.scheme = self.env.get('WSGI_URL_SCHEME', 'http').lower() self.method = self.env.get('REQUEST_METHOD', 'get').lower() self.host = self.env.get('HTTP_HOST') self.port = None if not self.host: self.host = self.env.get('SERVER_NAME') self.port = self.env.get('SERVER_PORT') if not self.host: self.host = 'localhost' self.port = '80' if ':' in self.host: (self.host, self.port) = self.host.split(':') if not self.port: if self.scheme == 'https': self.port = '443' else: self.port = '80' def map_prefix(self): "strip path prefix, if present in its entirety" prefix = routers.BASE.path_prefix if prefix: prefixlen = len(prefix) if prefixlen > len(self.args): return for i in xrange(prefixlen): if prefix[i] != self.args[i]: return # prefix didn't match self.args = List(self.args[prefixlen:]) # strip the prefix def map_app(self): "determine application name" base = routers.BASE # base router self.domain_application = None self.domain_controller = None arg0 = self.harg0 if base.applications and arg0 in base.applications: self.application = arg0 elif (self.host, self.port) in base.domains: (self.application, self.domain_controller) = base.domains[(self.host, self.port)] self.env['domain_application'] = self.application self.env['domain_controller'] = self.domain_controller elif (self.host, None) in base.domains: (self.application, self.domain_controller) = base.domains[(self.host, None)] self.env['domain_application'] = self.application self.env['domain_controller'] = self.domain_controller elif arg0 and not base.applications: self.application = arg0 else: self.application = base.default_application or '' 
self.pop_arg_if(self.application == arg0) if not base._acfe_match.match(self.application): raise HTTP(400, thread.routes.error_message % 'invalid request', web2py_error="invalid application: '%s'" % self.application) if self.application not in routers and \ (self.application != thread.routes.default_application or self.application == 'welcome'): raise HTTP(400, thread.routes.error_message % 'invalid request', web2py_error="unknown application: '%s'" % self.application) # set the application router # logger.debug("select application=%s" % self.application) self.request.application = self.application if self.application not in routers: self.router = routers.BASE # support gluon.main.wsgibase init->welcome else: self.router = routers[self.application] # application router self.controllers = self.router.controllers self.default_controller = self.domain_controller or self.router.default_controller self.functions = self.router.functions self.languages = self.router.languages self.default_language = self.router.default_language self.map_hyphen = self.router.map_hyphen self.exclusive_domain = self.router.exclusive_domain self._acfe_match = self.router._acfe_match self._file_match = self.router._file_match self._args_match = self.router._args_match def map_root_static(self): ''' handle root-static files (no hyphen mapping) a root-static file is one whose incoming URL expects it to be at the root, typically robots.txt & favicon.ico ''' if len(self.args) == 1 and self.arg0 in self.router.root_static: self.controller = self.request.controller = 'static' root_static_file = os.path.join(self.request.env.applications_parent, 'applications', self.application, self.controller, self.arg0) logger.debug("route: root static=%s" % root_static_file) return root_static_file return None def map_language(self): "handle language (no hyphen mapping)" arg0 = self.arg0 # no hyphen mapping if arg0 and self.languages and arg0 in self.languages: self.language = arg0 else: self.language = 
self.default_language if self.language: logger.debug("route: language=%s" % self.language) self.pop_arg_if(self.language == arg0) arg0 = self.arg0 def map_controller(self): "identify controller" # handle controller # arg0 = self.harg0 # map hyphens if not arg0 or (self.controllers and arg0 not in self.controllers): self.controller = self.default_controller or '' else: self.controller = arg0 self.pop_arg_if(arg0 == self.controller) logger.debug("route: controller=%s" % self.controller) if not self.router._acfe_match.match(self.controller): raise HTTP(400, thread.routes.error_message % 'invalid request', web2py_error='invalid controller') def map_static(self): ''' handle static files file_match but no hyphen mapping ''' if self.controller != 'static': return None file = '/'.join(self.args) if not self.router._file_match.match(file): raise HTTP(400, thread.routes.error_message % 'invalid request', web2py_error='invalid static file') # # support language-specific static subdirectories, # eg /appname/en/static/filename => applications/appname/static/en/filename # if language-specific file doesn't exist, try same file in static # if self.language: static_file = os.path.join(self.request.env.applications_parent, 'applications', self.application, 'static', self.language, file) if not self.language or not os.path.isfile(static_file): static_file = os.path.join(self.request.env.applications_parent, 'applications', self.application, 'static', file) logger.debug("route: static=%s" % static_file) return static_file def map_function(self): "handle function.extension" arg0 = self.harg0 # map hyphens if not arg0 or self.functions and arg0 not in self.functions and self.controller == self.default_controller: self.function = self.router.default_function or "" self.pop_arg_if(arg0 and self.function == arg0) else: func_ext = arg0.split('.') if len(func_ext) > 1: self.function = func_ext[0] self.extension = func_ext[-1] else: self.function = arg0 self.pop_arg_if(True) 
logger.debug("route: function.ext=%s.%s" % (self.function, self.extension)) if not self.router._acfe_match.match(self.function): raise HTTP(400, thread.routes.error_message % 'invalid request', web2py_error='invalid function') if self.extension and not self.router._acfe_match.match(self.extension): raise HTTP(400, thread.routes.error_message % 'invalid request', web2py_error='invalid extension') def validate_args(self): ''' check args against validation pattern ''' for arg in self.args: if not self.router._args_match.match(arg): raise HTTP(400, thread.routes.error_message % 'invalid request', web2py_error='invalid arg <%s>' % arg) def update_request(self): ''' update request from self build env.request_uri make lower-case versions of http headers in env ''' self.request.application = self.application self.request.controller = self.controller self.request.function = self.function self.request.extension = self.extension self.request.args = self.args if self.language: self.request.uri_language = self.language uri = '/%s/%s/%s' % (self.application, self.controller, self.function) if self.map_hyphen: uri = uri.replace('_', '-') if self.extension != 'html': uri += '.' + self.extension if self.language: uri = '/%s%s' % (self.language, uri) uri += self.args and urllib.quote('/' + '/'.join([str(x) for x in self.args])) or '' uri += (self.query and ('?' 
+ self.query) or '') self.env['REQUEST_URI'] = uri for (key, value) in self.env.items(): self.request.env[key.lower().replace('.', '_')] = value @property def arg0(self): "return first arg" return self.args(0) @property def harg0(self): "return first arg with optional hyphen mapping" if self.map_hyphen and self.args(0): return self.args(0).replace('-', '_') return self.args(0) def pop_arg_if(self, dopop): "conditionally remove first arg and return new first arg" if dopop: self.args.pop(0) class MapUrlOut(object): "logic for mapping outgoing URLs" def __init__(self, request, env, application, controller, function, args, other, scheme, host, port): "initialize a map-out object" self.default_application = routers.BASE.default_application if application in routers: self.router = routers[application] else: self.router = routers.BASE self.request = request self.env = env self.application = application self.controller = controller self.function = function self.args = args self.other = other self.scheme = scheme self.host = host self.port = port self.applications = routers.BASE.applications self.controllers = self.router.controllers self.functions = self.router.functions self.languages = self.router.languages self.default_language = self.router.default_language self.exclusive_domain = self.router.exclusive_domain self.map_hyphen = self.router.map_hyphen self.map_static = self.router.map_static self.path_prefix = routers.BASE.path_prefix self.domain_application = request and self.request.env.domain_application self.domain_controller = request and self.request.env.domain_controller self.default_function = self.router.default_function if (self.router.exclusive_domain and self.domain_application and self.domain_application != self.application and not self.host): raise SyntaxError, 'cross-domain conflict: must specify host' lang = request and request.uri_language if lang and self.languages and lang in self.languages: self.language = lang else: self.language = None 
self.omit_application = False self.omit_language = False self.omit_controller = False self.omit_function = False def omit_lang(self): "omit language if possible" if not self.language or self.language == self.default_language: self.omit_language = True def omit_acf(self): "omit what we can of a/c/f" router = self.router # Handle the easy no-args case of tail-defaults: /a/c /a / # if not self.args and self.function == router.default_function: self.omit_function = True if self.controller == router.default_controller: self.omit_controller = True if self.application == self.default_application: self.omit_application = True # omit default application # (which might be the domain default application) # default_application = self.domain_application or self.default_application if self.application == default_application: self.omit_application = True # omit controller if default controller # default_controller = ((self.application == self.domain_application) and self.domain_controller) or router.default_controller or '' if self.controller == default_controller: self.omit_controller = True # omit function if default controller/function # if self.functions and self.function == self.default_function and self.omit_controller: self.omit_function = True # prohibit ambiguous cases # # because we presume the lang string to be unambiguous, its presence protects application omission # if self.omit_language: if not self.applications or self.controller in self.applications: self.omit_application = False if self.omit_application: if not self.applications or self.function in self.applications: self.omit_controller = False if not self.controllers or self.function in self.controllers: self.omit_controller = False if self.args: if self.args[0] in self.functions or self.args[0] in self.controllers or self.args[0] in self.applications: self.omit_function = False if self.omit_controller: if self.function in self.controllers or self.function in self.applications: self.omit_controller = False if 
self.omit_application: if self.controller in self.applications: self.omit_application = False # handle static as a special case # (easier for external static handling) # if self.controller == 'static' or self.controller.startswith('static/'): if not self.map_static: self.omit_application = False if self.language: self.omit_language = False self.omit_controller = False self.omit_function = False def build_acf(self): "build acf from components" acf = '' if self.map_hyphen: self.application = self.application.replace('_', '-') self.controller = self.controller.replace('_', '-') if self.controller != 'static' and not self.controller.startswith('static/'): self.function = self.function.replace('_', '-') if not self.omit_application: acf += '/' + self.application if not self.omit_language: acf += '/' + self.language if not self.omit_controller: acf += '/' + self.controller if not self.omit_function: acf += '/' + self.function if self.path_prefix: acf = '/' + '/'.join(self.path_prefix) + acf if self.args: return acf return acf or '/' def acf(self): "convert components to /app/lang/controller/function" if not routers: return None # use regex filter self.omit_lang() # try to omit language self.omit_acf() # try to omit a/c/f return self.build_acf() # build and return the /a/lang/c/f string def map_url_in(request, env, app=False): "route incoming URL" # initialize router-url object # thread.routes = params # default to base routes map = MapUrlIn(request=request, env=env) map.map_prefix() # strip prefix if present map.map_app() # determine application # configure thread.routes for error rewrite # if params.routes_app: thread.routes = params_apps.get(app, params) if app: return map.application root_static_file = map.map_root_static() # handle root-static files if root_static_file: return (root_static_file, map.env) map.map_language() map.map_controller() static_file = map.map_static() if static_file: return (static_file, map.env) map.map_function() map.validate_args() 
map.update_request() return (None, map.env) def map_url_out(request, env, application, controller, function, args, other, scheme, host, port): ''' supply /a/c/f (or /a/lang/c/f) portion of outgoing url The basic rule is that we can only make transformations that map_url_in can reverse. Suppose that the incoming arguments are a,c,f,args,lang and that the router defaults are da, dc, df, dl. We can perform these transformations trivially if args=[] and lang=None or dl: /da/dc/df => / /a/dc/df => /a /a/c/df => /a/c We would also like to be able to strip the default application or application/controller from URLs with function/args present, thus: /da/c/f/args => /c/f/args /da/dc/f/args => /f/args We use [applications] and [controllers] and [functions] to suppress ambiguous omissions. We assume that language names do not collide with a/c/f names. ''' map = MapUrlOut(request, env, application, controller, function, args, other, scheme, host, port) return map.acf() def get_effective_router(appname): "return a private copy of the effective router for the specified application" if not routers or appname not in routers: return None return Storage(routers[appname]) # return a copy
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

from SimpleXMLRPCServer import SimpleXMLRPCDispatcher


def handler(request, response, methods):
    """Serve the body of `request` as an XML-RPC call.

    Every callable in `methods` is registered on a fresh dispatcher
    (with introspection functions enabled); the marshaled XML-RPC
    reply is returned for `response` to emit.
    """
    # XML-RPC exchanges carry no session state and the reply is XML
    response.session_id = None
    response.headers['Content-Type'] = 'text/xml'
    dispatcher = SimpleXMLRPCDispatcher(allow_none=True, encoding=None)
    for func in methods:
        dispatcher.register_function(func)
    dispatcher.register_introspection_functions()
    payload = request.body.read()
    dispatch = getattr(dispatcher, '_dispatch', None)
    return dispatcher._marshaled_dispatch(payload, dispatch)
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu> and Limodou <limodou@gmail.com>. License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) This makes uses of the pywin32 package (http://sourceforge.net/projects/pywin32/). You do not need to install this package to use web2py. """ import time import os import sys import traceback try: import win32serviceutil import win32service import win32event except: if os.name == 'nt': print "Warning, winservice is unable to install the Mark Hammond Win32 extensions" import servicemanager import _winreg from fileutils import up __all__ = ['web2py_windows_service_handler'] class Service(win32serviceutil.ServiceFramework): _svc_name_ = '_unNamed' _svc_display_name_ = '_Service Template' def __init__(self, *args): win32serviceutil.ServiceFramework.__init__(self, *args) self.stop_event = win32event.CreateEvent(None, 0, 0, None) def log(self, msg): servicemanager.LogInfoMsg(str(msg)) def SvcDoRun(self): self.ReportServiceStatus(win32service.SERVICE_START_PENDING) try: self.ReportServiceStatus(win32service.SERVICE_RUNNING) self.start() win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE) except: self.log(traceback.format_exc(sys.exc_info)) self.SvcStop() self.ReportServiceStatus(win32service.SERVICE_STOPPED) def SvcStop(self): self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) try: self.stop() except: self.log(traceback.format_exc(sys.exc_info)) win32event.SetEvent(self.stop_event) self.ReportServiceStatus(win32service.SERVICE_STOPPED) # to be overridden def start(self): pass # to be overridden def stop(self): pass class Web2pyService(Service): _svc_name_ = 'web2py' _svc_display_name_ = 'web2py Service' _exe_args_ = 'options' server = None def chdir(self): try: h = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'SYSTEM\CurrentControlSet\Services\%s' % self._svc_name_) try: cls = _winreg.QueryValue(h, 
'PythonClass') finally: _winreg.CloseKey(h) dir = os.path.dirname(cls) os.chdir(dir) return True except: self.log("Can't change to web2py working path; server is stopped") return False def start(self): self.log('web2py server starting') if not self.chdir(): return if len(sys.argv) == 2: opt_mod = sys.argv[1] else: opt_mod = self._exe_args_ options = __import__(opt_mod, [], [], '') if True: # legacy support for old options files, which have only (deprecated) numthreads if hasattr(options, 'numthreads') and not hasattr(options, 'minthreads'): options.minthreads = options.numthreads if not hasattr(options, 'minthreads'): options.minthreads = None if not hasattr(options, 'maxthreads'): options.maxthreads = None import main self.server = main.HttpServer( ip=options.ip, port=options.port, password=options.password, pid_filename=options.pid_filename, log_filename=options.log_filename, profiler_filename=options.profiler_filename, ssl_certificate=options.ssl_certificate, ssl_private_key=options.ssl_private_key, min_threads=options.minthreads, max_threads=options.maxthreads, server_name=options.server_name, request_queue_size=options.request_queue_size, timeout=options.timeout, shutdown_timeout=options.shutdown_timeout, path=options.folder ) try: self.server.start() except: # self.server.stop() self.server = None raise def stop(self): self.log('web2py server stopping') if not self.chdir(): return if self.server: self.server.stop() time.sleep(1) def web2py_windows_service_handler(argv=None, opt_file='options'): path = os.path.dirname(__file__) classstring = os.path.normpath(os.path.join(up(path), 'gluon.winservice.Web2pyService')) if opt_file: Web2pyService._exe_args_ = opt_file win32serviceutil.HandleCommandLine(Web2pyService, serviceClassString=classstring, argv=['', 'install']) win32serviceutil.HandleCommandLine(Web2pyService, serviceClassString=classstring, argv=argv) if __name__ == '__main__': web2py_windows_service_handler()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ import cgi import os import re import copy import types import urllib import base64 import sanitizer import rewrite import itertools import decoder import copy_reg import cPickle import marshal from HTMLParser import HTMLParser from htmlentitydefs import name2codepoint from contrib.markmin.markmin2html import render from storage import Storage from highlight import highlight from utils import web2py_uuid, hmac_hash import hmac import hashlib regex_crlf = re.compile('\r|\n') join = ''.join __all__ = [ 'A', 'B', 'BEAUTIFY', 'BODY', 'BR', 'BUTTON', 'CENTER', 'CAT', 'CODE', 'DIV', 'EM', 'EMBED', 'FIELDSET', 'FORM', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'HEAD', 'HR', 'HTML', 'I', 'IFRAME', 'IMG', 'INPUT', 'LABEL', 'LEGEND', 'LI', 'LINK', 'OL', 'UL', 'MARKMIN', 'MENU', 'META', 'OBJECT', 'ON', 'OPTION', 'P', 'PRE', 'SCRIPT', 'OPTGROUP', 'SELECT', 'SPAN', 'STYLE', 'TABLE', 'TAG', 'TD', 'TEXTAREA', 'TH', 'THEAD', 'TBODY', 'TFOOT', 'TITLE', 'TR', 'TT', 'URL', 'XHTML', 'XML', 'xmlescape', 'embed64', ] def xmlescape(data, quote = True): """ returns an escaped string of the provided data :param data: the data to be escaped :param quote: optional (default False) """ # first try the xml function if hasattr(data,'xml') and callable(data.xml): return data.xml() # otherwise, make it a string if not isinstance(data, (str, unicode)): data = str(data) elif isinstance(data, unicode): data = data.encode('utf8', 'xmlcharrefreplace') # ... 
and do the escaping data = cgi.escape(data, quote).replace("'","&#x27;") return data def URL( a=None, c=None, f=None, r=None, args=[], vars={}, anchor='', extension=None, env=None, hmac_key=None, hash_vars=True, salt=None, user_signature=None, scheme=None, host=None, port=None, ): """ generate a URL example:: >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], ... vars={'p':1, 'q':2}, anchor='1')) '/a/c/f/x/y/z?p=1&q=2#1' >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], ... vars={'p':(1,3), 'q':2}, anchor='1')) '/a/c/f/x/y/z?p=1&p=3&q=2#1' >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], ... vars={'p':(3,1), 'q':2}, anchor='1')) '/a/c/f/x/y/z?p=3&p=1&q=2#1' >>> str(URL(a='a', c='c', f='f', anchor='1+2')) '/a/c/f#1%2B2' >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], ... vars={'p':(1,3), 'q':2}, anchor='1', hmac_key='key')) '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=5d06bb8a4a6093dd325da2ee591c35c61afbd3c6#1' generates a url '/a/c/f' corresponding to application a, controller c and function f. If r=request is passed, a, c, f are set, respectively, to r.application, r.controller, r.function. The more typical usage is: URL(r=request, f='index') that generates a url for the index function within the present application and controller. 
:param a: application (default to current if r is given) :param c: controller (default to current if r is given) :param f: function (default to current if r is given) :param r: request (optional) :param args: any arguments (optional) :param vars: any variables (optional) :param anchor: anchorname, without # (optional) :param hmac_key: key to use when generating hmac signature (optional) :param hash_vars: which of the vars to include in our hmac signature True (default) - hash all vars, False - hash none of the vars, iterable - hash only the included vars ['key1','key2'] :param scheme: URI scheme (True, 'http' or 'https', etc); forces absolute URL (optional) :param host: string to force absolute URL with host (True means http_host) :param port: optional port number (forces absolute URL) :raises SyntaxError: when no application, controller or function is available :raises SyntaxError: when a CRLF is found in the generated url """ if args in (None,[]): args = [] vars = vars or {} application = None controller = None function = None if not r: if a and not c and not f: (f,a,c)=(a,c,f) elif a and c and not f: (c,f,a)=(a,c,f) from globals import current if hasattr(current,'request'): r = current.request if r: application = r.application controller = r.controller function = r.function env = r.env if extension is None and r.extension != 'html': extension = r.extension if a: application = a if c: controller = c if f: if not isinstance(f, str): function = f.__name__ elif '.' 
in f: function, extension = f.split('.', 1) else: function = f function2 = '%s.%s' % (function,extension or 'html') if not (application and controller and function): raise SyntaxError, 'not enough information to build the url' if not isinstance(args, (list, tuple)): args = [args] other = args and urllib.quote('/' + '/'.join([str(x) for x in args])) or '' if other.endswith('/'): other += '/' # add trailing slash to make last trailing empty arg explicit if vars.has_key('_signature'): vars.pop('_signature') list_vars = [] for (key, vals) in sorted(vars.items()): if not isinstance(vals, (list, tuple)): vals = [vals] for val in vals: list_vars.append((key, val)) if user_signature: from globals import current if current.session.auth: hmac_key = current.session.auth.hmac_key if hmac_key: # generate an hmac signature of the vars & args so can later # verify the user hasn't messed with anything h_args = '/%s/%s/%s%s' % (application, controller, function2, other) # how many of the vars should we include in our hash? if hash_vars is True: # include them all h_vars = list_vars elif hash_vars is False: # include none of them h_vars = '' else: # include just those specified if hash_vars and not isinstance(hash_vars, (list, tuple)): hash_vars = [hash_vars] h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars] # re-assembling the same way during hash authentication message = h_args + '?' + urllib.urlencode(sorted(h_vars)) sig = hmac_hash(message,hmac_key,salt=salt) # add the signature into vars list_vars.append(('_signature', sig)) if list_vars: other += '?%s' % urllib.urlencode(list_vars) if anchor: other += '#' + urllib.quote(str(anchor)) if extension: function += '.' 
+ extension if regex_crlf.search(join([application, controller, function, other])): raise SyntaxError, 'CRLF Injection Detected' url = rewrite.url_out(r, env, application, controller, function, args, other, scheme, host, port) return url def verifyURL(request, hmac_key=None, hash_vars=True, salt=None, user_signature=None): """ Verifies that a request's args & vars have not been tampered with by the user :param request: web2py's request object :param hmac_key: the key to authenticate with, must be the same one previously used when calling URL() :param hash_vars: which vars to include in our hashing. (Optional) Only uses the 1st value currently True (or undefined) means all, False none, an iterable just the specified keys do not call directly. Use instead: URL.verify(hmac_key='...') the key has to match the one used to generate the URL. >>> r = Storage() >>> gv = Storage(p=(1,3),q=2,_signature='5d06bb8a4a6093dd325da2ee591c35c61afbd3c6') >>> r.update(dict(application='a', controller='c', function='f')) >>> r['args'] = ['x', 'y', 'z'] >>> r['get_vars'] = gv >>> verifyURL(r, 'key') True >>> verifyURL(r, 'kay') False >>> r.get_vars.p = (3, 1) >>> verifyURL(r, 'key') True >>> r.get_vars.p = (3, 2) >>> verifyURL(r, 'key') False """ if not request.get_vars.has_key('_signature'): return False # no signature in the request URL # check if user_signature requires if user_signature: from globals import current if not current.session: return False hmac_key = current.session.auth.hmac_key if not hmac_key: return False # get our sig from request.get_vars for later comparison original_sig = request.get_vars._signature # now generate a new hmac for the remaining args & vars vars, args = request.get_vars, request.args # remove the signature var since it was not part of our signed message request.get_vars.pop('_signature') # join all the args & vars into one long string # always include all of the args other = args and urllib.quote('/' + '/'.join([str(x) for x in args])) or '' h_args = 
'/%s/%s/%s.%s%s' % (request.application, request.controller, request.function, request.extension, other) # but only include those vars specified (allows more flexibility for use with # forms or ajax) list_vars = [] for (key, vals) in sorted(vars.items()): if not isinstance(vals, (list, tuple)): vals = [vals] for val in vals: list_vars.append((key, val)) # which of the vars are to be included? if hash_vars is True: # include them all h_vars = list_vars elif hash_vars is False: # include none of them h_vars = '' else: # include just those specified # wrap in a try - if the desired vars have been removed it'll fail try: if hash_vars and not isinstance(hash_vars, (list, tuple)): hash_vars = [hash_vars] h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars] except: # user has removed one of our vars! Immediate fail return False # build the full message string with both args & vars message = h_args + '?' + urllib.urlencode(sorted(h_vars)) # hash with the hmac_key provided sig = hmac_hash(message,str(hmac_key),salt=salt) # put _signature back in get_vars just in case a second call to URL.verify is performed # (otherwise it'll immediately return false) request.get_vars['_signature'] = original_sig # return whether or not the signature in the request matched the one we just generated # (I.E. 
was the message the same as the one we originally signed) return original_sig == sig URL.verify = verifyURL ON = True class XmlComponent(object): """ Abstract root for all Html components """ # TODO: move some DIV methods to here def xml(self): raise NotImplementedError class XML(XmlComponent): """ use it to wrap a string that contains XML/HTML so that it will not be escaped by the template example: >>> XML('<h1>Hello</h1>').xml() '<h1>Hello</h1>' """ def __init__( self, text, sanitize = False, permitted_tags = [ 'a', 'b', 'blockquote', 'br/', 'i', 'li', 'ol', 'ul', 'p', 'cite', 'code', 'pre', 'img/', 'h1','h2','h3','h4','h5','h6', 'table','tr','td','div', ], allowed_attributes = { 'a': ['href', 'title'], 'img': ['src', 'alt'], 'blockquote': ['type'], 'td': ['colspan'], }, ): """ :param text: the XML text :param sanitize: sanitize text using the permitted tags and allowed attributes (default False) :param permitted_tags: list of permitted tags (default: simple list of tags) :param allowed_attributes: dictionary of allowed attributed (default for A, IMG and BlockQuote). The key is the tag; the value is a list of allowed attributes. 
""" if sanitize: text = sanitizer.sanitize(text, permitted_tags, allowed_attributes) if isinstance(text, unicode): text = text.encode('utf8', 'xmlcharrefreplace') elif not isinstance(text, str): text = str(text) self.text = text def xml(self): return self.text def __str__(self): return self.xml() def __add__(self,other): return '%s%s' % (self,other) def __radd__(self,other): return '%s%s' % (other,self) def __cmp__(self,other): return cmp(str(self),str(other)) def __hash__(self): return hash(str(self)) def __getattr__(self,name): return getattr(str(self),name) def __getitem__(self,i): return str(self)[i] def __getslice__(self,i,j): return str(self)[i:j] def __iter__(self): for c in str(self): yield c def __len__(self): return len(str(self)) def flatten(self,render=None): """ return the text stored by the XML object rendered by the render function """ if render: return render(self.text,None,{}) return self.text def elements(self, *args, **kargs): """ to be considered experimental since the behavior of this method is questionable another options could be TAG(self.text).elements(*args,**kargs) """ return [] ### important to allow safe session.flash=T(....) def XML_unpickle(data): return marshal.loads(data) def XML_pickle(data): return XML_unpickle, (marshal.dumps(str(data)),) copy_reg.pickle(XML, XML_pickle, XML_unpickle) class DIV(XmlComponent): """ HTML helper, for easy generating and manipulating a DOM structure. Little or no validation is done. Behaves like a dictionary regarding updating of attributes. Behaves like a list regarding inserting/appending components. example:: >>> DIV('hello', 'world', _style='color:red;').xml() '<div style=\"color:red;\">helloworld</div>' all other HTML helpers are derived from DIV. 
_something=\"value\" attributes are transparently translated into something=\"value\" HTML attributes """ # name of the tag, subclasses should update this # tags ending with a '/' denote classes that cannot # contain components tag = 'div' def __init__(self, *components, **attributes): """ :param *components: any components that should be nested in this element :param **attributes: any attributes you want to give to this element :raises SyntaxError: when a stand alone tag receives components """ if self.tag[-1:] == '/' and components: raise SyntaxError, '<%s> tags cannot have components'\ % self.tag if len(components) == 1 and isinstance(components[0], (list,tuple)): self.components = list(components[0]) else: self.components = list(components) self.attributes = attributes self._fixup() # converts special attributes in components attributes self._postprocessing() self.parent = None for c in self.components: self._setnode(c) def update(self, **kargs): """ dictionary like updating of the tag attributes """ for (key, value) in kargs.items(): self[key] = value return self def append(self, value): """ list style appending of components >>> a=DIV() >>> a.append(SPAN('x')) >>> print a <div><span>x</span></div> """ self._setnode(value) ret = self.components.append(value) self._fixup() return ret def insert(self, i, value): """ list style inserting of components >>> a=DIV() >>> a.insert(0,SPAN('x')) >>> print a <div><span>x</span></div> """ self._setnode(value) ret = self.components.insert(i, value) self._fixup() return ret def __getitem__(self, i): """ gets attribute with name 'i' or component #i. If attribute 'i' is not found returns None :param i: index if i is a string: the name of the attribute otherwise references to number of the component """ if isinstance(i, str): try: return self.attributes[i] except KeyError: return None else: return self.components[i] def __setitem__(self, i, value): """ sets attribute with name 'i' or component #i. 
:param i: index if i is a string: the name of the attribute otherwise references to number of the component :param value: the new value """ self._setnode(value) if isinstance(i, (str, unicode)): self.attributes[i] = value else: self.components[i] = value def __delitem__(self, i): """ deletes attribute with name 'i' or component #i. :param i: index if i is a string: the name of the attribute otherwise references to number of the component """ if isinstance(i, str): del self.attributes[i] else: del self.components[i] def __len__(self): """ returns the number of included components """ return len(self.components) def __nonzero__(self): """ always return True """ return True def _fixup(self): """ Handling of provided components. Nothing to fixup yet. May be overridden by subclasses, eg for wrapping some components in another component or blocking them. """ return def _wrap_components(self, allowed_parents, wrap_parent = None, wrap_lambda = None): """ helper for _fixup. Checks if a component is in allowed_parents, otherwise wraps it in wrap_parent :param allowed_parents: (tuple) classes that the component should be an instance of :param wrap_parent: the class to wrap the component in, if needed :param wrap_lambda: lambda to use for wrapping, if needed """ components = [] for c in self.components: if isinstance(c, allowed_parents): pass elif wrap_lambda: c = wrap_lambda(c) else: c = wrap_parent(c) if isinstance(c,DIV): c.parent = self components.append(c) self.components = components def _postprocessing(self): """ Handling of attributes (normally the ones not prefixed with '_'). Nothing to postprocess yet. 
May be overridden by subclasses """ return def _traverse(self, status, hideerror=False): # TODO: docstring newstatus = status for c in self.components: if hasattr(c, '_traverse') and callable(c._traverse): c.vars = self.vars c.request_vars = self.request_vars c.errors = self.errors c.latest = self.latest c.session = self.session c.formname = self.formname c['hideerror']=hideerror newstatus = c._traverse(status,hideerror) and newstatus # for input, textarea, select, option # deal with 'value' and 'validation' name = self['_name'] if newstatus: newstatus = self._validate() self._postprocessing() elif 'old_value' in self.attributes: self['value'] = self['old_value'] self._postprocessing() elif name and name in self.vars: self['value'] = self.vars[name] self._postprocessing() if name: self.latest[name] = self['value'] return newstatus def _validate(self): """ nothing to validate yet. May be overridden by subclasses """ return True def _setnode(self,value): if isinstance(value,DIV): value.parent = self def _xml(self): """ helper for xml generation. Returns separately: - the component attributes - the generated xml of the inner components Component attributes start with an underscore ('_') and do not have a False or None value. The underscore is removed. A value of True is replaced with the attribute name. :returns: tuple: (attributes, components) """ # get the attributes for this component # (they start with '_', others may have special meanings) fa = '' for key in sorted(self.attributes): value = self[key] if key[:1] != '_': continue name = key[1:] if value is True: value = name elif value is False or value is None: continue fa += ' %s="%s"' % (name, xmlescape(value, True)) # get the xml for the inner components co = join([xmlescape(component) for component in self.components]) return (fa, co) def xml(self): """ generates the xml for this component. 
""" (fa, co) = self._xml() if not self.tag: return co if self.tag[-1:] == '/': # <tag [attributes] /> return '<%s%s />' % (self.tag[:-1], fa) # else: <tag [attributes]> inner components xml </tag> return '<%s%s>%s</%s>' % (self.tag, fa, co, self.tag) def __str__(self): """ str(COMPONENT) returns equals COMPONENT.xml() """ return self.xml() def flatten(self, render=None): """ return the text stored by the DIV object rendered by the render function the render function must take text, tagname, and attributes render=None is equivalent to render=lambda text, tag, attr: text >>> markdown = lambda text,tag=None,attributes={}: \ {None: re.sub('\s+',' ',text), \ 'h1':'#'+text+'\\n\\n', \ 'p':text+'\\n'}.get(tag,text) >>> a=TAG('<h1>Header</h1><p>this is a test</p>') >>> a.flatten(markdown) '#Header\\n\\nthis is a test\\n' """ text = '' for c in self.components: if isinstance(c,XmlComponent): s=c.flatten(render) elif render: s=render(str(c)) else: s=str(c) text+=s if render: text = render(text,self.tag,self.attributes) return text regex_tag=re.compile('^[\w\-\:]+') regex_id=re.compile('#([\w\-]+)') regex_class=re.compile('\.([\w\-]+)') regex_attr=re.compile('\[([\w\-\:]+)=(.*?)\]') def elements(self, *args, **kargs): """ find all component that match the supplied attribute dictionary, or None if nothing could be found All components of the components are searched. 
>>> a = DIV(DIV(SPAN('x'),3,DIV(SPAN('y')))) >>> for c in a.elements('span',first_only=True): c[0]='z' >>> print a <div><div><span>z</span>3<div><span>y</span></div></div></div> >>> for c in a.elements('span'): c[0]='z' >>> print a <div><div><span>z</span>3<div><span>z</span></div></div></div> It also supports a syntax compatible with jQuery >>> a=TAG('<div><span><a id="1-1" u:v=$>hello</a></span><p class="this is a test">world</p></div>') >>> for e in a.elements('div a#1-1, p.is'): print e.flatten() hello world >>> for e in a.elements('#1-1'): print e.flatten() hello >>> a.elements('a[u:v=$]')[0].xml() '<a id="1-1" u:v="$">hello</a>' >>> a=FORM( INPUT(_type='text'), SELECT(range(1)), TEXTAREA() ) >>> for c in a.elements('input, select, textarea'): c['_disabled'] = 'disabled' >>> a.xml() '<form action="" enctype="multipart/form-data" method="post"><input disabled="disabled" type="text" /><select disabled="disabled"><option value="0">0</option></select><textarea cols="40" disabled="disabled" rows="10"></textarea></form>' """ if len(args)==1: args = [a.strip() for a in args[0].split(',')] if len(args)>1: subset = [self.elements(a,**kargs) for a in args] return reduce(lambda a,b:a+b,subset,[]) elif len(args)==1: items = args[0].split() if len(items)>1: subset=[a.elements(' '.join(items[1:]),**kargs) for a in self.elements(items[0])] return reduce(lambda a,b:a+b,subset,[]) else: item=items[0] if '#' in item or '.' 
in item or '[' in item: match_tag = self.regex_tag.search(item) match_id = self.regex_id.search(item) match_class = self.regex_class.search(item) match_attr = self.regex_attr.finditer(item) args = [] if match_tag: args = [match_tag.group()] if match_id: kargs['_id'] = match_id.group(1) if match_class: kargs['_class'] = re.compile('(?<!\w)%s(?!\w)' % \ match_class.group(1).replace('-','\\-').replace(':','\\:')) for item in match_attr: kargs['_'+item.group(1)]=item.group(2) return self.elements(*args,**kargs) # make a copy of the components matches = [] first_only = False if kargs.has_key("first_only"): first_only = kargs["first_only"] del kargs["first_only"] # check if the component has an attribute with the same # value as provided check = True tag = getattr(self,'tag').replace("/","") if args and tag not in args: check = False for (key, value) in kargs.items(): if isinstance(value,(str,int)): if self[key] != str(value): check = False elif key in self.attributes: if not value.search(str(self[key])): check = False else: check = False if 'find' in kargs: find = kargs['find'] for c in self.components: if isinstance(find,(str,int)): if isinstance(c,str) and str(find) in c: check = True else: if isinstance(c,str) and find.search(c): check = True # if found, return the component if check: matches.append(self) if first_only: return matches # loop the copy for c in self.components: if isinstance(c, XmlComponent): kargs['first_only'] = first_only child_matches = c.elements( *args, **kargs ) if first_only and len(child_matches) != 0: return child_matches matches.extend( child_matches ) return matches def element(self, *args, **kargs): """ find the first component that matches the supplied attribute dictionary, or None if nothing could be found Also the components of the components are searched. 
""" kargs['first_only'] = True elements = self.elements(*args, **kargs) if not elements: # we found nothing return None return elements[0] def siblings(self,*args,**kargs): """ find all sibling components that match the supplied argument list and attribute dictionary, or None if nothing could be found """ sibs = [s for s in self.parent.components if not s == self] matches = [] first_only = False if kargs.has_key("first_only"): first_only = kargs["first_only"] del kargs["first_only"] for c in sibs: try: check = True tag = getattr(c,'tag').replace("/","") if args and tag not in args: check = False for (key, value) in kargs.items(): if c[key] != value: check = False if check: matches.append(c) if first_only: break except: pass return matches def sibling(self,*args,**kargs): """ find the first sibling component that match the supplied argument list and attribute dictionary, or None if nothing could be found """ kargs['first_only'] = True sibs = self.siblings(*args, **kargs) if not sibs: return None return sibs[0] class CAT(DIV): tag = '' def TAG_unpickler(data): return cPickle.loads(data) def TAG_pickler(data): d = DIV() d.__dict__ = data.__dict__ marshal_dump = cPickle.dumps(d) return (TAG_unpickler, (marshal_dump,)) class __TAG__(XmlComponent): """ TAG factory example:: >>> print TAG.first(TAG.second('test'), _key = 3) <first key=\"3\"><second>test</second></first> """ def __getitem__(self, name): return self.__getattr__(name) def __getattr__(self, name): if name[-1:] == '_': name = name[:-1] + '/' if isinstance(name,unicode): name = name.encode('utf-8') class __tag__(DIV): tag = name copy_reg.pickle(__tag__, TAG_pickler, TAG_unpickler) return lambda *a, **b: __tag__(*a, **b) def __call__(self,html): return web2pyHTMLParser(decoder.decoder(html)).tree TAG = __TAG__() class HTML(DIV): """ There are four predefined document type definitions. 
They can be specified in the 'doctype' parameter: -'strict' enables strict doctype -'transitional' enables transitional doctype (default) -'frameset' enables frameset doctype -'html5' enables HTML 5 doctype -any other string will be treated as user's own doctype 'lang' parameter specifies the language of the document. Defaults to 'en'. See also :class:`DIV` """ tag = 'html' strict = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n' transitional = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' frameset = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">\n' html5 = '<!DOCTYPE HTML>\n' def xml(self): lang = self['lang'] if not lang: lang = 'en' self.attributes['_lang'] = lang doctype = self['doctype'] if doctype: if doctype == 'strict': doctype = self.strict elif doctype == 'transitional': doctype = self.transitional elif doctype == 'frameset': doctype = self.frameset elif doctype == 'html5': doctype = self.html5 else: doctype = '%s\n' % doctype else: doctype = self.transitional (fa, co) = self._xml() return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag) class XHTML(DIV): """ This is XHTML version of the HTML helper. There are three predefined document type definitions. They can be specified in the 'doctype' parameter: -'strict' enables strict doctype -'transitional' enables transitional doctype (default) -'frameset' enables frameset doctype -any other string will be treated as user's own doctype 'lang' parameter specifies the language of the document and the xml document. Defaults to 'en'. 'xmlns' parameter specifies the xml namespace. Defaults to 'http://www.w3.org/1999/xhtml'. 
See also :class:`DIV`
    """

    tag = 'html'

    # XHTML 1.0 doctype declarations, selectable via the 'doctype' attribute
    strict = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
    transitional = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
    frameset = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">\n'
    # default xml namespace, used when no 'xmlns' attribute is supplied
    xmlns = 'http://www.w3.org/1999/xhtml'

    def xml(self):
        """
        Serialize as '<doctype><html ...>...</html>', forcing the
        xmlns, lang and xml:lang attributes that XHTML requires.
        """
        xmlns = self['xmlns']
        if xmlns:
            self.attributes['_xmlns'] = xmlns
        else:
            self.attributes['_xmlns'] = self.xmlns
        lang = self['lang']
        if not lang:
            lang = 'en'
        self.attributes['_lang'] = lang
        self.attributes['_xml:lang'] = lang
        doctype = self['doctype']
        if doctype:
            if doctype == 'strict':
                doctype = self.strict
            elif doctype == 'transitional':
                doctype = self.transitional
            elif doctype == 'frameset':
                doctype = self.frameset
            else:
                # any other string is used verbatim as a custom doctype
                doctype = '%s\n' % doctype
        else:
            doctype = self.transitional
        (fa, co) = self._xml()
        return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag)


class HEAD(DIV):

    tag = 'head'


class TITLE(DIV):

    tag = 'title'


class META(DIV):

    # trailing '/' marks a standalone tag: no components allowed
    tag = 'meta/'


class LINK(DIV):

    tag = 'link/'


class SCRIPT(DIV):

    tag = 'script'

    def xml(self):
        """
        Serialize the script element. The body is NOT xml-escaped;
        it is wrapped in an HTML comment so legacy browsers that do
        not understand <script> will not render the code as text.
        """
        (fa, co) = self._xml()
        # no escaping of subcomponents
        co = '\n'.join([str(component) for component in
                        self.components])
        if co:
            # <script [attributes]><!--//--><![CDATA[//><!--
            # script body
            # //--><!]]></script>
            # return '<%s%s><!--//--><![CDATA[//><!--\n%s\n//--><!]]></%s>' % (self.tag, fa, co, self.tag)
            return '<%s%s><!--\n%s\n//--></%s>' % (self.tag, fa, co, self.tag)
        else:
            return DIV.xml(self)


class STYLE(DIV):

    tag = 'style'

    def xml(self):
        """
        Serialize the style element. The body is NOT xml-escaped;
        it is wrapped in a comment/CDATA sandwich for legacy browsers.
        """
        (fa, co) = self._xml()
        # no escaping of subcomponents
        co = '\n'.join([str(component) for component in
                        self.components])
        if co:
            # <style [attributes]><!--/*--><![CDATA[/*><!--*/
            # style body
            # /*]]>*/--></style>
            return '<%s%s><!--/*--><![CDATA[/*><!--*/\n%s\n/*]]>*/--></%s>' % (self.tag, fa, co, self.tag)
        else:
            return DIV.xml(self)


class IMG(DIV):

    tag = 'img/'


class SPAN(DIV):

    tag = 'span'


class BODY(DIV):

    tag = 'body'


class H1(DIV):

    tag = 'h1'


class H2(DIV):

    tag = 'h2'


class H3(DIV):

    tag = 'h3'


class H4(DIV):

    tag = 'h4'


class H5(DIV):

    tag = 'h5'


class H6(DIV):

    tag = 'h6'


class P(DIV):
    """
    Will replace ``\\n`` by ``<br />`` if the `cr2br` attribute is provided.

    see also :class:`DIV`
    """

    tag = 'p'

    def xml(self):
        text = DIV.xml(self)
        if self['cr2br']:
            text = text.replace('\n', '<br />')
        return text


class B(DIV):

    tag = 'b'


class BR(DIV):

    tag = 'br/'


class HR(DIV):

    tag = 'hr/'


class A(DIV):

    tag = 'a'

    def xml(self):
        """
        Serialize the anchor. Two special attributes change behaviour:
        'callback' turns the link into an ajax POST trigger,
        'cid' makes it (re)load the href into a web2py component.
        """
        if self['callback']:
            self['_onclick'] = "ajax('%s',[],'%s');return false;" % \
                (self['callback'], self['target'] or '')
            # keep the link focusable/clickable even without a real href
            self['_href'] = self['_href'] or '#null'
        elif self['cid']:
            self['_onclick'] = 'web2py_component("%s","%s");return false;' % \
                (self['_href'], self['cid'])
        return DIV.xml(self)


class BUTTON(DIV):

    tag = 'button'


class EM(DIV):

    tag = 'em'


class EMBED(DIV):

    tag = 'embed/'


class TT(DIV):

    tag = 'tt'


class PRE(DIV):

    tag = 'pre'


class CENTER(DIV):

    tag = 'center'


class CODE(DIV):
    """
    displays code in HTML with syntax highlighting.

    :param attributes: optional attributes:

        - language: indicates the language, otherwise PYTHON is assumed
        - link: can provide a link
        - styles: for styles

    Example::

        {{=CODE(\"print 'hello world'\", language='python', link=None,
            counter=1, styles={}, highlight_line=None)}}

    supported languages are \"python\", \"html_plain\", \"c\", \"cpp\",
    \"web2py\", \"html\". The \"html\" language interprets {{ and }} tags
    as \"web2py\" code, \"html_plain\" doesn't.

    if a link='/examples/global/vars/' is provided web2py keywords are
    linked to the online docs.

    the counter is used for line numbering, counter can be None or
    a prompt string.
    
""" def xml(self): language = self['language'] or 'PYTHON' link = self['link'] counter = self.attributes.get('counter', 1) highlight_line = self.attributes.get('highlight_line', None) styles = self['styles'] or {} return highlight( join(self.components), language=language, link=link, counter=counter, styles=styles, attributes=self.attributes, highlight_line=highlight_line, ) class LABEL(DIV): tag = 'label' class LI(DIV): tag = 'li' class UL(DIV): """ UL Component. If subcomponents are not LI-components they will be wrapped in a LI see also :class:`DIV` """ tag = 'ul' def _fixup(self): self._wrap_components(LI, LI) class OL(UL): tag = 'ol' class TD(DIV): tag = 'td' class TH(DIV): tag = 'th' class TR(DIV): """ TR Component. If subcomponents are not TD/TH-components they will be wrapped in a TD see also :class:`DIV` """ tag = 'tr' def _fixup(self): self._wrap_components((TD, TH), TD) class THEAD(DIV): tag = 'thead' def _fixup(self): self._wrap_components(TR, TR) class TBODY(DIV): tag = 'tbody' def _fixup(self): self._wrap_components(TR, TR) class TFOOT(DIV): tag = 'tfoot' def _fixup(self): self._wrap_components(TR, TR) class COL(DIV): tag = 'col' class COLGROUP(DIV): tag = 'colgroup' class TABLE(DIV): """ TABLE Component. 
If subcomponents are not TR/TBODY/THEAD/TFOOT-components
    they will be wrapped in a TR

    see also :class:`DIV`
    """

    tag = 'table'

    def _fixup(self):
        # wrap any stray child in a TR so the table stays well-formed
        self._wrap_components((TR, TBODY, THEAD, TFOOT, COL, COLGROUP), TR)


class I(DIV):

    tag = 'i'


class IFRAME(DIV):

    tag = 'iframe'


class INPUT(DIV):
    """
    INPUT Component

    examples::

        >>> INPUT(_type='text', _name='name', value='Max').xml()
        '<input name=\"name\" type=\"text\" value=\"Max\" />'

        >>> INPUT(_type='checkbox', _name='checkbox', value='on').xml()
        '<input checked=\"checked\" name=\"checkbox\" type=\"checkbox\" value=\"on\" />'

        >>> INPUT(_type='radio', _name='radio', _value='yes', value='yes').xml()
        '<input checked=\"checked\" name=\"radio\" type=\"radio\" value=\"yes\" />'

        >>> INPUT(_type='radio', _name='radio', _value='no', value='yes').xml()
        '<input name=\"radio\" type=\"radio\" value=\"no\" />'

    the input helper takes two special attributes value= and requires=.

    :param value: used to pass the initial value for the input field.
        value differs from _value because it works for checkboxes, radio,
        textarea and select/option too.

        - for a checkbox value should be '' or 'on'.
        - for a radio or select/option value should be the _value
            of the checked/selected item.

    :param requires: should be None, or a validator or a list of validators
        for the value of the field.
    """

    tag = 'input/'

    def _validate(self):

        # # this only changes value, not _value

        name = self['_name']
        if name is None or name == '':
            # an unnamed input cannot receive request data: nothing to check
            return True
        name = str(name)

        if self['_type'] != 'checkbox':
            # remember the pre-submission value so it can be restored on error
            self['old_value'] = self['value'] or self['_value'] or ''
            value = self.request_vars.get(name, '')
            self['value'] = value
        else:
            self['old_value'] = self['value'] or False
            value = self.request_vars.get(name)
            # a group of same-named checkboxes posts a list of values;
            # 'value' becomes True when this box's _value was among them
            if isinstance(value, (tuple, list)):
                self['value'] = self['_value'] in value
            else:
                self['value'] = self['_value'] == value

        requires = self['requires']
        if requires:
            if not isinstance(requires, (list, tuple)):
                requires = [requires]
            for validator in requires:
                # each validator may transform the value; stop at first error
                (value, errors) = validator(value)
                if errors != None:
                    self.vars[name] = value
                    self.errors[name] = errors
                    break
        if not name in self.errors:
            self.vars[name] = value
            return True
        return False

    def _postprocessing(self):
        """
        Translate the special 'value' attribute into the proper HTML
        attribute for this input type (_checked for checkbox/radio,
        _value for text/hidden).
        """
        t = self['_type']
        if not t:
            t = self['_type'] = 'text'
        t = t.lower()
        value = self['value']
        if self['_value'] == None:
            _value = None
        else:
            _value = str(self['_value'])
        if t == 'checkbox':
            if not _value:
                _value = self['_value'] = 'on'
            if not value:
                value = []
            elif value is True:
                value = [_value]
            elif not isinstance(value, (list, tuple)):
                # NOTE(review): '|' is treated as a multi-value separator here
                value = str(value).split('|')
            self['_checked'] = _value in value and 'checked' or None
        elif t == 'radio':
            if str(value) == str(_value):
                self['_checked'] = 'checked'
            else:
                self['_checked'] = None
        elif t == 'text' or t == 'hidden':
            if value != None:
                self['_value'] = value
            else:
                self['value'] = _value

    def xml(self):
        """
        Serialize the input; when this field has a validation error and
        hideerror is not set, append a <div class="error"> after it.
        """
        name = self.attributes.get('_name', None)
        if name and hasattr(self, 'errors') \
                and self.errors.get(name, None) \
                and self['hideerror'] != True:
            return DIV.xml(self) + DIV(self.errors[name],
                                       _class='error',
                                       errors=None,
                                       _id='%s__error' % name).xml()
        else:
            return DIV.xml(self)


class TEXTAREA(INPUT):
    """
    example::

        TEXTAREA(_name='sometext', value='blah '*100, requires=IS_NOT_EMPTY())

    'blah blah blah ...' will be the content of the textarea field.
    
""" tag = 'textarea' def _postprocessing(self): if not '_rows' in self.attributes: self['_rows'] = 10 if not '_cols' in self.attributes: self['_cols'] = 40 if self['value'] != None: self.components = [self['value']] elif self.components: self['value'] = self.components[0] class OPTION(DIV): tag = 'option' def _fixup(self): if not '_value' in self.attributes: self.attributes['_value'] = str(self.components[0]) class OBJECT(DIV): tag = 'object' class OPTGROUP(DIV): tag = 'optgroup' def _fixup(self): components = [] for c in self.components: if isinstance(c, OPTION): components.append(c) else: components.append(OPTION(c, _value=str(c))) self.components = components class SELECT(INPUT): """ example:: >>> from validators import IS_IN_SET >>> SELECT('yes', 'no', _name='selector', value='yes', ... requires=IS_IN_SET(['yes', 'no'])).xml() '<select name=\"selector\"><option selected=\"selected\" value=\"yes\">yes</option><option value=\"no\">no</option></select>' """ tag = 'select' def _fixup(self): components = [] for c in self.components: if isinstance(c, (OPTION, OPTGROUP)): components.append(c) else: components.append(OPTION(c, _value=str(c))) self.components = components def _postprocessing(self): component_list = [] for c in self.components: if isinstance(c, OPTGROUP): component_list.append(c.components) else: component_list.append([c]) options = itertools.chain(*component_list) value = self['value'] if value != None: if not self['_multiple']: for c in options: # my patch if value and str(c['_value'])==str(value): c['_selected'] = 'selected' else: c['_selected'] = None else: if isinstance(value,(list,tuple)): values = [str(item) for item in value] else: values = [str(value)] for c in options: # my patch if value and str(c['_value']) in values: c['_selected'] = 'selected' else: c['_selected'] = None class FIELDSET(DIV): tag = 'fieldset' class LEGEND(DIV): tag = 'legend' class FORM(DIV): """ example:: >>> from validators import IS_NOT_EMPTY >>> 
form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY()))
    >>> form.xml()
    '<form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"test\" type=\"text\" /></form>'

    a FORM is container for INPUT, TEXTAREA, SELECT and other helpers

    form has one important method::

        form.accepts(request.vars, session)

    if form is accepted (and all validators pass) form.vars
    contains the accepted vars, otherwise form.errors contains
    the errors. in case of errors the form is modified to present
    the errors to the user.
    """

    tag = 'form'

    def __init__(self, *components, **attributes):
        DIV.__init__(self, *components, **attributes)
        # vars/errors/latest are filled in by accepts()/_traverse()
        self.vars = Storage()
        self.errors = Storage()
        self.latest = Storage()

    def accepts(
        self,
        vars,
        session=None,
        formname='default',
        keepvalues=False,
        onvalidation=None,
        hideerror=False,
        ):
        """
        Process a submission: verify the CSRF formkey/formname, run all
        field validators, invoke the onvalidation callback(s), and store
        a fresh formkey in the session for the next render.

        Returns True when the form was submitted with a valid key and
        every validator passed; otherwise False (self.errors populated).
        """
        if vars.__class__.__name__ == 'Request':
            # allow passing the whole request object instead of request.vars
            vars = vars.post_vars
        self.errors.clear()
        self.request_vars = Storage()
        self.request_vars.update(vars)
        self.session = session
        self.formname = formname
        self.keepvalues = keepvalues

        # if this tag is a form and we are in accepting mode (status=True)
        # check formname and formkey
        status = True
        if self.session:
            formkey = self.session.get('_formkey[%s]' % self.formname, None)
            # check if user tampering with form and void CSRF
            if formkey != self.request_vars._formkey:
                status = False
        if self.formname != self.request_vars._formname:
            status = False
        if status and self.session:
            # check if editing a record that has been modified by the server
            if hasattr(self, 'record_hash') and self.record_hash != formkey:
                status = False
                self.record_changed = True
        # run every field's validators; status False forces error display
        status = self._traverse(status, hideerror)
        if onvalidation:
            if isinstance(onvalidation, dict):
                # dict form: separate success/failure callbacks
                onsuccess = onvalidation.get('onsuccess', None)
                onfailure = onvalidation.get('onfailure', None)
                if onsuccess and status:
                    onsuccess(self)
                if onfailure and vars and not status:
                    onfailure(self)
                # callbacks may add errors: recompute status from them
                status = len(self.errors) == 0
            elif status:
                if isinstance(onvalidation, (list, tuple)):
                    [f(self) for f in onvalidation]
                else:
                    onvalidation(self)
        if self.errors:
            status = False
        if session != None:
            # issue a new one-time formkey for the next submission
            if hasattr(self, 'record_hash'):
                formkey = self.record_hash
            else:
                formkey = web2py_uuid()
            self.formkey = session['_formkey[%s]' % formname] = formkey
        if status and not keepvalues:
            # clear the submitted values from the re-rendered form
            self._traverse(False, hideerror)
        return status

    def _postprocessing(self):
        # supply sensible form defaults when the caller gave none
        if not '_action' in self.attributes:
            self['_action'] = ''
        if not '_method' in self.attributes:
            self['_method'] = 'post'
        if not '_enctype' in self.attributes:
            self['_enctype'] = 'multipart/form-data'

    def hidden_fields(self):
        """
        Build the hidden inputs injected into every rendered form:
        any user-supplied 'hidden' dict plus the _formkey/_formname
        fields used by accepts() for CSRF protection.
        """
        c = []
        if 'hidden' in self.attributes:
            for (key, value) in self.attributes.get('hidden', {}).items():
                c.append(INPUT(_type='hidden', _name=key, _value=value))
        if hasattr(self, 'formkey') and self.formkey:
            c.append(INPUT(_type='hidden', _name='_formkey',
                     _value=self.formkey))
        if hasattr(self, 'formname') and self.formname:
            c.append(INPUT(_type='hidden', _name='_formname',
                     _value=self.formname))
        return DIV(c, _class="hidden")

    def xml(self):
        # serialize a throwaway copy so appending the hidden fields
        # does not mutate this form's component list
        newform = FORM(*self.components, **self.attributes)
        hidden_fields = self.hidden_fields()
        if hidden_fields.components:
            newform.append(hidden_fields)
        return DIV.xml(newform)

    def validate(self,
                 values=None,
                 session=None,
                 formname='default',
                 keepvalues=False,
                 onvalidation=None,
                 hideerror=False,
                 onsuccess='flash',
                 onfailure='flash',
                 message_onsuccess=None,
                 message_onfailure=None,
                 ):
        """
        This function validates the form,
        you can use it instead of directly form.accepts.

        
Usage: In controller def action(): form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY())) form.validate() #you can pass some args here - see below return dict(form=form) This can receive a bunch of arguments onsuccess = 'flash' - will show message_onsuccess in response.flash None - will do nothing can be a function (lambda form: pass) onfailure = 'flash' - will show message_onfailure in response.flash None - will do nothing can be a function (lambda form: pass) values = values to test the validation - dictionary, response.vars, session or other - Default to (request.vars, session) message_onsuccess message_onfailure """ from gluon import current if not session: session = current.session if not values: values = current.request.post_vars message_onsuccess = message_onsuccess or current.T("Success!") message_onfailure = message_onfailure or \ current.T("Errors in form, please check it out.") if self.accepts(values, session): if onsuccess == 'flash': current.response.flash = message_onsuccess elif callable(onsuccess): onsuccess(self) return True elif self.errors: if onfailure == 'flash': current.response.flash = message_onfailure elif callable(onfailure): onfailure(self) return False def process(self, values=None, session=None, **args): """ Perform the .validate() method but returns the form Usage in controllers: # directly on return def action(): #some code here return dict(form=FORM(...).process(...)) You can use it with FORM, SQLFORM or FORM based plugins Examples: #response.flash messages def action(): form = SQLFORM(db.table).process(message_onsuccess='Sucess!') retutn dict(form=form) # callback function # callback receives True or False as first arg, and a list of args. def my_callback(status, msg): response.flash = "Success! "+msg if status else "Errors occured" # after argument can be 'flash' to response.flash messages # or a function name to use as callback or None to do nothing. 
def action(): return dict(form=SQLFORM(db.table).process(onsuccess=my_callback) """ self.validate(values=values, session=session, **args) return self class BEAUTIFY(DIV): """ example:: >>> BEAUTIFY(['a', 'b', {'hello': 'world'}]).xml() '<div><table><tr><td><div>a</div></td></tr><tr><td><div>b</div></td></tr><tr><td><div><table><tr><td style="font-weight:bold;">hello</td><td valign="top">:</td><td><div>world</div></td></tr></table></div></td></tr></table></div>' turns any list, dictionary, etc into decent looking html. Two special attributes are :sorted: a function that takes the dict and returned sorted keys :keyfilter: a funciton that takes a key and returns its representation or None if the key is to be skipped. By default key[:1]=='_' is skipped. """ tag = 'div' @staticmethod def no_underscore(key): if key[:1]=='_': return None return key def __init__(self, component, **attributes): self.components = [component] self.attributes = attributes sorter = attributes.get('sorted',sorted) keyfilter = attributes.get('keyfilter',BEAUTIFY.no_underscore) components = [] attributes = copy.copy(self.attributes) level = attributes['level'] = attributes.get('level',6) - 1 if '_class' in attributes: attributes['_class'] += 'i' if level == 0: return for c in self.components: if hasattr(c,'xml') and callable(c.xml): components.append(c) continue elif hasattr(c,'keys') and callable(c.keys): rows = [] try: keys = (sorter and sorter(c)) or c for key in keys: if isinstance(key,(str,unicode)) and keyfilter: filtered_key = keyfilter(key) else: filtered_key = str(key) if filtered_key is None: continue value = c[key] if type(value) == types.LambdaType: continue rows.append(TR(TD(filtered_key, _style='font-weight:bold;'), TD(':',_valign='top'), TD(BEAUTIFY(value, **attributes)))) components.append(TABLE(*rows, **attributes)) continue except: pass if isinstance(c, str): components.append(str(c)) elif isinstance(c, unicode): components.append(c.encode('utf8')) elif isinstance(c, (list, 
tuple)): items = [TR(TD(BEAUTIFY(item, **attributes))) for item in c] components.append(TABLE(*items, **attributes)) elif isinstance(c, cgi.FieldStorage): components.append('FieldStorage object') else: components.append(repr(c)) self.components = components class MENU(DIV): """ Used to build menus Optional arguments _class: defaults to 'web2py-menu web2py-menu-vertical' ul_class: defaults to 'web2py-menu-vertical' li_class: defaults to 'web2py-menu-expand' Example: menu = MENU([['name', False, URL(...), [submenu]], ...]) {{=menu}} """ tag = 'ul' def __init__(self, data, **args): self.data = data self.attributes = args if not '_class' in self.attributes: self['_class'] = 'web2py-menu web2py-menu-vertical' if not 'ul_class' in self.attributes: self['ul_class'] = 'web2py-menu-vertical' if not 'li_class' in self.attributes: self['li_class'] = 'web2py-menu-expand' if not 'li_active' in self.attributes: self['li_active'] = 'web2py-menu-active' def serialize(self, data, level=0): if level == 0: ul = UL(**self.attributes) else: ul = UL(_class=self['ul_class']) for item in data: (name, active, link) = item[:3] if isinstance(link,DIV): li = LI(link) elif 'no_link_url' in self.attributes and self['no_link_url']==link: li = LI(DIV(name)) elif link: li = LI(A(name, _href=link)) else: li = LI(A(name, _href='#', _onclick='javascript:void(0);return false;')) if len(item) > 3 and item[3]: li['_class'] = self['li_class'] li.append(self.serialize(item[3], level+1)) if active or ('active_url' in self.attributes and self['active_url']==link): if li['_class']: li['_class'] = li['_class']+' '+self['li_active'] else: li['_class'] = self['li_active'] ul.append(li) return ul def xml(self): return self.serialize(self.data, 0).xml() def embed64( filename = None, file = None, data = None, extension = 'image/gif', ): """ helper to encode the provided (binary) data into base64. 
:param filename: if provided, opens and reads this file in 'rb' mode :param file: if provided, reads this file :param data: if provided, uses the provided data """ if filename and os.path.exists(file): fp = open(filename, 'rb') data = fp.read() fp.close() data = base64.b64encode(data) return 'data:%s;base64,%s' % (extension, data) def test(): """ Example: >>> from validators import * >>> print DIV(A('click me', _href=URL(a='a', c='b', f='c')), BR(), HR(), DIV(SPAN(\"World\"), _class='unknown')).xml() <div><a href=\"/a/b/c\">click me</a><br /><hr /><div class=\"unknown\"><span>World</span></div></div> >>> print DIV(UL(\"doc\",\"cat\",\"mouse\")).xml() <div><ul><li>doc</li><li>cat</li><li>mouse</li></ul></div> >>> print DIV(UL(\"doc\", LI(\"cat\", _class='feline'), 18)).xml() <div><ul><li>doc</li><li class=\"feline\">cat</li><li>18</li></ul></div> >>> print TABLE(['a', 'b', 'c'], TR('d', 'e', 'f'), TR(TD(1), TD(2), TD(3))).xml() <table><tr><td>a</td><td>b</td><td>c</td></tr><tr><td>d</td><td>e</td><td>f</td></tr><tr><td>1</td><td>2</td><td>3</td></tr></table> >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_EXPR('int(value)<10'))) >>> print form.xml() <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" /></form> >>> print form.accepts({'myvar':'34'}, formname=None) False >>> print form.xml() <form action="" enctype="multipart/form-data" method="post"><input name="myvar" type="text" value="34" /><div class="error" id="myvar__error">invalid expression</div></form> >>> print form.accepts({'myvar':'4'}, formname=None, keepvalues=True) True >>> print form.xml() <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" value=\"4\" /></form> >>> form=FORM(SELECT('cat', 'dog', _name='myvar')) >>> print form.accepts({'myvar':'dog'}, formname=None, keepvalues=True) True >>> print form.xml() <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><select 
name=\"myvar\"><option value=\"cat\">cat</option><option selected=\"selected\" value=\"dog\">dog</option></select></form> >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_MATCH('^\w+$', 'only alphanumeric!'))) >>> print form.accepts({'myvar':'as df'}, formname=None) False >>> print form.xml() <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" value=\"as df\" /><div class=\"error\" id=\"myvar__error\">only alphanumeric!</div></form> >>> session={} >>> form=FORM(INPUT(value=\"Hello World\", _name=\"var\", requires=IS_MATCH('^\w+$'))) >>> if form.accepts({}, session,formname=None): print 'passed' >>> if form.accepts({'var':'test ', '_formkey': session['_formkey[None]']}, session, formname=None): print 'passed' """ pass class web2pyHTMLParser(HTMLParser): """ obj = web2pyHTMLParser(text) parses and html/xml text into web2py helpers. obj.tree contains the root of the tree, and tree can be manipulated >>> str(web2pyHTMLParser('hello<div a="b" c=3>wor&lt;ld<span>xxx</span>y<script/>yy</div>zzz').tree) 'hello<div a="b" c="3">wor&lt;ld<span>xxx</span>y<script></script>yy</div>zzz' >>> str(web2pyHTMLParser('<div>a<span>b</div>c').tree) '<div>a<span>b</span></div>c' >>> tree = web2pyHTMLParser('hello<div a="b">world</div>').tree >>> tree.element(_a='b')['_c']=5 >>> str(tree) 'hello<div a="b" c="5">world</div>' """ def __init__(self,text,closed=('input','link')): HTMLParser.__init__(self) self.tree = self.parent = TAG['']() self.closed = closed self.tags = [x for x in __all__ if isinstance(eval(x),DIV)] self.last = None self.feed(text) def handle_starttag(self, tagname, attrs): if tagname.upper() in self.tags: tag=eval(tagname.upper()) else: if tagname in self.closed: tagname+='/' tag = TAG[tagname]() for key,value in attrs: tag['_'+key]=value tag.parent = self.parent self.parent.append(tag) if not tag.tag.endswith('/'): self.parent=tag else: self.last = tag.tag[:-1] def handle_data(self,data): try: 
self.parent.append(data.encode('utf8','xmlcharref')) except: self.parent.append(data.decode('latin1').encode('utf8','xmlcharref')) def handle_charref(self,name): if name[1].lower()=='x': self.parent.append(unichr(int(name[2:], 16)).encode('utf8')) else: self.parent.append(unichr(int(name[1:], 10)).encode('utf8')) def handle_entityref(self,name): self.parent.append(unichr(name2codepoint[name]).encode('utf8')) def handle_endtag(self, tagname): # this deals with unbalanced tags if tagname==self.last: return while True: try: parent_tagname=self.parent.tag self.parent = self.parent.parent except: raise RuntimeError, "unable to balance tag %s" % tagname if parent_tagname[:len(tagname)]==tagname: break def markdown_serializer(text,tag=None,attr={}): if tag is None: return re.sub('\s+',' ',text) if tag=='br': return '\n\n' if tag=='h1': return '#'+text+'\n\n' if tag=='h2': return '#'*2+text+'\n\n' if tag=='h3': return '#'*3+text+'\n\n' if tag=='h4': return '#'*4+text+'\n\n' if tag=='p': return text+'\n\n' if tag=='b' or tag=='strong': return '**%s**' % text if tag=='em' or tag=='i': return '*%s*' % text if tag=='tt' or tag=='code': return '`%s`' % text if tag=='a': return '[%s](%s)' % (text,attr.get('_href','')) if tag=='img': return '![%s](%s)' % (attr.get('_alt',''),attr.get('_src','')) return text def markmin_serializer(text,tag=None,attr={}): # if tag is None: return re.sub('\s+',' ',text) if tag=='br': return '\n\n' if tag=='h1': return '# '+text+'\n\n' if tag=='h2': return '#'*2+' '+text+'\n\n' if tag=='h3': return '#'*3+' '+text+'\n\n' if tag=='h4': return '#'*4+' '+text+'\n\n' if tag=='p': return text+'\n\n' if tag=='li': return '\n- '+text.replace('\n',' ') if tag=='tr': return text[3:].replace('\n',' ')+'\n' if tag in ['table','blockquote']: return '\n-----\n'+text+'\n------\n' if tag in ['td','th']: return ' | '+text if tag in ['b','strong','label']: return '**%s**' % text if tag in ['em','i']: return "''%s''" % text if tag in ['tt']: return '``%s``' % 
text.strip() if tag in ['code']: return '``\n%s``' % text if tag=='a': return '[[%s %s]]' % (text,attr.get('_href','')) if tag=='img': return '[[%s %s left]]' % (attr.get('_alt','no title'),attr.get('_src','')) return text class MARKMIN(XmlComponent): """ For documentation: http://web2py.com/examples/static/markmin.html """ def __init__(self, text, extra={}, allowed={}, sep='p'): self.text = text self.extra = extra self.allowed = allowed self.sep = sep def xml(self): """ calls the gluon.contrib.markmin render function to convert the wiki syntax """ return render(self.text,extra=self.extra,allowed=self.allowed,sep=self.sep) def __str__(self): return self.xml() def flatten(self,render=None): """ return the text stored by the MARKMIN object rendered by the render function """ return self.text def elements(self, *args, **kargs): """ to be considered experimental since the behavior of this method is questionable another options could be TAG(self.text).elements(*args,**kargs) """ return [self.text] if __name__ == '__main__': import doctest doctest.testmod()
Python
""" Extract client information from http user agent The module does not try to detect all capabilities of browser in current form (it can easily be extended though). Aim is * fast * very easy to extend * reliable enough for practical purposes * and assist python web apps to detect clients. Taken from http://pypi.python.org/pypi/httpagentparser (MIT license) Modified my Ross Peoples for web2py to better support iPhone and iPad. """ import sys from storage import Storage class DetectorsHub(dict): _known_types = ['os', 'dist', 'flavor', 'browser'] def __init__(self, *args, **kw): dict.__init__(self, *args, **kw) for typ in self._known_types: self.setdefault(typ, []) self.registerDetectors() def register(self, detector): if detector.info_type not in self._known_types: self[detector.info_type] = [detector] self._known_types.insert(detector.order, detector.info_type) else: self[detector.info_type].append(detector) def reorderByPrefs(self, detectors, prefs): if prefs is None: return [] elif prefs == []: return detectors else: prefs.insert(0, '') def key_name(d): return d.name in prefs and prefs.index(d.name) or sys.maxint return sorted(detectors, key=key_name) def __iter__(self): return iter(self._known_types) def registerDetectors(self): detectors = [v() for v in globals().values() \ if DetectorBase in getattr(v, '__mro__', [])] for d in detectors: if d.can_register: self.register(d) class DetectorBase(object): name = "" # "to perform match in DetectorsHub object" info_type = "override me" result_key = "override me" order = 10 # 0 is highest look_for = "string to look for" skip_if_found = [] # strings if present stop processin can_register = False prefs = Storage() # dict(info_type = [name1, name2], ..) 
version_splitters = ["/", " "] _suggested_detectors = None def __init__(self): if not self.name: self.name = self.__class__.__name__ self.can_register = (self.__class__.__dict__.get('can_register', True)) def detect(self, agent, result): if agent and self.checkWords(agent): result[self.info_type] = Storage(name=self.name) version = self.getVersion(agent) if version: result[self.info_type].version = version return True return False def checkWords(self, agent): for w in self.skip_if_found: if w in agent: return False if self.look_for: return True return False def getVersion(self, agent): # -> version string /None vs = self.version_splitters return agent.split(self.look_for + vs[0])[-1].split(vs[1])[0].strip() class OS(DetectorBase): info_type = "os" can_register = False version_splitters = [";", " "] class Dist(DetectorBase): info_type = "dist" can_register = False class Flavor(DetectorBase): info_type = "flavor" can_register = False class Browser(DetectorBase): info_type = "browser" can_register = False class Macintosh(OS): look_for = 'Macintosh' prefs = Storage(dist=None) def getVersion(self, agent): pass class Firefox(Browser): look_for = "Firefox" class Konqueror(Browser): look_for = "Konqueror" version_splitters = ["/", ";"] class Opera(Browser): look_for = "Opera" def getVersion(self, agent): return agent.split(self.look_for)[1][1:].split(' ')[0] class Netscape(Browser): look_for = "Netscape" class MSIE(Browser): look_for = "MSIE" skip_if_found = ["Opera"] name = "Microsoft Internet Explorer" version_splitters = [" ", ";"] class Galeon(Browser): look_for = "Galeon" class Safari(Browser): look_for = "Safari" def checkWords(self, agent): unless_list = ["Chrome", "OmniWeb"] if self.look_for in agent: for word in unless_list: if word in agent: return False return True def getVersion(self, agent): if "Version/" in agent: return agent.split('Version/')[-1].split(' ')[0].strip() else: # Mobile Safari return agent.split('Safari ')[-1].split(' ')[0].strip() class 
Linux(OS): look_for = 'Linux' prefs = Storage(browser=["Firefox"], dist=["Ubuntu", "Android"], flavor=None) def getVersion(self, agent): pass class Macintosh(OS): look_for = 'Macintosh' prefs = Storage(dist=None, flavor=['MacOS']) def getVersion(self, agent): pass class MacOS(Flavor): look_for = 'Mac OS' prefs = Storage(browser=['Firefox', 'Opera', "Microsoft Internet Explorer"]) def getVersion(self, agent): version_end_chars = [';', ')'] part = agent.split('Mac OS')[-1].strip() for c in version_end_chars: if c in part: version = part.split(c)[0] break return version.replace('_', '.') class Windows(OS): look_for = 'Windows' prefs = Storage(browser=["Microsoft Internet Explorer", 'Firefox'], dict=None, flavor=None) def getVersion(self, agent): v = agent.split('Windows')[-1].split(';')[0].strip() if ')' in v: v = v.split(')')[0] return v class Ubuntu(Dist): look_for = 'Ubuntu' version_splitters = ["/", " "] prefs = Storage(browser=['Firefox']) class Debian(Dist): look_for = 'Debian' version_splitters = ["/", " "] prefs = Storage(browser=['Firefox']) class Chrome(Browser): look_for = "Chrome" version_splitters = ["/", " "] class ChromeOS(OS): look_for = "CrOS" version_splitters = [" ", " "] prefs = Storage(browser=['Chrome']) def getVersion(self, agent): vs = self.version_splitters return agent.split(self.look_for+vs[0])[-1].split(vs[1])[1].strip()[:-1] class Android(Dist): look_for = 'Android' def getVersion(self, agent): return agent.split('Android')[-1].split(';')[0].strip() class iPhone(Dist): look_for = 'iPhone' def getVersion(self, agent): version_end_chars = ['like', ';', ')'] part = agent.split('CPU OS')[-1].strip() for c in version_end_chars: if c in part: version = 'iOS ' + part.split(c)[0].strip() break return version.replace('_', '.') class iPad(Dist): look_for = 'iPad' def getVersion(self, agent): version_end_chars = ['like', ';', ')'] part = agent.split('CPU OS')[-1].strip() for c in version_end_chars: if c in part: version = 'iOS ' + 
part.split(c)[0].strip() break return version.replace('_', '.') detectorshub = DetectorsHub() def detect(agent): result = Storage() prefs = Storage() _suggested_detectors = [] for info_type in detectorshub: if not _suggested_detectors: detectors = detectorshub[info_type] _d_prefs = prefs.get(info_type, []) detectors = detectorshub.reorderByPrefs(detectors, _d_prefs) if "detector" in locals(): detector._suggested_detectors = detectors else: detectors = _suggested_detectors for detector in detectors: print "detector name: ", detector.name if detector.detect(agent, result): prefs = detector.prefs _suggested_detectors = detector._suggested_detectors break return result class Result(Storage): def __missing__(self, k): return "" def detect(agent): result = Result() _suggested_detectors = [] for info_type in detectorshub: detectors = _suggested_detectors or detectorshub[info_type] for detector in detectors: if detector.detect(agent, result): if detector.prefs and not detector._suggested_detectors: _suggested_detectors = detectorshub.reorderByPrefs( detectors, detector.prefs.get(info_type)) detector._suggested_detectors = _suggested_detectors break return result def simple_detect(agent): """ -> (os, browser) # tuple of strings """ result = detect(agent) os_list = [] if 'flavor' in result: os_list.append(result['flavor']['name']) if 'dist' in result: os_list.append(result['dist']['name']) if 'os' in result: os_list.append(result['os']['name']) os = os_list and " ".join(os_list) or "Unknown OS" os_version = os_list and (result['flavor'] and result['flavor'].get( 'version')) or (result['dist'] and result['dist'].get('version')) \ or (result['os'] and result['os'].get('version')) or "" browser = 'browser' in result and result['browser']['name'] \ or 'Unknown Browser' browser_version = 'browser' in result \ and result['browser'].get('version') or "" if browser_version: browser = " ".join((browser, browser_version)) if os_version: os = " ".join((os, os_version)) return os, 
browser if __name__ == '__main__': import time import unittest data = ( ("Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-GB; rv:1.9.0.10) Gecko/2009042315 Firefox/3.0.10", ('MacOS Macintosh X 10.5', 'Firefox 3.0.10'), {'flavor': {'version': 'X 10.5', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '3.0.10', 'name': 'Firefox'}},), ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_6) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.3 Safari/534.24,gzip(gfe)", ('MacOS Macintosh X 10.6.6', 'Chrome 11.0.696.3'), {'flavor': {'version': 'X 10.6.6', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '11.0.696.3', 'name': 'Chrome'}},), ("Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2) Gecko/20100308 Ubuntu/10.04 (lucid) Firefox/3.6 GTB7.1", ('Ubuntu Linux 10.04', 'Firefox 3.6'), {'dist': {'version': '10.04', 'name': 'Ubuntu'}, 'os': {'name': 'Linux'}, 'browser': {'version': '3.6', 'name': 'Firefox'}},), ("Mozilla/5.0 (Linux; U; Android 2.2.1; fr-ch; A43 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1", ('Android Linux 2.2.1', 'Safari 4.0'), {'dist': {'version': '2.2.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},), ("Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3", ('MacOS IPhone X', 'Safari 3.0'), {'flavor': {'version': 'X', 'name': 'MacOS'}, 'dist': {'version': 'X', 'name': 'IPhone'}, 'browser': {'version': '3.0', 'name': 'Safari'}},), ("Mozilla/5.0 (X11; CrOS i686 0.0.0) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.27 Safari/534.24,gzip(gfe)", ('ChromeOS 0.0.0', 'Chrome 11.0.696.27'), {'os': {'name': 'ChromeOS', 'version': '0.0.0'}, 'browser': {'name': 'Chrome', 'version': '11.0.696.27'}},), ("Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.1) Opera 7.02 [en]", ('Windows NT 5.1', 'Opera 7.02'), {'os': {'name': 'Windows', 'version': 'NT 5.1'}, 
'browser': {'name': 'Opera', 'version': '7.02'}},), ("Opera/9.80 (X11; Linux i686; U; en) Presto/2.9.168 Version/11.50", ("Linux", "Opera 9.80"), {"os": {"name": "Linux"}, "browser": {"name": "Opera", "version": "9.80"}},), ("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20060127 Netscape/8.1", ("Windows NT 5.1", "Netscape 8.1"), {'os': {'name': 'Windows', 'version': 'NT 5.1'}, 'browser': {'name': 'Netscape', 'version': '8.1'}},), ) class TestHAP(unittest.TestCase): def setUp(self): self.harass_repeat = 1000 self.data = data def test_simple_detect(self): for agent, simple_res, res in data: self.assertEqual(simple_detect(agent), simple_res) def test_detect(self): for agent, simple_res, res in data: self.assertEqual(detect(agent), res) def test_harass(self): then = time.time() for agent, simple_res, res in data * self.harass_repeat: detect(agent) time_taken = time.time() - then no_of_tests = len(self.data) * self.harass_repeat print "\nTime taken for %s detecttions: %s" \ % (no_of_tests, time_taken) print "Time taken for single detecttion: ", \ time_taken / (len(self.data) * self.harass_repeat) unittest.main()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds Visit http://feedparser.org/ for the latest version Visit http://feedparser.org/docs/ for the latest documentation Required: Python 2.1 or later Recommended: Python 2.3 or later Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> """ # + "$Revision: 1.92 $"[11:15] + "-cvs" __version__ = '4.1' __license__ = \ """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" __author__ = 'Mark Pilgrim <http://diveintomark.org/>' __contributors__ = ['Jason Diamond <http://injektilo.org/>', 'John Beimler <http://john.beimler.org/>', 'Fazal Majid <http://www.majid.info/mylos/weblog/>' , 'Aaron Swartz <http://aaronsw.com/>', 'Kevin Marks <http://epeus.blogspot.com/>'] _debug = 0 # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = 'UniversalFeedParser/%s +http://feedparser.org/'\ % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = \ 'application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1' # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ['drv_libxml2'] # If you want feedparser to automatically run HTML markup through HTML Tidy, set # this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> # or utidylib <http://utidylib.berlios.de/>. TIDY_MARKUP = 0 # List of Python interfaces for HTML Tidy, in order of preference. 
Only useful # if TIDY_MARKUP = 1 PREFERRED_TIDY_INTERFACES = ['uTidy', 'mxTidy'] # ---------- required modules (should come with any Python distribution) ---------- import sgmllib import re import sys import copy import urlparse import time import rfc822 import types import cgi import urllib import urllib2 try: from cStringIO import StringIO as _StringIO except: from StringIO import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except: gzip = None try: import zlib except: zlib = None # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. try: import xml.sax xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers from xml.sax.saxutils import escape as _xmlescape _XML_AVAILABLE = 1 except: _XML_AVAILABLE = 0 def _xmlescape(data): data = data.replace('&', '&amp;') data = data.replace('>', '&gt;') data = data.replace('<', '&lt;') return data # base64 support for Atom feeds that contain embedded binary data try: import base64 import binascii except: base64 = binascii = None # cjkcodecs and iconv_codec provide support for more character encodings. 
# Both are available from http://cjkpython.i18n.org/ try: import cjkcodecs.aliases except: pass try: import iconv_codec except: pass # chardet library auto-detects character encodings # Download from http://chardet.feedparser.org/ try: import chardet if _debug: import chardet.constants chardet.constants._debug = 1 except: chardet = None # ---------- don't touch these ---------- class ThingsNobodyCaresAboutButMe(Exception): pass class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass class UndeclaredNamespace(Exception): pass sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') sgmllib.special = re.compile('<!') sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]') SUPPORTED_VERSIONS = { '': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom10': 'Atom 1.0', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', 'hotrss': 'Hot RSS', } try: UserDict = dict except NameError: # Python 2.1 does not have dict from UserDict import UserDict def dict(aList): rc = {} for (k, v) in aList: rc[k] = v return rc class FeedParserDict(UserDict): keymap = { 'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'updated', 'date_parsed': 'updated_parsed', 'description': ['subtitle', 'summary'], 'url': ['href'], 'modified': 'updated', 'modified_parsed': 'updated_parsed', 'issued': 'published', 'issued_parsed': 'published_parsed', 'copyright': 'rights', 'copyright_detail': 'rights_detail', 'tagline': 'subtitle', 'tagline_detail': 'subtitle_detail', } def __getitem__(self, key): if key == 'category': return UserDict.__getitem__(self, 'tags')[0]['term'] if key == 'categories': return 
[(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')] realkey = self.keymap.get(key, key) if type(realkey) == types.ListType: for k in realkey: if UserDict.has_key(self, k): return UserDict.__getitem__(self, k) if UserDict.has_key(self, key): return UserDict.__getitem__(self, key) return UserDict.__getitem__(self, realkey) def __setitem__(self, key, value): for k in self.keymap.keys(): if key == k: key = self.keymap[k] if type(key) == types.ListType: key = key[0] return UserDict.__setitem__(self, key, value) def get(self, key, default=None): if self.has_key(key): return self[key] else: return default def setdefault(self, key, value): if not self.has_key(key): self[key] = value return self[key] def has_key(self, key): try: return hasattr(self, key) or UserDict.has_key(self, key) except AttributeError: return False def __getattr__(self, key): try: return self.__dict__[key] except KeyError: pass try: assert not key.startswith('_') return self.__getitem__(key) except: raise AttributeError, "object has no attribute '%s'" % key def __setattr__(self, key, value): if key.startswith('_') or key == 'data': self.__dict__[key] = value else: return self.__setitem__(key, value) def __contains__(self, key): return self.has_key(key) def zopeCompatibilityHack(): global FeedParserDict del FeedParserDict def FeedParserDict(aDict=None): rc = {} if aDict: rc.update(aDict) return rc _ebcdic_to_ascii_map = None def _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if not _ebcdic_to_ascii_map: emap = ( 0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15, 16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31, 128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7, 144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26, 32, 160, 161, 162, 163, 164, 165, 166, 167, 168, 91, 46, 60, 40, 43, 33, 38, 169, 170, 171, 172, 173, 174, 175, 176, 177, 93, 36, 42, 41, 59, 94, 45, 47, 178, 179, 180, 181, 182, 183, 184, 185, 
124, 44, 37, 95, 62, 63, 186, 187, 188, 189, 190, 191, 192, 193, 194, 96, 58, 35, 64, 39, 61, 34, 195, 97, 98, 99, 100, 101, 102, 103, 104, 105, 196, 197, 198, 199, 200, 201, 202, 106, 107, 108, 109, 110, 111, 112, 113, 114, 203, 204, 205, 206, 207, 208, 209, 126, 115, 116, 117, 118, 119, 120, 121, 122, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 123, 65, 66, 67, 68, 69, 70, 71, 72, 73, 232, 233, 234, 235, 236, 237, 125, 74, 75, 76, 77, 78, 79, 80, 81, 82, 238, 239, 240, 241, 242, 243, 92, 159, 83, 84, 85, 86, 87, 88, 89, 90, 244, 245, 246, 247, 248, 249, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 250, 251, 252, 253, 254, 255, ) import string _ebcdic_to_ascii_map = string.maketrans(''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(_ebcdic_to_ascii_map) _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) return urlparse.urljoin(base, uri) class _FeedParserMixin: namespaces = { '': '', 'http://backend.userland.com/rss': '', 'http://blogs.law.harvard.edu/tech/rss': '', 'http://purl.org/rss/1.0/': '', 'http://my.netscape.com/rdf/simple/0.9/': '', 'http://example.com/newformat#': '', 'http://example.com/necho': '', 'http://purl.org/echo/': '', 'uri/of/echo/namespace#': '', 'http://purl.org/pie/': '', 'http://purl.org/atom/ns#': '', 'http://www.w3.org/2005/Atom': '', 'http://purl.org/rss/1.0/modules/rss091#': '', 'http://webns.net/mvcb/': 'admin', 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', 'http://media.tangent.org/rss/1.0/': 'audio', 'http://backend.userland.com/blogChannelModule': 'blogChannel', 'http://web.resource.org/cc/': 'cc', 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', 'http://purl.org/rss/1.0/modules/company': 'co', 'http://purl.org/rss/1.0/modules/content/': 'content', 
        'http://my.theinfo.org/changed/1.0/rss/': 'cp',
        'http://purl.org/dc/elements/1.1/': 'dc',
        'http://purl.org/dc/terms/': 'dcterms',
        'http://purl.org/rss/1.0/modules/email/': 'email',
        'http://purl.org/rss/1.0/modules/event/': 'ev',
        'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
        'http://freshmeat.net/rss/fm/': 'fm',
        'http://xmlns.com/foaf/0.1/': 'foaf',
        'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
        'http://postneo.com/icbm/': 'icbm',
        'http://purl.org/rss/1.0/modules/image/': 'image',
        'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://purl.org/rss/1.0/modules/link/': 'l',
        'http://search.yahoo.com/mrss': 'media',
        'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
        'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
        'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
        'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
        'http://purl.org/rss/1.0/modules/reference/': 'ref',
        'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
        'http://purl.org/rss/1.0/modules/search/': 'search',
        'http://purl.org/rss/1.0/modules/slash/': 'slash',
        'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
        'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
        'http://hacks.benhammersley.com/rss/streaming/': 'str',
        'http://purl.org/rss/1.0/modules/subscription/': 'sub',
        'http://purl.org/rss/1.0/modules/syndication/': 'sy',
        'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
        'http://purl.org/rss/1.0/modules/threading/': 'thr',
        'http://purl.org/rss/1.0/modules/textinput/': 'ti',
        'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
        'http://wellformedweb.org/commentAPI/': 'wfw',
        'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
        'http://www.w3.org/1999/xhtml': 'xhtml',
        'http://www.w3.org/XML/1998/namespace': 'xml',
        'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
        }
    # Lower-cased view of `namespaces`, filled lazily in __init__.
    _matchnamespaces = {}

    # Element names whose text content is a URI that may be relative and
    # should be resolved against the current xml:base.
    can_be_relative_uri = [
        'link',
        'id',
        'wfw_comment',
        'wfw_commentrss',
        'docs',
        'url',
        'href',
        'comments',
        'license',
        'icon',
        'logo',
        ]

    # Elements whose embedded markup may contain relative URIs.
    can_contain_relative_uris = [
        'content',
        'title',
        'summary',
        'info',
        'tagline',
        'subtitle',
        'copyright',
        'rights',
        'description',
        ]

    # Elements whose embedded markup must be sanitized before output.
    can_contain_dangerous_markup = [
        'content',
        'title',
        'summary',
        'info',
        'tagline',
        'subtitle',
        'copyright',
        'rights',
        'description',
        ]

    # MIME types treated as HTML for resolution/sanitization purposes.
    html_types = ['text/html', 'application/xhtml+xml']

    def __init__(self, baseuri=None, baselang=None, encoding='utf-8', ):
        # baseuri/baselang seed xml:base / xml:lang tracking; encoding is
        # the character encoding of the document being parsed.
        if _debug:
            sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            for (k, v) in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict()  # feed-level data
        self.encoding = encoding  # character encoding
        self.entries = []  # list of entry-level data
        self.version = ''  # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {}  # dictionary of namespaces defined by the feed

        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang

    def unknown_starttag(self, tag, attrs):
        # Central start-tag dispatcher: normalizes attributes, tracks
        # xml:base / xml:lang / namespaces, then delegates to a
        # '_start_<prefix><localname>' handler if one exists.
        if _debug:
            sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for (k, v) in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for (k, v) in attrs]

        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)

        # track namespaces
        for (prefix, uri) in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)

        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)

        # match namespaces
        if tag.find(':') != -1:
            (prefix, suffix) = tag.split(':', 1)
        else:
            (prefix, suffix) = ('', tag)
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if not prefix and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if not prefix and tag not in (
            'title',
            'link',
            'description',
            'url',
            'href',
            'width',
            'height',
            ):
            self.inimage = 0

        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)

    def unknown_endtag(self, tag):
        # Central end-tag dispatcher, mirror of unknown_starttag: delegates
        # to '_end_<prefix><localname>' and unwinds xml:base / xml:lang.
        if _debug:
            sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') != -1:
            (prefix, suffix) = tag.split(':', 1)
        else:
            (prefix, suffix) = ('', tag)
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)

        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # reconstruct the closing tag for inline XHTML content
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)

        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack:  # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # XML-significant characters are kept as references; everything
        # else is decoded to a UTF-8 byte string.
        if not self.elementstack:
            return
        ref = ref.lower()
        if ref in (
            '34',
            '38',
            '39',
            '60',
            '62',
            'x22',
            'x26',
            'x27',
            'x3c',
            'x3e',
            ):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # The five XML built-ins stay as references; named HTML entities
        # are resolved to their codepoint where possible.
        if not self.elementstack:
            return
        if _debug:
            sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'):
                    # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1])  # not in latin-1
                return ord(k)
            try:
                name2cp(ref)
            except KeyError:
                # unknown entity: pass it through unchanged
                text = '&%s;' % ref
            else:
                text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack:
            return
        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)

    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->; ignored
        pass

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>; ignored
        pass

    def handle_decl(self, text):
        # declarations (e.g. DOCTYPE) are ignored at this level
        pass

    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if _debug:
            sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i + 9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # unterminated CDATA: consume to end of input
                k = len(self.rawdata)
            self.handle_data(_xmlescape(self.rawdata[i + 9:k]), 0)
            return k + 3
        else:
            k = self.rawdata.find('>', i)
            return k + 1

    def mapContentType(self, contentType):
        # Normalize shorthand type names ('text', 'html', 'xhtml') to
        # full MIME types; anything else passes through lower-cased.
        contentType = contentType.lower()
        if contentType == 'text':
            contentType = 'text/plain'
        elif contentType == 'html':
            contentType = 'text/html'
        elif contentType == 'xhtml':
            contentType = 'application/xhtml+xml'
        return contentType

    def trackNamespace(self, prefix, uri):
        # Record a declared namespace, and use well-known namespace URIs
        # to guess the feed version if it is not already known.
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') != -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri

    def resolveURI(self, uri):
        # Resolve a possibly-relative URI against the current xml:base.
        return _urljoin(self.baseuri or '', uri)

    def decodeEntities(self, element, data):
        # Hook for subclasses (the loose parser) to decode entities.
        return data

    def push(self, element, expectingText):
        # Start accumulating character data for `element`.  Stack entries
        # are [element name, expectingText flag, list of text pieces].
        self.elementstack.append([element, expectingText, []])

    def pop(self, element, stripWhitespace=1):
        # Finish accumulating `element`: join its text pieces, run them
        # through decoding/resolution/sanitization, and store the result
        # in the appropriate feed or entry context.  Returns the output.
        if not self.elementstack:
            return
        if self.elementstack[-1][0] != element:
            # mismatched end tag: leave the stack alone
            return

        (element, expectingText, pieces) = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText:
            return output

        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass

        # resolve relative URIs
        if element in self.can_be_relative_uri and output:
            output = self.resolveURI(output)

        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)

        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass

        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)

        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)

        # best-effort decode to unicode using the document encoding
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass

        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output

        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                # entries may have multiple content elements
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and not self.intextinput and not self.inimage:
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output

    def pushContent(self, tag, attrsD, defaultContentType, expectingText, ):
        # Enter a content-bearing element: record its type/language/base
        # in contentparams and start accumulating text.
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)

    def popContent(self, tag):
        # Leave a content-bearing element; returns the processed text.
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value

    def _mapToStandardPrefix(self, name):
        # Rewrite 'someprefix:local' using the canonical prefix mapping.
        colonpos = name.find(':')
        if colonpos != -1:
            prefix = name[:colonpos]
            suffix = name[colonpos + 1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name

    def _getAttribute(self, attrsD, name):
        return attrsD.get(self._mapToStandardPrefix(name))

    def _isBase64(self, attrsD, contentparams):
        # Heuristic: content is base64 unless its type is clearly textual
        # or XML.  NOTE(review): this reads self.contentparams rather than
        # the `contentparams` argument — appears intentional since callers
        # pass the just-assigned self.contentparams; confirm before relying
        # on the parameter.
        if attrsD.get('mode', '') == 'base64':
            return 1
        if self.contentparams['type'].startswith('text/'):
            return 0
        if self.contentparams['type'].endswith('+xml'):
            return 0
        if self.contentparams['type'].endswith('/xml'):
            return 0
        return 1

    def _itsAnHrefDamnIt(self, attrsD):
        # Normalize url/uri/href attribute variants to a single 'href'.
        href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
        if href:
            try:
                del attrsD['url']
            except KeyError:
                pass
            try:
                del attrsD['uri']
            except KeyError:
                pass
            attrsD['href'] = href
        return attrsD

    def _save(self, key, value):
        # Store value in the current context only if the key is unset.
        context = self._getContext()
        context.setdefault(key, value)

    def _start_rss(self, attrsD):
        # Determine the RSS version from the <rss version="..."> attribute.
        versionmap = {
            '0.91': 'rss091u',
            '0.92': 'rss092',
            '0.93': 'rss093',
            '0.94': 'rss094',
            }
        if not self.version:
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'

    def _start_dlhottitles(self, attrsD):
        self.version = 'hotrss'

    def _start_channel(self, attrsD):
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel

    def _cdf_common(self, attrsD):
        # CDF feeds put lastmod/href as attributes; replay them through
        # the regular element handlers by faking start/data/end.
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()

    def _start_feed(self, attrsD):
        # Atom feed root; version attribute distinguishes 0.1/0.2/0.3.
        self.infeed = 1
        versionmap = {'0.1': 'atom01', '0.2': 'atom02', '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'

    def _end_channel(self):
        self.infeed = 0
    _end_feed = _end_channel

    def _start_image(self, attrsD):
        self.inimage = 1
        self.push('image', 0)
        context = self._getContext()
        context.setdefault('image', FeedParserDict())

    def _end_image(self):
        self.pop('image')
        self.inimage = 0

    def _start_textinput(self, attrsD):
        self.intextinput = 1
        self.push('textinput', 0)
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
    _start_textInput = _start_textinput

    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput

    def _start_author(self, attrsD):
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author

    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author

    def _start_itunes_owner(self, attrsD):
        self.inpublisher = 1
        self.push('publisher', 0)

    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')

    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)

    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0

    def _start_dc_contributor(self, attrsD):
        # dc:contributor carries the name directly as element text
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)

    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0

    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name

    def _end_name(self):
        # Route the name to whichever construct we are currently inside.
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name

    def _start_width(self, attrsD):
        self.push('width', 0)

    def _end_width(self):
        value = self.pop('width')
        try:
            value = int(value)
        except:
            # non-numeric width: default to 0
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['width'] = value

    def _start_height(self, attrsD):
        self.push('height', 0)

    def _end_height(self):
        value = self.pop('height')
        try:
            value = int(value)
        except:
            # non-numeric height: default to 0
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['height'] = value

    def _start_url(self, attrsD):
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url

    def _end_url(self):
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url

    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email

    def _end_email(self):
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email

    def _getContext(self):
        # The dict currently being populated: source > entry > feed.
        if self.insource:
            context = self.sourcedata
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context

    def _save_author(self, key, value, prefix='author', ):
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()

    def _save_contributor(self, key, value):
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value

    def _sync_author_detail(self, key='author'):
        # Keep the flat '<key>' string and the '<key>_detail' dict in sync:
        # either compose the string from the detail, or parse an email
        # address out of the string into the detail.
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author = context.get(key)
            if not author:
                return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch:
                return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and author[0] == '(':
                author = author[1:]
            if author and author[-1] == ')':
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email

    def _start_subtitle(self, attrsD):
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle

    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle

    def _start_rights(self, attrsD):
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights

    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights

    def _start_item(self, attrsD):
        # Begin a new entry; rdf:about (RSS 1.0) becomes the entry id.
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item

    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item

    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language

    def _end_dc_language(self):
        self.lang = self.pop('language')
    _end_language = _end_dc_language

    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher

    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher

    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published

    def _end_published(self):
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published

    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated

    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated

    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created

    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created

    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)

    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))

    def _start_cc_license(self, attrsD):
        # cc:license carries its value in rdf:resource, so the element is
        # pushed and popped immediately here (no _end_cc_license handler).
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')

    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)

    def _end_creativecommons_license(self):
        self.pop('license')

    def _addTag(self, term, scheme, label, ):
        # Append a tag dict to the context's 'tags' list, skipping exact
        # duplicates and fully-empty tags.
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if not term and not scheme and not label:
            return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            # NOTE(review): appends a fresh equal dict rather than `value`;
            # behaviorally identical since the contents match.
            tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))

    def _start_category(self, attrsD):
        if _debug:
            sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category

    def _end_itunes_keywords(self):
        # itunes:keywords is a whitespace-separated list of terms
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)

    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)

    def _end_category(self):
        value = self.pop('category')
        if not value:
            return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            # fill in the term of a tag created from attributes only
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category

    def _start_cloud(self, attrsD):
        self._getContext()['cloud'] = FeedParserDict(attrsD)

    def _start_link(self, attrsD):
        # Record the link in context['links']; an alternate HTML link also
        # becomes the context's primary 'link'.  Only links without href
        # expect element text.
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            expectingText = 0
            if attrsD.get('rel') == 'alternate' and self.mapContentType(attrsD.get('type')) in self.html_types:
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link

    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link

    def _start_guid(self, attrsD):
        self.guidislink = attrsD.get('ispermalink', 'true') == 'true'
        self.push('id', 1)

    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)

    def _start_title(self, attrsD):
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title

    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title

    def _start_description(self, attrsD):
        # A second description after a summary is treated as full content.
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)

    def _start_abstract(self, attrsD):
        self.pushContent('description',
attrsD, 'text/plain', self.infeed or self.inentry or self.insource) def _end_description(self): if self._summaryKey == 'content': self._end_content() else: value = self.popContent('description') context = self._getContext() if self.intextinput: context['textinput']['description'] = value elif self.inimage: context['image']['description'] = value self._summaryKey = None _end_abstract = _end_description def _start_info(self, attrsD): self.pushContent('info', attrsD, 'text/plain', 1) _start_feedburner_browserfriendly = _start_info def _end_info(self): self.popContent('info') _end_feedburner_browserfriendly = _end_info def _start_generator(self, attrsD): if attrsD: attrsD = self._itsAnHrefDamnIt(attrsD) if attrsD.has_key('href'): attrsD['href'] = self.resolveURI(attrsD['href']) self._getContext()['generator_detail'] = FeedParserDict(attrsD) self.push('generator', 1) def _end_generator(self): value = self.pop('generator') context = self._getContext() if context.has_key('generator_detail'): context['generator_detail']['name'] = value def _start_admin_generatoragent(self, attrsD): self.push('generator', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('generator') self._getContext()['generator_detail'] = FeedParserDict({'href' : value}) def _start_admin_errorreportsto(self, attrsD): self.push('errorreportsto', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('errorreportsto') def _start_summary(self, attrsD): context = self._getContext() if context.has_key('summary'): self._summaryKey = 'content' self._start_content(attrsD) else: self._summaryKey = 'summary' self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) _start_itunes_summary = _start_summary def _end_summary(self): if self._summaryKey == 'content': self._end_content() else: self.popContent(self._summaryKey or 'summary') self._summaryKey = None _end_itunes_summary = _end_summary def 
_start_enclosure(self, attrsD): attrsD = self._itsAnHrefDamnIt(attrsD) self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD)) href = attrsD.get('href') if href: context = self._getContext() if not context.get('id'): context['id'] = href def _start_source(self, attrsD): self.insource = 1 def _end_source(self): self.insource = 0 self._getContext()['source'] = copy.deepcopy(self.sourcedata) self.sourcedata.clear() def _start_content(self, attrsD): self.pushContent('content', attrsD, 'text/plain', 1) src = attrsD.get('src') if src: self.contentparams['src'] = src self.push('content', 1) def _start_prodlink(self, attrsD): self.pushContent('content', attrsD, 'text/html', 1) def _start_body(self, attrsD): self.pushContent('content', attrsD, 'application/xhtml+xml', 1) _start_xhtml_body = _start_body def _start_content_encoded(self, attrsD): self.pushContent('content', attrsD, 'text/html', 1) _start_fullitem = _start_content_encoded def _end_content(self): copyToDescription = \ self.mapContentType(self.contentparams.get('type'))\ in ['text/plain'] + self.html_types value = self.popContent('content') if copyToDescription: self._save('description', value) _end_body = _end_content _end_xhtml_body = _end_content _end_content_encoded = _end_content _end_fullitem = _end_content _end_prodlink = _end_content def _start_itunes_image(self, attrsD): self.push('itunes_image', 0) self._getContext()['image'] = FeedParserDict({'href' : attrsD.get('href')}) _start_itunes_link = _start_itunes_image def _end_itunes_block(self): value = self.pop('itunes_block', 0) self._getContext()['itunes_block'] = value == 'yes' and 1 or 0 def _end_itunes_explicit(self): value = self.pop('itunes_explicit', 0) self._getContext()['itunes_explicit'] = value == 'yes' and 1\ or 0 if _XML_AVAILABLE: class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): def __init__( self, baseuri, baselang, encoding, ): if _debug: sys.stderr.write('trying StrictFeedParser\n') 
xml.sax.handler.ContentHandler.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) self.bozo = 0 self.exc = None def startPrefixMapping(self, prefix, uri): self.trackNamespace(prefix, uri) def startElementNS( self, name, qname, attrs, ): (namespace, localname) = name lowernamespace = str(namespace or '').lower() if lowernamespace.find('backend.userland.com/rss') != -1: # match any backend.userland.com namespace namespace = 'http://backend.userland.com/rss' lowernamespace = namespace if qname and qname.find(':') > 0: givenprefix = qname.split(':')[0] else: givenprefix = None prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if givenprefix and (prefix == None or prefix == '' and lowernamespace == '')\ and not self.namespacesInUse.has_key(givenprefix): raise UndeclaredNamespace, \ "'%s' is not associated with a namespace"\ % givenprefix if prefix: localname = prefix + ':' + localname localname = str(localname).lower() if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % ( qname, namespace, givenprefix, prefix, attrs.items(), localname, )) # qname implementation is horribly broken in Python 2.1 (it # doesn't report any), and slightly broken in Python 2.2 (it # doesn't report the xml: namespace). So we match up namespaces # with a known list first, and then possibly override them with # the qnames the SAX parser gives us (if indeed it gives us any # at all). Thanks to MatejC for helping me test this and # tirelessly telling me that it didn't work yet. 
            attrsD = {}
            for ((namespace, attrlocalname), attrvalue) in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                # qnames, when reported, override the namespace-matched names
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())

        def characters(self, text):
            self.handle_data(text)

        def endElementNS(self, name, qname):
            (namespace, localname) = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)

        def error(self, exc):
            # recoverable parse error: mark the feed as bozo but keep going
            self.bozo = 1
            self.exc = exc

        def fatalError(self, exc):
            self.error(exc)
            raise exc

class _BaseHTMLProcessor(sgmllib.SGMLParser):
    # tags that must be regenerated as empty elements (no closing tag)
    elements_no_end_tag = [
        'area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
        'img', 'input', 'isindex', 'link', 'meta', 'param',
        ]

    def __init__(self, encoding):
        self.encoding = encoding
        if _debug:
            sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        # pieces accumulates the regenerated markup; output() joins it
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        # expand XML-style short tags into HTML-friendly equivalents
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def feed(self, data):
        # neutralize markup declarations that aren't DOCTYPE/comment/CDATA
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        # data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants
        attrs = [(k.lower(), v) for (k, v) in attrs]
        # 'rel' and 'type' attribute values are case-insensitive
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for (k, v) in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug:
            sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for (key, value) in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for (key, value) in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append('</%(tag)s>' % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug:
            sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # override of sgmllib's declaration-name scanner with a more
        # permissive name pattern (allows '.' and ':' in names)
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return (None, -1)
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if i + len(s) == n:
                return (None, -1)  # end of buffer
            return (name.lower(), m.end())
        else:
            self.handle_data(rawdata)
            # self.updatepos(declstartpos, i)
            return (None, -1)

    def output(self):
        """Return processed HTML as a single string"""
        return ''.join([str(p) for p in self.pieces])

class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    # sgmllib-based fallback parser used when no XML parser is available
    # or when strict XML parsing has already failed
    def __init__(self, baseuri, baselang, encoding,):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)

    def decodeEntities(self, element, data):
        # decode numeric references to the five XML special characters
        # into their named-entity forms
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # non-XML content: fully decode the named entities as well
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data

class _RelativeURIResolver(_BaseHTMLProcessor):
    # (tag, attribute) pairs whose values are URIs that may be relative
    relative_uris = [
        ('a', 'href'), ('applet', 'codebase'), ('area', 'href'),
        ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'),
        ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'),
        ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'),
        ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'),
        ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'),
        ('link', 'href'), ('object', 'classid'), ('object', 'codebase'),
        ('object', 'data'), ('object', 'usemap'), ('q', 'cite'),
        ('script', 'src'),
        ]

    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri, uri)

    def unknown_starttag(self, tag, attrs):
        attrs = self.normalize_attrs(attrs)
        # resolve only the attributes known to hold URIs for this tag
        attrs = [(key, (tag, key) in self.relative_uris and self.resolveURI(value) or value) for (key, value) in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    # rewrite all relative URIs in htmlSource as absolute, using baseURI
    if _debug:
        sys.stderr.write('entering _resolveRelativeURIs\n')
    p = _RelativeURIResolver(baseURI, encoding)
    p.feed(htmlSource)
    return p.output()

class _HTMLSanitizer(_BaseHTMLProcessor):
    # whitelist of HTML elements allowed through sanitization
    acceptable_elements = [
        'a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
        'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code',
        'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt',
        'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5',
        'h6', 'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend',
        'li', 'map', 'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q',
        's', 'samp', 'select', 'small', 'span', 'strike', 'strong', 'sub',
        'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead',
        'tr', 'tt', 'u', 'ul', 'var',
        ]
    # whitelist of attributes allowed on the acceptable elements
    acceptable_attributes = [
        'abbr', 'accept', 'accept-charset', 'accesskey', 'action',
        'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
        'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear',
        'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
        'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height',
        'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang',
        'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name',
        'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel',
        'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape',
        'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target',
        'title', 'type', 'usemap', 'valign', 'value', 'vspace', 'width',
        ]

    # elements whose entire contents (not just the tags) are dropped
    unacceptable_elements_with_end_tag = ['script', 'applet']

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # nesting depth: >0 means we are inside a script/applet element
        self.unacceptablestack = 0

    def unknown_starttag(self, tag, attrs):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            # non-whitelisted tags are silently dropped
            return
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for (key, value) in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        # processing instructions are dropped entirely
        pass

    def handle_decl(self, text):
        # declarations are dropped entirely
        pass

    def handle_data(self, text):
        # text inside script/applet is suppressed
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)

def _sanitizeHTML(htmlSource, encoding):
    # strip unsafe markup from htmlSource; optionally clean up the result
    # with an installed HTML Tidy interface
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == 'uTidy':
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == 'mxTidy':
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        (nerrors, nwarnings, data, errordata) = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # this interface isn't installed; try the next one
                pass
        if _tidy:
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding='utf8')
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only the contents of <body>...</body>
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data

class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    # urllib2 handler that follows redirects, records the final HTTP
    # status on the returned object, and retries basic auth as digest
    def http_error_default(self, req, fp, code, msg, headers,):
        if code / 100 == 3 and code != 304:
            # 3xx (other than 304 Not Modified): treat as a redirect
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl

    def http_error_302(self, req, fp, code, msg, headers,):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers,)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers,):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers,)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302

    def http_error_401(self, req, fp, code, msg, headers,):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            (user, passw) = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # any failure above means we can't retry with digest auth;
            # fall back to the default error handling
            return self.http_error_default(req, fp, code, msg, headers)

def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers,):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    If the referrer argument is supplied, it will be used as the value of
    a Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """

    if hasattr(url_file_stream_or_string, 'read'):
        # already a file-like object; return it unchanged
        return url_file_stream_or_string

    if url_file_stream_or_string == '-':
        return sys.stdin

    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            (urltype, rest) = urllib.splittype(url_file_stream_or_string)
            (realhost, rest) = urllib.splithost(rest)
            if realhost:
                (user_passwd, realhost) = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials out of the URL and send them
                    # as a Basic Authorization header instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5],))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed')  # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = []  # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close()  # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass

    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))

# registered date handlers, tried in reverse registration order
_date_handlers = []
def registerDateHandler(func):
    """Register a date handler function (takes string, returns 9-tuple date in GMT)"""
    _date_handlers.insert(0, func)

# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = [
    'YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO', 'YY-?MM-?DD', 'YY-?OOO',
    'YYYY', '-YY-?MM', '-OOO', '-YY', '--MM-?DD', '--MM', '---DD', 'CC',
    '',
    ]
# expand each template into a full regular expression with named groups
# (year/month/day/ordinal/century plus optional time and timezone)
_iso8601_re = [tmpl.replace('YYYY', r'(?P<year>\d{4})').replace('YY', r'(?P<year>\d\d)').replace('MM', r'(?P<month>[01]\d)').replace('DD', r'(?P<day>[0123]\d)').replace('OOO', r'(?P<ordinal>[0123]\d\d)').replace('CC', r'(?P<century>\d\d$)') + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})' + r'(:(?P<second>\d{2}))?' + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?' for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    """Parse a variety of ISO-8601-compatible formats like 20040105"""
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are not normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday, ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)

# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144'  # b3e2 in euc-kr
_korean_month = u'\uc6d4'  # bff9 in euc-kr
_korean_day = u'\uc77c'  # c0cf in euc-kr
_korean_am = u'\uc624\uc804'  # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4'  # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % (_korean_am, _korean_pm))

def _parse_date_onblog(dateString):
    """Parse a string according to the OnBlog 8-bit date format"""
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    # rebuild as W3DTF and delegate; OnBlog timestamps are Korean
    # local time, hence the fixed +09:00 zone offset
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % {
        'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
        'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),
        'zonediff': '+09:00',
        }
    if _debug:
        sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    """Parse a string according to the Nate 8-bit date format"""
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    # convert the 12-hour AM/PM clock to a zero-padded 24-hour value
    hour = int(m.group(5))
    ampm = m.group(4)
    if ampm == _korean_pm:
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % {
        'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
        'hour': hour, 'minute': m.group(6), 'second': m.group(7),
        'zonediff': '+09:00',
        }
    if _debug:
        sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

_mssql_date_re = re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    """Parse a string according to the MS SQL date format"""
    m = _mssql_date_re.match(dateString)
    if not m:
        return
    # fractional seconds (group 7) are deliberately discarded
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % {
        'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
        'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),
        'zonediff': '+09:00',
        }
    if _debug:
        sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings
_greek_months = {
    u'\u0399\u03b1\u03bd': u'Jan',
    u'\u03a6\u03b5\u03b2': u'Feb',
    u'\u039c\u03ac\u03ce': u'Mar',
    u'\u039c\u03b1\u03ce': u'Mar',
    u'\u0391\u03c0\u03c1': u'Apr',
    u'\u039c\u03ac\u03b9': u'May',
    u'\u039c\u03b1\u03ca': u'May',
    u'\u039c\u03b1\u03b9': u'May',
    u'\u0399\u03bf\u03cd\u03bd': u'Jun',
    u'\u0399\u03bf\u03bd': u'Jun',
    u'\u0399\u03bf\u03cd\u03bb': u'Jul',
    u'\u0399\u03bf\u03bb': u'Jul',
    u'\u0391\u03cd\u03b3': u'Aug',
    u'\u0391\u03c5\u03b3': u'Aug',
    u'\u03a3\u03b5\u03c0': u'Sep',
    u'\u039f\u03ba\u03c4': u'Oct',
    u'\u039d\u03bf\u03ad': u'Nov',
    u'\u039d\u03bf\u03b5': u'Nov',
    u'\u0394\u03b5\u03ba': u'Dec',
    }

_greek_wdays = {
    u'\u039a\u03c5\u03c1': u'Sun',
    u'\u0394\u03b5\u03c5': u'Mon',
    u'\u03a4\u03c1\u03b9': u'Tue',
    u'\u03a4\u03b5\u03c4': u'Wed',
    u'\u03a0\u03b5\u03bc': u'Thu',
    u'\u03a0\u03b1\u03c1': u'Fri',
    u'\u03a3\u03b1\u03b2': u'Sat',
    }

_greek_date_format_re = re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    """Parse a string according to a Greek 8-bit date format."""
    m = _greek_date_format_re.match(dateString)
    if not m:
        return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except KeyError:
        # narrowed from a bare except: only the two dict lookups above can
        # fail here, and a bare except would also swallow KeyboardInterrupt
        # and SystemExit and mask genuine bugs
        return
    # translate the weekday/month names and delegate to the RFC822 parser
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % {
        'wday': wday, 'day': m.group(2), 'month': month,
        'year': m.group(4), 'hour': m.group(5), 'minute': m.group(6),
        'second': m.group(7), 'zonediff': m.group(8),
        }
    if _debug:
        sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings
_hungarian_months = {
    u'janu\u00e1r': u'01',
    u'febru\u00e1ri': u'02',
    u'm\u00e1rcius': u'03',
    u'\u00e1prilis': u'04',
    u'm\u00e1ujus': u'05',
    u'j\u00fanius': u'06',
    u'j\u00falius': u'07',
    u'augusztus': u'08',
    u'szeptember': u'09',
    u'okt\u00f3ber': u'10',
    u'november': u'11',
    u'december': u'12',
    }

_hungarian_date_format_re = re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    """Parse a string according to a Hungarian 8-bit date format."""
    m = _hungarian_date_format_re.match(dateString)
    if not m:
        return
    try:
        # translate the month name and zero-pad day/hour
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except KeyError:
        # narrowed from a bare except: only the month lookup can raise here
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % {
        'year': m.group(1), 'month': month, 'day': day,
        'hour': hour, 'minute': m.group(5),
        'zonediff': m.group(6),
        }
    if _debug:
        sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)

# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license.
Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later

def _parse_date_w3dtf(dateString):
    # Parse a W3C Date-Time Format string (a profile of ISO 8601) into a
    # 9-tuple in GMT; returns None when the string doesn't match exactly.
    def __extract_date(m):
        year = int(m.group('year'))
        if year < 100:
            # two-digit year: assume the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return (0, 0, 0)
        julian = m.group('julian')
        if julian:
            # ordinal (day-of-year) date: start from an approximation and
            # iterate until the round-tripped day-of-year matches
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0,))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return (year, month, day)
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return (year, month, day)

    def __extract_time(m):
        if not m:
            return (0, 0, 0)
        hours = m.group('hours')
        if not hours:
            return (0, 0, 0)
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return (hours, minutes, seconds)

    def __extract_tzd(m):
        """Return the Time Zone Designator as an offset in seconds from UTC."""
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours * 60 + minutes) * 60
        # sign is inverted: the offset is later ADDED to the local time
        # to obtain UTC
        if tzd[0] == '+':
            return -offset
        return offset

    __date_re = '(?P<year>\\d\\d\\d\\d)(?:(?P<dsep>-|)(?:(?P<julian>\\d\\d\\d)|(?P<month>\\d\\d)(?:(?P=dsep)(?P<day>\\d\\d))?))?'
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = '(?P<hours>\\d\\d)(?P<tsep>:|)(?P<minutes>\\d\\d)(?:(?P=tsep)(?P<seconds>\\d\\d(?:[.,]\\d+)?))?' + __tzd_re
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    if m is None or m.group() != dateString:
        # the entire string must be consumed, not just a prefix
        return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0:
        return
    return time.gmtime((time.mktime(gmt) + __extract_tzd(m)) - time.timezone)
registerDateHandler(_parse_date_w3dtf)

def _parse_date_rfc822(dateString):
    """Parse an RFC822, RFC1123, RFC2822, or asctime-style date"""
    data = dateString.split()
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        # drop a leading weekday name
        del data[0]
    if len(data) == 4:
        # date plus time with no separate timezone field
        s = data[3]
        i = s.find('+')
        if i > 0:
            # split 'HH:MM:SS+ZONE' into separate time and zone fields
            data[3:] = [s[:i], s[i + 1:]]
        else:
            data.append('')
        dateString = ' '.join(data)
    if len(data) < 5:
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))

# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)

def _parse_date(dateString):
    """Parses a variety of date formats into a 9-tuple in GMT"""
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple:
                continue
            if len(date9tuple) != 9:
                if _debug:
                    sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError, 'date handler function must return 9-tuple'
            # sanity check: every element must be convertible to int
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            # a broken handler just means we try the next one
            if _debug:
                sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None

def _getCharacterEncoding(http_headers, xml_data):
    """Get the character encoding of the XML document

    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type is
    application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need to
    sniff the first few bytes of the XML data and try to determine whether
    the encoding is ASCII-compatible.  Section F of the XML specification
    shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    """

    def _parseHTTPContentType(content_type):
        """takes HTTP Content-Type header and returns (content type, charset)

        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        """
        content_type = content_type or ''
        (content_type, params) = cgi.parse_header(content_type)
        return (content_type, params.get('charset', '').replace("'", ''))

    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    (http_content_type, http_encoding) = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif len(xml_data) >= 4 and xml_data[:2] == '\xfe\xff' and xml_data[2:4] != '\x00\x00':
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif len(xml_data) >= 4 and xml_data[:2] == '\xff\xfe' and xml_data[2:4] != '\x00\x00':
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        # decoding failure: behave as if no declaration was found
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        if sniffed_xml_encoding and xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16',):
            # the sniffed multibyte encoding is more specific than the
            # generic family name in the declaration
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if http_content_type in application_content_types or http_content_type.startswith('application/') and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif http_content_type in text_content_types or http_content_type.startswith('text/') and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and not http_headers.has_key('content-type'):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return (true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type)

def _toUTF8(data, encoding):
    """Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases
    """
    if _debug:
        sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    if len(data) >= 4 and data[:2] == '\xfe\xff' and data[2:4] != '\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif len(data) >= 4 and data[:2] == '\xff\xfe' and data[2:4] != '\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    if _debug:
        sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # replace any existing XML declaration with one that says utf-8
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')

def _stripDoctype(data):
    """Strips DOCTYPE from XML document, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same
XML document, minus the DOCTYPE """ entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE) data = entity_pattern.sub('', data) doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE) doctype_results = doctype_pattern.findall(data) doctype = doctype_results and doctype_results[0] or '' if doctype.lower().count('netscape'): version = 'rss091n' else: version = None data = doctype_pattern.sub('', data) return (version, data) def parse( url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[], ): """Parse a feed from a URL, file, stream, or string""" result = FeedParserDict() result['feed'] = FeedParserDict() result['entries'] = [] if _XML_AVAILABLE: result['bozo'] = 0 if type(handlers) == types.InstanceType: handlers = [handlers] try: f = _open_resource( url_file_stream_or_string, etag, modified, agent, referrer, handlers, ) data = f.read() except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' f = None # if feed is gzip-compressed, decompress it if f and data and hasattr(f, 'headers'): if gzip and f.headers.get('content-encoding', '') == 'gzip': try: data = gzip.GzipFile(fileobj=_StringIO(data)).read() except Exception, e: # Some feeds claim to be gzipped but they're not, so # we get garbage. Ideally, we should re-request the # feed without the 'Accept-encoding: gzip' header, # but we don't. 
result['bozo'] = 1 result['bozo_exception'] = e data = '' elif zlib and f.headers.get('content-encoding', '')\ == 'deflate': try: data = zlib.decompress(data, -zlib.MAX_WBITS) except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' # save HTTP headers if hasattr(f, 'info'): info = f.info() result['etag'] = info.getheader('ETag') last_modified = info.getheader('Last-Modified') if last_modified: result['modified'] = _parse_date(last_modified) if hasattr(f, 'url'): result['href'] = f.url result['status'] = 200 if hasattr(f, 'status'): result['status'] = f.status if hasattr(f, 'headers'): result['headers'] = f.headers.dict if hasattr(f, 'close'): f.close() # there are four encodings to keep track of: # - http_encoding is the encoding declared in the Content-Type HTTP header # - xml_encoding is the encoding declared in the <?xml declaration # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications http_headers = result.get('headers', {}) (result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type) = \ _getCharacterEncoding(http_headers, data) if http_headers and not acceptable_content_type: if http_headers.has_key('content-type'): bozo_message = '%s is not an XML media type'\ % http_headers['content-type'] else: bozo_message = 'no Content-type specified' result['bozo'] = 1 result['bozo_exception'] = NonXMLContentType(bozo_message) (result['version'], data) = _stripDoctype(data) baseuri = http_headers.get('content-location', result.get('href')) baselang = http_headers.get('content-language', None) # if server sent 304, we're done if result.get('status', 0) == 304: result['version'] = '' result['debug_message'] = \ 'The feed has not changed since you last checked, '\ + 'so the server sent no data. This is a feature, not a bug!' 
return result # if there was a problem downloading, we're done if not data: return result # determine character encoding use_strict_parser = 0 known_encoding = 0 tried_encodings = [] # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding): if not proposed_encoding: continue if proposed_encoding in tried_encodings: continue tried_encodings.append(proposed_encoding) try: data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 break except: pass # if no luck and we have auto-detection library, try that if not known_encoding and chardet: try: proposed_encoding = chardet.detect(data)['encoding'] if proposed_encoding and proposed_encoding\ not in tried_encodings: tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck and we haven't tried utf-8 yet, try that if not known_encoding and 'utf-8' not in tried_encodings: try: proposed_encoding = 'utf-8' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck and we haven't tried windows-1252 yet, try that if not known_encoding and 'windows-1252' not in tried_encodings: try: proposed_encoding = 'windows-1252' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck, give up if not known_encoding: result['bozo'] = 1 result['bozo_exception'] = \ CharacterEncodingUnknown('document encoding unknown, I tried ' + '%s, %s, utf-8, and windows-1252 but nothing worked' % (result['encoding'], xml_encoding)) result['encoding'] = '' elif proposed_encoding != result['encoding']: result['bozo'] = 1 result['bozo_exception'] = \ CharacterEncodingOverride('documented declared as %s, but parsed as %s' % (result['encoding'], proposed_encoding)) 
result['encoding'] = proposed_encoding if not _XML_AVAILABLE: use_strict_parser = 0 if use_strict_parser: # initialize the SAX parser feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) saxparser.setContentHandler(feedparser) saxparser.setErrorHandler(feedparser) source = xml.sax.xmlreader.InputSource() source.setByteStream(_StringIO(data)) if hasattr(saxparser, '_ns_stack'): # work around bug in built-in SAX parser (doesn't recognize xml: namespace) # PyXML doesn't have this problem, and it doesn't have _ns_stack either saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace' : 'xml'}) try: saxparser.parse(source) except Exception, e: if _debug: import traceback traceback.print_stack() traceback.print_exc() sys.stderr.write('xml parsing failed\n') result['bozo'] = 1 result['bozo_exception'] = feedparser.exc or e use_strict_parser = 0 if not use_strict_parser: feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '') feedparser.feed(data) result['feed'] = feedparser.feeddata result['entries'] = feedparser.entries result['version'] = result['version'] or feedparser.version result['namespaces'] = feedparser.namespacesInUse return result if __name__ == '__main__': if not sys.argv[1:]: print __doc__ sys.exit(0) else: urls = sys.argv[1:] zopeCompatibilityHack() from pprint import pprint for url in urls: print url print result = parse(url) pprint(result) print # REVISION HISTORY # 1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements, # added Simon Fell's test suite # 1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections # 2.0 - 10/19/2002 # JD - use inchannel to watch out for image and textinput elements which can # also contain title, link, and description elements # JD - check for isPermaLink='false' attribute on guid elements # JD - replaced openAnything with 
open_resource supporting ETag and # If-Modified-Since request headers # JD - parse now accepts etag, modified, agent, and referrer optional # arguments # JD - modified parse to return a dictionary instead of a tuple so that any # etag or modified information can be returned and cached by the caller # 2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything # because of etag/modified, return the old etag/modified to the caller to # indicate why nothing is being returned # 2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its # useless. Fixes the problem JD was addressing by adding it. # 2.1 - 11/14/2002 - MAP - added gzip support # 2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent. # start_admingeneratoragent is an example of how to handle elements with # only attributes, no content. # 2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify); # also, make sure we send the User-Agent even if urllib2 isn't available. # Match any variation of backend.userland.com/rss namespace. # 2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is. 
# 2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's # snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed # project name # 2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree); # removed unnecessary urllib code -- urllib2 should always be available anyway; # return actual url, status, and full HTTP headers (as result['url'], # result['status'], and result['headers']) if parsing a remote feed over HTTP -- # this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>; # added the latest namespace-of-the-week for RSS 2.0 # 2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom # User-Agent (otherwise urllib2 sends two, which confuses some servers) # 2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for # inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds # 2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or # textInput, and also to return the character encoding (if specified) # 2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking # nested divs within content (JohnD); fixed missing sys import (JohanS); # fixed regular expression to capture XML character encoding (Andrei); # added support for Atom 0.3-style links; fixed bug with textInput tracking; # added support for cloud (MartijnP); added support for multiple # category/dc:subject (MartijnP); normalize content model: 'description' gets # description (which can come from description, summary, or full content if no # description), 'content' gets dict of base/language/type/value (which can come # from content:encoded, xhtml:body, content, or fullitem); # fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang # tracking; fixed bug tracking unknown tags; fixed bug tracking content when # <content> element is not in default namespace (like Pocketsoap feed); # resolve relative URLs in link, guid, docs, 
url, comments, wfw:comment, # wfw:commentRSS; resolve relative URLs within embedded HTML markup in # description, xhtml:body, content, content:encoded, title, subtitle, # summary, info, tagline, and copyright; added support for pingback and # trackback namespaces # 2.7 - 1/5/2004 - MAP - really added support for trackback and pingback # namespaces, as opposed to 2.6 when I said I did but didn't really; # sanitize HTML markup within some elements; added mxTidy support (if # installed) to tidy HTML markup within some elements; fixed indentation # bug in _parse_date (FazalM); use socket.setdefaulttimeout if available # (FazalM); universal date parsing and normalization (FazalM): 'created', modified', # 'issued' are parsed into 9-tuple date format and stored in 'created_parsed', # 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified' # and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa # 2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;. 
fixed memory # leak not closing url opener (JohnD); added dc:publisher support (MarekK); # added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK) # 2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in # encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL); # fixed relative URI processing for guid (skadz); added ICBM support; added # base64 support # 2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many # blogspot.com sites); added _debug variable # 2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing # 3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available); # added several new supported namespaces; fixed bug tracking naked markup in # description; added support for enclosure; added support for source; re-added # support for cloud which got dropped somehow; added support for expirationDate # 3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking # xml:base URI, one for documents that don't define one explicitly and one for # documents that define an outer and an inner xml:base that goes out of scope # before the end of the document # 3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level # 3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version'] # will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized; # added support for creativeCommons:license and cc:license; added support for # full Atom content model in title, tagline, info, copyright, summary; fixed bug # with gzip encoding (not always telling server we support it when we do) # 3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail # (dictionary of 'name', 'url', 'email'); map author to author_detail if author # contains name + email address # 3.0b8 - 1/28/2004 - MAP - added support for contributor # 3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added # 
support for summary # 3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from # xml.util.iso8601 # 3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain # dangerous markup; fiddled with decodeEntities (not right); liberalized # date parsing even further # 3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right); # added support to Atom 0.2 subtitle; added support for Atom content model # in copyright; better sanitizing of dangerous HTML elements with end tags # (script, frameset) # 3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img, # etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />) # 3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under # Python 2.1 # 3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; # fixed bug capturing author and contributor URL; fixed bug resolving relative # links in author and contributor URL; fixed bug resolvin relative links in # generator URL; added support for recognizing RSS 1.0; passed Simon Fell's # namespace tests, and included them permanently in the test suite with his # permission; fixed namespace handling under Python 2.1 # 3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) # 3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 # 3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); # use libxml2 (if available) # 3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author # name was in parentheses; removed ultra-problematic mxTidy support; patch to # workaround crash in PyXML/expat when encountering invalid entities # (MarkMoraes); support for textinput/textInput # 3.0b20 - 4/7/2004 - MAP - added CDF support # 3.0b21 - 4/14/2004 - MAP - added Hot RSS support # 3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in # results dict; changed results dict to allow getting 
values with results.key # as well as results[key]; work around embedded illformed HTML with half # a DOCTYPE; work around malformed Content-Type header; if character encoding # is wrong, try several common ones before falling back to regexes (if this # works, bozo_exception is set to CharacterEncodingOverride); fixed character # encoding issues in BaseHTMLProcessor by tracking encoding and converting # from Unicode to raw strings before feeding data to sgmllib.SGMLParser; # convert each value in results to Unicode (if possible), even if using # regex-based parsing # 3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain # high-bit characters in attributes in embedded HTML in description (thanks # Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in # FeedParserDict; tweaked FeedParserDict.has_key to return True if asking # about a mapped key # 3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and # results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could # cause the same encoding to be tried twice (even if it failed the first time); # fixed DOCTYPE stripping when DOCTYPE contained entity declarations; # better textinput and image tracking in illformed RSS 1.0 feeds # 3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed # my blink tag tests # 3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that # failed to parse utf-16 encoded feeds; made source into a FeedParserDict; # duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; # added support for image; refactored parse() fallback logic to try other # encodings if SAX parsing fails (previously it would only try other encodings # if re-encoding failed); remove unichr madness in normalize_attrs now that # we're properly tracking encoding in and out of BaseHTMLProcessor; set # feed.language from root-level xml:lang; set entry.id from rdf:about; # send Accept header # 3.0 - 6/21/2004 - MAP - 
don't try iso-8859-1 (can't distinguish between # iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are # windows-1252); fixed regression that could cause the same encoding to be # tried twice (even if it failed the first time) # 3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; # recover from malformed content-type header parameter with no equals sign # ('text/xml; charset:iso-8859-1') # 3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities # to Unicode equivalents in illformed feeds (aaronsw); added and # passed tests for converting character entities to Unicode equivalents # in illformed feeds (aaronsw); test for valid parsers when setting # XML_AVAILABLE; make version and encoding available when server returns # a 304; add handlers parameter to pass arbitrary urllib2 handlers (like # digest auth or proxy support); add code to parse username/password # out of url and send as basic authentication; expose downloading-related # exceptions in bozo_exception (aaronsw); added __contains__ method to # FeedParserDict (aaronsw); added publisher_detail (aaronsw) # 3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always # convert feed to UTF-8 before passing to XML parser; completely revamped # logic for determining character encoding and attempting XML parsing # (much faster); increased default timeout to 20 seconds; test for presence # of Location header on redirects; added tests for many alternate character # encodings; support various EBCDIC encodings; support UTF-16BE and # UTF16-LE with or without a BOM; support UTF-8 with a BOM; support # UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no # XML parsers are available; added support for 'Content-encoding: deflate'; # send blank 'Accept-encoding: ' header if neither gzip nor zlib modules # are available # 3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure # problem tracking xml:base and 
xml:lang if element declares it, child # doesn't, first grandchild redeclares it, and second grandchild doesn't; # refactored date parsing; defined public registerDateHandler so callers # can add support for additional date formats at runtime; added support # for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added # zopeCompatibilityHack() which turns FeedParserDict into a regular # dictionary, required for Zope compatibility, and also makes command- # line debugging easier because pprint module formats real dictionaries # better than dictionary-like objects; added NonXMLContentType exception, # which is stored in bozo_exception when a feed is served with a non-XML # media type such as 'text/plain'; respect Content-Language as default # language if not xml:lang is present; cloud dict is now FeedParserDict; # generator dict is now FeedParserDict; better tracking of xml:lang, # including support for xml:lang='' to unset the current language; # recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default # namespace; don't overwrite final status on redirects (scenarios: # redirecting to a URL that returns 304, redirecting to a URL that # redirects to another URL with a different type of redirect); add # support for HTTP 303 redirects # 4.0 - MAP - support for relative URIs in xml:base attribute; fixed # encoding issue with mxTidy (phopkins); preliminary support for RFC 3229; # support for Atom 1.0; support for iTunes extensions; new 'tags' for # categories/keywords/etc. as array of dict # {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0 # terminology; parse RFC 822-style dates with no time; lots of other # bug fixes # 4.1 - MAP - removed socket timeout; added support for chardet library
# Python
# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <allan@saddi.com> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # $Id$ """ fcgi - a FastCGI/WSGI gateway. For more information about FastCGI, see <http://www.fastcgi.com/>. For more information about the Web Server Gateway Interface, see <http://www.python.org/peps/pep-0333.html>. Example usage: #!/usr/bin/env python from myapplication import app # Assume app is your WSGI application object from fcgi import WSGIServer WSGIServer(app).run() See the documentation for WSGIServer/Server for more information. On most platforms, fcgi will fallback to regular CGI behavior if run in a non-FastCGI context. If you want to force CGI behavior, set the environment variable FCGI_FORCE_CGI to "Y" or "y". 
""" __author__ = 'Allan Saddi <allan@saddi.com>' __version__ = '$Revision$' import sys import os import signal import struct import cStringIO as StringIO import select import socket import errno import traceback try: import thread import threading thread_available = True except ImportError: import dummy_thread as thread import dummy_threading as threading thread_available = False # Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case. if not hasattr(socket, 'SHUT_WR'): socket.SHUT_WR = 1 __all__ = ['WSGIServer'] # Constants from the spec. FCGI_LISTENSOCK_FILENO = 0 FCGI_HEADER_LEN = 8 FCGI_VERSION_1 = 1 FCGI_BEGIN_REQUEST = 1 FCGI_ABORT_REQUEST = 2 FCGI_END_REQUEST = 3 FCGI_PARAMS = 4 FCGI_STDIN = 5 FCGI_STDOUT = 6 FCGI_STDERR = 7 FCGI_DATA = 8 FCGI_GET_VALUES = 9 FCGI_GET_VALUES_RESULT = 10 FCGI_UNKNOWN_TYPE = 11 FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE FCGI_NULL_REQUEST_ID = 0 FCGI_KEEP_CONN = 1 FCGI_RESPONDER = 1 FCGI_AUTHORIZER = 2 FCGI_FILTER = 3 FCGI_REQUEST_COMPLETE = 0 FCGI_CANT_MPX_CONN = 1 FCGI_OVERLOADED = 2 FCGI_UNKNOWN_ROLE = 3 FCGI_MAX_CONNS = 'FCGI_MAX_CONNS' FCGI_MAX_REQS = 'FCGI_MAX_REQS' FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS' FCGI_Header = '!BBHHBx' FCGI_BeginRequestBody = '!HB5x' FCGI_EndRequestBody = '!LB3x' FCGI_UnknownTypeBody = '!B7x' FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody) FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody) if __debug__: import time # Set non-zero to write debug output to a file. DEBUG = 0 DEBUGLOG = '/tmp/fcgi.log' def _debug(level, msg): if DEBUG < level: return try: f = open(DEBUGLOG, 'a') f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg)) f.close() except: pass class InputStream(object): """ File-like object representing FastCGI input streams (FCGI_STDIN and FCGI_DATA). Supports the minimum methods required by WSGI spec. """ def __init__(self, conn): self._conn = conn # See Server. 
self._shrinkThreshold = conn.server.inputStreamShrinkThreshold self._buf = '' self._bufList = [] self._pos = 0 # Current read position. self._avail = 0 # Number of bytes currently available. self._eof = False # True when server has sent EOF notification. def _shrinkBuffer(self): """Gets rid of already read data (since we can't rewind).""" if self._pos >= self._shrinkThreshold: self._buf = self._buf[self._pos:] self._avail -= self._pos self._pos = 0 assert self._avail >= 0 def _waitForData(self): """Waits for more data to become available.""" self._conn.process_input() def read(self, n=-1): if self._pos == self._avail and self._eof: return '' while True: if n < 0 or (self._avail - self._pos) < n: # Not enough data available. if self._eof: # And there's no more coming. newPos = self._avail break else: # Wait for more data. self._waitForData() continue else: newPos = self._pos + n break # Merge buffer list, if necessary. if self._bufList: self._buf += ''.join(self._bufList) self._bufList = [] r = self._buf[self._pos:newPos] self._pos = newPos self._shrinkBuffer() return r def readline(self, length=None): if self._pos == self._avail and self._eof: return '' while True: # Unfortunately, we need to merge the buffer list early. if self._bufList: self._buf += ''.join(self._bufList) self._bufList = [] # Find newline. i = self._buf.find('\n', self._pos) if i < 0: # Not found? if self._eof: # No more data coming. newPos = self._avail break else: # Wait for more to come. 
self._waitForData() continue else: newPos = i + 1 break if length is not None: if self._pos + length < newPos: newPos = self._pos + length r = self._buf[self._pos:newPos] self._pos = newPos self._shrinkBuffer() return r def readlines(self, sizehint=0): total = 0 lines = [] line = self.readline() while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline() return lines def __iter__(self): return self def next(self): r = self.readline() if not r: raise StopIteration return r def add_data(self, data): if not data: self._eof = True else: self._bufList.append(data) self._avail += len(data) class MultiplexedInputStream(InputStream): """ A version of InputStream meant to be used with MultiplexedConnections. Assumes the MultiplexedConnection (the producer) and the Request (the consumer) are running in different threads. """ def __init__(self, conn): super(MultiplexedInputStream, self).__init__(conn) # Arbitrates access to this InputStream (it's used simultaneously # by a Request and its owning Connection object). lock = threading.RLock() # Notifies Request thread that there is new data available. self._lock = threading.Condition(lock) def _waitForData(self): # Wait for notification from add_data(). self._lock.wait() def read(self, n=-1): self._lock.acquire() try: return super(MultiplexedInputStream, self).read(n) finally: self._lock.release() def readline(self, length=None): self._lock.acquire() try: return super(MultiplexedInputStream, self).readline(length) finally: self._lock.release() def add_data(self, data): self._lock.acquire() try: super(MultiplexedInputStream, self).add_data(data) self._lock.notify() finally: self._lock.release() class OutputStream(object): """ FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to write() or writelines() immediately result in Records being sent back to the server. Buffering should be done in a higher level! 
""" def __init__(self, conn, req, type, buffered=False): self._conn = conn self._req = req self._type = type self._buffered = buffered self._bufList = [] # Used if buffered is True self.dataWritten = False self.closed = False def _write(self, data): length = len(data) while length: toWrite = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN) rec = Record(self._type, self._req.requestId) rec.contentLength = toWrite rec.contentData = data[:toWrite] self._conn.writeRecord(rec) data = data[toWrite:] length -= toWrite def write(self, data): assert not self.closed if not data: return self.dataWritten = True if self._buffered: self._bufList.append(data) else: self._write(data) def writelines(self, lines): assert not self.closed for line in lines: self.write(line) def flush(self): # Only need to flush if this OutputStream is actually buffered. if self._buffered: data = ''.join(self._bufList) self._bufList = [] self._write(data) # Though available, the following should NOT be called by WSGI apps. def close(self): """Sends end-of-stream notification, if necessary.""" if not self.closed and self.dataWritten: self.flush() rec = Record(self._type, self._req.requestId) self._conn.writeRecord(rec) self.closed = True class TeeOutputStream(object): """ Simple wrapper around two or more output file-like objects that copies written data to all streams. """ def __init__(self, streamList): self._streamList = streamList def write(self, data): for f in self._streamList: f.write(data) def writelines(self, lines): for line in lines: self.write(line) def flush(self): for f in self._streamList: f.flush() class StdoutWrapper(object): """ Wrapper for sys.stdout so we know if data has actually been written. 
""" def __init__(self, stdout): self._file = stdout self.dataWritten = False def write(self, data): if data: self.dataWritten = True self._file.write(data) def writelines(self, lines): for line in lines: self.write(line) def __getattr__(self, name): return getattr(self._file, name) def decode_pair(s, pos=0): """ Decodes a name/value pair. The number of bytes decoded as well as the name/value pair are returned. """ nameLength = ord(s[pos]) if nameLength & 128: nameLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff pos += 4 else: pos += 1 valueLength = ord(s[pos]) if valueLength & 128: valueLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff pos += 4 else: pos += 1 name = s[pos:pos+nameLength] pos += nameLength value = s[pos:pos+valueLength] pos += valueLength return (pos, (name, value)) def encode_pair(name, value): """ Encodes a name/value pair. The encoded string is returned. """ nameLength = len(name) if nameLength < 128: s = chr(nameLength) else: s = struct.pack('!L', nameLength | 0x80000000L) valueLength = len(value) if valueLength < 128: s += chr(valueLength) else: s += struct.pack('!L', valueLength | 0x80000000L) return s + name + value class Record(object): """ A FastCGI Record. Used for encoding/decoding records. """ def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID): self.version = FCGI_VERSION_1 self.type = type self.requestId = requestId self.contentLength = 0 self.paddingLength = 0 self.contentData = '' def _recvall(sock, length): """ Attempts to receive length bytes from a socket, blocking if necessary. (Socket may be blocking or non-blocking.) 
""" dataList = [] recvLen = 0 while length: try: data = sock.recv(length) except socket.error, e: if e[0] == errno.EAGAIN: select.select([sock], [], []) continue else: raise if not data: # EOF break dataList.append(data) dataLen = len(data) recvLen += dataLen length -= dataLen return ''.join(dataList), recvLen _recvall = staticmethod(_recvall) def read(self, sock): """Read and decode a Record from a socket.""" try: header, length = self._recvall(sock, FCGI_HEADER_LEN) except: raise EOFError if length < FCGI_HEADER_LEN: raise EOFError self.version, self.type, self.requestId, self.contentLength, \ self.paddingLength = struct.unpack(FCGI_Header, header) if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, ' 'contentLength = %d' % (sock.fileno(), self.type, self.requestId, self.contentLength)) if self.contentLength: try: self.contentData, length = self._recvall(sock, self.contentLength) except: raise EOFError if length < self.contentLength: raise EOFError if self.paddingLength: try: self._recvall(sock, self.paddingLength) except: raise EOFError def _sendall(sock, data): """ Writes data to a socket and does not return until all the data is sent. 
""" length = len(data) while length: try: sent = sock.send(data) except socket.error, e: if e[0] == errno.EAGAIN: select.select([], [sock], []) continue else: raise data = data[sent:] length -= sent _sendall = staticmethod(_sendall) def write(self, sock): """Encode and write a Record to a socket.""" self.paddingLength = -self.contentLength & 7 if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, ' 'contentLength = %d' % (sock.fileno(), self.type, self.requestId, self.contentLength)) header = struct.pack(FCGI_Header, self.version, self.type, self.requestId, self.contentLength, self.paddingLength) self._sendall(sock, header) if self.contentLength: self._sendall(sock, self.contentData) if self.paddingLength: self._sendall(sock, '\x00'*self.paddingLength) class Request(object): """ Represents a single FastCGI request. These objects are passed to your handler and is the main interface between your handler and the fcgi module. The methods should not be called by your handler. However, server, params, stdin, stdout, stderr, and data are free for your handler's use. 
""" def __init__(self, conn, inputStreamClass): self._conn = conn self.server = conn.server self.params = {} self.stdin = inputStreamClass(conn) self.stdout = OutputStream(conn, self, FCGI_STDOUT) self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True) self.data = inputStreamClass(conn) def run(self): """Runs the handler, flushes the streams, and ends the request.""" try: protocolStatus, appStatus = self.server.handler(self) except: traceback.print_exc(file=self.stderr) self.stderr.flush() if not self.stdout.dataWritten: self.server.error(self) protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0 if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' % (protocolStatus, appStatus)) self._flush() self._end(appStatus, protocolStatus) def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE): self._conn.end_request(self, appStatus, protocolStatus) def _flush(self): self.stdout.close() self.stderr.close() class CGIRequest(Request): """A normal CGI request disguised as a FastCGI request.""" def __init__(self, server): # These are normally filled in by Connection. self.requestId = 1 self.role = FCGI_RESPONDER self.flags = 0 self.aborted = False self.server = server self.params = dict(os.environ) self.stdin = sys.stdin self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity! self.stderr = sys.stderr self.data = StringIO.StringIO() def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE): sys.exit(appStatus) def _flush(self): # Not buffered, do nothing. pass class Connection(object): """ A Connection with the web server. Each Connection is associated with a single socket (which is connected to the web server) and is responsible for handling all the FastCGI message processing for that socket. """ _multiplexed = False _inputStreamClass = InputStream def __init__(self, sock, addr, server): self._sock = sock self._addr = addr self.server = server # Active Requests for this Connection, mapped by request ID. 
self._requests = {} def _cleanupSocket(self): """Close the Connection's socket.""" try: self._sock.shutdown(socket.SHUT_WR) except: return try: while True: r, w, e = select.select([self._sock], [], []) if not r or not self._sock.recv(1024): break except: pass self._sock.close() def run(self): """Begin processing data from the socket.""" self._keepGoing = True while self._keepGoing: try: self.process_input() except EOFError: break except (select.error, socket.error), e: if e[0] == errno.EBADF: # Socket was closed by Request. break raise self._cleanupSocket() def process_input(self): """Attempt to read a single Record from the socket and process it.""" # Currently, any children Request threads notify this Connection # that it is no longer needed by closing the Connection's socket. # We need to put a timeout on select, otherwise we might get # stuck in it indefinitely... (I don't like this solution.) while self._keepGoing: try: r, w, e = select.select([self._sock], [], [], 1.0) except ValueError: # Sigh. ValueError gets thrown sometimes when passing select # a closed socket. raise EOFError if r: break if not self._keepGoing: return rec = Record() rec.read(self._sock) if rec.type == FCGI_GET_VALUES: self._do_get_values(rec) elif rec.type == FCGI_BEGIN_REQUEST: self._do_begin_request(rec) elif rec.type == FCGI_ABORT_REQUEST: self._do_abort_request(rec) elif rec.type == FCGI_PARAMS: self._do_params(rec) elif rec.type == FCGI_STDIN: self._do_stdin(rec) elif rec.type == FCGI_DATA: self._do_data(rec) elif rec.requestId == FCGI_NULL_REQUEST_ID: self._do_unknown_type(rec) else: # Need to complain about this. pass def writeRecord(self, rec): """ Write a Record to the socket. """ rec.write(self._sock) def end_request(self, req, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE, remove=True): """ End a Request. Called by Request objects. An FCGI_END_REQUEST Record is sent to the web server. 
If the web server no longer requires the connection, the socket is closed, thereby ending this Connection (run() returns). """ rec = Record(FCGI_END_REQUEST, req.requestId) rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus, protocolStatus) rec.contentLength = FCGI_EndRequestBody_LEN self.writeRecord(rec) if remove: del self._requests[req.requestId] if __debug__: _debug(2, 'end_request: flags = %d' % req.flags) if not (req.flags & FCGI_KEEP_CONN) and not self._requests: self._cleanupSocket() self._keepGoing = False def _do_get_values(self, inrec): """Handle an FCGI_GET_VALUES request from the web server.""" outrec = Record(FCGI_GET_VALUES_RESULT) pos = 0 while pos < inrec.contentLength: pos, (name, value) = decode_pair(inrec.contentData, pos) cap = self.server.capability.get(name) if cap is not None: outrec.contentData += encode_pair(name, str(cap)) outrec.contentLength = len(outrec.contentData) self.writeRecord(outrec) def _do_begin_request(self, inrec): """Handle an FCGI_BEGIN_REQUEST from the web server.""" role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData) req = self.server.request_class(self, self._inputStreamClass) req.requestId, req.role, req.flags = inrec.requestId, role, flags req.aborted = False if not self._multiplexed and self._requests: # Can't multiplex requests. self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False) else: self._requests[inrec.requestId] = req def _do_abort_request(self, inrec): """ Handle an FCGI_ABORT_REQUEST from the web server. We just mark a flag in the associated Request. """ req = self._requests.get(inrec.requestId) if req is not None: req.aborted = True def _start_request(self, req): """Run the request.""" # Not multiplexed, so run it inline. req.run() def _do_params(self, inrec): """ Handle an FCGI_PARAMS Record. If the last FCGI_PARAMS Record is received, start the request. 
""" req = self._requests.get(inrec.requestId) if req is not None: if inrec.contentLength: pos = 0 while pos < inrec.contentLength: pos, (name, value) = decode_pair(inrec.contentData, pos) req.params[name] = value else: self._start_request(req) def _do_stdin(self, inrec): """Handle the FCGI_STDIN stream.""" req = self._requests.get(inrec.requestId) if req is not None: req.stdin.add_data(inrec.contentData) def _do_data(self, inrec): """Handle the FCGI_DATA stream.""" req = self._requests.get(inrec.requestId) if req is not None: req.data.add_data(inrec.contentData) def _do_unknown_type(self, inrec): """Handle an unknown request type. Respond accordingly.""" outrec = Record(FCGI_UNKNOWN_TYPE) outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type) outrec.contentLength = FCGI_UnknownTypeBody_LEN self.writeRecord(rec) class MultiplexedConnection(Connection): """ A version of Connection capable of handling multiple requests simultaneously. """ _multiplexed = True _inputStreamClass = MultiplexedInputStream def __init__(self, sock, addr, server): super(MultiplexedConnection, self).__init__(sock, addr, server) # Used to arbitrate access to self._requests. lock = threading.RLock() # Notification is posted everytime a request completes, allowing us # to quit cleanly. self._lock = threading.Condition(lock) def _cleanupSocket(self): # Wait for any outstanding requests before closing the socket. self._lock.acquire() while self._requests: self._lock.wait() self._lock.release() super(MultiplexedConnection, self)._cleanupSocket() def writeRecord(self, rec): # Must use locking to prevent intermingling of Records from different # threads. self._lock.acquire() try: # Probably faster than calling super. 
;) rec.write(self._sock) finally: self._lock.release() def end_request(self, req, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE, remove=True): self._lock.acquire() try: super(MultiplexedConnection, self).end_request(req, appStatus, protocolStatus, remove) self._lock.notify() finally: self._lock.release() def _do_begin_request(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_begin_request(inrec) finally: self._lock.release() def _do_abort_request(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_abort_request(inrec) finally: self._lock.release() def _start_request(self, req): thread.start_new_thread(req.run, ()) def _do_params(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_params(inrec) finally: self._lock.release() def _do_stdin(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_stdin(inrec) finally: self._lock.release() def _do_data(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_data(inrec) finally: self._lock.release() class Server(object): """ The FastCGI server. Waits for connections from the web server, processing each request. If run in a normal CGI context, it will instead instantiate a CGIRequest and run the handler through there. """ request_class = Request cgirequest_class = CGIRequest # Limits the size of the InputStream's string buffer to this size + the # server's maximum Record size. Since the InputStream is not seekable, # we throw away already-read data once this certain amount has been read. inputStreamShrinkThreshold = 102400 - 8192 def __init__(self, handler=None, maxwrite=8192, bindAddress=None, umask=None, multiplexed=False): """ handler, if present, must reference a function or method that takes one argument: a Request object. If handler is not specified at creation time, Server *must* be subclassed. (The handler method below is abstract.) 
maxwrite is the maximum number of bytes (per Record) to write to the server. I've noticed mod_fastcgi has a relatively small receive buffer (8K or so). bindAddress, if present, must either be a string or a 2-tuple. If present, run() will open its own listening socket. You would use this if you wanted to run your application as an 'external' FastCGI app. (i.e. the webserver would no longer be responsible for starting your app) If a string, it will be interpreted as a filename and a UNIX socket will be opened. If a tuple, the first element, a string, is the interface name/IP to bind to, and the second element (an int) is the port number. Set multiplexed to True if you want to handle multiple requests per connection. Some FastCGI backends (namely mod_fastcgi) don't multiplex requests at all, so by default this is off (which saves on thread creation/locking overhead). If threads aren't available, this keyword is ignored; it's not possible to multiplex requests at all. """ if handler is not None: self.handler = handler self.maxwrite = maxwrite if thread_available: try: import resource # Attempt to glean the maximum number of connections # from the OS. maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0] except ImportError: maxConns = 100 # Just some made up number. maxReqs = maxConns if multiplexed: self._connectionClass = MultiplexedConnection maxReqs *= 5 # Another made up number. else: self._connectionClass = Connection self.capability = { FCGI_MAX_CONNS: maxConns, FCGI_MAX_REQS: maxReqs, FCGI_MPXS_CONNS: multiplexed and 1 or 0 } else: self._connectionClass = Connection self.capability = { # If threads aren't available, these are pretty much correct. FCGI_MAX_CONNS: 1, FCGI_MAX_REQS: 1, FCGI_MPXS_CONNS: 0 } self._bindAddress = bindAddress self._umask = umask def _setupSocket(self): if self._bindAddress is None: # Run as a normal FastCGI? 
isFCGI = True sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET, socket.SOCK_STREAM) try: sock.getpeername() except socket.error, e: if e[0] == errno.ENOTSOCK: # Not a socket, assume CGI context. isFCGI = False elif e[0] != errno.ENOTCONN: raise # FastCGI/CGI discrimination is broken on Mac OS X. # Set the environment variable FCGI_FORCE_CGI to "Y" or "y" # if you want to run your app as a simple CGI. (You can do # this with Apache's mod_env [not loaded by default in OS X # client, ha ha] and the SetEnv directive.) if not isFCGI or \ os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'): req = self.cgirequest_class(self) req.run() sys.exit(0) else: # Run as a server oldUmask = None if type(self._bindAddress) is str: # Unix socket sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: os.unlink(self._bindAddress) except OSError: pass if self._umask is not None: oldUmask = os.umask(self._umask) else: # INET socket assert type(self._bindAddress) is tuple assert len(self._bindAddress) == 2 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(self._bindAddress) sock.listen(socket.SOMAXCONN) if oldUmask is not None: os.umask(oldUmask) return sock def _cleanupSocket(self, sock): """Closes the main socket.""" sock.close() def _installSignalHandlers(self): self._oldSIGs = [(x,signal.getsignal(x)) for x in (signal.SIGHUP, signal.SIGINT, signal.SIGTERM)] signal.signal(signal.SIGHUP, self._hupHandler) signal.signal(signal.SIGINT, self._intHandler) signal.signal(signal.SIGTERM, self._intHandler) def _restoreSignalHandlers(self): for signum,handler in self._oldSIGs: signal.signal(signum, handler) def _hupHandler(self, signum, frame): self._hupReceived = True self._keepGoing = False def _intHandler(self, signum, frame): self._keepGoing = False def run(self, timeout=1.0): """ The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if SIGHUP was received, False otherwise. 
""" web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS') if web_server_addrs is not None: web_server_addrs = map(lambda x: x.strip(), web_server_addrs.split(',')) sock = self._setupSocket() self._keepGoing = True self._hupReceived = False # Install signal handlers. self._installSignalHandlers() while self._keepGoing: try: r, w, e = select.select([sock], [], [], timeout) except select.error, e: if e[0] == errno.EINTR: continue raise if r: try: clientSock, addr = sock.accept() except socket.error, e: if e[0] in (errno.EINTR, errno.EAGAIN): continue raise if web_server_addrs and \ (len(addr) != 2 or addr[0] not in web_server_addrs): clientSock.close() continue # Instantiate a new Connection and begin processing FastCGI # messages (either in a new thread or this thread). conn = self._connectionClass(clientSock, addr, self) thread.start_new_thread(conn.run, ()) self._mainloopPeriodic() # Restore signal handlers. self._restoreSignalHandlers() self._cleanupSocket(sock) return self._hupReceived def _mainloopPeriodic(self): """ Called with just about each iteration of the main loop. Meant to be overridden. """ pass def _exit(self, reload=False): """ Protected convenience method for subclasses to force an exit. Not really thread-safe, which is why it isn't public. """ if self._keepGoing: self._keepGoing = False self._hupReceived = reload def handler(self, req): """ Default handler, which just raises an exception. Unless a handler is passed at initialization time, this must be implemented by a subclass. """ raise NotImplementedError, self.__class__.__name__ + '.handler' def error(self, req): """ Called by Request if an exception occurs within the handler. May and should be overridden. """ import cgitb req.stdout.write('Content-Type: text/html\r\n\r\n' + cgitb.html(sys.exc_info())) class WSGIServer(Server): """ FastCGI server that supports the Web Server Gateway Interface. See <http://www.python.org/peps/pep-0333.html>. 
""" def __init__(self, application, environ=None, multithreaded=True, **kw): """ environ, if present, must be a dictionary-like object. Its contents will be copied into application's environ. Useful for passing application-specific variables. Set multithreaded to False if your application is not MT-safe. """ if kw.has_key('handler'): del kw['handler'] # Doesn't make sense to let this through super(WSGIServer, self).__init__(**kw) if environ is None: environ = {} self.application = application self.environ = environ self.multithreaded = multithreaded # Used to force single-threadedness self._app_lock = thread.allocate_lock() def handler(self, req): """Special handler for WSGI.""" if req.role != FCGI_RESPONDER: return FCGI_UNKNOWN_ROLE, 0 # Mostly taken from example CGI gateway. environ = req.params environ.update(self.environ) environ['wsgi.version'] = (1,0) environ['wsgi.input'] = req.stdin if self._bindAddress is None: stderr = req.stderr else: stderr = TeeOutputStream((sys.stderr, req.stderr)) environ['wsgi.errors'] = stderr environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \ thread_available and self.multithreaded # Rationale for the following: If started by the web server # (self._bindAddress is None) in either FastCGI or CGI mode, the # possibility of being spawned multiple times simultaneously is quite # real. And, if started as an external server, multiple copies may be # spawned for load-balancing/redundancy. (Though I don't think # mod_fastcgi supports this?) 
environ['wsgi.multiprocess'] = True environ['wsgi.run_once'] = isinstance(req, CGIRequest) if environ.get('HTTPS', 'off') in ('on', '1'): environ['wsgi.url_scheme'] = 'https' else: environ['wsgi.url_scheme'] = 'http' self._sanitizeEnv(environ) headers_set = [] headers_sent = [] result = None def write(data): assert type(data) is str, 'write() argument must be string' assert headers_set, 'write() before start_response()' if not headers_sent: status, responseHeaders = headers_sent[:] = headers_set found = False for header,value in responseHeaders: if header.lower() == 'content-length': found = True break if not found and result is not None: try: if len(result) == 1: responseHeaders.append(('Content-Length', str(len(data)))) except: pass s = 'Status: %s\r\n' % status for header in responseHeaders: s += '%s: %s\r\n' % header s += '\r\n' req.stdout.write(s) req.stdout.write(data) req.stdout.flush() def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: # Re-raise if too late raise exc_info[0], exc_info[1], exc_info[2] finally: exc_info = None # avoid dangling circular ref else: assert not headers_set, 'Headers already set!' 
assert type(status) is str, 'Status must be a string' assert len(status) >= 4, 'Status must be at least 4 characters' assert int(status[:3]), 'Status must begin with 3-digit code' assert status[3] == ' ', 'Status must have a space after code' assert type(response_headers) is list, 'Headers must be a list' if __debug__: for name,val in response_headers: assert type(name) is str, 'Header names must be strings' assert type(val) is str, 'Header values must be strings' headers_set[:] = [status, response_headers] return write if not self.multithreaded: self._app_lock.acquire() try: try: result = self.application(environ, start_response) try: for data in result: if data: write(data) if not headers_sent: write('') # in case body was empty finally: if hasattr(result, 'close'): result.close() except socket.error, e: if e[0] != errno.EPIPE: raise # Don't let EPIPE propagate beyond server finally: if not self.multithreaded: self._app_lock.release() return FCGI_REQUEST_COMPLETE, 0 def _sanitizeEnv(self, environ): """Ensure certain values are present, if required by WSGI.""" if not environ.has_key('SCRIPT_NAME'): environ['SCRIPT_NAME'] = '' if not environ.has_key('PATH_INFO'): environ['PATH_INFO'] = '' # If any of these are missing, it probably signifies a broken # server... 
for name,default in [('REQUEST_METHOD', 'GET'), ('SERVER_NAME', 'localhost'), ('SERVER_PORT', '80'), ('SERVER_PROTOCOL', 'HTTP/1.0')]: if not environ.has_key(name): environ['wsgi.errors'].write('%s: missing FastCGI param %s ' 'required by WSGI!\n' % (self.__class__.__name__, name)) environ[name] = default if __name__ == '__main__': def test_app(environ, start_response): """Probably not the most efficient example.""" import cgi start_response('200 OK', [('Content-Type', 'text/html')]) yield '<html><head><title>Hello World!</title></head>\n' \ '<body>\n' \ '<p>Hello World!</p>\n' \ '<table border="1">' names = environ.keys() names.sort() for name in names: yield '<tr><td>%s</td><td>%s</td></tr>\n' % ( name, cgi.escape(`environ[name]`)) form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ, keep_blank_values=1) if form.list: yield '<tr><th colspan="2">Form data</th></tr>' for field in form.list: yield '<tr><td>%s</td><td>%s</td></tr>\n' % ( field.name, field.value) yield '</table>\n' \ '</body></html>\n' WSGIServer(test_app).run()
Python
#!/usr/bin/env python
# coding: utf8

"""
   RPX Authentication for web2py
   Developed by Nathan Freeze (Copyright © 2009)
   Email <nathan@freezable.com>
   Modified by Massimo Di Pierro

   This file contains code to allow using RPXNow.com (now Jainrain.com)
   services with web2py
"""

import re
import urllib

from gluon.html import *
# BUG FIX: get_user() calls redirect() on login failure but it was never
# brought into scope; it lives in gluon.http (already an in-project import).
from gluon.http import redirect
from gluon.tools import fetch
from gluon.storage import Storage
import gluon.contrib.simplejson as json

class RPXAccount(object):
    """
    from gluon.contrib.login_methods.rpx_account import RPXAccount
    auth.settings.actions_disabled=['register','change_password','request_reset_password']
    auth.settings.login_form = RPXAccount(request,
              api_key="...",
              domain="...",
              url = "http://localhost:8000/%s/default/user/login" % request.application)
    """

    def __init__(self,
                 request,
                 api_key = "",
                 domain = "",
                 url = "",
                 embed = True,
                 auth_url = "https://rpxnow.com/api/v2/auth_info",
                 language = "en",
                 prompt = 'rpx',
                 on_login_failure = None,
                 ):
        """
        :param request: the current request object
        :param api_key: the API key issued by rpxnow.com
        :param domain: the rpxnow.com application (sub)domain
        :param url: the token callback url of the login action
        :param embed: render the sign-in widget in an iframe when True,
            otherwise use the javascript overlay widget
        :param auth_url: rpxnow endpoint that resolves a token into a profile
        :param language: language preference passed to the widget
        :param on_login_failure: optional URL to redirect to when rpxnow
            reports a failed authentication
        """
        self.request = request
        self.api_key = api_key
        self.embed = embed
        self.auth_url = auth_url
        self.domain = domain
        self.token_url = url
        self.language = language
        self.profile = None
        self.prompt = prompt
        self.on_login_failure = on_login_failure
        self.mappings = Storage()

        # BUG FIX: the "name" sub-dict used to default to "" (a str), so a
        # profile without a "name" key crashed with AttributeError on .get();
        # default to {} instead.
        self.mappings.Facebook = lambda profile:\
            dict(registration_id = profile.get("identifier",""),
                 username = profile.get("preferredUsername",""),
                 email = profile.get("email",""),
                 first_name = profile.get("name",{}).get("givenName",""),
                 last_name = profile.get("name",{}).get("familyName",""))
        self.mappings.Google = lambda profile:\
            dict(registration_id=profile.get("identifier",""),
                 username=profile.get("preferredUsername",""),
                 email=profile.get("email",""),
                 first_name=profile.get("name",{}).get("givenName",""),
                 last_name=profile.get("name",{}).get("familyName",""))
        self.mappings.default = lambda profile:\
            dict(registration_id=profile.get("identifier",""),
                 username=profile.get("preferredUsername",""),
                 email=profile.get("email",""),
                 first_name=profile.get("preferredUsername",""),
                 last_name='')

    def get_user(self):
        """
        Resolve the rpxnow token posted with the request into a user dict
        via the auth_info API. Returns None when no token is present or
        authentication failed (after an optional failure redirect).
        """
        request = self.request
        if request.vars.token:
            user = Storage()
            data = urllib.urlencode(dict(apiKey=self.api_key,
                                         token=request.vars.token))
            auth_info_json = fetch(self.auth_url+'?'+data)
            auth_info = json.loads(auth_info_json)
            if auth_info['stat'] == 'ok':
                self.profile = auth_info['profile']
                provider = re.sub(r'[^\w\-]', '', self.profile['providerName'])
                user = self.mappings.get(provider,
                                         self.mappings.default)(self.profile)
                return user
            elif self.on_login_failure:
                redirect(self.on_login_failure)
        return None

    def login_form(self):
        """Build the rpxnow sign-in widget (iframe or javascript overlay)."""
        request = self.request
        args = request.args
        if self.embed:
            JANRAIN_URL = \
                "https://%s.rpxnow.com/openid/embed?token_url=%s&language_preference=%s"
            rpxform = IFRAME(_src=JANRAIN_URL % (self.domain,
                                                 self.token_url,
                                                 self.language),
                             _scrolling="no",
                             _frameborder="no",
                             _style="width:400px;height:240px;")
        else:
            JANRAIN_URL = \
                "https://%s.rpxnow.com/openid/v2/signin?token_url=%s"
            rpxform = DIV(SCRIPT(_src="https://rpxnow.com/openid/v2/widget",
                                 _type="text/javascript"),
                          SCRIPT("RPXNOW.overlay = true;",
                                 "RPXNOW.language_preference = '%s';" % self.language,
                                 "RPXNOW.realm = '%s';" % self.domain,
                                 "RPXNOW.token_url = '%s';" % self.token_url,
                                 "RPXNOW.show();",
                                 _type="text/javascript"))
        return rpxform
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
License: GPL v2

Thanks to Hans Donner <hans.donner@pobox.com> for GaeGoogleAccount.
"""

from google.appengine.api import users


class GaeGoogleAccount(object):
    """
    Delegates login to Google App Engine's built-in Users service
    instead of web2py's own login form.

    Include in your model (eg db.py)::

        from gluon.contrib.login_methods.gae_google_account import \\
            GaeGoogleAccount
        auth.settings.login_form=GaeGoogleAccount()
    """

    def login_url(self, next="/"):
        """URL that sends the visitor to Google's sign-in page."""
        return users.create_login_url(next)

    def logout_url(self, next="/"):
        """URL that signs the visitor out of their Google account."""
        return users.create_logout_url(next)

    def get_user(self):
        """Return a user dict for the signed-in Google user, else None."""
        current = users.get_current_user()
        if not current:
            return None
        return dict(nickname=current.nickname(),
                    email=current.email(),
                    user_id=current.user_id(),
                    source="google account")
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
License: GPL v2

Thanks to Hans Donner <hans.donner@pobox.com> for GaeGoogleAccount.
"""

from gluon.http import HTTP

try:
    import linkedin
except ImportError:
    raise HTTP(400,"linkedin module not found")


class LinkedInAccount(object):
    """
    Login via the LinkedIn OAuth API instead of web2py's own login form.

    Include in your model (eg db.py)::

        from gluon.contrib.login_methods.linkedin_account import LinkedInAccount
        auth.settings.login_form=LinkedInAccount(request,KEY,SECRET,RETURN_URL)
    """

    def __init__(self, request, key, secret, return_url):
        self.request = request
        self.api = linkedin.LinkedIn(key, secret, return_url)
        # BUG FIX: dropped the dead `result =` alias that shadowed nothing
        # and was never used.
        self.token = self.api.requestToken()

    def login_url(self, next="/"):
        return self.api.getAuthorizeURL(self.token)

    def logout_url(self, next="/"):
        return ''

    def get_user(self):
        """Exchange the OAuth verifier for a profile; None when absent."""
        result = self.request.vars.verifier and \
            self.api.accessToken(verifier=self.request.vars.verifier)
        if not result:
            return None
        # BUG FIX: this method used to overwrite `profile` with a hard-coded
        # public_url string ("http://www.linkedin.com/in/ozgurv") and then
        # crashed with AttributeError reading .first_name off a str.
        profile = self.api.GetProfile()
        return dict(first_name=profile.first_name,
                    last_name=profile.last_name,
                    username=profile.id)
Python
import sys
import logging

# python-ldap is a hard requirement; OPT_REFERRALS=0 stops Active
# Directory from chasing referrals (which breaks anonymous rebinds).
try:
    import ldap
    ldap.set_option(ldap.OPT_REFERRALS, 0)
except Exception, e:
    logging.error('missing ldap, try "easy_install python-ldap"')
    raise e


def ldap_auth(server='ldap', port=None,
              base_dn='ou=users,dc=domain,dc=com',
              mode='uid', secure=False, cert_path=None,
              bind_dn=None, bind_pw=None,
              filterstr='objectClass=*'):
    """
    Factory returning a (username, password) -> bool callable suitable
    for auth.settings.login_methods.

    to use ldap login with MS Active Directory::

        from gluon.contrib.login_methods.ldap_auth import ldap_auth
        auth.settings.login_methods.append(ldap_auth(
            mode='ad', server='my.domain.controller',
            base_dn='ou=Users,dc=domain,dc=com'))

    to use ldap login with Notes Domino::

        auth.settings.login_methods.append(ldap_auth(
            mode='domino',server='my.domino.server'))

    to use ldap login with OpenLDAP::

        auth.settings.login_methods.append(ldap_auth(
            server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com'))

    to use ldap login with OpenLDAP and subtree search and (optionally)
    multiple DNs:

        auth.settings.login_methods.append(ldap_auth(
            mode='uid_r', server='my.ldap.server',
            base_dn=['ou=Users,dc=domain,dc=com','ou=Staff,dc=domain,dc=com']))

    or (if using CN)::

        auth.settings.login_methods.append(ldap_auth(
            mode='cn', server='my.ldap.server',
            base_dn='ou=Users,dc=domain,dc=com'))

    If using secure ldaps:// pass secure=True and cert_path="..."

    If you need to bind to the directory with an admin account in order
    to search it then specify bind_dn & bind_pw to use for this.
    - currently only implemented for Active Directory

    If you need to restrict the set of allowed users (e.g. to members of
    a department) then specify a rfc4515 search filter string.
    - currently only implemented for mode in ['ad', 'company', 'uid_r']
    """

    def ldap_auth_aux(username,
                      password,
                      ldap_server=server,
                      ldap_port=port,
                      ldap_basedn=base_dn,
                      ldap_mode=mode,
                      ldap_binddn=bind_dn,
                      ldap_bindpw=bind_pw,
                      secure=secure,
                      cert_path=cert_path,
                      filterstr=filterstr):
        # Returns True on successful bind, False on any LDAP error (all
        # errors are deliberately swallowed and reported as auth failure).
        try:
            if secure:
                # ldaps:// on 636 unless an explicit port was given
                if not ldap_port:
                    ldap_port = 636
                con = ldap.initialize(
                    "ldaps://" + ldap_server + ":" + str(ldap_port))
                if cert_path:
                    con.set_option(ldap.OPT_X_TLS_CACERTDIR, cert_path)
            else:
                if not ldap_port:
                    ldap_port = 389
                con = ldap.initialize(
                    "ldap://" + ldap_server + ":" + str(ldap_port))
            if ldap_mode == 'ad':
                # Microsoft Active Directory
                if '@' not in username:
                    # derive the UPN suffix (user@domain.tld) from the
                    # DC= components of the base DN
                    domain = []
                    for x in ldap_basedn.split(','):
                        if "DC=" in x.upper():
                            domain.append(x.split('=')[-1])
                    username = "%s@%s" % (username, '.'.join(domain))
                username_bare = username.split("@")[0]
                con.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
                if ldap_binddn:
                    # need to search directory with an admin account 1st
                    con.simple_bind_s(ldap_binddn, ldap_bindpw)
                else:
                    # credentials should be in the form of username@domain.tld
                    con.simple_bind_s(username, password)
                # this will throw an index error if the account is not found
                # in the ldap_basedn (caught below as login failure)
                result = con.search_ext_s(
                    ldap_basedn, ldap.SCOPE_SUBTREE,
                    "(&(sAMAccountName=%s)(%s))" % (username_bare, filterstr),
                    ["sAMAccountName"])[0][1]
                if ldap_binddn:
                    # We know the user exists & is in the correct OU
                    # so now we just check the password
                    con.simple_bind_s(username, password)

            if ldap_mode == 'domino':
                # Notes Domino binds with the bare short name
                if "@" in username:
                    username = username.split("@")[0]
                con.simple_bind_s(username, password)
            if ldap_mode == 'cn':
                # OpenLDAP (CN)
                dn = "cn=" + username + "," + ldap_basedn
                con.simple_bind_s(dn, password)
            if ldap_mode == 'uid':
                # OpenLDAP (UID)
                dn = "uid=" + username + "," + ldap_basedn
                con.simple_bind_s(dn, password)
            if ldap_mode == 'company':
                # no DNs or password needed to search directory
                dn = ""
                pw = ""
                # bind anonymously
                con.simple_bind_s(dn, pw)
                # search by e-mail address
                # (NOTE: `filter` shadows the builtin of the same name)
                filter = '(&(mail=' + username + ')(' + filterstr + '))'
                # find the uid
                attrs = ['uid']
                # perform the actual search
                company_search_result = con.search_s(ldap_basedn,
                                                     ldap.SCOPE_SUBTREE,
                                                     filter, attrs)
                dn = company_search_result[0][0]
                # perform the real authentication test
                con.simple_bind_s(dn, password)
            if ldap_mode == 'uid_r':
                # OpenLDAP (UID) with subtree search and multiple DNs
                if type(ldap_basedn) == type([]):
                    basedns = ldap_basedn
                else:
                    basedns = [ldap_basedn]
                filter = '(&(uid=%s)(%s))' % (username, filterstr)
                for basedn in basedns:
                    try:
                        result = con.search_s(basedn, ldap.SCOPE_SUBTREE,
                                              filter)
                        if result:
                            user_dn = result[0][0]
                            # Check the password
                            con.simple_bind_s(user_dn, password)
                            con.unbind()
                            return True
                    except ldap.LDAPError, detail:
                        # log and keep trying the remaining base DNs
                        (exc_type, exc_value) = sys.exc_info()[:2]
                        sys.stderr.write(
                            "ldap_auth: searching %s for %s resulted in %s: %s\n" %
                            (basedn, filter, exc_type, exc_value))
                return False
            con.unbind()
            return True
        except ldap.LDAPError, e:
            # any bind/search failure means authentication failed
            return False
        except IndexError, ex:  # for AD membership test
            return False

    # rfc4515 filters may be passed with surrounding parens; strip them
    # here because every use above adds its own parens back.
    if filterstr[0] == '(' and filterstr[-1] == ')':  # rfc4515 syntax
        filterstr = filterstr[1:-1]  # parens added again where used
    return ldap_auth_aux
Python
#!/usr/bin/env python
# coding: utf8

"""
OpenID authentication for web2py

Allowed using OpenID login together with web2py built-in login.

By default, to support OpenID login, put this in your db.py

>>> from gluon.contrib.login_methods.openid_auth import OpenIDAuth
>>> auth.settings.login_form = OpenIDAuth(auth)

To show OpenID list in user profile, you can add the following code
before the end of function user() of your_app/controllers/default.py

+    if (request.args and request.args(0) == "profile"):
+        form = DIV(form, openid_login_form.list_user_openids())
    return dict(form=form, login_form=login_form,
                register_form=register_form,
                self_registration=self_registration)

More detail in the description of the class OpenIDAuth.

Requirements:
    python-openid version 2.2.5 or later

Reference:
    * w2p openID
      http://w2popenid.appspot.com/init/default/wiki/w2popenid
    * RPX and web2py auth module
      http://www.web2pyslices.com/main/slices/take_slice/28
    * built-in file: gluon/contrib/login_methods/rpx_account.py
    * built-in file: gluon/tools.py (Auth class)
"""

import time
from datetime import datetime, timedelta

from gluon import *
from gluon.storage import Storage, Messages

# python-openid is a hard requirement for this login method.
try:
    import openid.consumer.consumer
    from openid.association import Association
    from openid.store.interface import OpenIDStore
    from openid.extensions.sreg import SRegRequest, SRegResponse
    from openid.store import nonce
    from openid.consumer.discover import DiscoveryFailure
except ImportError, err:
    raise ImportError("OpenIDAuth requires python-openid package")

DEFAULT = lambda: None


class OpenIDAuth(object):
    """
    OpenIDAuth

    It supports the logout_url, implementing the get_user and login_form
    for cas usage of gluon.tools.Auth.

    It also uses the ExtendedLoginForm to allow the OpenIDAuth login_methods
    combined with the standard logon/register procedure.

    It uses OpenID Consumer when render the form and begins the OpenID
    authentication.

    Example: (put these code after auth.define_tables() in your models.)

    auth = Auth(globals(), db)            # authentication/authorization
    ...
    auth.define_tables()                  # creates all needed tables
    ...

    #include in your model after auth has been defined
    from gluon.contrib.login_methods.openid_auth import OpenIDAuth
    openid_login_form = OpenIDAuth(request, auth, db)

    from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm
    extended_login_form = ExtendedLoginForm(request, auth, openid_login_form,
                                            signals=['oid','janrain_nonce'])

    auth.settings.login_form = extended_login_form
    """
    # NOTE(review): the docstring example calls OpenIDAuth(request, auth, db)
    # but __init__ only accepts (auth) -- the example looks stale; confirm.

    def __init__(self, auth):
        self.auth = auth
        self.db = auth.db
        request = current.request
        self.nextvar = '_next'
        # realm/return_to are derived from the current host; http only.
        self.realm = 'http://%s' % request.env.http_host
        self.login_url = URL(r=request, f='user', args=['login'])
        self.return_to_url = self.realm + self.login_url

        self.table_alt_logins_name = "alt_logins"
        if not auth.settings.table_user:
            # NOTE(review): bare `raise` with no active exception is itself
            # an error (TypeError in py2); probably meant a descriptive one.
            raise
        self.table_user = self.auth.settings.table_user
        self.openid_expiration = 15  # minutes

        self.messages = self._define_messages()

        # create the alt_logins table on first use
        if not self.table_alt_logins_name in self.db.tables:
            self._define_alt_login_table()

    def _define_messages(self):
        # All user-visible strings, translatable via current.T.
        messages = Messages(current.T)
        messages.label_alt_login_username = 'Sign-in with OpenID: '
        messages.label_add_alt_login_username = 'Add a new OpenID: '
        messages.submit_button = 'Sign in'
        messages.submit_button_add = 'Add'
        messages.a_delete = 'Delete'
        messages.comment_openid_signin = 'What is OpenID?'
        messages.comment_openid_help_title = 'Start using your OpenID'
        messages.comment_openid_help_url = 'http://openid.net/get-an-openid/start-using-your-openid/'
        messages.openid_fail_discover = 'Failed to discover OpenID service. Check your OpenID or "More about OpenID"?'
        messages.flash_openid_expired = 'OpenID expired. Please login or authenticate OpenID again. Sorry for the inconvenient.'
        messages.flash_openid_associated = 'OpenID associated'
        messages.flash_associate_openid = 'Please login or register an account for this OpenID.'
        messages.p_openid_not_registered = "This Open ID haven't be registered. " \
            + "Please login to associate with it or register an account for it."
        messages.flash_openid_authenticated = 'OpenID authenticated successfully.'
        messages.flash_openid_fail_authentication = 'OpenID authentication failed. (Error message: %s)'
        messages.flash_openid_canceled = 'OpenID authentication canceled by user.'
        messages.flash_openid_need_setup = 'OpenID authentication needs to be setup by the user with the provider first.'
        messages.h_openid_login = 'OpenID Login'
        messages.h_openid_list = 'OpenID List'
        return messages

    def _define_alt_login_table(self):
        """
        Define the OpenID login table.
        Note: 'type' is project-specific (to also support 'facebook' and
        'plurk' alternate logins); otherwise it is always 'openid' and you
        may not need it.  To drop it, remove the field and the
        "db.alt_logins.type == type_" condition in _find_matched_openid.
        """
        db = self.db
        table = db.define_table(
            self.table_alt_logins_name,
            Field('username', length=512, default=''),
            Field('type', length=128, default='openid', readable=False),
            Field('user', self.table_user, readable=False),
            )
        table.username.requires = IS_NOT_IN_DB(db, table.username)
        self.table_alt_logins = table

    def logout_url(self, next):
        """ Delete the w2popenid record in session as logout """
        if current.session.w2popenid:
            del(current.session.w2popenid)
        return next

    def login_form(self):
        """
        Start to process the OpenID response if 'janrain_nonce' in request
        parameters and not processed yet. Else return the OpenID form for
        login.
        """
        request = current.request
        if request.vars.has_key('janrain_nonce') and not self._processed():
            self._process_response()
            return self.auth()
        return self._form()

    def get_user(self):
        """
        It supports the logout_url, implementing the get_user and login_form
        for cas usage of gluon.tools.Auth.

        Returns a user dict on successful OpenID login, True to let the
        logout flow proceed, or None while authentication is in progress.
        """
        request = current.request
        args = request.args

        if args[0] == 'logout':
            return True  # Let logout_url got called

        if current.session.w2popenid:
            w2popenid = current.session.w2popenid
            db = self.db
            if (w2popenid.ok is True and w2popenid.oid):  # OpenID authenticated
                # expired sessions must re-authenticate
                if self._w2popenid_expired(w2popenid):
                    del(current.session.w2popenid)
                    flash = self.messages.flash_openid_expired
                    current.session.warning = flash
                    redirect(self.auth.settings.login_url)

                oid = self._remove_protocol(w2popenid.oid)
                alt_login = self._find_matched_openid(db, oid)

                nextvar = self.nextvar
                # This OpenID not in the database. If user logged in then add it
                # into database, else ask user to login or register.
                if not alt_login:
                    if self.auth.is_logged_in():
                        # TODO: ask first maybe
                        self._associate_user_openid(self.auth.user, oid)
                        if current.session.w2popenid:
                            del(current.session.w2popenid)
                        current.session.flash = self.messages.flash_openid_associated
                        if request.vars.has_key(nextvar):
                            redirect(request.vars[nextvar])
                        redirect(self.auth.settings.login_next)

                    if not request.vars.has_key(nextvar):
                        # no next var, add it and do login again
                        # so if user login or register can go back here
                        # to associate the OpenID
                        redirect(URL(r=request, args=['login'],
                                     vars={nextvar: self.login_url}))
                    self.login_form = self._form_with_notification()
                    current.session.flash = self.messages.flash_associate_openid
                    return None  # need to login or register to associate this openid

                # Get existed OpenID user
                user = db(self.table_user.id == alt_login.user).select().first()
                if user:
                    if current.session.w2popenid:
                        del(current.session.w2popenid)
                    # web2py auth keys the user on 'username' if present,
                    # otherwise on 'email'
                    if 'username' in self.table_user.fields():
                        username = 'username'
                    elif 'email' in self.table_user.fields():
                        username = 'email'
                    return {username: user[username]} if user else None  # login success (almost)

        return None  # just start to login

    def _find_matched_openid(self, db, oid, type_='openid'):
        """ Get the alt_logins record matching the given OpenID, or None. """
        query = ((db.alt_logins.username == oid) & (db.alt_logins.type == type_))
        alt_login = db(query).select().first()  # Get the OpenID record
        return alt_login

    def _associate_user_openid(self, user, oid):
        """ Associate the user logged in with given OpenID """
        # print "[DB] %s authenticated" % oid
        self.db.alt_logins.insert(username=oid, user=user.id)

    def _form_with_notification(self):
        """ Render the form for normal login with a notice of OpenID authenticated """
        form = DIV()
        # TODO: check when will happen
        if self.auth.settings.login_form in (self.auth, self):
            self.auth.settings.login_form = self.auth
            form = DIV(self.auth())

        register_note = DIV(P(self.messages.p_openid_not_registered))
        form.components.append(register_note)
        # login_form() expects a callable; close over the built form
        return lambda: form

    def _remove_protocol(self, oid):
        """ Remove https:// or http:// from oid url """
        protocol = 'https://'
        if oid.startswith(protocol):
            oid = oid[len(protocol):]
            return oid
        protocol = 'http://'
        if oid.startswith(protocol):
            oid = oid[len(protocol):]
            return oid
        return oid

    def _init_consumerhelper(self):
        """ Initialize the ConsumerHelper (lazily, once per instance) """
        if not hasattr(self, "consumerhelper"):
            self.consumerhelper = ConsumerHelper(current.session,
                                                 self.db)
        return self.consumerhelper

    def _form(self, style=None):
        # NOTE(review): `style` is passed POSITIONALLY to _login_form, whose
        # first parameter is openid_field_label -- looks like a bug; confirm
        # whether style= was intended.
        form = DIV(H3(self.messages.h_openid_login), self._login_form(style))
        return form

    def _login_form(self,
                    openid_field_label=None,
                    submit_button=None,
                    _next=None,
                    style=None):
        """ Render the form for OpenID login """
        def warning_openid_fail(session):
            session.warning = messages.openid_fail_discover

        # default inline CSS showing the OpenID logo inside the input box
        style = style or """
background-attachment: scroll;
background-repeat: no-repeat;
background-image: url("http://wiki.openid.net/f/openid-16x16.gif");
background-position: 0% 50%;
background-color: transparent;
padding-left: 18px;
width: 400px;
"""
        style = style.replace("\n", "")

        request = current.request
        session = current.session
        messages = self.messages
        hidden_next_input = ""
        if _next == 'profile':
            profile_url = URL(r=request, f='user', args=['profile'])
            hidden_next_input = INPUT(_type="hidden", _name="_next",
                                      _value=profile_url)
        form = FORM(openid_field_label or self.messages.label_alt_login_username,
                    INPUT(_type="input", _name="oid",
                          requires=IS_NOT_EMPTY(
                              error_message=messages.openid_fail_discover),
                          _style=style),
                    hidden_next_input,
                    INPUT(_type="submit",
                          _value=submit_button or messages.submit_button),
                    " ",
                    A(messages.comment_openid_signin,
                      _href=messages.comment_openid_help_url,
                      _title=messages.comment_openid_help_title,
                      _class='openid-identifier',
                      _target="_blank"),
                    _action=self.login_url
                    )
        if form.accepts(request.vars, session):
            oid = request.vars.oid
            consumerhelper = self._init_consumerhelper()
            url = self.login_url
            return_to_url = self.return_to_url
            if not oid:
                warning_openid_fail(session)
                redirect(url)
            try:
                # carry _next through the provider round-trip
                if request.vars.has_key('_next'):
                    return_to_url = self.return_to_url + '?_next=' + request.vars._next
                url = consumerhelper.begin(oid, self.realm, return_to_url)
            except DiscoveryFailure:
                warning_openid_fail(session)
            redirect(url)
        return form

    def _processed(self):
        """
        Check if w2popenid authentication is processed.
        Return True if processed else False.
        """
        processed = (hasattr(current.session, 'w2popenid') and
                     current.session.w2popenid.ok is True)
        return processed

    def _set_w2popenid_expiration(self, w2popenid):
        """ Set expiration for OpenID authentication. """
        w2popenid.expiration = datetime.now() + timedelta(minutes=self.openid_expiration)

    def _w2popenid_expired(self, w2popenid):
        """
        Check if w2popenid authentication is expired.
        Return True if expired else False.
        """
        return (not w2popenid.expiration) or (datetime.now() > w2popenid.expiration)

    def _process_response(self):
        """
        Process the OpenID response by ConsumerHelper and translate the
        outcome into session flash/warning messages.
        """
        request = current.request
        request_vars = request.vars
        consumerhelper = self._init_consumerhelper()
        process_status = consumerhelper.process_response(request_vars,
                                                         self.return_to_url)
        if process_status == "success":
            w2popenid = current.session.w2popenid
            user_data = self.consumerhelper.sreg()
            current.session.w2popenid.ok = True
            self._set_w2popenid_expiration(w2popenid)
            w2popenid.user_data = user_data
            current.session.flash = self.messages.flash_openid_authenticated
        elif process_status == "failure":
            flash = self.messages.flash_openid_fail_authentication % consumerhelper.error_message
            current.session.warning = flash
        elif process_status == "cancel":
            current.session.warning = self.messages.flash_openid_canceled
        elif process_status == "setup_needed":
            current.session.warning = self.messages.flash_openid_need_setup

    def list_user_openids(self):
        # Renders the current user's registered OpenIDs with delete links,
        # plus a form to add another one.
        messages = self.messages
        request = current.request
        if request.vars.has_key('delete_openid'):
            self.remove_openid(request.vars.delete_openid)

        query = self.db.alt_logins.user == self.auth.user.id
        alt_logins = self.db(query).select()
        l = []
        for alt_login in alt_logins:
            username = alt_login.username
            delete_href = URL(r=request, f='user', args=['profile'],
                              vars={'delete_openid': username})
            delete_link = A(messages.a_delete, _href=delete_href)
            l.append(LI(username, " ", delete_link))

        profile_url = URL(r=request, f='user', args=['profile'])
        #return_to_url = self.return_to_url + '?' + self.nextvar + '=' + profile_url
        openid_list = DIV(H3(messages.h_openid_list), UL(l),
                          self._login_form(
                              _next='profile',
                              submit_button=messages.submit_button_add,
                              openid_field_label=messages.label_add_alt_login_username)
                          )
        return openid_list

    def remove_openid(self, openid):
        # Delete the alt_logins row for this OpenID (any user).
        query = self.db.alt_logins.username == openid
        self.db(query).delete()


class ConsumerHelper(object):
    """
    Thin wrapper around python-openid's Consumer, bound to the web2py
    session and the Web2pyStore.
    """

    def __init__(self, session, db):
        self.session = session
        store = self._init_store(db)
        self.consumer = openid.consumer.consumer.Consumer(session, store)

    def _init_store(self, db):
        """ Initialize Web2pyStore (lazily) """
        if not hasattr(self, "store"):
            store = Web2pyStore(db)
            session = self.session
            if not session.has_key('w2popenid'):
                session.w2popenid = Storage()
            self.store = store
        return self.store

    def begin(self, oid, realm, return_to_url):
        """ Begin the OpenID authentication; returns the provider redirect URL """
        w2popenid = self.session.w2popenid
        w2popenid.oid = oid
        auth_req = self.consumer.begin(oid)
        # ask the provider for email and nickname via Simple Registration
        auth_req.addExtension(SRegRequest(required=['email', 'nickname']))
        url = auth_req.redirectURL(return_to=return_to_url, realm=realm)
        return url

    def process_response(self, request_vars, return_to_url):
        """
        Complete the OpenID handshake; returns one of "success",
        "failure", "cancel", "setup_needed" or "no resp".
        """
        resp = self.consumer.complete(request_vars, return_to_url)
        if resp:
            if resp.status == openid.consumer.consumer.SUCCESS:
                self.resp = resp
                if hasattr(resp, "identity_url"):
                    self.session.w2popenid.oid = resp.identity_url
                return "success"
            if resp.status == openid.consumer.consumer.FAILURE:
                self.error_message = resp.message
                return "failure"
            if resp.status == openid.consumer.consumer.CANCEL:
                return "cancel"
            if resp.status == openid.consumer.consumer.SETUP_NEEDED:
                return "setup_needed"
        return "no resp"

    def sreg(self):
        """
        Try to get OpenID Simple Registation data from the last response
        http://openid.net/specs/openid-simple-registration-extension-1_0.html
        """
        if self.resp:
            resp = self.resp
            sreg_resp = SRegResponse.fromSuccessResponse(resp)
            return sreg_resp.data if sreg_resp else None
        else:
            return None


class Web2pyStore(OpenIDStore):
    """
    Web2pyStore

    This class implements the OpenIDStore interface. OpenID stores take care
    of persisting nonces and associations. The Janrain Python OpenID library
    comes with implementations for file and memory storage. Web2pyStore uses
    the web2py db abstration layer. See the source code docs of OpenIDStore
    for a comprehensive description of this interface.
    """

    def __init__(self, database):
        self.database = database
        self.table_oid_associations_name = 'oid_associations'
        self.table_oid_nonces_name = 'oid_nonces'
        self._initDB()

    def _initDB(self):
        # create backing tables on first use
        if self.table_oid_associations_name not in self.database:
            self.database.define_table(self.table_oid_associations_name,
                                       Field('server_url', 'string', length=2047, required=True),
                                       Field('handle', 'string', length=255, required=True),
                                       Field('secret', 'blob', required=True),
                                       Field('issued', 'integer', required=True),
                                       Field('lifetime', 'integer', required=True),
                                       Field('assoc_type', 'string', length=64, required=True)
                                       )
        if self.table_oid_nonces_name not in self.database:
            self.database.define_table(self.table_oid_nonces_name,
                                       Field('server_url', 'string', length=2047, required=True),
                                       Field('timestamp', 'integer', required=True),
                                       Field('salt', 'string', length=40, required=True)
                                       )

    def storeAssociation(self, server_url, association):
        """
        Store associations. If there already is one with the same
        server_url and handle in the table replace it.
        """
        db = self.database
        query = (db.oid_associations.server_url == server_url) & (db.oid_associations.handle == association.handle)
        db(query).delete()
        # NOTE(review): the trailing ", 'insert '*10" makes this expression a
        # discarded tuple -- harmless leftover debug code.
        db.oid_associations.insert(server_url=server_url,
                                   handle=association.handle,
                                   secret=association.secret,
                                   issued=association.issued,
                                   lifetime=association.lifetime,
                                   assoc_type=association.assoc_type), 'insert ' * 10

    def getAssociation(self, server_url, handle=None):
        """
        Return the most recent unexpired association for server_url
        (restricted to the given handle when handle is not None).
        Return None if no association can be found.
        """
        db = self.database
        query = (db.oid_associations.server_url == server_url)
        if handle:
            query &= (db.oid_associations.handle == handle)
        rows = db(query).select(orderby=db.oid_associations.issued)
        keep_assoc, _ = self._removeExpiredAssocations(rows)
        if len(keep_assoc) == 0:
            return None
        else:
            assoc = keep_assoc.pop()  # pop the last one as it should be the latest one
            return Association(assoc['handle'],
                               assoc['secret'],
                               assoc['issued'],
                               assoc['lifetime'],
                               assoc['assoc_type'])

    def removeAssociation(self, server_url, handle):
        db = self.database
        query = (db.oid_associations.server_url == server_url) & (db.oid_associations.handle == handle)
        # delete() returns a row count, so this is always True
        return db(query).delete() != None

    def useNonce(self, server_url, timestamp, salt):
        """
        This method returns False if a nonce has been used before or its
        timestamp is not current.
        """
        db = self.database
        if abs(timestamp - time.time()) > nonce.SKEW:
            return False
        query = (db.oid_nonces.server_url == server_url) & (db.oid_nonces.timestamp == timestamp) & (db.oid_nonces.salt == salt)
        if db(query).count() > 0:
            return False
        else:
            db.oid_nonces.insert(server_url=server_url,
                                 timestamp=timestamp,
                                 salt=salt)
            return True

    def _removeExpiredAssocations(self, rows):
        """
        This helper function is not part of the interface. Given a list of
        association rows it checks which associations have expired and
        deletes them from the db. It returns a tuple of the form
        ([valid_assoc], no_of_expired_assoc_deleted).
        """
        db = self.database
        keep_assoc = []
        remove_assoc = []
        t1970 = time.time()
        for r in rows:
            if r['issued'] + r['lifetime'] < t1970:  # expired?
                remove_assoc.append(r)
            else:
                keep_assoc.append(r)
        for r in remove_assoc:
            del db.oid_associations[r['id']]
        return (keep_assoc, len(remove_assoc))  # return tuple (list of valid associations, number of deleted associations)

    def cleanupNonces(self):
        """
        Remove expired nonce entries from DB and return the number
        of entries deleted.
        """
        db = self.database
        query = (db.oid_nonces.timestamp < time.time() - nonce.SKEW)
        return db(query).delete()

    def cleanupAssociations(self):
        """
        Remove expired associations from db and return the number
        of entries deleted.
        """
        db = self.database
        query = (db.oid_associations.id > 0)
        return self._removeExpiredAssocations(db(query).select())[1]  # return number of assoc removed

    def cleanup(self):
        """
        This method should be run periodically to free the db from
        expired nonce and association entries.
        """
        return self.cleanupNonces(), self.cleanupAssociations()
Python
#!/usr/bin/env python
# coding: utf8

"""
ExtendedLoginForm is used to extend normal login form in web2py with one
more login method. So user can choose the built-in login or extended login
methods.
"""

from gluon.html import DIV


class ExtendedLoginForm(object):
    """
    Put extended_login_form under web2py/gluon/contrib/login_methods folder.
    Then inside your model where defines the auth:

        auth = Auth(globals(),db)          # authentication/authorization

    ...

        auth.define_tables()               # You might like to put the code
                                           # after auth.define_tables
                                           # if the alt_login_form deals
                                           # with tables of auth.

        alt_login_form = RPXAccount(request,
                                    api_key="...",
                                    domain="...",
                                    url = "http://localhost:8000/%s/default/user/login"
                                          % request.application)
        extended_login_form = ExtendedLoginForm(auth, alt_login_form,
                                                signals=['token'])

        auth.settings.login_form = extended_login_form

    Note:
        Since rpx_account doesn't create the password for the user, you
        might need to provide a way for user to create password to do
        normal login.
    """

    def __init__(self,
                 auth,
                 alt_login_form,
                 signals=None,
                 login_arg='login'
                 ):
        """
        :param auth: the gluon.tools.Auth instance to wrap.
        :param alt_login_form: object providing at least login_form(), and
            optionally get_user/login_url/logout_url.
        :param signals: request-variable names that route the request
            straight to alt_login_form.login_form().
        :param login_arg: kept for backward compatibility.
        """
        self.auth = auth
        self.alt_login_form = alt_login_form
        # BUGFIX: the default used to be a shared mutable list ([]);
        # use a fresh list per instance (same observable behavior).
        self.signals = signals if signals is not None else []
        self.login_arg = login_arg

    def get_user(self):
        """
        Delegate the get_user to alt_login_form.get_user.
        """
        if hasattr(self.alt_login_form, 'get_user'):
            return self.alt_login_form.get_user()
        return None  # let gluon.tools.Auth.get_or_create_user do the rest

    def login_url(self, next):
        """
        Optional implement for alt_login_form. In normal case, this should
        be replaced by get_user, and never get called.
        """
        if hasattr(self.alt_login_form, 'login_url'):
            return self.alt_login_form.login_url(next)
        return self.auth.settings.login_url

    def logout_url(self, next):
        """
        Optional implement for alt_login_form. Called if
        bool(alt_login_form.get_user) is True. If alt_login_form implemented
        logout_url function, it will return that function call.
        """
        if hasattr(self.alt_login_form, 'logout_url'):
            return self.alt_login_form.logout_url(next)
        return next

    def login_form(self):
        """
        Combine the auth() form with alt_login_form.

        If signals are set and a parameter in request matches any signals,
        it will return the call of alt_login_form.login_form instead,
        so alt_login_form can handle some particular situations, for
        example, multiple steps of OpenID login inside
        alt_login_form.login_form.

        Otherwise it will render the normal login form combined with
        alt_login_form.login_form.
        """
        request = self.auth.environment.request
        # `in` replaces the deprecated has_key(); generator avoids building
        # a throwaway list.
        if self.signals and any(signal in request.vars
                                for signal in self.signals):
            return self.alt_login_form.login_form()

        # Temporarily point login_form at auth itself so auth() renders the
        # plain built-in form, then restore self as the login form.
        self.auth.settings.login_form = self.auth
        form = DIV(self.auth())
        self.auth.settings.login_form = self

        form.components.append(self.alt_login_form.login_form())
        return form
Python
import urllib
import urllib2
import base64


def basic_auth(server="http://127.0.0.1"):
    """
    to use basic login with a different server
    from gluon.contrib.login_methods.basic_auth import basic_auth
    auth.settings.login_methods.append(basic_auth('http://server'))
    """

    def basic_login_aux(username, password, server=server):
        # RFC 2617 Basic auth: "user:pass", base64-encoded.
        credentials = base64.b64encode(username + ':' + password)
        auth_request = urllib2.Request(
            server, None, {'Authorization': 'Basic ' + credentials})
        try:
            urllib2.urlopen(auth_request)
        except (urllib2.URLError, urllib2.HTTPError):
            # connection refused, bad status, etc. -> failed login
            return False
        return True

    return basic_login_aux
Python
from gluon.contrib.pam import authenticate


def pam_auth():
    """
    to use pam_login:
    from gluon.contrib.login_methods.pam_auth import pam_auth
    auth.settings.login_methods.append(pam_auth())
    """

    def check_credentials(username, password):
        # Defer entirely to the OS PAM stack.
        return authenticate(username, password)

    return check_credentials
Python
import smtplib


def email_auth(server="smtp.gmail.com:587", domain="@gmail.com"):
    """
    Authenticate by logging in to an SMTP server with the e-mail
    credentials (STARTTLS is always used).

    to use email_login:
    from gluon.contrib.login_methods.email_auth import email_auth
    auth.settings.login_methods.append(email_auth("smtp.gmail.com:587",
                                                  "@gmail.com"))

    :param server: "host:port" of the SMTP server.
    :param domain: suffix (or list/tuple of suffixes) the e-mail address
        must end with; a falsy value disables the check.
    :returns: callable(email, password) -> bool for
        auth.settings.login_methods.
    """
    def email_auth_aux(email, password, server=server, domain=domain):
        # Reject addresses outside the allowed domain(s) without touching
        # the network.
        if domain:
            if not isinstance(domain, (list, tuple)):
                domain = [str(domain)]
            if not [d for d in domain if email[-len(d):] == d]:
                return False
        (host, port) = server.split(':')
        smtp = None
        try:
            smtp = smtplib.SMTP(host, port)
            smtp.ehlo()
            smtp.starttls()
            smtp.ehlo()
            smtp.login(email, password)
            return True
        except Exception:
            # Wrong credentials, unreachable server, TLS failure, ... all
            # count as a failed login.  (Was a bare "except:", which also
            # swallowed SystemExit/KeyboardInterrupt.)
            return False
        finally:
            if smtp:
                try:
                    smtp.quit()
                except Exception:
                    pass  # connection already gone; the result stands
    return email_auth_aux
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
   This file is part of web2py Web Framework (Copyrighted, 2007-2009).
   Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
   License: GPL v2

   Tinkered by Szabolcs Gyuris < szimszo n @ o regpreshaz dot eu>
"""

from gluon import current, redirect


class CasAuth(object):
    """
    Login will be done via Web2py's CAS application, instead of web2py's
    login form.

    Include in your model (eg db.py)::

        from gluon.contrib.login_methods.cas_auth import CasAuth
        auth.define_tables(username=True)
        auth.settings.login_form=CasAuth(
            urlbase = "https://[your CAS provider]/app/default/user/cas",
            actions=['login','validate','logout'])

    where urlbase is the actual CAS server url without the login,logout...

    Enjoy.

    ###UPDATE###

    if you want to connect to a CAS version 2 JASIG Server use this:
        auth.settings.login_form=CasAuth(
            urlbase = "https://[Your CAS server]/cas",
            actions = ['login','serviceValidate','logout'],
            casversion = 2,
            casusername = "cas:user")

    where casusername is the xml node returned by CAS server which contains
    user's username.
    """

    def __init__(self, g=None,  ### g for backward compatibility ###
                 urlbase="https://web2py.com/cas/cas",
                 actions=['login', 'check', 'logout'],
                 maps=dict(username=lambda v: v.get('username', v['user']),
                           email=lambda v: v.get('email', None),
                           user_id=lambda v: v['user']),
                 casversion=1,
                 casusername='cas:user'
                 ):
        # actions = [login-action, validate-action, logout-action] on the
        # CAS server; maps translates the CAS token dict into auth fields.
        self.urlbase = urlbase
        self.cas_login_url = "%s/%s" % (self.urlbase, actions[0])
        self.cas_check_url = "%s/%s" % (self.urlbase, actions[1])
        self.cas_logout_url = "%s/%s" % (self.urlbase, actions[2])
        self.maps = maps
        self.casversion = casversion
        self.casusername = casusername
        # prefer the X-Forwarded-Host header (reverse proxy) over Host
        http_host = current.request.env.http_x_forwarded_host
        if not http_host:
            http_host = current.request.env.http_host
        if current.request.env.wsgi_url_scheme in ['https', 'HTTPS']:
            scheme = 'https'
        else:
            scheme = 'http'
        # our own URL, passed to CAS as the "service" parameter
        self.cas_my_url = '%s://%s%s' % (
            scheme, http_host, current.request.env.path_info)

    def login_url(self, next="/"):
        # stores the CAS token in the session as a side effect
        current.session.token = self._CAS_login()
        return next

    def logout_url(self, next="/"):
        # clear local session first, then bounce via the CAS logout page
        current.session.token = None
        current.session.auth = None
        self._CAS_logout()
        return next

    def get_user(self):
        # Map the CAS token (dict) through self.maps into an auth user
        # dict; None when no token is in the session.
        user = current.session.token
        if user:
            d = {'source': 'web2py cas'}
            for key in self.maps:
                d[key] = self.maps[key](user)
            return d
        return None

    def _CAS_login(self):
        """
        exposed as CAS.login(request)
        returns a token on success, None on failed authentication
        """
        import urllib
        self.ticket = current.request.vars.ticket
        if not current.request.vars.ticket:
            # no ticket yet: send the browser to the CAS login page
            redirect("%s?service=%s" % (self.cas_login_url, self.cas_my_url))
        else:
            # validate the ticket against the CAS check/validate endpoint
            url = "%s?service=%s&ticket=%s" % (self.cas_check_url,
                                               self.cas_my_url, self.ticket)
            data = urllib.urlopen(url).read()
            if data.startswith('yes') or data.startswith('no'):
                # CAS v1 plain-text response: "yes\nuser[:email[:username]]"
                # NOTE(review): the +[None,None] padding only works when the
                # split yields 1 or 3 fields; a 2-field line would make the
                # unpack raise ValueError -- confirm server format.
                data = data.split('\n')
                if data[0] == 'yes':
                    a, b, c = data[1].split(':') + [None, None]
                    return dict(user=a, email=b, username=c)
                return None
            # CAS v2 XML response: collect all cas:* child nodes of
            # cas:authenticationSuccess; repeated keys become lists
            import xml.dom.minidom as dom
            import xml.parsers.expat as expat
            try:
                dxml = dom.parseString(data)
                envelop = dxml.getElementsByTagName(
                    "cas:authenticationSuccess")
                if len(envelop) > 0:
                    res = dict()
                    for x in envelop[0].childNodes:
                        if x.nodeName.startswith('cas:') and len(x.childNodes):
                            key = x.nodeName[4:].encode('utf8')
                            value = x.childNodes[0].nodeValue.encode('utf8')
                            if not key in res:
                                res[key] = value
                            else:
                                if not isinstance(res[key], list):
                                    res[key] = [res[key]]
                                res[key].append(value)
                    return res
            except expat.ExpatError:
                pass  # malformed XML -> treat as failed authentication
            return None  # fallback

    def _CAS_logout(self):
        """
        exposed CAS.logout()
        redirects to the CAS logout page
        """
        import urllib
        redirect("%s?service=%s" % (self.cas_logout_url, self.cas_my_url))
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Loginza.ru authentication for web2py Developed by Vladimir Dronnikov (Copyright © 2011) Email <dronnikov@gmail.com> """ import urllib from gluon.html import * from gluon.tools import fetch from gluon.storage import Storage import gluon.contrib.simplejson as json class Loginza(object): """ from gluon.contrib.login_methods.loginza import Loginza auth.settings.login_form = Loginza(request, url = "http://localhost:8000/%s/default/user/login" % request.application) """ def __init__(self, request, url = "", embed = True, auth_url = "http://loginza.ru/api/authinfo", language = "en", prompt = "loginza", on_login_failure = None, ): self.request = request self.token_url = url self.embed = embed self.auth_url = auth_url self.language = language self.prompt = prompt self.profile = None self.on_login_failure = on_login_failure self.mappings = Storage() # TODO: profile.photo is the URL to the picture # Howto download and store it locally? # FIXME: what if email is unique=True self.mappings["http://twitter.com/"] = lambda profile:\ dict(registration_id = profile.get("identity",""), username = profile.get("nickname",""), email = profile.get("email",""), last_name = profile.get("name","").get("full_name",""), #avatar = profile.get("photo",""), ) self.mappings["https://www.google.com/accounts/o8/ud"] = lambda profile:\ dict(registration_id = profile.get("identity",""), username = profile.get("name","").get("full_name",""), email = profile.get("email",""), first_name = profile.get("name","").get("first_name",""), last_name = profile.get("name","").get("last_name",""), #avatar = profile.get("photo",""), ) self.mappings["http://vkontakte.ru/"] = lambda profile:\ dict(registration_id=profile.get("identity",""), username = profile.get("name","").get("full_name",""), email = profile.get("email",""), first_name = profile.get("name","").get("first_name",""), last_name = profile.get("name","").get("last_name",""), #avatar = 
profile.get("photo",""), ) self.mappings.default = lambda profile:\ dict(registration_id = profile.get("identity",""), username = profile.get("name","").get("full_name"), email = profile.get("email",""), first_name = profile.get("name","").get("first_name",""), last_name = profile.get("name","").get("last_name",""), #avatar = profile.get("photo",""), ) def get_user(self): request = self.request if request.vars.token: user = Storage() data = urllib.urlencode(dict(token = request.vars.token)) auth_info_json = fetch(self.auth_url+'?'+data) #print auth_info_json auth_info = json.loads(auth_info_json) if auth_info["identity"] != None: self.profile = auth_info provider = self.profile["provider"] user = self.mappings.get(provider, self.mappings.default)(self.profile) #user["password"] = ??? #user["avatar"] = ??? return user elif self.on_login_failure: redirect(self.on_login_failure) return None def login_form(self): request = self.request args = request.args LOGINZA_URL = "https://loginza.ru/api/widget?lang=%s&token_url=%s&overlay=loginza" if self.embed: form = IFRAME(_src=LOGINZA_URL % (self.language, self.token_url), _scrolling="no", _frameborder="no", _style="width:359px;height:300px;") else: form = DIV(A(self.prompt, _href=LOGINZA_URL % (self.language, self.token_url), _class="loginza"), SCRIPT(_src="https://s3-eu-west-1.amazonaws.com/s1.loginza.ru/js/widget.js", _type="text/javascript")) return form
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Written by Michele Comitini <mcm@glisco.it> License: GPL v3 Adds support for OAuth 2.0 authentication to web2py. OAuth 2.0 Draft: http://tools.ietf.org/html/draft-ietf-oauth-v2-10 """ import time import cgi from urllib2 import urlopen import urllib2 from urllib import urlencode class OAuthAccount(object): """ Login will be done via OAuth Framework, instead of web2py's login form. Include in your model (eg db.py):: # define the auth_table before call to auth.define_tables() auth_table = db.define_table( auth.settings.table_user_name, Field('first_name', length=128, default=""), Field('last_name', length=128, default=""), Field('username', length=128, default="", unique=True), Field('password', 'password', length=256, readable=False, label='Password'), Field('registration_key', length=128, default= "", writable=False, readable=False)) auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username) . . . auth.define_tables() . . . CLIENT_ID=\"<put your fb application id here>\" CLIENT_SECRET=\"<put your fb application secret here>\" AUTH_URL="http://..." TOKEN_URL="http://..." from gluon.contrib.login_methods.oauth20_account import OAuthAccount auth.settings.login_form=OAuthAccount(globals(),CLIENT_ID,CLIENT_SECRET,AUTH_URL, TOKEN_URL, **args ) Any optional arg will be passed as is to remote server for requests. It can be used for the optional "scope" parameters for Facebook. """ def __redirect_uri(self, next=None): """Build the uri used by the authenticating server to redirect the client back to the page originating the auth request. Appends the _next action to the generated url so the flows continues. """ r = self.request http_host=r.env.http_x_forwarded_for if not http_host: http_host=r.env.http_host url_scheme = r.env.wsgi_url_scheme if next: path_info = next else: path_info = r.env.path_info uri = '%s://%s%s' %(url_scheme, http_host, path_info) if r.get_vars and not next: uri += '?' 
+ urlencode(r.get_vars) return uri def __build_url_opener(self, uri): """Build the url opener for managing HTTP Basic Athentication""" # Create an OpenerDirector with support for Basic HTTP Authentication... auth_handler = urllib2.HTTPBasicAuthHandler() auth_handler.add_password(None, uri, self.client_id, self.client_secret) opener = urllib2.build_opener(auth_handler) return opener def accessToken(self): """Return the access token generated by the authenticating server. If token is already in the session that one will be used. Otherwise the token is fetched from the auth server. """ if self.session.token and self.session.token.has_key('expires'): expires = self.session.token['expires'] # reuse token until expiration if expires == 0 or expires > time.time(): return self.session.token['access_token'] if self.session.code: data = dict(client_id=self.client_id, client_secret=self.client_secret, redirect_uri=self.session.redirect_uri, response_type='token', code=self.session.code) if self.args: data.update(self.args) open_url = None opener = self.__build_url_opener(self.token_url) try: open_url = opener.open(self.token_url, urlencode(data)) except urllib2.HTTPError, e: raise Exception(e.read()) finally: del self.session.code # throw it away if open_url: try: tokendata = cgi.parse_qs(open_url.read()) self.session.token = dict([(k,v[-1]) for k,v in tokendata.items()]) # set expiration absolute time try to avoid broken # implementations where "expires_in" becomes "expires" if self.session.token.has_key('expires_in'): exps = 'expires_in' else: exps = 'expires' self.session.token['expires'] = int(self.session.token[exps]) + \ time.time() finally: opener.close() return self.session.token['access_token'] self.session.token = None return None def __init__(self, g, client_id, client_secret, auth_url, token_url, **args): self.globals = g self.client_id = client_id self.client_secret = client_secret self.request = g['request'] self.session = g['session'] self.auth_url = auth_url 
self.token_url = token_url self.args = args def login_url(self, next="/"): self.__oauth_login(next) return next def logout_url(self, next="/"): del self.session.token return next def get_user(self): '''Returns the user using the Graph API. ''' raise NotImplementedError, "Must override get_user()" if not self.accessToken(): return None if not self.graph: self.graph = GraphAPI((self.accessToken())) user = None try: user = self.graph.get_object("me") except GraphAPIError: self.session.token = None self.graph = None if user: return dict(first_name = user['first_name'], last_name = user['last_name'], username = user['id']) def __oauth_login(self, next): '''This method redirects the user to the authenticating form on authentication server if the authentication code and the authentication token are not available to the application yet. Once the authentication code has been received this method is called to set the access token into the session by calling accessToken() ''' if not self.accessToken(): if not self.request.vars.code: self.session.redirect_uri=self.__redirect_uri(next) data = dict(redirect_uri=self.session.redirect_uri, response_type='code', client_id=self.client_id) if self.args: data.update(self.args) auth_request_url = self.auth_url + "?" +urlencode(data) HTTP = self.globals['HTTP'] raise HTTP(307, "You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>", Location=auth_request_url) else: self.session.code = self.request.vars.code self.accessToken() return self.session.code return None
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Written by Michele Comitini <mcm@glisco.it> License: GPL v3 Adds support for OAuth1.0a authentication to web2py. Dependencies: - python-oauth2 (http://github.com/simplegeo/python-oauth2) """ import oauth2 as oauth import cgi from urllib2 import urlopen import urllib2 from urllib import urlencode class OAuthAccount(object): """ Login will be done via OAuth Framework, instead of web2py's login form. Include in your model (eg db.py):: # define the auth_table before call to auth.define_tables() auth_table = db.define_table( auth.settings.table_user_name, Field('first_name', length=128, default=""), Field('last_name', length=128, default=""), Field('username', length=128, default="", unique=True), Field('password', 'password', length=256, readable=False, label='Password'), Field('registration_key', length=128, default= "", writable=False, readable=False)) auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username) . . . auth.define_tables() . . . CLIENT_ID=\"<put your fb application id here>\" CLIENT_SECRET=\"<put your fb application secret here>\" AUTH_URL="..." TOKEN_URL="..." ACCESS_TOKEN_URL="..." from gluon.contrib.login_methods.oauth10a_account import OAuthAccount auth.settings.login_form=OAuthAccount(globals(),CLIENT_ID,CLIENT_SECRET, AUTH_URL, TOKEN_URL, ACCESS_TOKEN_URL) """ def __redirect_uri(self, next=None): """Build the uri used by the authenticating server to redirect the client back to the page originating the auth request. Appends the _next action to the generated url so the flows continues. """ r = self.request http_host=r.env.http_x_forwarded_for if not http_host: http_host=r.env.http_host url_scheme = r.env.wsgi_url_scheme if next: path_info = next else: path_info = r.env.path_info uri = '%s://%s%s' %(url_scheme, http_host, path_info) if r.get_vars and not next: uri += '?' 
+ urlencode(r.get_vars) return uri def accessToken(self): """Return the access token generated by the authenticating server. If token is already in the session that one will be used. Otherwise the token is fetched from the auth server. """ if self.session.access_token: # return the token (TODO: does it expire?) return self.session.access_token if self.session.request_token: # Exchange the request token with an authorization token. token = self.session.request_token self.session.request_token = None # Build an authorized client # OAuth1.0a put the verifier! token.set_verifier(self.request.vars.oauth_verifier) client = oauth.Client(self.consumer, token) resp, content = client.request(self.access_token_url, "POST") if str(resp['status']) != '200': self.session.request_token = None self.globals['redirect'](self.globals['URL'](f='user',args='logout')) self.session.access_token = oauth.Token.from_string(content) return self.session.access_token self.session.access_token = None return None def __init__(self, g, client_id, client_secret, auth_url, token_url, access_token_url): self.globals = g self.client_id = client_id self.client_secret = client_secret self.code = None self.request = g['request'] self.session = g['session'] self.auth_url = auth_url self.token_url = token_url self.access_token_url = access_token_url # consumer init self.consumer = oauth.Consumer(self.client_id, self.client_secret) def login_url(self, next="/"): self.__oauth_login(next) return next def logout_url(self, next="/"): self.session.request_token = None self.session.access_token = None return next def get_user(self): '''Get user data. Since OAuth does not specify what a user is, this function must be implemented for the specific provider. 
''' raise NotImplementedError, "Must override get_user()" def __oauth_login(self, next): '''This method redirects the user to the authenticating form on authentication server if the authentication code and the authentication token are not available to the application yet. Once the authentication code has been received this method is called to set the access token into the session by calling accessToken() ''' if not self.accessToken(): # setup the client client = oauth.Client(self.consumer, None) # Get a request token. # oauth_callback *is REQUIRED* for OAuth1.0a # putting it in the body seems to work. callback_url = self.__redirect_uri(next) data = urlencode(dict(oauth_callback=callback_url)) resp, content = client.request(self.token_url, "POST", body=data) if resp['status'] != '200': self.session.request_token = None self.globals['redirect'](self.globals['URL'](f='user',args='logout')) # Store the request token in session. request_token = self.session.request_token = oauth.Token.from_string(content) # Redirect the user to the authentication URL and pass the callback url. data = urlencode(dict(oauth_token=request_token.key, oauth_callback=callback_url)) auth_request_url = self.auth_url + '?' +data HTTP = self.globals['HTTP'] raise HTTP(307, "You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>", Location=auth_request_url) return None
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of web2py Web Framework (Copyrighted, 2007-2009). Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu> and Robin B <robi123@gmail.com>. License: GPL v2 """ __all__ = ['MEMDB', 'Field'] import re import sys import os import types import datetime import thread import cStringIO import csv import copy import gluon.validators as validators from gluon.storage import Storage import random SQL_DIALECTS = {'memcache': { 'boolean': bool, 'string': unicode, 'text': unicode, 'password': unicode, 'blob': unicode, 'upload': unicode, 'integer': long, 'double': float, 'date': datetime.date, 'time': datetime.time, 'datetime': datetime.datetime, 'id': int, 'reference': int, 'lower': None, 'upper': None, 'is null': 'IS NULL', 'is not null': 'IS NOT NULL', 'extract': None, 'left join': None, }} def cleanup(text): if re.compile('[^0-9a-zA-Z_]').findall(text): raise SyntaxError, \ 'Can\'t cleanup \'%s\': only [0-9a-zA-Z_] allowed in table and field names' % text return text def assert_filter_fields(*fields): for field in fields: if isinstance(field, (Field, Expression)) and field.type\ in ['text', 'blob']: raise SyntaxError, 'AppEngine does not index by: %s'\ % field.type def dateobj_to_datetime(object): # convert dates,times to datetimes for AppEngine if isinstance(object, datetime.date): object = datetime.datetime(object.year, object.month, object.day) if isinstance(object, datetime.time): object = datetime.datetime( 1970, 1, 1, object.hour, object.minute, object.second, object.microsecond, ) return object def sqlhtml_validators(field_type, length): v = { 'boolean': [], 'string': validators.IS_LENGTH(length), 'text': [], 'password': validators.IS_LENGTH(length), 'blob': [], 'upload': [], 'double': validators.IS_FLOAT_IN_RANGE(-1e100, 1e100), 'integer': validators.IS_INT_IN_RANGE(-1e100, 1e100), 'date': validators.IS_DATE(), 'time': validators.IS_TIME(), 'datetime': validators.IS_DATETIME(), 'reference': 
validators.IS_INT_IN_RANGE(0, 1e100), } try: return v[field_type[:9]] except KeyError: return [] class DALStorage(dict): """ a dictionary that let you do d['a'] as well as d.a """ def __getattr__(self, key): return self[key] def __setattr__(self, key, value): if key in self: raise SyntaxError, 'Object \'%s\'exists and cannot be redefined' % key self[key] = value def __repr__(self): return '<DALStorage ' + dict.__repr__(self) + '>' class SQLCallableList(list): def __call__(self): return copy.copy(self) class MEMDB(DALStorage): """ an instance of this class represents a database connection Example:: db=MEMDB(Client()) db.define_table('tablename',Field('fieldname1'), Field('fieldname2')) """ def __init__(self, client): self._dbname = 'memdb' self['_lastsql'] = '' self.tables = SQLCallableList() self._translator = SQL_DIALECTS['memcache'] self.client = client def define_table( self, tablename, *fields, **args ): tablename = cleanup(tablename) if tablename in dir(self) or tablename[0] == '_': raise SyntaxError, 'invalid table name: %s' % tablename if not tablename in self.tables: self.tables.append(tablename) else: raise SyntaxError, 'table already defined: %s' % tablename t = self[tablename] = Table(self, tablename, *fields) t._create() return t def __call__(self, where=''): return Set(self, where) class SQLALL(object): def __init__(self, table): self.table = table class Table(DALStorage): """ an instance of this class represents a database table Example:: db=MEMDB(Client()) db.define_table('users',Field('name')) db.users.insert(name='me') """ def __init__( self, db, tablename, *fields ): self._db = db self._tablename = tablename self.fields = SQLCallableList() self._referenced_by = [] fields = list(fields) fields.insert(0, Field('id', 'id')) for field in fields: self.fields.append(field.name) self[field.name] = field field._tablename = self._tablename field._table = self field._db = self._db self.ALL = SQLALL(self) def _create(self): fields = [] myfields = {} for k in 
self.fields: field = self[k] attr = {} if not field.type[:9] in ['id', 'reference']: if field.notnull: attr = dict(required=True) if field.type[:2] == 'id': continue if field.type[:9] == 'reference': referenced = field.type[10:].strip() if not referenced: raise SyntaxError, \ 'Table %s: reference \'%s\' to nothing!' % (self._tablename, k) if not referenced in self._db: raise SyntaxError, \ 'Table: table %s does not exist' % referenced referee = self._db[referenced] ftype = \ self._db._translator[field.type[:9]]( self._db[referenced]._tableobj) if self._tablename in referee.fields: # ## THIS IS OK raise SyntaxError, \ 'Field: table \'%s\' has same name as a field ' \ 'in referenced table \'%s\'' % (self._tablename, referenced) self._db[referenced]._referenced_by.append((self._tablename, field.name)) elif not field.type in self._db._translator\ or not self._db._translator[field.type]: raise SyntaxError, 'Field: unkown field type %s' % field.type self._tableobj = self._db.client return None def create(self): # nothing to do, here for backward compatility pass def drop(self): # nothing to do, here for backward compatibility self._db(self.id > 0).delete() def insert(self, **fields): id = self._create_id() if self.update(id, **fields): return long(id) else: return None def get(self, id): val = self._tableobj.get(self._id_to_key(id)) if val: return Storage(val) else: return None def update(self, id, **fields): for field in fields: if not field in fields and self[field].default\ != None: fields[field] = self[field].default if field in fields: fields[field] = obj_represent(fields[field], self[field].type, self._db) return self._tableobj.set(self._id_to_key(id), fields) def delete(self, id): return self._tableobj.delete(self._id_to_key(id)) def _shard_key(self, shard): return self._id_to_key('s/%s' % shard) def _id_to_key(self, id): return '__memdb__/t/%s/k/%s' % (self._tablename, str(id)) def _create_id(self): shard = random.randint(10, 99) shard_id = self._shard_key(shard) 
id = self._tableobj.incr(shard_id) if not id: if self._tableobj.set(shard_id, '0'): id = 0 else: raise Exception, 'cannot set memcache' return long(str(shard) + str(id)) def __str__(self): return self._tablename class Expression(object): def __init__( self, name, type='string', db=None, ): (self.name, self.type, self._db) = (name, type, db) def __str__(self): return self.name def __or__(self, other): # for use in sortby assert_filter_fields(self, other) return Expression(self.name + '|' + other.name, None, None) def __invert__(self): assert_filter_fields(self) return Expression('-' + self.name, self.type, None) # for use in Query def __eq__(self, value): return Query(self, '=', value) def __ne__(self, value): return Query(self, '!=', value) def __lt__(self, value): return Query(self, '<', value) def __le__(self, value): return Query(self, '<=', value) def __gt__(self, value): return Query(self, '>', value) def __ge__(self, value): return Query(self, '>=', value) # def like(self,value): return Query(self,' LIKE ',value) # def belongs(self,value): return Query(self,' IN ',value) # for use in both Query and sortby def __add__(self, other): return Expression('%s+%s' % (self, other), 'float', None) def __sub__(self, other): return Expression('%s-%s' % (self, other), 'float', None) def __mul__(self, other): return Expression('%s*%s' % (self, other), 'float', None) def __div__(self, other): return Expression('%s/%s' % (self, other), 'float', None) class Field(Expression): """ an instance of this class represents a database field example:: a = Field(name, 'string', length=32, required=False, default=None, requires=IS_NOT_EMPTY(), notnull=False, unique=False, uploadfield=True) to be used as argument of GQLDB.define_table allowed field types: string, boolean, integer, double, text, blob, date, time, datetime, upload, password strings must have a length or 512 by default. 
fields should have a default or they will be required in SQLFORMs the requires argument are used to validate the field input in SQLFORMs """ def __init__( self, fieldname, type='string', length=None, default=None, required=False, requires=sqlhtml_validators, ondelete='CASCADE', notnull=False, unique=False, uploadfield=True, ): self.name = cleanup(fieldname) if fieldname in dir(Table) or fieldname[0] == '_': raise SyntaxError, 'Field: invalid field name: %s' % fieldname if isinstance(type, Table): type = 'reference ' + type._tablename if not length: length = 512 self.type = type # 'string', 'integer' self.length = length # the length of the string self.default = default # default value for field self.required = required # is this field required self.ondelete = ondelete.upper() # this is for reference fields only self.notnull = notnull self.unique = unique self.uploadfield = uploadfield if requires == sqlhtml_validators: requires = sqlhtml_validators(type, length) elif requires is None: requires = [] self.requires = requires # list of validators def formatter(self, value): if value is None or not self.requires: return value if not isinstance(self.requires, (list, tuple)): requires = [self.requires] else: requires = copy.copy(self.requires) requires.reverse() for item in requires: if hasattr(item, 'formatter'): value = item.formatter(value) return value def __str__(self): return '%s.%s' % (self._tablename, self.name) MEMDB.Field = Field # ## required by gluon/globals.py session.connect def obj_represent(object, fieldtype, db): if object != None: if fieldtype == 'date' and not isinstance(object, datetime.date): (y, m, d) = [int(x) for x in str(object).strip().split('-')] object = datetime.date(y, m, d) elif fieldtype == 'time' and not isinstance(object, datetime.time): time_items = [int(x) for x in str(object).strip().split(':')[:3]] if len(time_items) == 3: (h, mi, s) = time_items else: (h, mi, s) = time_items + [0] object = datetime.time(h, mi, s) elif fieldtype == 
'datetime' and not isinstance(object, datetime.datetime): (y, m, d) = [int(x) for x in str(object)[:10].strip().split('-')] time_items = [int(x) for x in str(object)[11:].strip().split(':')[:3]] if len(time_items) == 3: (h, mi, s) = time_items else: (h, mi, s) = time_items + [0] object = datetime.datetime( y, m, d, h, mi, s, ) elif fieldtype == 'integer' and not isinstance(object, long): object = long(object) return object class QueryException: def __init__(self, **a): self.__dict__ = a class Query(object): """ A query object necessary to define a set. It can be stored or can be passed to GQLDB.__call__() to obtain a Set Example: query=db.users.name=='Max' set=db(query) records=set.select() """ def __init__( self, left, op=None, right=None, ): if isinstance(right, (Field, Expression)): raise SyntaxError, \ 'Query: right side of filter must be a value or entity' if isinstance(left, Field) and left.name == 'id': if op == '=': self.get_one = \ QueryException(tablename=left._tablename, id=long(right)) return else: raise SyntaxError, 'only equality by id is supported' raise SyntaxError, 'not supported' def __str__(self): return str(self.left) class Set(object): """ As Set represents a set of records in the database, the records are identified by the where=Query(...) object. 
normally the Set is generated by GQLDB.__call__(Query(...)) given a set, for example set=db(db.users.name=='Max') you can: set.update(db.users.name='Massimo') set.delete() # all elements in the set set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10)) and take subsets: subset=set(db.users.id<5) """ def __init__(self, db, where=None): self._db = db self._tables = [] self.filters = [] if hasattr(where, 'get_all'): self.where = where self._tables.insert(0, where.get_all) elif hasattr(where, 'get_one') and isinstance(where.get_one, QueryException): self.where = where.get_one else: # find out which tables are involved if isinstance(where, Query): self.filters = where.left self.where = where self._tables = [field._tablename for (field, op, val) in self.filters] def __call__(self, where): if isinstance(self.where, QueryException) or isinstance(where, QueryException): raise SyntaxError, \ 'neither self.where nor where can be a QueryException instance' if self.where: return Set(self._db, self.where & where) else: return Set(self._db, where) def _get_table_or_raise(self): tablenames = list(set(self._tables)) # unique if len(tablenames) < 1: raise SyntaxError, 'Set: no tables selected' if len(tablenames) > 1: raise SyntaxError, 'Set: no join in appengine' return self._db[tablenames[0]]._tableobj def _getitem_exception(self): (tablename, id) = (self.where.tablename, self.where.id) fields = self._db[tablename].fields self.colnames = ['%s.%s' % (tablename, t) for t in fields] item = self._db[tablename].get(id) return (item, fields, tablename, id) def _select_except(self): (item, fields, tablename, id) = self._getitem_exception() if not item: return [] new_item = [] for t in fields: if t == 'id': new_item.append(long(id)) else: new_item.append(getattr(item, t)) r = [new_item] return Rows(self._db, r, *self.colnames) def select(self, *fields, **attributes): """ Always returns a Rows object, even if it may be empty """ if isinstance(self.where, QueryException): 
return self._select_except() else: raise SyntaxError, 'select arguments not supported' def count(self): return len(self.select()) def delete(self): if isinstance(self.where, QueryException): (item, fields, tablename, id) = self._getitem_exception() if not item: return self._db[tablename].delete(id) else: raise Exception, 'deletion not implemented' def update(self, **update_fields): if isinstance(self.where, QueryException): (item, fields, tablename, id) = self._getitem_exception() if not item: return for (key, value) in update_fields.items(): setattr(item, key, value) self._db[tablename].update(id, **item) else: raise Exception, 'update not implemented' def update_record( t, s, id, a, ): item = s.get(id) for (key, value) in a.items(): t[key] = value setattr(item, key, value) s.update(id, **item) class Rows(object): """ A wrapper for the return value of a select. It basically represents a table. It has an iterator and each row is represented as a dictionary. """ # ## this class still needs some work to care for ID/OID def __init__( self, db, response, *colnames ): self._db = db self.colnames = colnames self.response = response def __len__(self): return len(self.response) def __getitem__(self, i): if i >= len(self.response) or i < 0: raise SyntaxError, 'Rows: no such row: %i' % i if len(self.response[0]) != len(self.colnames): raise SyntaxError, 'Rows: internal error' row = DALStorage() for j in xrange(len(self.colnames)): value = self.response[i][j] if isinstance(value, unicode): value = value.encode('utf-8') packed = self.colnames[j].split('.') try: (tablename, fieldname) = packed except: if not '_extra' in row: row['_extra'] = DALStorage() row['_extra'][self.colnames[j]] = value continue table = self._db[tablename] field = table[fieldname] if not tablename in row: row[tablename] = DALStorage() if field.type[:9] == 'reference': referee = field.type[10:].strip() rid = value row[tablename][fieldname] = rid elif field.type == 'boolean' and value != None: # 
row[tablename][fieldname]=Set(self._db[referee].id==rid) if value == True or value == 'T': row[tablename][fieldname] = True else: row[tablename][fieldname] = False elif field.type == 'date' and value != None\ and not isinstance(value, datetime.date): (y, m, d) = [int(x) for x in str(value).strip().split('-')] row[tablename][fieldname] = datetime.date(y, m, d) elif field.type == 'time' and value != None\ and not isinstance(value, datetime.time): time_items = [int(x) for x in str(value).strip().split(':')[:3]] if len(time_items) == 3: (h, mi, s) = time_items else: (h, mi, s) = time_items + [0] row[tablename][fieldname] = datetime.time(h, mi, s) elif field.type == 'datetime' and value != None\ and not isinstance(value, datetime.datetime): (y, m, d) = [int(x) for x in str(value)[:10].strip().split('-')] time_items = [int(x) for x in str(value)[11:].strip().split(':')[:3]] if len(time_items) == 3: (h, mi, s) = time_items else: (h, mi, s) = time_items + [0] row[tablename][fieldname] = datetime.datetime( y, m, d, h, mi, s, ) else: row[tablename][fieldname] = value if fieldname == 'id': id = row[tablename].id row[tablename].update_record = lambda t = row[tablename], \ s = self._db[tablename], id = id, **a: update_record(t, s, id, a) for (referee_table, referee_name) in \ table._referenced_by: s = self._db[referee_table][referee_name] row[tablename][referee_table] = Set(self._db, s == id) if len(row.keys()) == 1: return row[row.keys()[0]] return row def __iter__(self): """ iterator over records """ for i in xrange(len(self)): yield self[i] def __str__(self): """ serializes the table into a csv file """ s = cStringIO.StringIO() writer = csv.writer(s) writer.writerow(self.colnames) c = len(self.colnames) for i in xrange(len(self)): row = [self.response[i][j] for j in xrange(c)] for k in xrange(c): if isinstance(row[k], unicode): row[k] = row[k].encode('utf-8') writer.writerow(row) return s.getvalue() def xml(self): """ serializes the table using sqlhtml.SQLTABLE (if present) 
""" return sqlhtml.SQLTABLE(self).xml() def test_all(): """ How to run from web2py dir: export PYTHONPATH=.:YOUR_PLATFORMS_APPENGINE_PATH python gluon/contrib/memdb.py Setup the UTC timezone and database stubs >>> import os >>> os.environ['TZ'] = 'UTC' >>> import time >>> if hasattr(time, 'tzset'): ... time.tzset() >>> >>> from google.appengine.api import apiproxy_stub_map >>> from google.appengine.api.memcache import memcache_stub >>> apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap() >>> apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub()) Create a table with all possible field types >>> from google.appengine.api.memcache import Client >>> db=MEMDB(Client()) >>> tmp=db.define_table('users', Field('stringf','string',length=32,required=True), Field('booleanf','boolean',default=False), Field('passwordf','password',notnull=True), Field('blobf','blob'), Field('uploadf','upload'), Field('integerf','integer',unique=True), Field('doublef','double',unique=True,notnull=True), Field('datef','date',default=datetime.date.today()), Field('timef','time'), Field('datetimef','datetime'), migrate='test_user.table') Insert a field >>> user_id = db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A', uploadf=None, integerf=5,doublef=3.14, datef=datetime.date(2001,1,1), timef=datetime.time(12,30,15), datetimef=datetime.datetime(2002,2,2,12,30,15)) >>> user_id != None True Select all # >>> all = db().select(db.users.ALL) Drop the table # >>> db.users.drop() Select many entities >>> tmp = db.define_table(\"posts\", Field('body','text'), Field('total','integer'), Field('created_at','datetime')) >>> many = 20 #2010 # more than 1000 single fetch limit (it can be slow) >>> few = 5 >>> most = many - few >>> 0 < few < most < many True >>> for i in range(many): ... f=db.posts.insert(body='', total=i,created_at=datetime.datetime(2008, 7, 6, 14, 15, 42, i)) >>> # test timezones >>> class TZOffset(datetime.tzinfo): ... 
def __init__(self,offset=0): ... self.offset = offset ... def utcoffset(self, dt): return datetime.timedelta(hours=self.offset) ... def dst(self, dt): return datetime.timedelta(0) ... def tzname(self, dt): return 'UTC' + str(self.offset) ... >>> SERVER_OFFSET = -8 >>> >>> stamp = datetime.datetime(2008, 7, 6, 14, 15, 42, 828201) >>> post_id = db.posts.insert(created_at=stamp,body='body1') >>> naive_stamp = db(db.posts.id==post_id).select()[0].created_at >>> utc_stamp=naive_stamp.replace(tzinfo=TZOffset()) >>> server_stamp = utc_stamp.astimezone(TZOffset(SERVER_OFFSET)) >>> stamp == naive_stamp True >>> utc_stamp == server_stamp True >>> rows = db(db.posts.id==post_id).select() >>> len(rows) == 1 True >>> rows[0].body == 'body1' True >>> db(db.posts.id==post_id).delete() >>> rows = db(db.posts.id==post_id).select() >>> len(rows) == 0 True >>> id = db.posts.insert(total='0') # coerce str to integer >>> rows = db(db.posts.id==id).select() >>> len(rows) == 1 True >>> rows[0].total == 0 True Examples of insert, select, update, delete >>> tmp=db.define_table('person', Field('name'), Field('birth','date'), migrate='test_person.table') >>> marco_id=db.person.insert(name=\"Marco\",birth='2005-06-22') >>> person_id=db.person.insert(name=\"Massimo\",birth='1971-12-21') >>> me=db(db.person.id==person_id).select()[0] # test select >>> me.name 'Massimo' >>> db(db.person.id==person_id).update(name='massimo') # test update >>> me = db(db.person.id==person_id).select()[0] >>> me.name 'massimo' >>> str(me.birth) '1971-12-21' # resave date to ensure it comes back the same >>> me=db(db.person.id==person_id).update(birth=me.birth) # test update >>> me = db(db.person.id==person_id).select()[0] >>> me.birth datetime.date(1971, 12, 21) >>> db(db.person.id==marco_id).delete() # test delete >>> len(db(db.person.id==marco_id).select()) 0 Update a single record >>> me.update_record(name=\"Max\") >>> me.name 'Max' >>> me = db(db.person.id == person_id).select()[0] >>> me.name 'Max' """ 
# Backwards-compatibility aliases: older web2py code imported these classes
# under their SQL*-prefixed names, so keep both spellings pointing at the
# same objects.
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = DALStorage

# When run as a script, execute the embedded doctests (see test_all above,
# which documents how to set up the App Engine stubs first).
if __name__ == '__main__':
    import doctest
    doctest.testmod()
Python
# Mapping of mobile carrier name -> email-to-SMS gateway domain suffix.
# Appending the suffix to a subscriber's significant digits yields an email
# address that the carrier delivers as a text message.
SMSCODES = {
    'Aliant':'@chat.wirefree.ca',
    'Alltel':'@message.alltel.com',
    'Ameritech':'@paging.acswireless.com',
    'AT&T':'@txt.att.net',
    'AU by KDDI':'@ezweb.ne.jp',
    'BeeLine GSM':'@sms.beemail.ru',
    'Bell Mobility Canada':'@txt.bellmobility.ca',
    'Bellsouth':'@bellsouth.cl',
    'BellSouth Mobility':'@blsdcs.net',
    'Blue Sky Frog':'@blueskyfrog.com',
    'Boost':'@myboostmobile.com',
    'Cellular South':'@csouth1.com',
    'CellularOne':'@mobile.celloneusa.com',
    'CellularOne West':'@mycellone.com',
    'Cincinnati Bell':'@gocbw.com',
    'Claro':'@clarotorpedo.com.br',
    'Comviq':'@sms.comviq.se',
    'Dutchtone/Orange-NL':'@sms.orange.nl',
    'Edge Wireless':'@sms.edgewireless.com',
    'EinsteinPCS / Airadigm Communications':'@einsteinsms.com',
    'EPlus':'@smsmail.eplus.de',
    'Fido Canada':'@fido.ca',
    'Golden Telecom':'@sms.goldentele.com',
    'Idea Cellular':'@ideacellular.net',
    'Kyivstar':'@sms.kyivstar.net',
    'LMT':'@sms.lmt.lv',
    'Manitoba Telecom Systems':'@text.mtsmobility.com',
    'Meteor':'@sms.mymeteor.ie',
    'Metro PCS':'@mymetropcs.com',
    'Metrocall Pager':'@page.metrocall.com',
    'MobileOne':'@m1.com.sg',
    'Mobilfone':'@page.mobilfone.com',
    'Mobility Bermuda':'@ml.bm',
    'Netcom':'@sms.netcom.no',
    'Nextel':'@messaging.nextel.com',
    'NPI Wireless':'@npiwireless.com',
    'O2':'@o2.co.uk',
    'O2 M-mail':'@mmail.co.uk',
    'Optus':'@optusmobile.com.au',
    'Orange':'@orange.net',
    'Oskar':'@mujoskar.cz',
    'Pagenet':'@pagenet.net',
    'PCS Rogers':'@pcs.rogers.com',
    'Personal Communication':'@pcom.ru',
    'Plus GSM Poland':'@text.plusgsm.pl',
    'Powertel':'@ptel.net',
    'Primtel':'@sms.primtel.ru',
    'PSC Wireless':'@sms.pscel.com',
    'Qualcomm':'@pager.qualcomm.com',
    'Qwest':'@qwestmp.com',
    'Safaricom':'@safaricomsms.com',
    'Satelindo GSM':'@satelindogsm.com',
    'SCS-900':'@scs-900.ru',
    'Simple Freedom':'@text.simplefreedom.net',
    'Skytel - Alphanumeric':'@skytel.com',
    'Smart Telecom':'@mysmart.mymobile.ph',
    'Southern Linc':'@page.southernlinc.com',
    'Sprint PCS':'@messaging.sprintpcs.com',
    'Sprint PCS - Short Mail':'@sprintpcs.com',
    'SunCom':'@tms.suncom.com',
    'SureWest Communications':'@mobile.surewest.com',
    'SwissCom Mobile':'@bluewin.ch',
    'T-Mobile Germany':'@T-D1-SMS.de',
    'T-Mobile Netherlands':'@gin.nl',
    'T-Mobile UK':'@t-mobile.uk.net',
    'T-Mobile USA (tmail)':'@tmail.com',
    'T-Mobile USA (tmomail)':'@tmomail.net',
    'Tele2 Latvia':'@sms.tele2.lv',
    'Telefonica Movistar':'@movistar.net',
    'Telenor':'@mobilpost.no',
    'Telia Denmark':'@gsm1800.telia.dk',
    'Telus Mobility':'@msg.telus.com',
    'The Phone House':'@sms.phonehouse.de',
    'TIM':'@timnet.com',
    'UMC':'@sms.umc.com.ua',
    'Unicel':'@utext.com',
    'US Cellular':'@email.uscc.net',
    'Verizon Wireless (vtext)':'@vtext.com',
    'Verizon Wireless (airtouchpaging)':'@airtouchpaging.com',
    'Verizon Wireless (myairmail)':'@myairmail.com',
    'Vessotel':'@pager.irkutsk.ru',
    'Virgin Mobile Canada':'@vmobile.ca',
    'Virgin Mobile USA':'@vmobl.com',
    'Vodafone Italy':'@sms.vodafone.it',
    'Vodafone Japan (n)':'@n.vodafone.ne.jp',
    'Vodafone Japan (d)':'@d.vodafone.ne.jp',
    'Vodafone Japan (r)':'@r.vodafone.ne.jp',
    'Vodafone Japan (k)':'@k.vodafone.ne.jp',
    'Vodafone Japan (t)':'@t.vodafone.ne.jp',
    'Vodafone Japan (q)':'@q.vodafone.ne.jp',
    'Vodafone Japan (s)':'@s.vodafone.ne.jp',
    'Vodafone Japan (h)':'@h.vodafone.ne.jp',
    'Vodafone Japan (c)':'@c.vodafone.ne.jp',
    'Vodafone Spain':'@vodafone.es',
    'Vodafone UK':'@vodafone.net',
    'Weblink Wireless':'@airmessage.net',
    'WellCom':'@sms.welcome2well.com',
    'WyndTell':'@wyndtell.com',
    }


def sms_email(number, provider):
    """Return the email address that delivers an SMS to `number`.

    `number` may contain punctuation/spaces and may carry a leading "+1",
    a "+CC" (two-digit country code) or a "00" international dialing
    prefix; everything except the significant digits is stripped.
    `provider` must be a key of SMSCODES (KeyError otherwise).

    >>> print sms_email('1 (312) 375-6536','T-Mobile USA (tmail)')
    13123756536@tmail.com
    """
    import re
    if number[:2] == '+1':
        # North America: drop the "+" but keep the leading "1".
        # (bugfix: was `number[0]=='+1'`, a 1-char vs 2-char comparison
        # that could never be true)
        number = number[1:]
    elif number[0] == '+':
        # "+CC..." -- drop the plus sign and a two-digit country code
        number = number[3:]
    elif number[:2] == '00':
        # "00..." international prefix
        # NOTE(review): this drops "00" plus only ONE following digit,
        # unlike the "+CC" branch which drops two -- confirm intended
        # country-code length before relying on this branch.
        number = number[3:]
    # keep digits only (strip spaces, parentheses, dashes, dots, ...)
    number = re.sub(r'[^\d]', '', number)
    return number + SMSCODES[provider]
Python
#!/usr/bin/python
# -*- coding: latin-1 -*-

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.

"Visual Template designer for PyFPDF (using wxPython OGL library)"

__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.01a"

# Based on:
#  * pySjetch.py wxPython sample application
#  * OGL.py and other wxPython demo modules

import os, sys
import wx
import wx.lib.ogl as ogl
from wx.lib.wordwrap import wordwrap

# When True, progress/debug messages are printed to stdout (Python 2 print).
DEBUG = True


class CustomDialog(wx.Dialog):
    "A dinamyc dialog to ask user about arbitrary fields"

    def __init__(
            self, parent, ID, title, size=wx.DefaultSize, pos=wx.DefaultPosition,
            style=wx.DEFAULT_DIALOG_STYLE,
            fields=None, data=None,
            ):
        # Build one label + text control row per requested field name.
        # `data` supplies initial values; they are shown via repr() so the
        # round-trip through eval() in do_input() restores the same object.
        wx.Dialog.__init__ (self, parent, ID, title, pos, size, style)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.textctrls = {}
        for field in fields:
            box = wx.BoxSizer(wx.HORIZONTAL)
            label = wx.StaticText(self, -1, field)
            label.SetHelpText("This is the help text for the label")
            box.Add(label, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
            text = wx.TextCtrl(self, -1, "", size=(80,-1))
            text.SetHelpText("Here's some help text for field #1")
            if field in data:
                text.SetValue(repr(data[field]))
            box.Add(text, 1, wx.ALIGN_CENTRE|wx.ALL, 1)
            sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 1)
            self.textctrls[field] = text
        line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
        sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)
        btnsizer = wx.StdDialogButtonSizer()
        btn = wx.Button(self, wx.ID_OK)
        btn.SetHelpText("The OK button completes the dialog")
        btn.SetDefault()
        btnsizer.AddButton(btn)
        btn = wx.Button(self, wx.ID_CANCEL)
        btn.SetHelpText("The Cancel button cancels the dialog. (Cool, huh?)")
        btnsizer.AddButton(btn)
        btnsizer.Realize()
        sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        self.SetSizer(sizer)
        sizer.Fit(self)

    @classmethod
    def do_input(Class, parent, title, fields, data):
        # Show the dialog modally until every field eval()s cleanly or the
        # user cancels.  Returns a {field: value} dict, or None on cancel.
        # WARNING: field values are eval()'d -- input is trusted (local GUI).
        dlg = Class(parent, -1, title, size=(350, 200),
                 style=wx.DEFAULT_DIALOG_STYLE, # & ~wx.CLOSE_BOX,
                 fields=fields, data=data
                 )
        dlg.CenterOnScreen()
        while 1:
            val = dlg.ShowModal()
            if val == wx.ID_OK:
                values = {}
                for field in fields:
                    try:
                        values[field] = eval(dlg.textctrls[field].GetValue())
                    except Exception, e:
                        # Bad literal: report it and re-show the dialog
                        # (the for/else below is skipped via break).
                        msg = wx.MessageDialog(parent, unicode(e),
                               "Error in field %s" % field,
                               wx.OK | wx.ICON_INFORMATION
                               )
                        msg.ShowModal()
                        msg.Destroy()
                        break
                else:
                    return dict([(field, values[field]) for field in fields])
            else:
                return None


class MyEvtHandler(ogl.ShapeEvtHandler):
    "Custom Event Handler for Shapes"

    def __init__(self, callback):
        # `callback` is invoked (optionally with an event-type string) after
        # every interaction so the owning Element can react / update the UI.
        ogl.ShapeEvtHandler.__init__(self)
        self.callback = callback

    def OnLeftClick(self, x, y, keys=0, attachment=0):
        # Select the clicked shape; shift-click toggles, plain click clears
        # any previous selection first.
        shape = self.GetShape()
        canvas = shape.GetCanvas()
        dc = wx.ClientDC(canvas)
        canvas.PrepareDC(dc)

        if shape.Selected() and keys & ogl.KEY_SHIFT:
            shape.Select(False, dc)
            #canvas.Redraw(dc)
            canvas.Refresh(False)
        else:
            redraw = False
            shapeList = canvas.GetDiagram().GetShapeList()
            toUnselect = []

            for s in shapeList:
                if s.Selected() and not keys & ogl.KEY_SHIFT:
                    # If we unselect it now then some of the objects in
                    # shapeList will become invalid (the control points are
                    # shapes too!) and bad things will happen...
                    toUnselect.append(s)

            shape.Select(True, dc)

            if toUnselect:
                for s in toUnselect:
                    s.Select(False, dc)
                ##canvas.Redraw(dc)
                canvas.Refresh(False)
        self.callback()

    def OnEndDragLeft(self, x, y, keys=0, attachment=0):
        # Make sure a dragged shape ends up selected.
        shape = self.GetShape()
        ogl.ShapeEvtHandler.OnEndDragLeft(self, x, y, keys, attachment)
        if not shape.Selected():
            self.OnLeftClick(x, y, keys, attachment)
        self.callback()

    def OnSizingEndDragLeft(self, pt, x, y, keys, attch):
        ogl.ShapeEvtHandler.OnSizingEndDragLeft(self, pt, x, y, keys, attch)
        self.callback()

    def OnMovePost(self, dc, x, y, oldX, oldY, display):
        shape = self.GetShape()
        ogl.ShapeEvtHandler.OnMovePost(self, dc, x, y, oldX, oldY, display)
        self.callback()
        if "wxMac" in wx.PlatformInfo:
            # wxMac needs an explicit refresh after a move
            shape.GetCanvas().Refresh(False)

    def OnLeftDoubleClick(self, x, y, keys = 0, attachment = 0):
        # Forwarded to Element.evt_callback -> edit_text()
        self.callback("LeftDoubleClick")

    def OnRightClick(self, *dontcare):
        # Forwarded to Element.evt_callback -> edit()
        self.callback("RightClick")


class Element(object):
    "Visual class that represent a placeholder in the template"

    # Field order matters: it matches the CSV column order used by
    # AppFrame.do_open/do_save and pyfpdf's template format.
    fields = ['name', 'type', 'x1', 'y1', 'x2', 'y2', 'font', 'size',
              'bold', 'italic', 'underline', 'foreground', 'background',
              'align', 'text', 'priority',]

    def __init__(self, canvas=None, frame=None, zoom=5.0, static=False, **kwargs):
        # `zoom` converts pdf (mm) coordinates to canvas pixels.
        # `static` elements (e.g. paper-size guides) are not draggable and
        # are excluded when saving/printing.
        self.kwargs = kwargs
        self.zoom = zoom
        self.frame = frame
        self.canvas = canvas
        self.static = static
        name = kwargs['name']
        # no-op lookup: effectively asserts the 'type' key exists
        # (raises KeyError otherwise)
        kwargs['type']
        type = kwargs['type']
        x, y, w, h = self.set_coordinates(kwargs['x1'], kwargs['y1'],
                                          kwargs['x2'], kwargs['y2'])
        text = kwargs['text']
        shape = self.shape = ogl.RectangleShape(w, h)
        if not static:
            shape.SetDraggable(True, True)
        shape.SetX(x)
        shape.SetY(y)
        #if pen:    shape.SetPen(pen)
        #if brush:  shape.SetBrush(brush)
        shape.SetBrush(wx.TRANSPARENT_BRUSH)
        if type not in ('L', 'B', 'BC'):
            # text/image elements get an outline so they stay visible
            if not static:
                pen = wx.LIGHT_GREY_PEN
            else:
                pen = wx.RED_PEN
            shape.SetPen(pen)
        # goes through the `text` property: stores value and draws it
        self.text = kwargs['text']
        evthandler = MyEvtHandler(self.evt_callback)
        evthandler.SetShape(shape)
        evthandler.SetPreviousHandler(shape.GetEventHandler())
        shape.SetEventHandler(evthandler)
        shape.SetCentreResize(False)
        shape.SetMaintainAspectRatio(False)
        canvas.AddShape( shape )

    @classmethod
    def new(Class, parent):
        # Interactive constructor: ask the user for all fields, return a new
        # Element or None if the dialog was cancelled.
        data = dict(name='some_name', type='T', x1=5.0, y1=5.0, x2=100.0, y2=10.0,
                    font="Arial", size=12,
                    bold=False, italic=False, underline=False,
                    foreground= 0x000000, background=0xFFFFFF,
                    align="L", text="", priority=0)
        data = CustomDialog.do_input(parent, 'New element', Class.fields, data)
        if data:
            return Class(canvas=parent.canvas, frame=parent, **data)

    def edit(self):
        "Edit current element (show a dialog box with all fields)"
        data = self.kwargs.copy()
        x1, y1, x2, y2 = self.get_coordinates()
        data.update(dict(name=self.name, text=self.text,
                    x1=x1, y1=y1, x2=x2, y2=y2,
                    ))
        data = CustomDialog.do_input(self.frame, 'Edit element', self.fields, data)
        if data:
            self.kwargs.update(data)
            self.name = data['name']
            self.text = data['text']
            x,y, w, h = self.set_coordinates(data['x1'], data['y1'],
                                             data['x2'], data['y2'])
            self.shape.SetX(x)
            self.shape.SetY(y)
            self.shape.SetWidth(w)
            self.shape.SetHeight(h)
            self.canvas.Refresh(False)
            self.canvas.GetDiagram().ShowAll(1)

    def edit_text(self):
        "Allow text edition (i.e. for doubleclick)"
        dlg = wx.TextEntryDialog(
                self.frame, 'Text for %s' % self.name,
                'Edit Text', '')
        if self.text:
            dlg.SetValue(self.text)
        if dlg.ShowModal() == wx.ID_OK:
            # pyfpdf works with latin1 byte strings
            self.text = dlg.GetValue().encode("latin1")
        dlg.Destroy()

    def copy(self):
        "Return an identical duplicate"
        kwargs = self.as_dict()
        element = Element(canvas=self.canvas, frame=self.frame, zoom=self.zoom,
                          static=self.static, **kwargs)
        return element

    def remove(self):
        "Erases visual shape from OGL canvas (element must be deleted manually)"
        self.canvas.RemoveShape(self.shape)

    def move(self, dx, dy):
        "Change pdf coordinates (converting to wx internal values)"
        x1, y1, x2, y2 = self.get_coordinates()
        x1 += dx
        x2 += dx
        y1 += dy
        y2 += dy
        x, y, w, h = self.set_coordinates(x1, y1, x2, y2)
        self.shape.SetX(x)
        self.shape.SetY(y)

    def evt_callback(self, evt_type=None):
        "Event dispatcher"
        if evt_type=="LeftDoubleClick":
            self.edit_text()
        if evt_type=='RightClick':
            self.edit()
        # update the status bar with the element's pdf coordinates
        x1, y1, x2, y2 = self.get_coordinates()
        self.frame.SetStatusText("%s (%0.2f, %0.2f) - (%0.2f, %0.2f)" %
                                 (self.name, x1, y1, x2, y2))

    def get_coordinates(self):
        "Convert from wx to pdf coordinates"
        # OGL shapes are positioned by their centre; pdf elements use
        # top-left / bottom-right corners, hence the +/- half-size math.
        x, y = self.shape.GetX(), self.shape.GetY()
        w, h = self.shape.GetBoundingBoxMax()
        w -= 1
        h -= 1
        x1 = x/self.zoom - w/self.zoom/2.0
        x2 = x/self.zoom + w/self.zoom/2.0
        y1 = y/self.zoom - h/self.zoom/2.0
        y2 = y/self.zoom + h/self.zoom/2.0
        return x1, y1, x2, y2

    def set_coordinates(self, x1, y1, x2, y2):
        "Convert from pdf to wx coordinates"
        x1 = x1 * self.zoom
        x2 = x2 * self.zoom
        y1 = y1 * self.zoom
        y2 = y2 * self.zoom
        # shapes seems to be centred, pdf coord not
        w = max(x1, x2) - min(x1, x2) + 1
        h = max(y1, y2) - min(y1, y2) + 1
        x = (min(x1, x2) + w/2.0)
        y = (min(y1, y2) + h/2.0)
        return x, y, w, h

    def text(self, txt=None):
        # Combined getter/setter backing the `text` property below:
        # setting stores the value and redraws it line by line on the shape.
        if txt is not None:
            if not isinstance(txt,str):
                txt = str(txt)
            self.kwargs['text'] = txt
            self.shape.ClearText()
            for line in txt.split('\n'):
                self.shape.AddText(unicode(line, "latin1"))
            self.canvas.Refresh(False)
        return self.kwargs['text']
    text = property(text, text)

    def set_x(self, x):
        self.shape.SetX(x)
        self.canvas.Refresh(False)
        self.evt_callback()

    def set_y(self, y):
        self.shape.SetY(y)
        self.canvas.Refresh(False)
        self.evt_callback()

    def get_x(self):
        return self.shape.GetX()

    def get_y(self):
        return self.shape.GetY()

    # canvas-space (pixel) position of the shape centre
    x = property(get_x, set_x)
    y = property(get_y, set_y)

    def selected(self, sel=None):
        # Combined getter/setter backing the `selected` property below.
        if sel is not None:
            print "Setting Select(%s)" % sel
            self.shape.Select(sel)
        return self.shape.Selected()
    selected = property(selected, selected)

    def name(self, name=None):
        # Combined getter/setter backing the `name` property below.
        if name is not None:
            self.kwargs['name'] = name
        return self.kwargs['name']
    name = property(name, name)

    def __contains__(self, k):
        "Implement in keyword for searchs"
        return k in self.name.lower() or self.text and k in self.text.lower()

    def as_dict(self):
        "Return a dictionary representation, used by pyfpdf"
        d = self.kwargs
        x1, y1, x2, y2 = self.get_coordinates()
        d.update({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
                  'text': self.text})
        return d


class AppFrame(wx.Frame):
    "OGL Designer main window"

    title = "PyFPDF Template Designer (wx OGL)"

    def __init__(self):
        # Build the frame: toolbar, status bar, scrollable OGL canvas and
        # diagram; binds toolbar ids to do_* handlers and installs a global
        # excepthook so GUI errors are logged instead of lost.
        wx.Frame.__init__( self,
                          None, -1, self.title,
                          size=(640,480),
                          style=wx.DEFAULT_FRAME_STYLE )
        sys.excepthook  = self.except_hook
        self.filename = ""
        # Create a toolbar:
        tsize = (16,16)
        self.toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER |
                                          wx.TB_FLAT)

        artBmp = wx.ArtProvider.GetBitmap
        self.toolbar.AddSimpleTool(
            wx.ID_NEW, artBmp(wx.ART_NEW, wx.ART_TOOLBAR, tsize), "New")
        self.toolbar.AddSimpleTool(
            wx.ID_OPEN, artBmp(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize), "Open")
        self.toolbar.AddSimpleTool(
            wx.ID_SAVE, artBmp(wx.ART_FILE_SAVE, wx.ART_TOOLBAR, tsize), "Save")
        self.toolbar.AddSimpleTool(
            wx.ID_SAVEAS, artBmp(wx.ART_FILE_SAVE_AS, wx.ART_TOOLBAR, tsize),
            "Save As...")
        #-------
        self.toolbar.AddSeparator()
        self.toolbar.AddSimpleTool(
            wx.ID_UNDO, artBmp(wx.ART_UNDO, wx.ART_TOOLBAR, tsize), "Undo")
        self.toolbar.AddSimpleTool(
            wx.ID_REDO, artBmp(wx.ART_REDO, wx.ART_TOOLBAR, tsize), "Redo")
        self.toolbar.AddSeparator()
        #-------
        self.toolbar.AddSimpleTool(
            wx.ID_CUT, artBmp(wx.ART_CUT, wx.ART_TOOLBAR, tsize), "Remove")
        self.toolbar.AddSimpleTool(
            wx.ID_COPY, artBmp(wx.ART_COPY, wx.ART_TOOLBAR, tsize), "Duplicate")
        self.toolbar.AddSimpleTool(
            wx.ID_PASTE, artBmp(wx.ART_PASTE, wx.ART_TOOLBAR, tsize), "Insert")
        self.toolbar.AddSeparator()
        self.toolbar.AddSimpleTool(
            wx.ID_FIND, artBmp(wx.ART_FIND, wx.ART_TOOLBAR, tsize), "Find")
        self.toolbar.AddSeparator()
        self.toolbar.AddSimpleTool(
            wx.ID_PRINT, artBmp(wx.ART_PRINT, wx.ART_TOOLBAR, tsize), "Print")
        self.toolbar.AddSimpleTool(
            wx.ID_ABOUT, artBmp(wx.ART_HELP, wx.ART_TOOLBAR, tsize), "About")
        self.toolbar.Realize()
        # Save-as / undo / redo are not implemented yet: keep them disabled
        self.toolbar.EnableTool(wx.ID_SAVEAS, False)
        self.toolbar.EnableTool(wx.ID_UNDO, False)
        self.toolbar.EnableTool(wx.ID_REDO, False)

        menu_handlers = [
            (wx.ID_NEW, self.do_new),
            (wx.ID_OPEN, self.do_open),
            (wx.ID_SAVE, self.do_save),
            (wx.ID_PRINT, self.do_print),
            (wx.ID_FIND, self.do_find),
            (wx.ID_CUT, self.do_cut),
            (wx.ID_COPY, self.do_copy),
            (wx.ID_PASTE, self.do_paste),
            (wx.ID_ABOUT, self.do_about),
        ]
        for menu_id, handler in menu_handlers:
            self.Bind(wx.EVT_MENU, handler, id = menu_id)

        sizer = wx.BoxSizer(wx.VERTICAL)
        # put stuff into sizer

        self.CreateStatusBar()

        canvas = self.canvas = ogl.ShapeCanvas( self )
        maxWidth  = 1500
        maxHeight = 2000
        canvas.SetScrollbars(20, 20, maxWidth/20, maxHeight/20)
        sizer.Add( canvas, 1, wx.GROW )

        canvas.SetBackgroundColour("WHITE") #

        diagram = self.diagram = ogl.Diagram()
        canvas.SetDiagram( diagram )
        diagram.SetCanvas( canvas )
        diagram.SetSnapToGrid( False )

        # apply sizer
        self.SetSizer(sizer)
        self.SetAutoLayout(1)
        self.Show(1)

        self.Bind(wx.EVT_CHAR_HOOK, self.on_key_event)

        # list of Element instances currently on the canvas
        self.elements = []

    def on_key_event(self, event):
        """ Respond to a keypress event.

            We make the arrow keys move the selected object(s) by one pixel in
            the given direction. """
        step = 1
        if event.ControlDown():
            # Ctrl accelerates the move
            step = 20
        if event.GetKeyCode() == wx.WXK_UP:
            self.move_elements(0, -step)
        elif event.GetKeyCode() == wx.WXK_DOWN:
            self.move_elements(0, step)
        elif event.GetKeyCode() == wx.WXK_LEFT:
            self.move_elements(-step, 0)
        elif event.GetKeyCode() == wx.WXK_RIGHT:
            self.move_elements(step, 0)
        elif event.GetKeyCode() == wx.WXK_DELETE:
            self.do_cut()
        else:
            event.Skip()

    def do_new(self, evt=None):
        # Clear the canvas and redraw the static paper-size guide rectangles.
        for element in self.elements:
            element.remove()
        self.elements = []
        # draw paper size guides
        for k, (w, h) in [('legal', (216, 356)), ('A4', (210, 297)),
                          ('letter', (216, 279))]:
            self.create_elements(k, 'R', 0, 0, w, h,
                size=70, foreground=0x808080, priority=-100,
                canvas=self.canvas, frame=self, static=True)
        self.diagram.ShowAll( 1 )

    def do_open(self, evt):
        # Ask for a template CSV, parse it (semicolon-separated, values are
        # Python literals via eval -- trusted input only) and rebuild the
        # canvas from scratch.
        dlg = wx.FileDialog(
            self, message="Choose a file",
            defaultDir=os.getcwd(),
            defaultFile="invoice.csv",
            wildcard="CSV Files (*.csv)|*.csv",
            style=wx.OPEN )
        if dlg.ShowModal() == wx.ID_OK:
            # This returns a Python list of files that were selected.
            self.filename = dlg.GetPaths()[0]
        dlg.Destroy()
        self.SetTitle(self.filename + " - " + self.title)
        self.do_new()
        tmp = []
        f = open(self.filename)
        try:
            filedata = f.readlines()
        finally:
            f.close()
        for lno, linea in enumerate(filedata):
            if DEBUG: print "processing line", lno, linea
            args = []
            for i,v in enumerate(linea.split(";")):
                if not v.startswith("'"):
                    # numeric cell: accept comma as decimal separator
                    v = v.replace(",",".")
                else:
                    v = v#.decode('latin1')
                if v.strip()=='':
                    v = None
                else:
                    v = eval(v.strip())
                args.append(v)
            tmp.append(args)
        # sort by z-order (priority)
        for args in sorted(tmp, key=lambda t: t[-1]):
            if DEBUG: print args
            self.create_elements(*args)
        self.diagram.ShowAll( 1 )
        # return True

    def do_save(self, evt, filename=None):
        # Write all non-static elements back to the CSV, keeping a
        # timestamped .bak copy of the previous file (best-effort).
        try:
            from time import gmtime, strftime
            ts = strftime("%Y%m%d%H%M%S", gmtime())
            os.rename(self.filename, self.filename + ts + ".bak")
        except Exception, e:
            # backup is best-effort (e.g. file may not exist yet)
            if DEBUG: print e
            pass
        def csv_repr(v, decimal_sep="."):
            # floats get two decimals; everything else is repr()'d so
            # do_open can eval() it back
            if isinstance(v, float):
                return ("%0.2f" % v).replace(".", decimal_sep)
            else:
                return repr(v)
        f = open(self.filename, "w")
        try:
            for element in sorted(self.elements, key=lambda e:e.name):
                if element.static:
                    continue
                d = element.as_dict()
                l = [d['name'], d['type'], d['x1'], d['y1'], d['x2'], d['y2'],
                     d['font'], d['size'],
                     d['bold'], d['italic'], d['underline'],
                     d['foreground'], d['background'],
                     d['align'], d['text'], d['priority'],
                     ]
                f.write(";".join([csv_repr(v) for v in l]))
                f.write("\n")
        finally:
            f.close()

    def do_print(self, evt):
        # Render a preview PDF through pyfpdf's Template and open it with
        # the platform viewer. (was: "genero el renderizador con
        # propiedades del PDF")
        from template import Template
        t = Template(elements=[e.as_dict() for e in self.elements
                               if not e.static])
        t.add_page()
        if not t['logo'] or not os.path.exists(t['logo']):
            # put a default logo so it doesn't trow an exception
            logo = os.path.join(os.path.dirname(__file__),
                                'tutorial','logo.png')
            t.set('logo', logo)
        try:
            t.render(self.filename +".pdf")
        except:
            if DEBUG and False:
                import pdb;
                pdb.pm()
            else:
                raise
        if sys.platform=="linux2":
            os.system("evince ""%s""" % self.filename +".pdf")
        else:
            os.startfile(self.filename +".pdf")

    def do_find(self, evt):
        # Select every element whose name or text contains the entered
        # string. (was: "busco nombre o texto")
        dlg = wx.TextEntryDialog(
                self, 'Enter text to search for',
                'Find Text', '')
        if dlg.ShowModal() == wx.ID_OK:
            txt = dlg.GetValue().encode("latin1").lower()
            for element in self.elements:
                if txt in element:
                    element.selected = True
                    print "Found:", element.name
            self.canvas.Refresh(False)
        dlg.Destroy()

    def do_cut(self, evt=None):
        "Delete selected elements"
        new_elements = []
        for element in self.elements:
            if element.selected:
                print "Erasing:", element.name
                element.selected = False
                self.canvas.Refresh(False)
                element.remove()
            else:
                new_elements.append(element)
        self.elements = new_elements
        self.canvas.Refresh(False)
        self.diagram.ShowAll( 1 )

    def do_copy(self, evt):
        "Duplicate selected elements"
        # Asks for a count and an (dx, dy) offset per copy; numbered names
        # ("foo01") get their trailing digits incremented, others get
        # "_copy" appended.
        fields = ['qty', 'dx', 'dy']
        data = {'qty': 1, 'dx': 0.0, 'dy': 5.0}
        data = CustomDialog.do_input(self, 'Copy elements', fields, data)
        if data:
            new_elements = []
            for i in range(1, data['qty']+1):
                for element in self.elements:
                    if element.selected:
                        print "Copying:", element.name
                        new_element = element.copy()
                        name = new_element.name
                        if len(name)>2 and name[-2:].isdigit():
                            new_element.name = name[:-2] + "%02d" % (
                                int(name[-2:])+i)
                        else:
                            new_element.name = new_element.name + "_copy"
                        new_element.selected = False
                        new_element.move(data['dx']*i, data['dy']*i)
                        new_elements.append(new_element)
            self.elements.extend(new_elements)
        self.canvas.Refresh(False)
        self.diagram.ShowAll( 1 )

    def do_paste(self, evt):
        "Insert new elements"
        element = Element.new(self)
        if element:
            self.canvas.Refresh(False)
            self.elements.append(element)
            self.diagram.ShowAll( 1 )

    def create_elements(self, name, type, x1, y1, x2, y2, font="Arial", size=12,
                       bold=False, italic=False, underline=False,
                       foreground= 0x000000, background=0xFFFFFF,
                       align="L", text="", priority=0,
                       canvas=None, frame=None, static=False, **kwargs):
        # Instantiate an Element from positional CSV-order arguments and
        # register it with the frame.
        element = Element(name=name, type=type,
                  x1=x1, y1=y1, x2=x2, y2=y2,
                  font=font, size=size,
                  bold=bold, italic=italic, underline=underline,
                  foreground= foreground, background=background,
                  align=align, text=text, priority=priority,
                  canvas=canvas or self.canvas, frame=frame or self,
                  static=static)
        self.elements.append(element)

    def move_elements(self, x, y):
        # Shift all selected elements by (x, y) canvas pixels.
        for element in self.elements:
            if element.selected:
                print "moving", element.name, x, y
                element.x = element.x + x
                element.y = element.y + y

    def do_about(self, evt):
        info = wx.AboutDialogInfo()
        info.Name = self.title
        info.Version = __version__
        info.Copyright = __copyright__
        info.Description = (
            "Visual Template designer for PyFPDF (using wxPython OGL library)\n"
            "Input files are CSV format describing the layout, separated by ;\n"
            "Use toolbar buttons to open, save, print (preview) your "
            "template, "
            "and there are buttons to find, add, remove or duplicate "
            "elements.\n"
            "Over an element, a double left click opens edit text dialog, "
            "and a right click opens edit properties dialog. \n"
            "Multiple element can be selected with shift left click. \n"
            "Use arrow keys or drag-and-drop to move elements.\n"
            "For further information see project webpage:"
            )
        info.WebSite = ("http://code.google.com/p/pyfpdf/wiki/Templates",
                        "pyfpdf Google Code Project")
        info.Developers = [ __author__, ]
        info.License = wordwrap(__license__, 500, wx.ClientDC(self))
        # Then we call wx.AboutBox giving it that info object
        wx.AboutBox(info)

    def except_hook(self, type, value, trace):
        # Global exception hook: log the traceback through wx instead of
        # letting the GUI die silently.
        import traceback
        exc = traceback.format_exception(type, value, trace)
        for e in exc: wx.LogError(e)
        wx.LogError('Unhandled Error: %s: %s'%(str(type), str(value)))


# Module-level startup: build the app, initialise OGL and run the designer.
app = wx.PySimpleApp()
ogl.OGLInitialize()
frame = AppFrame()
app.MainLoop()
app.Destroy()
Python
# -*- coding: iso-8859-1 -*-

"PDF Template Helper for FPDF.py"

__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"

import sys,os,csv
from fpdf import FPDF


def rgb(col):
    # Split a packed 0xRRGGBB integer into an (r, g, b) tuple.
    return (col // 65536), (col // 256 % 256), (col% 256)


class Template:
    # Fill-in-the-blanks PDF generator: a template is a set of named
    # "elements" (text boxes, lines, images, rectangles, barcodes) loaded
    # from a CSV file or passed in as dicts; per-page values are assigned
    # with template[name] = value and the whole document is produced by
    # render().

    def __init__(self, infile=None, elements=None, format='A4',
                 orientation='portrait',
                 title='', author='', subject='', creator='', keywords=''):
        # NOTE(review): `infile` is accepted but never used here; CSV
        # templates are loaded explicitly via parse_csv().
        if elements:
            self.elements = dict([(v['name'].lower(),v) for v in elements])
        # dispatch table: element 'type' code -> drawing handler
        self.handlers = {'T': self.text, 'L': self.line, 'I': self.image,
                         'B': self.rect, 'BC': self.barcode, }
        self.pg_no = 0            # current page number (0 = no page yet)
        self.texts = {}           # page number -> {element name: value}
        pdf = self.pdf = FPDF(format=format,orientation=orientation, unit="mm")
        pdf.set_title(title)
        pdf.set_author(author)
        pdf.set_creator(creator)
        pdf.set_subject(subject)
        pdf.set_keywords(keywords)

    def parse_csv(self, infile, delimiter=",", decimal_sep="."):
        "Parse template format csv file and create elements dict"
        # Column order is fixed; see `keys` below.  Quoted cells (starting
        # with ') are kept verbatim, numeric cells may use an alternate
        # decimal separator.
        keys = ('name','type','x1','y1','x2','y2','font','size',
                'bold','italic','underline','foreground','background',
                'align','text','priority')
        self.elements = {}
        f = open(infile, 'rb')
        try:
            for row in csv.reader(f, delimiter=delimiter):
                kargs = {}
                for i,v in enumerate(row):
                    if not v.startswith("'") and decimal_sep!=".":
                        v = v.replace(decimal_sep,".")
                    else:
                        v = v
                    if v=='':
                        v = None
                    else:
                        # WARNING: cell values are eval()'d -- template CSV
                        # files must come from a trusted source.
                        v = eval(v.strip())
                    kargs[keys[i]] = v
                self.elements[kargs['name'].lower()] = kargs
        finally:
            f.close()

    def add_page(self):
        # Start a new logical page (actual FPDF pages are created in render).
        self.pg_no += 1
        self.texts[self.pg_no] = {}

    def __setitem__(self, name, value):
        # Assign a value to a named element on the *current* page.
        # Unknown names are silently ignored.
        if name.lower() in self.elements:
            if isinstance(value,unicode):
                value = value.encode("latin1","ignore")
            else:
                value = str(value)
            self.texts[self.pg_no][name.lower()] = value

    # setitem shortcut (may be further extended)
    set = __setitem__

    def __getitem__(self, name):
        # Return the current page's value for `name`, falling back to the
        # element's default text; None for unknown names.
        if name.lower() in self.elements:
            return self.texts[self.pg_no].get(name.lower(),
                self.elements[name.lower()]['text'])

    def split_multicell(self, text, element_name):
        "Divide (\n) a string using a given element width"
        pdf = self.pdf
        element = self.elements[element_name.lower()]
        style = ""
        if element['bold']: style += "B"
        if element['italic']: style += "I"
        if element['underline']: style += "U"
        pdf.set_font(element['font'],style,element['size'])
        align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(
            element['align']) # D/I in spanish
        if isinstance(text, unicode):
            text = text.encode("latin1","ignore")
        else:
            text = str(text)
        # split_only=True: FPDF returns the wrapped lines without drawing
        return pdf.multi_cell(w=element['x2']-element['x1'],
                              h=element['y2']-element['y1'],
                              txt=text,align=align,split_only=True)

    def render(self, outfile, dest="F"):
        # Draw every page: elements are painted in `priority` (z-order),
        # each taking the page-specific value when one was set.
        pdf = self.pdf
        for pg in range(1, self.pg_no+1):
            pdf.add_page()
            pdf.set_font('Arial','B',16)
            pdf.set_auto_page_break(False,margin=0)
            for element in sorted(self.elements.values(),
                                  key=lambda x: x['priority']):
                # copy so the per-page text substitution doesn't mutate
                # the template definition
                element = element.copy()
                element['text'] = self.texts[pg].get(element['name'].lower(),
                                                     element['text'])
                if 'rotate' in element:
                    pdf.rotate(element['rotate'], element['x1'], element['y1'])
                self.handlers[element['type'].upper()](pdf, **element)
                if 'rotate' in element:
                    pdf.rotate(0)
        return pdf.output(outfile, dest)

    def text(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=10,
             bold=False, italic=False, underline=False, align="",
             foreground=0, backgroud=65535, *args, **kwargs):
        # 'T' handler.  NOTE: the misspelled `backgroud` parameter name is
        # part of the template-dict contract (elements pass it by keyword),
        # so it must not be renamed.
        if text:
            # colors are only changed when needed to keep the PDF small
            if pdf.text_color!=rgb(foreground):
                pdf.set_text_color(*rgb(foreground))
            if pdf.fill_color!=rgb(backgroud):
                pdf.set_fill_color(*rgb(backgroud))
            font = font.strip().lower()
            if font == 'arial black':
                font = 'arial'
            style = ""
            # inline <B>/<I>/<U> markup wrapping the whole text
            for tag in 'B', 'I', 'U':
                if (text.startswith("<%s>" % tag)
                    and text.endswith("</%s>" %tag)):
                    text = text[3:-4]
                    style += tag
            if bold: style += "B"
            if italic: style += "I"
            if underline: style += "U"
            align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(
                align) # D/I in spanish
            pdf.set_font(font,style,size)
            ##m_k = 72 / 2.54
            ##h = (size/m_k)
            pdf.set_xy(x1,y1)
            pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
            #pdf.Text(x=x1,y=y1,txt=text)

    def line(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0,
             *args, **kwargs):
        # 'L' handler: straight line of width `size`.
        if pdf.draw_color!=rgb(foreground):
            #print "SetDrawColor", hex(foreground)
            pdf.set_draw_color(*rgb(foreground))
        #print "SetLineWidth", size
        pdf.set_line_width(size)
        pdf.line(x1, y1, x2, y2)

    def rect(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0,
             backgroud=65535, *args, **kwargs):
        # 'B' handler: rectangle outline (same `backgroud` naming caveat
        # as text()).
        if pdf.draw_color!=rgb(foreground):
            pdf.set_draw_color(*rgb(foreground))
        if pdf.fill_color!=rgb(backgroud):
            pdf.set_fill_color(*rgb(backgroud))
        pdf.set_line_width(size)
        pdf.rect(x1, y1, x2-x1, y2-y1)

    def image(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', *args,**kwargs):
        # 'I' handler: `text` holds the image file path.
        pdf.image(text,x1,y1,w=x2-x1,h=y2-y1,type='',link='')

    def barcode(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial",
                size=1, foreground=0, *args, **kwargs):
        # 'BC' handler: only "interleaved 2of5 nt" is supported; `size` is
        # the bar width.
        if pdf.draw_color!=rgb(foreground):
            pdf.set_draw_color(*rgb(foreground))
        font = font.lower().strip()
        if font == 'interleaved 2of5 nt':
            pdf.interleaved2of5(text,x1,y1,w=size,h=y2-y1)


if __name__ == "__main__":
    # generate sample invoice (according Argentina's regulations)
    # Demo/smoke test: fills "invoice.csv" with random line items, paginates
    # them and renders ./invoice.pdf.
    import random
    from decimal import Decimal

    f = Template(format="A4",
                 title="Sample Invoice", author="Sample Company",
                 subject="Sample Customer",
                 keywords="Electronic TAX Invoice")
    f.parse_csv(infile="invoice.csv", delimiter=";", decimal_sep=",")

    detail = "Lorem ipsum dolor sit amet, consectetur. " * 30
    items = []
    for i in range(1, 30):
        ds = "Sample product %s" % i
        qty = random.randint(1,10)
        price = round(random.random()*100,3)
        code = "%s%s%02d" % (chr(random.randint(65,90)),
                             chr(random.randint(65,90)),i)
        items.append(dict(code=code, unit='u',
                          qty=qty, price=price,
                          amount=qty*price,
                          ds="%s: %s" % (i,ds)))

    # divide and count lines
    lines = 0
    li_items = []
    for it in items:
        qty = it['qty']
        code = it['code']
        unit = it['unit']
        for ds in f.split_multicell(it['ds'], 'item_description01'):
            # add item description line (without price nor amount)
            li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit,
                                 price=None, amount=None))
            # clean qty and code (show only at first)
            unit = qty = code = None
        # set last item line price and amount
        li_items[-1].update(amount = it['amount'],
                            price = it['price'])

    obs="\n<U>Detail:</U>\n\n" + detail
    for ds in f.split_multicell(obs, 'item_description01'):
        li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit,
                             price=None, amount=None))

    # calculate pages:
    lines = len(li_items)
    max_lines_per_page = 24
    # Python 2 integer division; the last description slot per page is
    # reserved for the "Continues on page N" notice
    pages = lines / (max_lines_per_page - 1)
    if lines % (max_lines_per_page - 1): pages = pages + 1

    # fill fields page by page (was: "completo campos y hojas")
    for page in range(1, pages+1):
        f.add_page()
        f['page'] = 'Page %s of %s' % (page, pages)
        if pages>1 and page<pages:
            s = 'Continues on page %s' % (page+1)
        else:
            s = ''
        f['item_description%02d' % (max_lines_per_page+1)] = s

        f["company_name"] = "Sample Company"
        f["company_logo"] = "tutorial/logo.png"
        f["company_header1"] = "Some Address - somewhere -"
        f["company_header2"] = "http://www.example.com"
        f["company_footer1"] = "Tax Code ..."
        f["company_footer2"] = "Tax/VAT ID ..."
        f['number'] = '0001-00001234'
        f['issue_date'] = '2010-09-10'
        f['due_date'] = '2099-09-10'
        f['customer_name'] = "Sample Client"
        f['customer_address'] = "Siempreviva 1234"

        # print line item...
        li = 0
        k = 0
        total = Decimal("0.00")
        for it in li_items:
            k = k + 1
            if k > page * (max_lines_per_page - 1):
                break
            if it['amount']:
                total += Decimal("%.6f" % it['amount'])
            if k > (page - 1) * (max_lines_per_page - 1):
                # this line belongs on the current page
                li += 1
                if it['qty'] is not None:
                    f['item_quantity%02d' % li] = it['qty']
                if it['code'] is not None:
                    f['item_code%02d' % li] = it['code']
                if it['unit'] is not None:
                    f['item_unit%02d' % li] = it['unit']
                f['item_description%02d' % li] = it['ds']
                if it['price'] is not None:
                    f['item_price%02d' % li] = "%0.3f" % it['price']
                if it['amount'] is not None:
                    f['item_amount%02d' % li] = "%0.2f" % it['amount']

        if pages == page:
            # final page shows net/VAT breakdown (21% VAT included in total)
            f['net'] = "%0.2f" % (total/Decimal("1.21"))
            f['vat'] = "%0.2f" % (total*(1-1/Decimal("1.21")))
            f['total_label'] = 'Total:'
        else:
            f['total_label'] = 'SubTotal:'
        f['total'] = "%0.2f" % total

    f.render("./invoice.pdf")
    if sys.platform.startswith("linux"):
        os.system("evince ./invoice.pdf")
    else:
        os.system("./invoice.pdf")
Python
# -*- coding: latin-1 -*- "HTML Renderer for FPDF.py" __author__ = "Mariano Reingart <reingart@gmail.com>" __copyright__ = "Copyright (C) 2010 Mariano Reingart" __license__ = "LGPL 3.0" # Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc. from fpdf import FPDF from HTMLParser import HTMLParser DEBUG = False def px2mm(px): return int(px)*25.4/72.0 def hex2dec(color = "#000000"): if color: r = int(color[1:3], 16) g = int(color[3:5], 16) b = int(color[5:7], 16) return r, g, b class HTML2FPDF(HTMLParser): "Render basic HTML to FPDF" def __init__(self, pdf, image_map): HTMLParser.__init__(self) self.image_map = image_map self.style = {} self.pre = False self.href = '' self.align = '' self.page_links = {} self.font_list = ("times","courier", "helvetica") self.pdf = pdf self.r = self.g = self.b = 0 self.indent = 0 self.bullet = [] self.set_font("times", 12) self.table = None # table attributes self.table_col_width = None # column (header) widths self.table_col_index = None # current column index self.td = None # cell attributes self.th = False # header enabled self.tr = None self.theader = None # table header cells self.tfooter = None # table footer cells self.thead = None self.tfoot = None self.theader_out = self.tfooter_out = False def width2mm(self, length): if length[-1]=='%': total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin if self.table['width'][-1]=='%': total *= int(self.table['width'][:-1])/100.0 return int(length[:-1]) * total / 101.0 else: return int(length) / 6.0 def handle_data(self, txt): if self.td is not None: # drawing a table? 
if 'width' not in self.td and 'colspan' not in self.td: l = [self.table_col_width[self.table_col_index]] elif 'colspan' in self.td: i = self.table_col_index colspan = int(self.td['colspan']) l = self.table_col_width[i:i+colspan] else: l = [self.td.get('width','240')] w = sum([self.width2mm(lenght) for lenght in l]) h = int(self.td.get('height', 0)) / 4 or self.h*1.30 self.table_h = h border = int(self.table.get('border', 0)) if not self.th: align = self.td.get('align', 'L')[0].upper() border = border and 'LR' else: self.set_style('B',True) border = border or 'B' align = 'C' bgcolor = hex2dec(self.td.get('bgcolor', self.tr.get('bgcolor', ''))) # parsing table header/footer (drawn later): if self.thead is not None: self.theader.append(((w,h,txt,border,0,align), bgcolor)) if self.tfoot is not None: self.tfooter.append(((w,h,txt,border,0,align), bgcolor)) # check if reached end of page, add table footer and header: height = h + (self.tfooter and self.tfooter[0][0][1] or 0) if self.pdf.y+height>self.pdf.page_break_trigger and not self.th: self.output_table_footer() self.pdf.add_page() self.theader_out = self.tfooter_out = False if self.tfoot is None and self.thead is None: if not self.theader_out: self.output_table_header() self.box_shadow(w, h, bgcolor) if DEBUG: print "td cell", self.pdf.x, w, txt, "*" self.pdf.cell(w,h,txt,border,0,align) elif self.table is not None: # ignore anything else than td inside a table pass elif self.align: if DEBUG: print "cell", txt, "*" self.pdf.cell(0,self.h,txt,0,1,self.align[0].upper(), self.href) else: txt = txt.replace("\n"," ") if self.href: self.put_link(self.href,txt) else: if DEBUG: print "write", txt, "*" self.pdf.write(self.h,txt) def box_shadow(self, w, h, bgcolor): if DEBUG: print "box_shadow", w, h, bgcolor if bgcolor: fill_color = self.pdf.fill_color self.pdf.set_fill_color(*bgcolor) self.pdf.rect(self.pdf.x, self.pdf.y, w, h, 'F') self.pdf.fill_color = fill_color def output_table_header(self): if self.theader: b = self.b 
x = self.pdf.x self.pdf.set_x(self.table_offset) self.set_style('B',True) for cell, bgcolor in self.theader: self.box_shadow(cell[0], cell[1], bgcolor) self.pdf.cell(*cell) self.set_style('B',b) self.pdf.ln(self.theader[0][0][1]) self.pdf.set_x(self.table_offset) #self.pdf.set_x(x) self.theader_out = True def output_table_footer(self): if self.tfooter: x = self.pdf.x self.pdf.set_x(self.table_offset) #TODO: self.output_table_sep() for cell, bgcolor in self.tfooter: self.box_shadow(cell[0], cell[1], bgcolor) self.pdf.cell(*cell) self.pdf.ln(self.tfooter[0][0][1]) self.pdf.set_x(x) if int(self.table.get('border', 0)): self.output_table_sep() self.tfooter_out = True def output_table_sep(self): self.pdf.set_x(self.table_offset) x1 = self.pdf.x y1 = self.pdf.y w = sum([self.width2mm(lenght) for lenght in self.table_col_width]) self.pdf.line(x1,y1,x1+w,y1) def handle_starttag(self, tag, attrs): attrs = dict(attrs) if DEBUG: print "STARTTAG", tag, attrs if tag=='b' or tag=='i' or tag=='u': self.set_style(tag,1) if tag=='a': self.href=attrs['href'] if tag=='br': self.pdf.ln(5) if tag=='p': self.pdf.ln(5) if attrs: self.align=attrs['align'].lower() if tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'): k = (2, 1.5, 1.17, 1, 0.83, 0.67)[int(tag[1])] self.pdf.ln(5*k) self.pdf.set_text_color(150,0,0) self.pdf.set_font_size(12 * k) if attrs: self.align = attrs.get('align') if tag=='hr': self.put_line() if tag=='pre': self.pdf.set_font('Courier','',11) self.pdf.set_font_size(11) self.set_style('B',False) self.set_style('I',False) self.pre = True if tag=='blockquote': self.set_text_color(100,0,45) self.pdf.ln(3) if tag=='ul': self.indent+=1 self.bullet.append('\x95') if tag=='ol': self.indent+=1 self.bullet.append(0) if tag=='li': self.pdf.ln(self.h+2) self.pdf.set_text_color(190,0,0) bullet = self.bullet[self.indent-1] if not isinstance(bullet, basestring): bullet += 1 self.bullet[self.indent-1] = bullet bullet = "%s. 
" % bullet self.pdf.write(self.h,'%s%s ' % (' '*5*self.indent, bullet)) self.set_text_color() if tag=='font': if 'color' in attrs: self.color = hex2dec(attrs['color']) self.set_text_color(*color) self.color = color if 'face' in attrs and attrs['face'].lower() in self.font_list: face = attrs.get('face').lower() self.pdf.set_font(face) self.font_face = face if 'size' in attrs: face = attrs.get('size') self.pdf.set_font('', size) self.font_size = size if tag=='table': self.table = dict([(k.lower(), v) for k,v in attrs.items()]) if not 'width' in self.table: self.table['width'] = '100%' if self.table['width'][-1]=='%': w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin w *= int(self.table['width'][:-1])/100.0 self.table_offset = (self.pdf.w-w)/2.0 self.table_col_width = [] self.theader_out = self.tfooter_out = False self.theader = [] self.tfooter = [] self.thead = None self.tfoot = None self.pdf.ln() if tag=='tr': self.tr = dict([(k.lower(), v) for k,v in attrs.items()]) self.table_col_index = 0 self.pdf.set_x(self.table_offset) if tag=='td': self.td = dict([(k.lower(), v) for k,v in attrs.items()]) if tag=='th': self.td = dict([(k.lower(), v) for k,v in attrs.items()]) self.th = True if self.td['width']: self.table_col_width.append(self.td['width']) if tag=='thead': self.thead = {} if tag=='tfoot': self.tfoot = {} if tag=='img': if 'src' in attrs: x = self.pdf.get_x() y = self.pdf.get_y() w = px2mm(attrs.get('width', 0)) h = px2mm(attrs.get('height',0)) if self.align and self.align[0].upper() == 'C': x = (self.pdf.w-x)/2.0 - w/2.0 self.pdf.image(self.image_map(attrs['src']), x, y, w, h, link=self.href) self.pdf.set_x(x+w) self.pdf.set_y(y+h) if tag=='b' or tag=='i' or tag=='u': self.set_style(tag, True) if tag=='center': self.align = 'Center' def handle_endtag(self, tag): #Closing tag if DEBUG: print "ENDTAG", tag if tag=='h1' or tag=='h2' or tag=='h3' or tag=='h4': self.pdf.ln(6) self.set_font() self.set_style() self.align = None if tag=='pre': 
self.pdf.set_font(self.font or 'Times','',12) self.pdf.set_font_size(12) self.pre=False if tag=='blockquote': self.set_text_color(0,0,0) self.pdf.ln(3) if tag=='strong': tag='b' if tag=='em': tag='i' if tag=='b' or tag=='i' or tag=='u': self.set_style(tag, False) if tag=='a': self.href='' if tag=='p': self.align='' if tag in ('ul', 'ol'): self.indent-=1 self.bullet.pop() if tag=='table': if not self.tfooter_out: self.output_table_footer() self.table = None self.th = False self.theader = None self.tfooter = None self.pdf.ln() if tag=='thead': self.thead = None if tag=='tfoot': self.tfoot = None if tag=='tbody': # draw a line separator between table bodies self.pdf.set_x(self.table_offset) self.output_table_sep() if tag=='tr': h = self.table_h if self.tfoot is None: self.pdf.ln(h) self.tr = None if tag=='td' or tag=='th': if self.th: if DEBUG: print "revert style" self.set_style('B', False) # revert style self.table_col_index += int(self.td.get('colspan','1')) self.td = None self.th = False if tag=='font': if self.color: self.pdf.set_text_color(0,0,0) self.color = None if self.font: self.SetFont('Times','',12) self.font = None if tag=='center': self.align = None def set_font(self, face=None, size=None): if face: self.font_face = face if size: self.font_size = size self.h = size / 72.0*25.4 if DEBUG: print "H", self.h self.pdf.set_font(self.font_face or 'times','',12) self.pdf.set_font_size(self.font_size or 12) self.set_style('u', False) self.set_style('b', False) self.set_style('i', False) self.set_text_color() def set_style(self, tag=None, enable=None): #Modify style and select corresponding font if tag: t = self.style.get(tag.lower()) self.style[tag.lower()] = enable style='' for s in ('b','i','u'): if self.style.get(s): style+=s if DEBUG: print "SET_FONT_STYLE", style self.pdf.set_font('',style) def set_text_color(self, r=None, g=0, b=0): if r is None: self.pdf.set_text_color(self.r,self.g,self.b) else: self.pdf.set_text_color(r, g, b) self.r = r self.g = g 
self.b = b def put_link(self, url, txt): #Put a hyperlink self.set_text_color(0,0,255) self.set_style('u', True) self.pdf.write(5,txt,url) self.set_style('u', False) self.set_text_color(0) def put_line(self): self.pdf.ln(2) self.pdf.line(self.pdf.get_x(),self.pdf.get_y(),self.pdf.get_x()+187,self.pdf.get_y()) self.pdf.ln(3) class HTMLMixin(): def write_html(self, text, image_map=lambda x:x): "Parse HTML and convert it to PDF" h2p = HTML2FPDF(self,image_map=image_map) h2p.feed(text) if __name__=='__main__': html=""" <H1 align="center">html2fpdf</H1> <h2>Basic usage</h2> <p>You can now easily print text mixing different styles : <B>bold</B>, <I>italic</I>, <U>underlined</U>, or <B><I><U>all at once</U></I></B>!<BR>You can also insert links on text, such as <A HREF="http://www.fpdf.org">www.fpdf.org</A>, or on an image: click on the logo.<br> <center> <A HREF="http://www.fpdf.org"><img src="tutorial/logo.png" width="104" height="71"></A> </center> <h3>Sample List</h3> <ul><li>option 1</li> <ol><li>option 2</li></ol> <li>option 3</li></ul> <table border="0" align="center" width="50%"> <thead><tr><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead> <tbody> <tr><td>cell 1</td><td>cell 2</td></tr> <tr><td>cell 2</td><td>cell 3</td></tr> </tbody> </table> <table border="1"> <thead><tr bgcolor="#A0A0A0"><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead> <tfoot><tr bgcolor="#E0E0E0"><td>footer 1</td><td>footer 2</td></tr></tfoot> <tbody> <tr><td>cell 1</td><td>cell 2</td></tr> <tr> <td width="30%">cell 1</td><td width="70%" bgcolor="#D0D0FF" align='right'>cell 2</td> </tr> </tbody> <tbody><tr><td colspan="2">cell spanned</td></tr></tbody> <tbody> """ + """<tr bgcolor="#F0F0F0"> <td>cell 3</td><td>cell 4</td> </tr><tr bgcolor="#FFFFFF"> <td>cell 5</td><td>cell 6</td> </tr>""" * 200 + """ </tbody> </table> """ class MyFPDF(FPDF, HTMLMixin): def header(self): self.image('tutorial/logo_pb.png',10,8,33) self.set_font('Arial','B',15) 
self.cell(80) self.cell(30,10,'Title',1,0,'C') self.ln(20) def footer(self): self.set_y(-15) self.set_font('Arial','I',8) txt = 'Page %s of %s' % (self.page_no(), self.alias_nb_pages()) self.cell(0,10,txt,0,0,'C') pdf=MyFPDF() #First page pdf.add_page() pdf.write_html(html) pdf.output('html.pdf','F') import os os.system("evince html.pdf")
Python
# Public entry points of the fpdf package: the FPDF document class, the
# HTMLMixin HTML-rendering helper and the Template layout helper.
# NOTE(review): "html" and "template" are this package's own modules resolved
# via Python 2 implicit relative imports (not the stdlib "html" module) --
# keep the order and names as-is.
from fpdf import FPDF
from html import HTMLMixin
from template import Template
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Creates a taskbar icon for web2py
#
# Author: Mark Larsen, mostly stolen from Mark Hammond's
#   C:\Python25\Lib\site-packages\win32\Demos\win32gui_taskbar.py
# 11/7/08
# dual licensed under the web2py license (LGPL) and the Python license.
#
# Python 2 / pywin32 module: puts a web2py icon in the Windows notification
# area with a right-click menu (start/stop/restart/quit); menu choices are
# queued on self.status for the owning process to poll.

import os
import sys
import base64

import win32con
import win32api
import win32gui


class TaskBarIcon:

    def __init__(self, iconPath=None):
        # iconPath: optional path to a custom .ico file; falls back to the
        # embedded base64 icons below when absent or unreadable.
        self.iconPath = iconPath
        self.status = []  # queue of EnumStatus commands chosen from the menu
        msg_TaskbarRestart = \
            win32api.RegisterWindowMessage('TaskbarCreated')
        message_map = {
            msg_TaskbarRestart: self.OnRestart,
            win32con.WM_DESTROY: self.OnDestroy,
            win32con.WM_COMMAND: self.OnCommand,
            win32con.WM_USER + 20: self.OnTaskbarNotify,
            }
        # Register the Window class.
        wc = win32gui.WNDCLASS()
        hinst = wc.hInstance = win32api.GetModuleHandle(None)
        wc.lpszClassName = 'web2pyTaskbar'
        wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
        wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
        wc.hbrBackground = win32con.COLOR_WINDOW
        wc.lpfnWndProc = message_map  # could also specify a wndproc.
        classAtom = win32gui.RegisterClass(wc)
        # Create the (hidden message-only) Window.
        style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
        self.hwnd = win32gui.CreateWindow(
            classAtom,
            'web2pyTaskbar',
            style,
            0,
            0,
            win32con.CW_USEDEFAULT,
            win32con.CW_USEDEFAULT,
            0,
            0,
            hinst,
            None,
            )
        win32gui.UpdateWindow(self.hwnd)
        self.SetServerStopped()

    def __createIcon(self):
        # (Re)create the notification-area icon reflecting self.serverState.
        # try and use custom icon
        if self.iconPath and os.path.isfile(self.iconPath):
            hicon = self.__loadFromFile(self.iconPath)
        else:
            # Decode the embedded icon to a temp file, load it, delete it.
            try:
                fp = 'tmp.ico'
                icFH = file(fp, 'wb')
                if self.serverState == self.EnumServerState.STOPPED:
                    icFH.write(base64.b64decode(self.__getIconStopped()))
                elif self.serverState == self.EnumServerState.RUNNING:
                    icFH.write(base64.b64decode(self.__getIconRunning()))
                icFH.close()
                hicon = self.__loadFromFile(fp)
                os.unlink(fp)
            except:
                print "Can't load web2py icons - using default"
                hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
        flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE\
            | win32gui.NIF_TIP
        nid = (
            self.hwnd,
            0,
            flags,
            win32con.WM_USER + 20,
            hicon,
            'web2py Framework',
            )
        # Modify the existing icon if present, otherwise add a fresh one.
        try:
            win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, nid)
        except:
            try:
                win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
            except win32api.error:
                # This is common when windows is starting, and this code is hit
                # before the taskbar has been created.
                print 'Failed to add the taskbar icon - is explorer running?'
                # but keep running anyway - when explorer starts, we get the
                # TaskbarCreated message and recreate the icon (OnRestart).

    def OnRestart(
        self,
        hwnd,
        msg,
        wparam,
        lparam,
        ):
        # Explorer restarted: recreate the icon.
        # NOTE(review): _DoCreateIcons is not defined anywhere on this class
        # (icon creation lives in __createIcon) -- this handler would raise
        # AttributeError when explorer restarts; confirm.
        self._DoCreateIcons()

    def OnDestroy(
        self,
        hwnd,
        msg,
        wparam,
        lparam,
        ):
        # Remove the notification-area icon when our window is destroyed.
        nid = (self.hwnd, 0)
        win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)

    def OnTaskbarNotify(
        self,
        hwnd,
        msg,
        wparam,
        lparam,
        ):
        # Mouse events on the tray icon; right-click pops the command menu.
        if lparam == win32con.WM_LBUTTONUP:
            pass
        elif lparam == win32con.WM_LBUTTONDBLCLK:
            pass
        elif lparam == win32con.WM_RBUTTONUP:
            menu = win32gui.CreatePopupMenu()
            win32gui.AppendMenu(menu, win32con.MF_STRING, 1023,
                                'Toggle Display')
            win32gui.AppendMenu(menu, win32con.MF_SEPARATOR, 0, '')
            # Gray out the items that do not apply to the current state.
            if self.serverState == self.EnumServerState.STOPPED:
                win32gui.AppendMenu(menu, win32con.MF_STRING, 1024,
                                    'Start Server')
                win32gui.AppendMenu(menu, win32con.MF_STRING
                                    | win32con.MF_GRAYED, 1025,
                                    'Restart Server')
                win32gui.AppendMenu(menu, win32con.MF_STRING
                                    | win32con.MF_GRAYED, 1026,
                                    'Stop Server')
            else:
                win32gui.AppendMenu(menu, win32con.MF_STRING
                                    | win32con.MF_GRAYED, 1024,
                                    'Start Server')
                win32gui.AppendMenu(menu, win32con.MF_STRING, 1025,
                                    'Restart Server')
                win32gui.AppendMenu(menu, win32con.MF_STRING, 1026,
                                    'Stop Server')
            win32gui.AppendMenu(menu, win32con.MF_SEPARATOR, 0, '')
            win32gui.AppendMenu(menu, win32con.MF_STRING, 1027,
                                'Quit (pid:%i)' % os.getpid())
            pos = win32gui.GetCursorPos()
            # See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
            win32gui.SetForegroundWindow(self.hwnd)
            win32gui.TrackPopupMenu(
                menu,
                win32con.TPM_LEFTALIGN,
                pos[0],
                pos[1],
                0,
                self.hwnd,
                None,
                )
            win32api.PostMessage(self.hwnd, win32con.WM_NULL, 0, 0)
        return 1

    def OnCommand(
        self,
        hwnd,
        msg,
        wparam,
        lparam,
        ):
        # Translate a menu selection into an EnumStatus entry on the queue.
        id = win32api.LOWORD(wparam)
        if id == 1023:
            self.status.append(self.EnumStatus.TOGGLE)
        elif id == 1024:
            self.status.append(self.EnumStatus.START)
        elif id == 1025:
            self.status.append(self.EnumStatus.RESTART)
        elif id == 1026:
            self.status.append(self.EnumStatus.STOP)
        elif id == 1027:
            self.status.append(self.EnumStatus.QUIT)
            self.Destroy()
        else:
            print 'Unknown command -', id

    def Destroy(self):
        win32gui.DestroyWindow(self.hwnd)

    def SetServerRunning(self):
        # Switch the icon to the "running" glyph.
        self.serverState = self.EnumServerState.RUNNING
        self.__createIcon()

    def SetServerStopped(self):
        # Switch the icon to the "stopped" glyph.
        self.serverState = self.EnumServerState.STOPPED
        self.__createIcon()

    def __getIconRunning(self):
        # base64-encoded 16x16 .ico shown while the server is running
        return 'AAABAAEAEBAQAAAAAAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAgAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAIXMGAABe/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABERAgAAIAAAEAACAAAgAAABEAIiACIgAAABAgAgIAIAEAECACAgAgABEAIiACACAAAAAAAAAAAAICACIiAiIAICAgIAACACAgICAgAAIAICAgICIiAiIAICAgIAACACAgICAgAAIAICAgICIiAiIAAAAAAAAAAAD//wAAhe8AAL3vAADMYwAA9a0AALWtAADMbQAA//8AAKwjAABV7QAAVe0AAFQjAABV7QAAVe0AAFQjAAD//wAA'

    def __getIconStopped(self):
        # base64-encoded 16x16 .ico shown while the server is stopped
        return 'AAABAAEAEBAQAAEABAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJCdIAIXMGAABe/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzMzMzMzMzAwERMjMzIzAzEDMyMzMjAzMxAzIiMyAjMzMwMjMjAzIzEzECMyAjMjMxEzAiAyMyMzMzMwAzMzMzIyMyACMiIzIyMjAzAyMyMjIyAjMwIzIyMjAyIiMCIzIyAjIzMyAyMjAyMjMzIwIyAjIyIiMiIDAzMzMzMzMzB//gAAhe0AAJ3rAADMYwAA9a0AALGNAADMLQAA/n8AAKwjAABVrQAAUc0AAFQjAABF5QAAVekAABQhAAB//gAA'

    def __loadFromFile(self, iconPath):
        # Load an .ico from disk, returning an HICON handle.
        hinst = win32api.GetModuleHandle(None)
        icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
        hicon = win32gui.LoadImage(
            hinst,
            iconPath,
            win32con.IMAGE_ICON,
            0,
            0,
            icon_flags,
            )
        return hicon

    class EnumStatus:
        # Commands queued on self.status for the owner to consume.
        TOGGLE = 0
        START = 1
        STOP = 2
        RESTART = 3
        QUIT = 4

    class EnumServerState:
        # Which tray glyph is currently displayed.
        RUNNING = 0
        STOPPED = 1
Python
""" AIM class to credit card payment with authorize.net Fork of authnet code written by John Conde http://www.johnconde.net/blog/integrate-the-authorizenet-aim-api-with-python-3-2/ Unkown license, assuming public domain Modifed by Massimo Di Pierro - ported from Python 3.x run on Python 2.4+ - fixed a couple of bugs - merged with test so single file - namedtuple from http://code.activestate.com/recipes/500261/ """ __all__ = ['AIM'] from operator import itemgetter import urllib _known_tuple_types = {} class NamedTupleBase(tuple): """Base class for named tuples with the __new__ operator set, named tuples yielded by the namedtuple() function will subclass this and add properties.""" def __new__(cls, *args, **kws): """Create a new instance of this fielded tuple""" # May need to unpack named field values here if kws: values = list(args) + [None]*(len(cls._fields) - len(args)) fields = dict((val, idx) for idx, val in enumerate(cls._fields)) for kw, val in kws.iteritems(): assert kw in kws, "%r not in field list" % kw values[fields[kw]] = val args = tuple(values) return tuple.__new__(cls, args) def namedtuple(typename, fieldnames): """ >>> import namedtuples >>> tpl = namedtuples.namedtuple(['a', 'b', 'c']) >>> tpl(1, 2, 3) (1, 2, 3) >>> tpl(1, 2, 3).b 2 >>> tpl(c=1, a=2, b=3) (2, 3, 1) >>> tpl(c=1, a=2, b=3).b 3 >>> tpl(c='pads with nones') (None, None, 'pads with nones') >>> tpl(b='pads with nones') (None, 'pads with nones', None) >>> """ # Split up a string, some people do this if isinstance(fieldnames, basestring): fieldnames = fieldnames.replace(',', ' ').split() # Convert anything iterable that enumerates fields to a tuple now fieldname_tuple = tuple(str(field) for field in fieldnames) # See if we've cached this if fieldname_tuple in _known_tuple_types: return _known_tuple_types[fieldname_tuple] # Make the type new_tuple_type = type(typename, (NamedTupleBase,), {}) # Set the hidden field new_tuple_type._fields = fieldname_tuple # Add the getters for i, field in 
enumerate(fieldname_tuple): setattr(new_tuple_type, field, property(itemgetter(i))) # Cache _known_tuple_types[fieldname_tuple] = new_tuple_type # Done return new_tuple_type class AIM: class AIMError(Exception): def __init__(self, value): self.parameter = value def __str__(self): return str(self.parameter) def __init__(self, login, transkey, testmode=False): if str(login).strip() == '' or login == None: raise AIM.AIMError('No login name provided') if str(transkey).strip() == '' or transkey == None: raise AIM.AIMError('No transaction key provided') if testmode != True and testmode != False: raise AIM.AIMError('Invalid value for testmode. Must be True or False. "{0}" given.'.format(testmode)) self.testmode = testmode self.proxy = None; self.delimiter = '|' self.results = [] self.error = True self.success = False self.declined = False self.parameters = {} self.setParameter('x_delim_data', 'true') self.setParameter('x_delim_char', self.delimiter) self.setParameter('x_relay_response', 'FALSE') self.setParameter('x_url', 'FALSE') self.setParameter('x_version', '3.1') self.setParameter('x_method', 'CC') self.setParameter('x_type', 'AUTH_CAPTURE') self.setParameter('x_login', login) self.setParameter('x_tran_key', transkey) def process(self): encoded_args = urllib.urlencode(self.parameters) if self.testmode == True: url = 'https://test.authorize.net/gateway/transact.dll' else: url = 'https://secure.authorize.net/gateway/transact.dll' if self.proxy == None: self.results += str(urllib.urlopen(url, encoded_args).read()).split(self.delimiter) else: opener = urllib.FancyURLopener(self.proxy) opened = opener.open(url, encoded_args) try: self.results += str(opened.read()).split(self.delimiter) finally: opened.close() Results = namedtuple('Results', 'ResultResponse ResponseSubcode ResponseCode ResponseText AuthCode \ AVSResponse TransactionID InvoiceNumber Description Amount PaymentMethod \ TransactionType CustomerID CHFirstName CHLastName Company BillingAddress \ BillingCity 
BillingState BillingZip BillingCountry Phone Fax Email ShippingFirstName \ ShippingLastName ShippingCompany ShippingAddress ShippingCity ShippingState \ ShippingZip ShippingCountry TaxAmount DutyAmount FreightAmount TaxExemptFlag \ PONumber MD5Hash CVVResponse CAVVResponse') self.response = Results(*tuple(r for r in self.results)[0:40]) if self.getResultResponseFull() == 'Approved': self.error = False self.success = True self.declined = False elif self.getResultResponseFull() == 'Declined': self.error = False self.success = False self.declined = True else: raise AIM.AIMError(self.response.ResponseText) def setTransaction(self, creditcard, expiration, total, cvv=None, tax=None, invoice=None): if str(creditcard).strip() == '' or creditcard == None: raise AIM.AIMError('No credit card number passed to setTransaction(): {0}'.format(creditcard)) if str(expiration).strip() == '' or expiration == None: raise AIM.AIMError('No expiration number to setTransaction(): {0}'.format(expiration)) if str(total).strip() == '' or total == None: raise AIM.AIMError('No total amount passed to setTransaction(): {0}'.format(total)) self.setParameter('x_card_num', creditcard) self.setParameter('x_exp_date', expiration) self.setParameter('x_amount', total) if cvv != None: self.setParameter('x_card_code', cvv) if tax != None: self.setParameter('x_tax', tax) if invoice != None: self.setParameter('x_invoice_num', invoice) def setTransactionType(self, transtype=None): types = ['AUTH_CAPTURE', 'AUTH_ONLY', 'PRIOR_AUTH_CAPTURE', 'CREDIT', 'CAPTURE_ONLY', 'VOID'] if transtype.upper() not in types: raise AIM.AIMError('Incorrect Transaction Type passed to setTransactionType(): {0}'.format(transtype)) self.setParameter('x_type', transtype.upper()) def setProxy(self, proxy=None): if str(proxy).strip() == '' or proxy == None: raise AIM.AIMError('No proxy passed to setProxy()') self.proxy = {'http': str(proxy).strip()} def setParameter(self, key=None, value=None): if key != None and value != None and 
str(key).strip() != '' and str(value).strip() != '': self.parameters[key] = str(value).strip() else: raise AIM.AIMError('Incorrect parameters passed to setParameter(): {0}:{1}'.format(key, value)) def isApproved(self): return self.success def isDeclined(self): return self.declined def isError(self): return self.error def getResultResponseFull(self): responses = ['', 'Approved', 'Declined', 'Error'] return responses[int(self.results[0])] def process(creditcard,expiration,total,cvv=None,tax=None,invoice=None, login='cnpdev4289', transkey='SR2P8g4jdEn7vFLQ',testmode=True): payment = AIM(login,transkey,testmode) expiration = expiration.replace('/','') payment.setTransaction(creditcard, expiration, total, cvv, tax, invoice) try: payment.process() return payment.isApproved() except AIM.AIMError: return False def test(): import socket import sys from time import time creditcard = '4427802641004797' expiration = '122012' total = '1.00' cvv = '123' tax = '0.00' invoice = str(time())[4:10] # get a random invoice number try: payment = AIM('cnpdev4289', 'SR2P8g4jdEn7vFLQ', True) payment.setTransaction(creditcard, expiration, total, cvv, tax, invoice) payment.setParameter('x_duplicate_window', 180) # three minutes duplicate windows payment.setParameter('x_cust_id', '1324') # customer ID payment.setParameter('x_first_name', 'John') payment.setParameter('x_last_name', 'Conde') payment.setParameter('x_company', 'Test Company') payment.setParameter('x_address', '1234 Main Street') payment.setParameter('x_city', 'Townsville') payment.setParameter('x_state', 'NJ') payment.setParameter('x_zip', '12345') payment.setParameter('x_country', 'US') payment.setParameter('x_phone', '800-555-1234') payment.setParameter('x_description', 'Test Transaction') payment.setParameter('x_customer_ip', socket.gethostbyname(socket.gethostname())) payment.setParameter('x_email', 'john@example.com') payment.setParameter('x_email_customer', False) payment.process() if payment.isApproved(): print 'Response 
Code: ', payment.response.ResponseCode print 'Response Text: ', payment.response.ResponseText print 'Response: ', payment.getResultResponseFull() print 'Transaction ID: ', payment.response.TransactionID print 'CVV Result: ', payment.response.CVVResponse print 'Approval Code: ', payment.response.AuthCode print 'AVS Result: ', payment.response.AVSResponse elif payment.isDeclined(): print 'Your credit card was declined by your bank' elif payment.isError(): raise AIM.AIMError('An uncaught error occurred') except AIM.AIMError, e: print "Exception thrown:", e print 'An error occured' print 'approved',payment.isApproved() print 'declined',payment.isDeclined() print 'error',payment.isError() if __name__=='__main__': test()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """PyRSS2Gen - A Python library for generating RSS 2.0 feeds.""" __name__ = 'PyRSS2Gen' __version__ = (1, 0, 0) __author__ = 'Andrew Dalke <dalke@dalkescientific.com>' _generator_name = __name__ + '-' + '.'.join(map(str, __version__)) import datetime import cStringIO # Could make this the base class; will need to add 'publish' class WriteXmlMixin: def write_xml(self, outfile, encoding='iso-8859-1'): from xml.sax import saxutils handler = saxutils.XMLGenerator(outfile, encoding) handler.startDocument() self.publish(handler) handler.endDocument() def to_xml(self, encoding='iso-8859-1'): try: import cStringIO as StringIO except ImportError: import StringIO f = StringIO.StringIO() self.write_xml(f, encoding) return f.getvalue() def _element( handler, name, obj, d={}, ): if isinstance(obj, basestring) or obj is None: # special-case handling to make the API easier # to use for the common case. handler.startElement(name, d) if obj is not None: handler.characters(obj) handler.endElement(name) else: # It better know how to emit the correct XML. obj.publish(handler) def _opt_element(handler, name, obj): if obj is None: return _element(handler, name, obj) def _format_date(dt): """convert a datetime into an RFC 822 formatted date Input date must be in GMT. """ # Looks like: # Sat, 07 Sep 2002 00:00:01 GMT # Can't use strftime because that's locale dependent # # Isn't there a standard way to do this for Python? The # rfc822 and email.Utils modules assume a timestamp. The # following is based on the rfc822 module. return '%s, %02d %s %04d %02d:%02d:%02d GMT' % ( [ 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun', ][dt.weekday()], dt.day, [ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', ][dt.month - 1], dt.year, dt.hour, dt.minute, dt.second, ) ## # A couple simple wrapper objects for the fields which # take a simple value other than a string. 
class IntElement:

    """implements the 'publish' API for integers

    Takes the tag name and the integer value to publish.

    (Could be used for anything which uses str() to be published
    to text for XML.)
    """

    element_attrs = {}

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def publish(self, handler):
        handler.startElement(self.name, self.element_attrs)
        handler.characters(str(self.val))
        handler.endElement(self.name)


class DateElement:

    """implements the 'publish' API for a datetime.datetime

    Takes the tag name and the datetime to publish.

    Converts the datetime to RFC 2822 timestamp (4-digit year).
    """

    def __init__(self, name, dt):
        self.name = name
        self.dt = dt

    def publish(self, handler):
        _element(handler, self.name, _format_date(self.dt))

# ###


class Category:

    """Publish a category element"""

    def __init__(self, category, domain=None):
        self.category = category
        self.domain = domain

    def publish(self, handler):
        d = {}
        if self.domain is not None:
            d['domain'] = self.domain
        _element(handler, 'category', self.category, d)


class Cloud:

    """Publish a cloud"""

    def __init__(self, domain, port, path, registerProcedure, protocol):
        self.domain = domain
        self.port = port
        self.path = path
        self.registerProcedure = registerProcedure
        self.protocol = protocol

    def publish(self, handler):
        _element(handler, 'cloud', None, {
            'domain': self.domain,
            'port': str(self.port),
            'path': self.path,
            'registerProcedure': self.registerProcedure,
            'protocol': self.protocol,
        })


class Image:

    """Publish a channel Image"""

    element_attrs = {}

    def __init__(self, url, title, link,
                 width=None, height=None, description=None):
        self.url = url
        self.title = title
        self.link = link
        self.width = width
        self.height = height
        self.description = description

    def publish(self, handler):
        handler.startElement('image', self.element_attrs)

        _element(handler, 'url', self.url)
        _element(handler, 'title', self.title)
        _element(handler, 'link', self.link)

        # width/height are optional; ints are wrapped so they serialize
        # through str() like everything else.
        width = self.width
        if isinstance(width, int):
            width = IntElement('width', width)
        _opt_element(handler, 'width', width)

        height = self.height
        if isinstance(height, int):
            height = IntElement('height', height)
        _opt_element(handler, 'height', height)

        _opt_element(handler, 'description', self.description)

        handler.endElement('image')


class Guid:

    """Publish a guid

    Defaults to being a permalink, which is the assumption if it's
    omitted.  Hence strings are always permalinks.
    """

    def __init__(self, guid, isPermaLink=1):
        self.guid = guid
        self.isPermaLink = isPermaLink

    def publish(self, handler):
        d = {}
        if self.isPermaLink:
            d['isPermaLink'] = 'true'
        else:
            d['isPermaLink'] = 'false'
        _element(handler, 'guid', self.guid, d)


class TextInput:

    """Publish a textInput

    Apparently this is rarely used.
    """

    element_attrs = {}

    def __init__(self, title, description, name, link):
        self.title = title
        self.description = description
        self.name = name
        self.link = link

    def publish(self, handler):
        handler.startElement('textInput', self.element_attrs)
        _element(handler, 'title', self.title)
        _element(handler, 'description', self.description)
        _element(handler, 'name', self.name)
        _element(handler, 'link', self.link)
        handler.endElement('textInput')


class Enclosure:

    """Publish an enclosure"""

    def __init__(self, url, length, type):
        self.url = url
        self.length = length
        self.type = type

    def publish(self, handler):
        _element(handler, 'enclosure', None,
                 {'url': self.url,
                  'length': str(self.length),
                  'type': self.type})


class Source:

    """Publish the item's original source, used by aggregators"""

    def __init__(self, name, url):
        self.name = name
        self.url = url

    def publish(self, handler):
        _element(handler, 'source', self.name, {'url': self.url})


class SkipHours:

    """Publish the skipHours

    This takes a list of hours, as integers.
    """

    element_attrs = {}

    def __init__(self, hours):
        self.hours = hours

    def publish(self, handler):
        if self.hours:
            handler.startElement('skipHours', self.element_attrs)
            for hour in self.hours:
                _element(handler, 'hour', str(hour))
            handler.endElement('skipHours')


class SkipDays:

    """Publish the skipDays

    This takes a list of days as strings.
    """

    element_attrs = {}

    def __init__(self, days):
        self.days = days

    def publish(self, handler):
        if self.days:
            handler.startElement('skipDays', self.element_attrs)
            for day in self.days:
                _element(handler, 'day', day)
            handler.endElement('skipDays')


class RSS2(WriteXmlMixin):

    """The main RSS class.

    Stores the channel attributes, with the "category" elements under
    ".categories" and the RSS items under ".items".
    """

    rss_attrs = {'version': '2.0'}
    element_attrs = {}

    def __init__(self, title, link, description,
                 language=None,
                 copyright=None,
                 managingEditor=None,
                 webMaster=None,
                 pubDate=None,
                 lastBuildDate=None,
                 categories=None,
                 generator=_generator_name,
                 docs='http://blogs.law.harvard.edu/tech/rss',
                 cloud=None,
                 ttl=None,
                 image=None,
                 rating=None,
                 textInput=None,
                 skipHours=None,
                 skipDays=None,
                 items=None):
        self.title = title
        self.link = link
        self.description = description
        self.language = language
        self.copyright = copyright
        self.managingEditor = managingEditor
        self.webMaster = webMaster
        self.pubDate = pubDate
        self.lastBuildDate = lastBuildDate

        # Avoid the shared-mutable-default trap: fresh list per instance.
        if categories is None:
            categories = []
        self.categories = categories

        self.generator = generator
        self.docs = docs
        self.cloud = cloud
        self.ttl = ttl
        self.image = image
        self.rating = rating
        self.textInput = textInput
        self.skipHours = skipHours
        self.skipDays = skipDays

        if items is None:
            items = []
        self.items = items

    def publish(self, handler):
        handler.startElement('rss', self.rss_attrs)
        handler.startElement('channel', self.element_attrs)
        # The three required channel elements:
        _element(handler, 'title', self.title)
        _element(handler, 'link', self.link)
        _element(handler, 'description', self.description)

        self.publish_extensions(handler)

        _opt_element(handler, 'language', self.language)
        _opt_element(handler, 'copyright', self.copyright)
        _opt_element(handler, 'managingEditor', self.managingEditor)
        _opt_element(handler, 'webMaster', self.webMaster)

        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement('pubDate', pubDate)
        _opt_element(handler, 'pubDate', pubDate)

        lastBuildDate = self.lastBuildDate
        if isinstance(lastBuildDate, datetime.datetime):
            lastBuildDate = DateElement('lastBuildDate', lastBuildDate)
        _opt_element(handler, 'lastBuildDate', lastBuildDate)

        for category in self.categories:
            if isinstance(category, basestring):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, 'generator', self.generator)
        _opt_element(handler, 'docs', self.docs)

        if self.cloud is not None:
            self.cloud.publish(handler)

        ttl = self.ttl
        if isinstance(self.ttl, int):
            ttl = IntElement('ttl', ttl)
        # FIX: the tag was misspelled 'tt'; RSS 2.0 defines the element
        # as <ttl>, so the channel TTL was emitted under a bogus name.
        _opt_element(handler, 'ttl', ttl)

        if self.image is not None:
            self.image.publish(handler)

        _opt_element(handler, 'rating', self.rating)

        if self.textInput is not None:
            self.textInput.publish(handler)
        if self.skipHours is not None:
            self.skipHours.publish(handler)
        if self.skipDays is not None:
            self.skipDays.publish(handler)

        for item in self.items:
            item.publish(handler)

        handler.endElement('channel')
        handler.endElement('rss')

    def publish_extensions(self, handler):
        # Derived classes can hook into this to insert
        # output after the three required fields.
        pass


class RSSItem(WriteXmlMixin):

    """Publish an RSS Item"""

    element_attrs = {}

    def __init__(self, title=None, link=None, description=None,
                 author=None,
                 categories=None,
                 comments=None,
                 enclosure=None,
                 guid=None,
                 pubDate=None,
                 source=None):
        # RSS 2.0: an item must carry at least a title or a description.
        if title is None and description is None:
            raise TypeError(
                "RSSItem must define at least one of 'title' or 'description'")
        self.title = title
        self.link = link
        self.description = description
        self.author = author
        if categories is None:
            categories = []
        self.categories = categories
        self.comments = comments
        self.enclosure = enclosure
        self.guid = guid
        self.pubDate = pubDate
        self.source = source
        # It sure does get tedious typing these names three times...

    def publish(self, handler):
        handler.startElement('item', self.element_attrs)
        _opt_element(handler, 'title', self.title)
        _opt_element(handler, 'link', self.link)
        self.publish_extensions(handler)
        _opt_element(handler, 'description', self.description)
        _opt_element(handler, 'author', self.author)

        for category in self.categories:
            if isinstance(category, basestring):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, 'comments', self.comments)
        if self.enclosure is not None:
            self.enclosure.publish(handler)
        _opt_element(handler, 'guid', self.guid)

        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement('pubDate', pubDate)
        _opt_element(handler, 'pubDate', pubDate)

        if self.source is not None:
            self.source.publish(handler)

        handler.endElement('item')

    def publish_extensions(self, handler):
        # Derived classes can hook into this to insert
        # output after the title and link elements
        pass


def dumps(rss, encoding='utf-8'):
    """Serialize ``rss`` to an XML string in the given encoding."""
    s = cStringIO.StringIO()
    rss.write_xml(s, encoding)
    return s.getvalue()


def test():
    """Build and serialize a small sample feed (smoke test)."""
    rss = RSS2(title='web2py feed',
               link='http://www.web2py.com',
               description='About web2py',
               lastBuildDate=datetime.datetime.now(),
               items=[RSSItem(title='web2py and PyRSS2Gen-0.0',
                              link='http://www.web2py.com/examples/simple_examples/getrss',
                              description='web2py can now make rss feeds!',
                              guid=Guid('http://www.web2py.com/'),
                              pubDate=datetime.datetime(2007, 11, 14, 10, 30))])
    return dumps(rss)


if __name__ == '__main__':
    print(test())
Python
#!/usr/bin/python # -*- coding: latin-1 -*- # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. "Simple SOAP Server implementation" __author__ = "Mariano Reingart (reingart@gmail.com)" __copyright__ = "Copyright (C) 2010 Mariano Reingart" __license__ = "LGPL 3.0" __version__ = "1.02c" from simplexml import SimpleXMLElement, TYPE_MAP, DateTime, Date, Decimal DEBUG = False class SoapDispatcher(object): "Simple Dispatcher for SOAP Server" def __init__(self, name, documentation='', action='', location='', namespace=None, prefix=False, soap_uri="http://schemas.xmlsoap.org/soap/envelope/", soap_ns='soap', **kwargs): self.methods = {} self.name = name self.documentation = documentation self.action = action # base SoapAction self.location = location self.namespace = namespace # targetNamespace self.prefix = prefix self.soap_ns = soap_ns self.soap_uri = soap_uri def register_function(self, name, fn, returns=None, args=None, doc=None): self.methods[name] = fn, returns, args, doc or getattr(fn,"__doc__","") def dispatch(self, xml, action=None): "Receive and proccess SOAP call" # default values: prefix = self.prefix ret = fault = None soap_ns, soap_uri = self.soap_ns, self.soap_uri soap_fault_code = 'VersionMismatch' try: request = SimpleXMLElement(xml, namespace=self.namespace) # detect soap prefix and uri (xmlns attributes of Envelope) for k, v in request[:]: if v in ("http://schemas.xmlsoap.org/soap/envelope/", "http://www.w3.org/2003/05/soap-env",): soap_ns = request.attributes()[k].localName soap_uri = request.attributes()[k].value soap_fault_code = 
'Client' # parse request message and get local method method = request('Body', ns=soap_uri).children()(0) if action: # method name = action name = action[len(self.action)+1:-1] prefix = self.prefix if not action or not name: # method name = input message name name = method.get_local_name() prefix = method.get_prefix() if DEBUG: print "dispatch method", name function, returns_types, args_types, doc = self.methods[name] # de-serialize parameters (if type definitions given) if args_types: args = method.children().unmarshall(args_types) elif args_types is None: args = {'request':method} # send raw request else: args = {} # no parameters soap_fault_code = 'Server' # execute function ret = function(**args) if DEBUG: print ret except Exception, e: import sys etype, evalue, etb = sys.exc_info() if DEBUG: import traceback detail = ''.join(traceback.format_exception(etype, evalue, etb)) detail += '\n\nXML REQUEST\n\n' + xml else: detail = None fault = {'faultcode': "%s.%s" % (soap_fault_code, etype.__name__), 'faultstring': unicode(evalue), 'detail': detail} # build response message if not prefix: xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>""" else: xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s" xmlns:%(prefix)s="%(namespace)s"/>""" xml = xml % {'namespace': self.namespace, 'prefix': prefix, 'soap_ns': soap_ns, 'soap_uri': soap_uri} response = SimpleXMLElement(xml, namespace=self.namespace, prefix=prefix) response['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance" response['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema" body = response.add_child("%s:Body" % soap_ns, ns=False) if fault: # generate a Soap Fault (with the python exception) body.marshall("%s:Fault" % soap_ns, fault, ns=False) else: # return normal value res = body.add_child("%sResponse" % name, ns=prefix) if not prefix: res['xmlns'] = self.namespace # add target namespace # serialize returned values (response) if type definition available if returns_types: if not 
isinstance(ret, dict): res.marshall(returns_types.keys()[0], ret, ) else: for k,v in ret.items(): res.marshall(k, v) elif returns_types is None: # merge xmlelement returned res.import_node(ret) return response.as_xml() # Introspection functions: def list_methods(self): "Return a list of aregistered operations" return [(method, doc) for method, (function, returns, args, doc) in self.methods.items()] def help(self, method=None): "Generate sample request and response messages" (function, returns, args, doc) = self.methods[method] xml = """ <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body><%(method)s xmlns="%(namespace)s"/></soap:Body> </soap:Envelope>""" % {'method':method, 'namespace':self.namespace} request = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix) if args: items = args.items() elif args is None: items = [('value', None)] else: items = [] for k,v in items: request(method).marshall(k, v, add_comments=True, ns=False) xml = """ <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body><%(method)sResponse xmlns="%(namespace)s"/></soap:Body> </soap:Envelope>""" % {'method':method, 'namespace':self.namespace} response = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix) if returns: items = returns.items() elif args is None: items = [('value', None)] else: items = [] for k,v in items: response('%sResponse'%method).marshall(k, v, add_comments=True, ns=False) return request.as_xml(pretty=True), response.as_xml(pretty=True), doc def wsdl(self): "Generate Web Service Description v1.1" xml = """<?xml version="1.0"?> <wsdl:definitions name="%(name)s" targetNamespace="%(namespace)s" xmlns:tns="%(namespace)s" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <wsdl:documentation xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">%(documentation)s</wsdl:documentation> <wsdl:types> <xsd:schema 
targetNamespace="%(namespace)s" elementFormDefault="qualified" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> </xsd:schema> </wsdl:types> </wsdl:definitions> """ % {'namespace': self.namespace, 'name': self.name, 'documentation': self.documentation} wsdl = SimpleXMLElement(xml) for method, (function, returns, args, doc) in self.methods.items(): # create elements: def parse_element(name, values, array=False, complex=False): if not complex: element = wsdl('wsdl:types')('xsd:schema').add_child('xsd:element') complex = element.add_child("xsd:complexType") else: complex = wsdl('wsdl:types')('xsd:schema').add_child('xsd:complexType') element = complex element['name'] = name if values: items = values elif values is None: items = [('value', None)] else: items = [] if not array and items: all = complex.add_child("xsd:all") elif items: all = complex.add_child("xsd:sequence") for k,v in items: e = all.add_child("xsd:element") e['name'] = k if array: e[:]={'minOccurs': "0", 'maxOccurs': "unbounded"} if v in TYPE_MAP.keys(): t='xsd:%s' % TYPE_MAP[v] elif v is None: t='xsd:anyType' elif isinstance(v, list): n="ArrayOf%s%s" % (name, k) l = [] for d in v: l.extend(d.items()) parse_element(n, l, array=True, complex=True) t = "tns:%s" % n elif isinstance(v, dict): n="%s%s" % (name, k) parse_element(n, v.items(), complex=True) t = "tns:%s" % n e.add_attribute('type', t) parse_element("%s" % method, args and args.items()) parse_element("%sResponse" % method, returns and returns.items()) # create messages: for m,e in ('Input',''), ('Output','Response'): message = wsdl.add_child('wsdl:message') message['name'] = "%s%s" % (method, m) part = message.add_child("wsdl:part") part[:] = {'name': 'parameters', 'element': 'tns:%s%s' % (method,e)} # create ports portType = wsdl.add_child('wsdl:portType') portType['name'] = "%sPortType" % self.name for method, (function, returns, args, doc) in self.methods.items(): op = portType.add_child('wsdl:operation') op['name'] = method if doc: 
op.add_child("wsdl:documentation", doc) input = op.add_child("wsdl:input") input['message'] = "tns:%sInput" % method output = op.add_child("wsdl:output") output['message'] = "tns:%sOutput" % method # create bindings binding = wsdl.add_child('wsdl:binding') binding['name'] = "%sBinding" % self.name binding['type'] = "tns:%sPortType" % self.name soapbinding = binding.add_child('soap:binding') soapbinding['style'] = "document" soapbinding['transport'] = "http://schemas.xmlsoap.org/soap/http" for method in self.methods.keys(): op = binding.add_child('wsdl:operation') op['name'] = method soapop = op.add_child('soap:operation') soapop['soapAction'] = self.action + method soapop['style'] = 'document' input = op.add_child("wsdl:input") ##input.add_attribute('name', "%sInput" % method) soapbody = input.add_child("soap:body") soapbody["use"] = "literal" output = op.add_child("wsdl:output") ##output.add_attribute('name', "%sOutput" % method) soapbody = output.add_child("soap:body") soapbody["use"] = "literal" service = wsdl.add_child('wsdl:service') service["name"] = "%sService" % self.name service.add_child('wsdl:documentation', text=self.documentation) port=service.add_child('wsdl:port') port["name"] = "%s" % self.name port["binding"] = "tns:%sBinding" % self.name soapaddress = port.add_child('soap:address') soapaddress["location"] = self.location return wsdl.as_xml(pretty=True) from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer class SOAPHandler(BaseHTTPRequestHandler): def do_GET(self): "User viewable help information and wsdl" args = self.path[1:].split("?") print "serving", args if self.path != "/" and args[0] not in self.server.dispatcher.methods.keys(): self.send_error(404, "Method not found: %s" % args[0]) else: if self.path == "/": # return wsdl if no method supplied response = self.server.dispatcher.wsdl() else: # return supplied method help (?request or ?response messages) req, res, doc = self.server.dispatcher.help(args[0]) if len(args)==1 or 
args[1]=="request": response = req else: response = res self.send_response(200) self.send_header("Content-type", "text/xml") self.end_headers() self.wfile.write(response) def do_POST(self): "SOAP POST gateway" self.send_response(200) self.send_header("Content-type", "text/xml") self.end_headers() request = self.rfile.read(int(self.headers.getheader('content-length'))) response = self.server.dispatcher.dispatch(request) self.wfile.write(response) if __name__=="__main__": import sys dispatcher = SoapDispatcher( name = "PySimpleSoapSample", location = "http://localhost:8008/", action = 'http://localhost:8008/', # SOAPAction namespace = "http://example.com/pysimplesoapsamle/", prefix="ns0", documentation = 'Example soap service using PySimpleSoap', trace = True, ns = True) def adder(p,c, dt=None): "Add several values" print c[0]['d'],c[1]['d'], import datetime dt = dt + datetime.timedelta(365) return {'ab': p['a']+p['b'], 'dd': c[0]['d']+c[1]['d'], 'dt': dt} def dummy(in0): "Just return input" return in0 def echo(request): "Copy request->response (generic, any type)" return request.value dispatcher.register_function('Adder', adder, returns={'AddResult': {'ab': int, 'dd': str } }, args={'p': {'a': int,'b': int}, 'dt': Date, 'c': [{'d': Decimal}]}) dispatcher.register_function('Dummy', dummy, returns={'out0': str}, args={'in0': str}) dispatcher.register_function('Echo', echo) if '--local' in sys.argv: wsdl=dispatcher.wsdl() print wsdl testfile = open("C:/test.wsdl","w") try: testfile.write(wsdl) finally: testfile.close() # dummy local test (clasic soap dialect) xml = """<?xml version="1.0" encoding="UTF-8"?> <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <Adder xmlns="http://example.com/sample.wsdl"> <p><a>1</a><b>2</b></p><c><d>5000000.1</d><d>.2</d></c><dt>20100724</dt> </Adder> </soap:Body> </soap:Envelope>""" print dispatcher.dispatch(xml) # dummy local test (modern soap dialect, SoapUI) xml = """ <soapenv:Envelope 
xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:pys="http://example.com/pysimplesoapsamle/"> <soapenv:Header/> <soapenv:Body> <pys:Adder> <pys:p><pys:a>9</pys:a><pys:b>3</pys:b></pys:p> <pys:dt>19690720<!--1969-07-20T21:28:00--></pys:dt> <pys:c><pys:d>10.001</pys:d><pys:d>5.02</pys:d></pys:c> </pys:Adder> </soapenv:Body> </soapenv:Envelope> """ print dispatcher.dispatch(xml) # echo local test (generic soap service) xml = """<?xml version="1.0" encoding="UTF-8"?> <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <soap:Body> <Echo xmlns="http://example.com/sample.wsdl"> <value xsi:type="xsd:string">Hello world</value> </Echo> </soap:Body> </soap:Envelope>""" print dispatcher.dispatch(xml) for method, doc in dispatcher.list_methods(): request, response, doc = dispatcher.help(method) ##print request ##print response if '--serve' in sys.argv: print "Starting server..." httpd = HTTPServer(("", 8008), SOAPHandler) httpd.dispatcher = dispatcher httpd.serve_forever() if '--consume' in sys.argv: from client import SoapClient client = SoapClient( location = "http://localhost:8008/", action = 'http://localhost:8008/', # SOAPAction namespace = "http://example.com/sample.wsdl", soap_ns='soap', trace = True, ns = False) response = client.Adder(p={'a':1,'b':2},dt='20100724',c=[{'d':'1.20'},{'d':'2.01'}]) result = response.AddResult print int(result.ab) print str(result.dd)
Python
#!/usr/bin/python # -*- coding: latin-1 -*- # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. "Pythonic simple SOAP Client implementation" __author__ = "Mariano Reingart (reingart@gmail.com)" __copyright__ = "Copyright (C) 2008 Mariano Reingart" __license__ = "LGPL 3.0" __version__ = "1.02c" import urllib try: import httplib2 Http = httplib2.Http except ImportError: import urllib2 class Http(): # wrapper to use when httplib2 not available def request(self, url, method, body, headers): f = urllib2.urlopen(urllib2.Request(url, body, headers)) return f.info(), f.read() from simplexml import SimpleXMLElement, TYPE_MAP, OrderedDict class SoapFault(RuntimeError): def __init__(self,faultcode,faultstring): self.faultcode = faultcode self.faultstring = faultstring # soap protocol specification & namespace soap_namespaces = dict( soap11="http://schemas.xmlsoap.org/soap/envelope/", soap="http://schemas.xmlsoap.org/soap/envelope/", soapenv="http://schemas.xmlsoap.org/soap/envelope/", soap12="http://www.w3.org/2003/05/soap-env", ) class SoapClient(object): "Simple SOAP Client (s�mil PHP)" def __init__(self, location = None, action = None, namespace = None, cert = None, trace = False, exceptions = True, proxy = None, ns=False, soap_ns=None, wsdl = None, cache = False): self.certssl = cert self.keyssl = None self.location = location # server location (url) self.action = action # SOAP base action self.namespace = namespace # message self.trace = trace # show debug messages self.exceptions = exceptions # lanzar execpiones? 
(Soap Faults) self.xml_request = self.xml_response = '' if not soap_ns and not ns: self.__soap_ns = 'soap' # 1.1 elif not soap_ns and ns: self.__soap_ns = 'soapenv' # 1.2 else: self.__soap_ns = soap_ns # parse wsdl url self.services = wsdl and self.wsdl(wsdl, debug=trace, cache=cache) self.service_port = None # service port for late binding if not proxy: self.http = Http() else: import socks ##httplib2.debuglevel=4 self.http = httplib2.Http(proxy_info = httplib2.ProxyInfo( proxy_type=socks.PROXY_TYPE_HTTP, **proxy)) #if self.certssl: # esto funciona para validar al server? # self.http.add_certificate(self.keyssl, self.keyssl, self.certssl) self.__ns = ns # namespace prefix or False to not use it if not ns: self.__xml = """<?xml version="1.0" encoding="UTF-8"?> <%(soap_ns)s:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:%(soap_ns)s="%(soap_uri)s"> <%(soap_ns)s:Body> <%(method)s xmlns="%(namespace)s"> </%(method)s> </%(soap_ns)s:Body> </%(soap_ns)s:Envelope>""" else: self.__xml = """<?xml version="1.0" encoding="UTF-8"?> <%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s" xmlns:%(ns)s="%(namespace)s"> <%(soap_ns)s:Header/> <%(soap_ns)s:Body> <%(ns)s:%(method)s> </%(ns)s:%(method)s> </%(soap_ns)s:Body> </%(soap_ns)s:Envelope>""" def __getattr__(self, attr): "Return a pseudo-method that can be called" if not self.services: # not using WSDL? 
return lambda self=self, *args, **kwargs: self.call(attr,*args,**kwargs) else: # using WSDL: return lambda self=self, *args, **kwargs: self.wsdl_call(attr,*args,**kwargs) def call(self, method, *args, **kwargs): "Prepare xml request and make SOAP call, returning a SimpleXMLElement" #TODO: method != input_message # Basic SOAP request: xml = self.__xml % dict(method=method, namespace=self.namespace, ns=self.__ns, soap_ns=self.__soap_ns, soap_uri=soap_namespaces[self.__soap_ns]) request = SimpleXMLElement(xml,namespace=self.__ns and self.namespace, prefix=self.__ns) # serialize parameters if kwargs: parameters = kwargs.items() else: parameters = args if parameters and isinstance(parameters[0], SimpleXMLElement): # merge xmlelement parameter ("raw" - already marshalled) for param in parameters[0].children(): getattr(request,method).import_node(param) else: # marshall parameters: for k,v in parameters: # dict: tag=valor getattr(request,method).marshall(k,v) self.xml_request = request.as_xml() self.xml_response = self.send(method, self.xml_request) response = SimpleXMLElement(self.xml_response, namespace=self.namespace) if self.exceptions and response("Fault", ns=soap_namespaces.values(), error=False): raise SoapFault(unicode(response.faultcode), unicode(response.faultstring)) return response def send(self, method, xml): "Send SOAP request using HTTP" if self.location == 'test': return location = "%s" % self.location #?op=%s" % (self.location, method) if self.services: soap_action = self.action else: soap_action = self.action+method headers={ 'Content-type': 'text/xml; charset="UTF-8"', 'Content-length': str(len(xml)), "SOAPAction": "\"%s\"" % (soap_action) } if self.trace: print "-"*80 print "POST %s" % location print '\n'.join(["%s: %s" % (k,v) for k,v in headers.items()]) print u"\n%s" % xml.decode("utf8","ignore") response, content = self.http.request( location,"POST", body=xml, headers=headers ) self.response = response self.content = content if self.trace: print 
print '\n'.join(["%s: %s" % (k,v) for k,v in response.items()]) print content#.decode("utf8","ignore") print "="*80 return content def get_operation(self, method): # try to find operation in wsdl file soap_ver = self.__soap_ns == 'soap12' and 'soap12' or 'soap11' if not self.service_port: for service_name, service in self.services.items(): for port_name, port in [port for port in service['ports'].items()]: if port['soap_ver'] == soap_ver: self.service_port = service_name, port_name break else: raise RuntimeError("Cannot determine service in WSDL: " "SOAP version: %s" % soap_ver) else: port = self.services[self.service_port[0]]['ports'][self.service_port[1]] self.location = port['location'] operation = port['operations'].get(unicode(method)) if not operation: raise RuntimeError("Operation %s not found in WSDL: " "Service/Port Type: %s" % (method, self.service_port)) return operation def wsdl_call(self, method, *args, **kwargs): "Pre and post process SOAP call, input and output parameters using WSDL" soap_uri = soap_namespaces[self.__soap_ns] operation = self.get_operation(method) # get i/o type declarations: input = operation['input'] output = operation['output'] if 'action' in operation: self.action = operation['action'] # sort parameters (same order as xsd:sequence) def sort_dict(od, d): if isinstance(od, dict): ret = OrderedDict() for k in od.keys(): v = d.get(k) if v: if isinstance(v, dict): v = sort_dict(od[k], v) elif isinstance(v, list): v = [sort_dict(od[k][0], v1) for v1 in v] ret[str(k)] = v return ret else: return d if input and kwargs: params = sort_dict(input.values()[0], kwargs).items() method = input.keys()[0] #elif not input: #TODO: no message! 
(see wsmtxca.dummy) else: params = kwargs and kwargs.items() # call remote procedure response = self.call(method, *params) # parse results: resp = response('Body',ns=soap_uri).children().unmarshall(output) return resp and resp.values()[0] # pass Response tag children def help(self, method): "Return operation documentation and invocation/returned value example" operation = self.get_operation(method) input = operation['input'].values() input = input and input[0] output = operation['output'].values()[0] return u"%s(%s)\n -> %s:\n\n%s" % ( method, input and ", ".join("%s=%s" % (k,repr(v)) for k,v in input.items()) or "", output and output or "", operation.get("documentation",""), ) def wsdl(self, url, debug=False, cache=False): "Parse Web Service Description v1.1" soap_ns = { "http://schemas.xmlsoap.org/wsdl/soap/": 'soap11', "http://schemas.xmlsoap.org/wsdl/soap12/": 'soap12', } wsdl_uri="http://schemas.xmlsoap.org/wsdl/" xsd_uri="http://www.w3.org/2001/XMLSchema" xsi_uri="http://www.w3.org/2001/XMLSchema-instance" get_local_name = lambda s: str((':' in s) and s.split(':')[1] or s) REVERSE_TYPE_MAP = dict([(v,k) for k,v in TYPE_MAP.items()]) def fetch(url): "Fetch a document from a URL, save it locally if cache enabled" import os, hashlib # make md5 hash of the url for caching... 
filename = "%s.xml" % hashlib.md5(url).hexdigest() if isinstance(cache, basestring): filename = os.path.join(cache, filename) if cache and os.path.exists(filename): if debug: print "Reading file %s" % (filename, ) f = open(filename, "r") xml = f.read() f.close() else: if debug: print "Fetching url %s" % (url, ) f = urllib.urlopen(url) xml = f.read() if cache: if debug: print "Writing file %s" % (filename, ) f = open(filename, "w") f.write(xml) f.close() return xml # Open uri and read xml: xml = fetch(url) # Parse WSDL XML: wsdl = SimpleXMLElement(xml, namespace=wsdl_uri) # detect soap prefix and uri (xmlns attributes of <definitions>) xsd_ns = None soap_uris = {} for k, v in wsdl[:]: if v in soap_ns and k.startswith("xmlns:"): soap_uris[get_local_name(k)] = v if v== xsd_uri and k.startswith("xmlns:"): xsd_ns = get_local_name(k) # Extract useful data: self.namespace = wsdl['targetNamespace'] self.documentation = unicode(wsdl('documentation', error=False) or '') services = {} bindings = {} # binding_name: binding operations = {} # operation_name: operation port_type_bindings = {} # port_type_name: binding messages = {} # message: element elements = {} # element: type def for service in wsdl.service: service_name=service['name'] if not service_name: continue # empty service? 
if debug: print "Processing service", service_name serv = services.setdefault(service_name, {'ports': {}}) serv['documentation']=service['documentation'] or '' for port in service.port: binding_name = get_local_name(port['binding']) address = port('address', ns=soap_uris.values(), error=False) location = address and address['location'] or None soap_uri = address and soap_uris.get(address.get_prefix()) soap_ver = soap_uri and soap_ns.get(soap_uri) bindings[binding_name] = {'service_name': service_name, 'location': location, 'soap_uri': soap_uri, 'soap_ver': soap_ver, } serv['ports'][port['name']] = bindings[binding_name] for binding in wsdl.binding: binding_name = binding['name'] if debug: print "Processing binding", service_name soap_binding = binding('binding', ns=soap_uris.values(), error=False) transport = soap_binding and soap_binding['transport'] or None port_type_name = get_local_name(binding['type']) bindings[binding_name].update({ 'port_type_name': port_type_name, 'transport': transport, 'operations': {}, }) port_type_bindings[port_type_name] = bindings[binding_name] for operation in binding.operation: op_name = operation['name'] op = operation('operation',ns=soap_uris.values(), error=False) action = op and op['soapAction'] d = operations.setdefault(op_name, {}) bindings[binding_name]['operations'][op_name] = d d.update({'name': op_name}) #if action: #TODO: separe operation_binding from operation if action: d["action"] = action #TODO: cleanup element/schema/types parsing: def process_element(element_name, node): "Parse and define simple element types" if debug: print "Processing element", element_name for tag in node: if tag.get_local_name() in ("annotation", "documentation"): continue elif tag.get_local_name() in ('element', 'restriction'): if debug: print element_name,"has not children!",tag children = tag # element "alias"? 
alias = True elif tag.children(): children = tag.children() alias = False else: if debug: print element_name,"has not children!",tag continue #TODO: abstract? d = OrderedDict() for e in children: t = e['type'] if not t: t = e['base'] # complexContent (extension)! if not t: t = 'anyType' # no type given! t = t.split(":") if len(t)>1: ns, type_name = t else: ns, type_name = None, t[0] if element_name == type_name: continue # prevent infinite recursion uri = ns and e.get_namespace_uri(ns) or xsd_uri if uri==xsd_uri: # look for the type, None == any fn = REVERSE_TYPE_MAP.get(unicode(type_name), None) else: # complex type, postprocess later fn = elements.setdefault(unicode(type_name), OrderedDict()) if e['name'] is not None and not alias: e_name = unicode(e['name']) d[e_name] = fn else: if debug: print "complexConent/simpleType/element", element_name, "=", type_name d[None] = fn if e['maxOccurs']=="unbounded": # it's an array... TODO: compound arrays? d.array = True if e is not None and e.get_local_name() == 'extension' and e.children(): # extend base element: process_element(element_name, e.children()) elements.setdefault(element_name, OrderedDict()).update(d) # check axis2 namespace at schema types attributes self.namespace = dict(wsdl.types("schema", ns=xsd_uri)[:]).get('targetNamespace', self.namespace) imported_schemas = {} def preprocess_schema(schema): "Find schema elements and complex types" for element in schema.children(): if element.get_local_name() in ('import', ): schema_namespace = element['namespace'] schema_location = element['schemaLocation'] if schema_location is None: if debug: print "Schema location not provided for %s!" % (schema_namespace, ) continue if schema_location in imported_schemas: if debug: print "Schema %s already imported!" 
% (schema_location, ) continue imported_schemas[schema_location] = schema_namespace if debug: print "Importing schema %s from %s" % (schema_namespace, schema_location) # Open uri and read xml: xml = fetch(schema_location) # Parse imported XML schema (recursively): imported_schema = SimpleXMLElement(xml, namespace=xsd_uri) preprocess_schema(imported_schema) if element.get_local_name() in ('element', 'complexType', "simpleType"): element_name = unicode(element['name']) if debug: print "Parsing Element %s: %s" % (element.get_local_name(),element_name) if element.get_local_name() == 'complexType': children = element.children() elif element.get_local_name() == 'simpleType': children = element("restriction", ns=xsd_uri) elif element.get_local_name() == 'element' and element['type']: children = element else: children = element.children() if children: children = children.children() elif element.get_local_name() == 'element': children = element if children: process_element(element_name, children) def postprocess_element(elements): "Fix unresolved references (elements referenced before its definition, thanks .net)" for k,v in elements.items(): if isinstance(v, OrderedDict): if v.array: elements[k] = [v] # convert arrays to python lists if v!=elements: #TODO: fix recursive elements postprocess_element(v) if None in v and v[None]: # extension base? 
if isinstance(v[None], dict): for i, kk in enumerate(v[None]): # extend base -keep orginal order- elements[k].insert(kk, v[None][kk], i) del v[None] else: # "alias", just replace if debug: print "Replacing ", k , " = ", v[None] elements[k] = v[None] #break if isinstance(v, list): for n in v: # recurse list postprocess_element(n) # process current wsdl schema: for schema in wsdl.types("schema", ns=xsd_uri): preprocess_schema(schema) postprocess_element(elements) for message in wsdl.message: if debug: print "Processing message", message['name'] part = message('part', error=False) element = {} if part: element_name = part['element'] if not element_name: element_name = part['type'] # some uses type instead element_name = get_local_name(element_name) element = {element_name: elements.get(element_name)} messages[message['name']] = element for port_type in wsdl.portType: port_type_name = port_type['name'] if debug: print "Processing port type", port_type_name binding = port_type_bindings[port_type_name] for operation in port_type.operation: op_name = operation['name'] op = operations[op_name] op['documentation'] = unicode(operation('documentation', error=False) or '') if binding['soap_ver']: #TODO: separe operation_binding from operation (non SOAP?) 
input = get_local_name(operation.input['message']) output = get_local_name(operation.output['message']) op['input'] = messages[input] op['output'] = messages[output] if debug: import pprint pprint.pprint(services) return services def parse_proxy(proxy_str): "Parses proxy address user:pass@host:port into a dict suitable for httplib2" proxy_dict = {} if proxy_str is None: return if "@" in proxy_str: user_pass, host_port = proxy_str.split("@") else: user_pass, host_port = "", proxy_str if ":" in host_port: host, port = host_port.split(":") proxy_dict['proxy_host'], proxy_dict['proxy_port'] = host, int(port) if ":" in user_pass: proxy_dict['proxy_user'], proxy_dict['proxy_pass'] = user_pass.split(":") return proxy_dict if __name__=="__main__": import sys if '--web2py' in sys.argv: # test local sample webservice exposed by web2py from client import SoapClient if not '--wsdl' in sys.argv: client = SoapClient( location = "http://127.0.0.1:8000/webservices/sample/call/soap", action = 'http://127.0.0.1:8000/webservices/sample/call/soap', # SOAPAction namespace = "http://127.0.0.1:8000/webservices/sample/call/soap", soap_ns='soap', trace = True, ns = False, exceptions=True) else: client = SoapClient(wsdl="http://127.0.0.1:8000/webservices/sample/call/soap?WSDL",trace=True) response = client.Dummy() print 'dummy', response response = client.Echo(value='hola') print 'echo', repr(response) response = client.AddIntegers(a=1,b=2) if not '--wsdl' in sys.argv: result = response.AddResult # manully convert returned type print int(result) else: result = response['AddResult'] print result, type(result), "auto-unmarshalled" if '--raw' in sys.argv: # raw (unmarshalled parameter) local sample webservice exposed by web2py from client import SoapClient client = SoapClient( location = "http://127.0.0.1:8000/webservices/sample/call/soap", action = 'http://127.0.0.1:8000/webservices/sample/call/soap', # SOAPAction namespace = "http://127.0.0.1:8000/webservices/sample/call/soap", 
soap_ns='soap', trace = True, ns = False) params = SimpleXMLElement("""<?xml version="1.0" encoding="UTF-8"?><AddIntegers><a>3</a><b>2</b></AddIntegers>""") # manully convert returned type response = client.call('AddIntegers',params) result = response.AddResult print int(result) # manully convert returned type if '--ctg' in sys.argv: # test AFIP Agriculture webservice client = SoapClient( location = "https://fwshomo.afip.gov.ar/wsctg/services/CTGService", action = 'http://impl.service.wsctg.afip.gov.ar/CTGService/', # SOAPAction namespace = "http://impl.service.wsctg.afip.gov.ar/CTGService/", trace = True, ns = True) response = client.dummy() result = response.dummyResponse print str(result.appserver) print str(result.dbserver) print str(result.authserver) if '--wsfe' in sys.argv: # Demo & Test (AFIP Electronic Invoice): ta_file = open("TA.xml") try: ta_string = ta_file.read() # read access ticket (wsaa.py) finally: ta_file.close() ta = SimpleXMLElement(ta_string) token = str(ta.credentials.token) sign = str(ta.credentials.sign) cuit = long(20267565393) id = 1234 cbte =199 client = SoapClient( location = "https://wswhomo.afip.gov.ar/wsfe/service.asmx", action = 'http://ar.gov.afip.dif.facturaelectronica/', # SOAPAction namespace = "http://ar.gov.afip.dif.facturaelectronica/", trace = True) results = client.FERecuperaQTYRequest( argAuth= {"Token": token, "Sign": sign, "cuit":long(cuit)} ) if int(results.FERecuperaQTYRequestResult.RError.percode) != 0: print "Percode: %s" % results.FERecuperaQTYRequestResult.RError.percode print "MSGerror: %s" % results.FERecuperaQTYRequestResult.RError.perrmsg else: print int(results.FERecuperaQTYRequestResult.qty.value) if '--feriados' in sys.argv: # Demo & Test: Argentina Holidays (Ministerio del Interior): # this webservice seems disabled from datetime import datetime, timedelta client = SoapClient( location = "http://webservices.mininterior.gov.ar/Feriados/Service.svc", action = 'http://tempuri.org/IMyService/', # SOAPAction 
namespace = "http://tempuri.org/FeriadoDS.xsd", trace = True) dt1 = datetime.today() - timedelta(days=60) dt2 = datetime.today() + timedelta(days=60) feriadosXML = client.FeriadosEntreFechasas_xml(dt1=dt1.isoformat(), dt2=dt2.isoformat()); print feriadosXML if '--wsdl-parse' in sys.argv: client = SoapClient() # Test PySimpleSOAP WSDL client.wsdl("file:C:/test.wsdl", debug=True) # Test Java Axis WSDL: client.wsdl('https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl',debug=True) # Test .NET 2.0 WSDL: client.wsdl('https://wswhomo.afip.gov.ar/wsfe/service.asmx?WSDL',debug=True) client.wsdl('https://wswhomo.afip.gov.ar/wsfex/service.asmx?WSDL',debug=True) client.wsdl('https://testdia.afip.gov.ar/Dia/Ws/wDigDepFiel/wDigDepFiel.asmx?WSDL',debug=True) # Test JBoss WSDL: client.wsdl('https://fwshomo.afip.gov.ar/wsctg/services/CTGService?wsdl',debug=True) client.wsdl('https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl',debug=True) if '--wsdl-client' in sys.argv: client = SoapClient(wsdl='https://wswhomo.afip.gov.ar/wsfex/service.asmx?WSDL',trace=True) results = client.FEXDummy() print results['FEXDummyResult']['AppServer'] print results['FEXDummyResult']['DbServer'] print results['FEXDummyResult']['AuthServer'] ta_file = open("TA.xml") try: ta_string = ta_file.read() # read access ticket (wsaa.py) finally: ta_file.close() ta = SimpleXMLElement(ta_string) token = str(ta.credentials.token) sign = str(ta.credentials.sign) response = client.FEXGetCMP( Auth={"Token": token, "Sign": sign, "Cuit": 20267565393}, Cmp={"Tipo_cbte": 19, "Punto_vta": 1, "Cbte_nro": 1}) result = response['FEXGetCMPResult'] if False: print result if 'FEXErr' in result: print "FEXError:", result['FEXErr']['ErrCode'], result['FEXErr']['ErrCode'] cbt = result['FEXResultGet'] print cbt['Cae'] FEX_event = result['FEXEvents'] print FEX_event['EventCode'], FEX_event['EventMsg'] if '--wsdl-ctg' in sys.argv: client = SoapClient(wsdl='https://fwshomo.afip.gov.ar/wsctg/services/CTGService?wsdl', trace=True, 
ns = "ctg") results = client.dummy() print results print results['DummyResponse']['appserver'] print results['DummyResponse']['dbserver'] print results['DummyResponse']['authserver'] ta_file = open("TA.xml") try: ta_string = ta_file.read() # read access ticket (wsaa.py) finally: ta_file.close() ta = SimpleXMLElement(ta_string) token = str(ta.credentials.token) sign = str(ta.credentials.sign) print client.help("obtenerProvincias") response = client.obtenerProvincias(auth={"token":token, "sign":sign, "cuitRepresentado":20267565393}) print "response=",response for ret in response: print ret['return']['codigoProvincia'], ret['return']['descripcionProvincia'].encode("latin1") prueba = dict(numeroCartaDePorte=512345678, codigoEspecie=23, cuitRemitenteComercial=20267565393, cuitDestino=20267565393, cuitDestinatario=20267565393, codigoLocalidadOrigen=3058, codigoLocalidadDestino=3059, codigoCosecha='0910', pesoNetoCarga=1000, cantHoras=1, patenteVehiculo='CZO985', cuitTransportista=20267565393, numeroCTG="43816783", transaccion='10000001681', observaciones='', ) response = client.solicitarCTG( auth={"token": token, "sign": sign, "cuitRepresentado": 20267565393}, solicitarCTGRequest= prueba) print response['return']['numeroCTG'] ##print parse_proxy(None) ##print parse_proxy("host:1234") ##print parse_proxy("user:pass@host:1234") ##sys.exit(0)
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- "Contributed modules"
Python
#!/usr/bin/python # -*- coding: latin-1 -*- # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. "Simple XML manipulation" __author__ = "Mariano Reingart (reingart@gmail.com)" __copyright__ = "Copyright (C) 2008/009 Mariano Reingart" __license__ = "LGPL 3.0" __version__ = "1.02c" import xml.dom.minidom from decimal import Decimal import datetime import time DEBUG = False # Functions to serialize/unserialize special immutable types: datetime_u = lambda s: datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S") datetime_m = lambda dt: dt.isoformat('T') date_u = lambda s: datetime.datetime.strptime(s[0:10], "%Y-%m-%d").date() date_m = lambda d: d.strftime("%Y-%m-%d") time_u = lambda s: datetime.datetime.strptime(s, "%H:%M:%S").time() time_m = lambda d: d.strftime("%H%M%S") bool_u = lambda s: {'0':False, 'false': False, '1': True, 'true': True}[s] # aliases: class Alias(): def __init__(self, py_type, xml_type): self.py_type, self.xml_type = py_type, xml_type def __call__(self, value): return self.py_type(value) def __repr__(self): return "<alias '%s' for '%s'>" % (self.xml_type, self.py_type) byte = Alias(str,'byte') short = Alias(int,'short') double = Alias(float,'double') integer = Alias(long,'integer') DateTime = datetime.datetime Date = datetime.date Time = datetime.time # Define convertion function (python type): xml schema type TYPE_MAP = {str:'string',unicode:'string', bool:'boolean', short:'short', byte:'byte', int:'int', long:'long', integer:'integer', float:'float', double:'double', Decimal:'decimal', datetime.datetime:'dateTime', 
datetime.date:'date', } TYPE_MARSHAL_FN = {datetime.datetime:datetime_m, datetime.date:date_m,} TYPE_UNMARSHAL_FN = {datetime.datetime:datetime_u, datetime.date:date_u, bool:bool_u, } class OrderedDict(dict): "Minimal ordered dictionary for xsd:sequences" def __init__(self): self.__keys = [] self.array = False def __setitem__(self, key, value): if key not in self.__keys: self.__keys.append(key) dict.__setitem__(self, key, value) def insert(self, key, value, index=0): if key not in self.__keys: self.__keys.insert(index, key) dict.__setitem__(self, key, value) def __delitem__(self, key): if key in self.__keys: self.__keys.remove(key) dict.__delitem__(self, key) def __iter__(self): return iter(self.__keys) def keys(self): return self.__keys def items(self): return [(key, self[key]) for key in self.__keys] def update(self, other): for k,v in other.items(): self[k] = v if isinstance(other, OrderedDict): self.array = other.array def __str__(self): return "*%s*" % dict.__str__(self) def __repr__(self): s= "*{%s}*" % ", ".join(['%s: %s' % (repr(k),repr(v)) for k,v in self.items()]) if self.array and False: s = "[%s]" % s return s class SimpleXMLElement(object): "Simple XML manipulation (simil PHP)" def __init__(self, text = None, elements = None, document = None, namespace = None, prefix=None): self.__ns = namespace self.__prefix = prefix if text: try: self.__document = xml.dom.minidom.parseString(text) except: if DEBUG: print text raise self.__elements = [self.__document.documentElement] else: self.__elements = elements self.__document = document def add_child(self,name,text=None,ns=True): "Adding a child tag to a node" if not ns or not self.__ns: if DEBUG: print "adding %s" % (name) element = self.__document.createElement(name) else: if DEBUG: print "adding %s ns %s %s" % (name, self.__ns,ns) if self.__prefix: element = self.__document.createElementNS(self.__ns, "%s:%s" % (self.__prefix, name)) else: element = self.__document.createElementNS(self.__ns, name) if text: if 
isinstance(text, unicode): element.appendChild(self.__document.createTextNode(text)) else: element.appendChild(self.__document.createTextNode(str(text))) self._element.appendChild(element) return SimpleXMLElement( elements=[element], document=self.__document, namespace=self.__ns, prefix=self.__prefix) def __setattr__(self, tag, text): "Add text child tag node (short form)" if tag.startswith("_"): object.__setattr__(self, tag, text) else: if DEBUG: print "__setattr__(%s,%s)" % (tag, text) self.add_child(tag,text) def add_comment(self, data): "Add an xml comment to this child" comment = self.__document.createComment(data) self._element.appendChild(comment) def as_xml(self,filename=None,pretty=False): "Return the XML representation of the document" if not pretty: return self.__document.toxml('UTF-8') else: return self.__document.toprettyxml(encoding='UTF-8') def __repr__(self): "Return the XML representation of this tag" return self._element.toxml('UTF-8') def get_name(self): "Return the tag name of this node" return self._element.tagName def get_local_name(self): "Return the tag loca name (prefix:name) of this node" return self._element.localName def get_prefix(self): "Return the namespace prefix of this node" return self._element.prefix def get_namespace_uri(self, ns): "Return the namespace uri for a prefix" v = self.__document.documentElement.attributes['xmlns:%s' % ns] return v.value def attributes(self): "Return a dict of attributes for this tag" #TODO: use slice syntax [:]? 
return self._element.attributes def __getitem__(self, item): "Return xml tag attribute value or a slice of attributes (iter)" if DEBUG: print "__getitem__(%s)" % item if isinstance(item,basestring): if self._element.hasAttribute(item): return self._element.attributes[item].value elif isinstance(item, slice): # return a list with name:values return self._element.attributes.items()[item] else: # return element by index (position) element = self.__elements[item] return SimpleXMLElement( elements=[element], document=self.__document, namespace=self.__ns, prefix=self.__prefix) def add_attribute(self, name, value): "Set an attribute value from a string" self._element.setAttribute(name, value) def __setitem__(self, item, value): "Set an attribute value" if isinstance(item,basestring): self.add_attribute(item, value) elif isinstance(item, slice): # set multiple attributes at once for k, v in value.items(): self.add_attribute(k, v) def __call__(self, tag=None, ns=None, children=False, error=True): "Search (even in child nodes) and return a child tag by name" try: if tag is None: # if no name given, iterate over siblings (same level) return self.__iter__() if children: # future: filter children? by ns? 
return self.children() elements = None if isinstance(tag, int): # return tag by index elements=[self.__elements[tag]] if ns and not elements: for ns_uri in isinstance(ns, (tuple, list)) and ns or (ns, ): if DEBUG: print "searching %s by ns=%s" % (tag,ns_uri) elements = self._element.getElementsByTagNameNS(ns_uri, tag) if elements: break if self.__ns and not elements: if DEBUG: print "searching %s by ns=%s" % (tag, self.__ns) elements = self._element.getElementsByTagNameNS(self.__ns, tag) if not elements: if DEBUG: print "searching %s " % (tag) elements = self._element.getElementsByTagName(tag) if not elements: if DEBUG: print self._element.toxml() if error: raise AttributeError("No elements found") else: return return SimpleXMLElement( elements=elements, document=self.__document, namespace=self.__ns, prefix=self.__prefix) except AttributeError, e: raise AttributeError("Tag not found: %s (%s)" % (tag, str(e))) def __getattr__(self, tag): "Shortcut for __call__" return self.__call__(tag) def __iter__(self): "Iterate over xml tags at this level" try: for __element in self.__elements: yield SimpleXMLElement( elements=[__element], document=self.__document, namespace=self.__ns, prefix=self.__prefix) except: raise def __dir__(self): "List xml children tags names" return [node.tagName for node in self._element.childNodes if node.nodeType != node.TEXT_NODE] def children(self): "Return xml children tags element" elements=[__element for __element in self._element.childNodes if __element.nodeType == __element.ELEMENT_NODE] if not elements: return None #raise IndexError("Tag %s has no children" % self._element.tagName) return SimpleXMLElement( elements=elements, document=self.__document, namespace=self.__ns, prefix=self.__prefix) def __len__(self): "Return elements count" return len(self.__elements) def __contains__( self, item): "Search for a tag name in this element or child nodes" return self._element.getElementsByTagName(item) def __unicode__(self): "Returns the unicode 
text nodes of the current element" if self._element.childNodes: rc = u"" for node in self._element.childNodes: if node.nodeType == node.TEXT_NODE: rc = rc + node.data return rc return '' def __str__(self): "Returns the str text nodes of the current element" return unicode(self).encode("utf8","ignore") def __int__(self): "Returns the integer value of the current element" return int(self.__str__()) def __float__(self): "Returns the float value of the current element" try: return float(self.__str__()) except: raise IndexError(self._element.toxml()) _element = property(lambda self: self.__elements[0]) def unmarshall(self, types): "Convert to python values the current serialized xml element" # types is a dict of {tag name: convertion function} # example: types={'p': {'a': int,'b': int}, 'c': [{'d':str}]} # expected xml: <p><a>1</a><b>2</b></p><c><d>hola</d><d>chau</d> # returnde value: {'p': {'a':1,'b':2}, `'c':[{'d':'hola'},{'d':'chau'}]} d = {} for node in self(): name = str(node.get_local_name()) try: fn = types[name] except (KeyError, ), e: raise TypeError("Tag: %s invalid" % (name,)) if isinstance(fn,list): value = [] children = node.children() for child in children and children() or []: value.append(child.unmarshall(fn[0])) elif isinstance(fn,dict): children = node.children() value = children and children.unmarshall(fn) else: if fn is None: # xsd:anyType not unmarshalled value = node elif str(node) or fn == str: try: # get special desserialization function (if any) fn = TYPE_UNMARSHAL_FN.get(fn,fn) value = fn(unicode(node)) except (ValueError, TypeError), e: raise ValueError("Tag: %s: %s" % (name, unicode(e))) else: value = None d[name] = value return d def marshall(self, name, value, add_child=True, add_comments=False, ns=False): "Analize python value and add the serialized XML element using tag name" if isinstance(value, dict): # serialize dict (<key>value</key>) child = add_child and self.add_child(name,ns=ns) or self for k,v in value.items(): child.marshall(k, 
v, add_comments=add_comments, ns=ns) elif isinstance(value, tuple): # serialize tuple (<key>value</key>) child = add_child and self.add_child(name,ns=ns) or self for k,v in value: getattr(self,name).marshall(k, v, add_comments=add_comments, ns=ns) elif isinstance(value, list): # serialize lists child=self.add_child(name,ns=ns) if add_comments: child.add_comment("Repetitive array of:") for t in value: child.marshall(name,t, False, add_comments=add_comments, ns=ns) elif isinstance(value, basestring): # do not convert strings or unicodes self.add_child(name,value,ns=ns) elif value is None: # sent a empty tag? self.add_child(name,ns=ns) elif value in TYPE_MAP.keys(): # add commented placeholders for simple tipes (for examples/help only) child = self.add_child(name,ns=ns) child.add_comment(TYPE_MAP[value]) else: # the rest of object types are converted to string # get special serialization function (if any) fn = TYPE_MARSHAL_FN.get(type(value),str) self.add_child(name,fn(value),ns=ns) def import_node(self, other): x = self.__document.importNode(other._element, True) # deep copy self._element.appendChild(x) if __name__ == "__main__": span = SimpleXMLElement('<span><a href="python.org.ar">pyar</a><prueba><i>1</i><float>1.5</float></prueba></span>') assert str(span.a)==str(span('a'))==str(span.a(0))=="pyar" assert span.a['href']=="python.org.ar" assert int(span.prueba.i)==1 and float(span.prueba.float)==1.5 span1 = SimpleXMLElement('<span><a href="google.com">google</a><a>yahoo</a><a>hotmail</a></span>') assert [str(a) for a in span1.a()] == ['google', 'yahoo', 'hotmail'] span1.add_child('a','altavista') span1.b = "ex msn" d = {'href':'http://www.bing.com/', 'alt': 'Bing'} span1.b[:] = d assert sorted([(k,v) for k,v in span1.b[:]]) == sorted(d.items()) print span1.as_xml() assert 'b' in span1 span.import_node(span1) print span.as_xml()
Python
#!/usr/bin/env python # created my Massimo Di Pierro # license MIT/BSD/GPL import re import cgi __all__ = ['render', 'markmin2html'] __doc__ = """ # Markmin markup language ## About This is a new markup language that we call markmin designed to produce high quality scientific papers and books and also put them online. We provide serializers for html, latex and pdf. It is implemented in the ``markmin2html`` function in the ``markmin2html.py``. Example of usage: `` >>> m = "Hello **world** [[link http://web2py.com]]" >>> from markmin2html import markmin2html >>> print markmin2html(m) >>> from markmin2latex import markmin2latex >>> print markmin2latex(m) >>> from markmin2pdf import markmin2pdf # requires pdflatex >>> print markmin2pdf(m) `` ## Why? We wanted a markup language with the following requirements: - less than 100 lines of functional code - easy to read - secure - support table, ul, ol, code - support html5 video and audio elements (html serialization only) - can align images and resize them - can specify class for tables and code elements - can add anchors - does not use _ for markup (since it creates odd behavior) - automatically links urls - fast - easy to extend - supports latex and pdf including references - allows to describe the markup in the markup (this document is generated from markmin syntax) (results depend on text but in average for text ~100K markmin is 30% faster than markdown, for text ~10K it is 10x faster) The [[web2py book http://www.lulu.com/product/paperback/web2py-%283rd-edition%29/12822827]] published by lulu, for example, was entirely generated with markmin2pdf from the online [[web2py wiki http://www.web2py.com/book]] ## Download - http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2html.py - http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2latex.py - http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2pdf.py markmin2html.py and markmin2latex.py are single files and have no web2py dependence. 
Their license is BSD. ## Examples ### Bold, italic, code and links -------------------------------------------------- **SOURCE** | **OUTPUT** ``# title`` | **title** ``## section`` | **section** ``### subsection`` | **subsection** ``**bold**`` | **bold** ``''italic''`` | ''italic'' ``!`!`verbatim`!`!`` | ``verbatim`` ``http://google.com`` | http://google.com ``[[click me #myanchor]]`` | [[click me #myanchor]] --------------------------------------------------- ### More on links The format is always ``[[title link]]``. Notice you can nest bold, italic and code inside the link title. ### Anchors [[myanchor]] You can place an anchor anywhere in the text using the syntax ``[[name]]`` where ''name'' is the name of the anchor. You can then link the anchor with [[link #myanchor]], i.e. ``[[link #myanchor]]``. ### Images [[some image http://www.web2py.com/examples/static/web2py_logo.png right 200px]] This paragraph has an image aligned to the right with a width of 200px. Its is placed using the code ``[[some image http://www.web2py.com/examples/static/web2py_logo.png right 200px]]``. ### Unordered Lists `` - Dog - Cat - Mouse `` is rendered as - Dog - Cat - Mouse Two new lines between items break the list in two lists. ### Ordered Lists `` + Dog + Cat + Mouse `` is rendered as + Dog + Cat + Mouse ### Tables Something like this `` --------- **A** | **B** | **C** 0 | 0 | X 0 | X | 0 X | 0 | 0 -----:abc `` is a table and is rendered as --------- **A** | **B** | **C** 0 | 0 | X 0 | X | 0 X | 0 | 0 -----:abc Four or more dashes delimit the table and | separates the columns. The ``:abc`` at the end sets the class for the table and it is optional. ### Blockquote A table with a single cell is rendered as a blockquote: ----- Hello world ----- ### Code, ``<code>``, escaping and extra stuff `` def test(): return "this is Python code" ``:python Optionally a ` inside a ``!`!`...`!`!`` block can be inserted escaped with !`!. The ``:python`` after the markup is also optional. 
If present, by default, it is used to set the class of the <code> block. The behavior can be overridden by passing an argument ``extra`` to the ``render`` function. For example: `` >>> markmin2html("!`!!`!aaa!`!!`!:custom", extra=dict(custom=lambda text: 'x'+text+'x')) ``:python generates ``'xaaax'``:python (the ``!`!`...`!`!:custom`` block is rendered by the ``custom=lambda`` function passed to ``render``). ### Html5 support Markmin also supports the <video> and <audio> html5 tags using the notation: `` [[title link video]] [[title link audio]] `` ### Latex and other extensions Formulas can be embedded into HTML with ``$````$``formula``$````$``. You can use Google charts to render the formula: `` >>> LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" align="ce\ nter"/>' >>> markmin2html(text,{'latex':lambda code: LATEX % code.replace('"','\"')}) `` ### Code with syntax highlighting This requires a syntax highlighting tool, such as the web2py CODE helper. `` >>> extra={'code_cpp':lambda text: CODE(text,language='cpp').xml(), 'code_java':lambda text: CODE(text,language='java').xml(), 'code_python':lambda text: CODE(text,language='python').xml(), 'code_html':lambda text: CODE(text,language='html').xml()} >>> markmin2html(text,extra=extra) `` Code can now be marked up as in this example: `` !`!` <html><body>example</body></html> !`!`:code_html `` ### Citations and References Citations are treated as internal links in html and proper citations in latex if there is a final section called "References". Items like `` - [[key]] value `` in the References will be translated into Latex `` \\bibitem{key} value `` Here is an example of usage: `` As shown in Ref.!`!`mdipierro`!`!:cite ## References - [[mdipierro]] web2py Manual, 3rd Edition, lulu.com `` ### Caveats ``<ul/>``, ``<ol/>``, ``<code/>``, ``<table/>``, ``<blockquote/>``, ``<h1/>``, ..., ``<h6/>`` do not have ``<p>...</p>`` around them. 
""" META = 'META' LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" align="center"/>' regex_newlines = re.compile('(\n\r)|(\r\n)') regex_dd=re.compile('\$\$(?P<latex>.*?)\$\$') regex_code = re.compile('('+META+')|(``(?P<t>.*?)``(:(?P<c>\w+))?)',re.S) regex_maps = [ (re.compile('[ \t\r]+\n'),'\n'), (re.compile('[ \t\r]+\n'),'\n'), (re.compile('\*\*(?P<t>[^\s\*]+( +[^\s\*]+)*)\*\*'),'<b>\g<t></b>'), (re.compile("''(?P<t>[^\s']+( +[^\s']+)*)''"),'<i>\g<t></i>'), (re.compile('^#{6} (?P<t>[^\n]+)',re.M),'\n\n<<h6>\g<t></h6>\n'), (re.compile('^#{5} (?P<t>[^\n]+)',re.M),'\n\n<<h5>\g<t></h5>\n'), (re.compile('^#{4} (?P<t>[^\n]+)',re.M),'\n\n<<h4>\g<t></h4>\n'), (re.compile('^#{3} (?P<t>[^\n]+)',re.M),'\n\n<<h3>\g<t></h3>\n'), (re.compile('^#{2} (?P<t>[^\n]+)',re.M),'\n\n<<h2>\g<t></h2>\n'), (re.compile('^#{1} (?P<t>[^\n]+)',re.M),'\n\n<<h1>\g<t></h1>\n'), (re.compile('^\- +(?P<t>.*)',re.M),'<<ul><li>\g<t></li></ul>'), (re.compile('^\+ +(?P<t>.*)',re.M),'<<ol><li>\g<t></li></ol>'), (re.compile('</ol>\n<<ol>'),''), (re.compile('</ul>\n<<ul>'),''), (re.compile('<<'),'\n\n<<'), (re.compile('\n\s+\n'),'\n\n')] regex_table = re.compile('^\-{4,}\n(?P<t>.*?)\n\-{4,}(:(?P<c>\w+))?\n',re.M|re.S) regex_anchor = re.compile('\[\[(?P<t>\S+)\]\]') regex_image_width = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +(?P<p>left|right|center) +(?P<w>\d+px)\]\]') regex_image = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +(?P<p>left|right|center)\]\]') regex_video = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +video\]\]') regex_audio = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +audio\]\]') regex_link = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+)\]\]') regex_link_popup = re.compile('\[\[(?P<t>.*?) 
+(?P<k>\S+) popup\]\]') regex_link_no_anchor = re.compile('\[\[ +(?P<k>\S+)\]\]') regex_auto = re.compile('(?<!["\w\>])(?P<k>\w+://[\w\.\-\+\?&%\/]+)',re.M) def render(text,extra={},allowed={},sep='p'): """ Arguments: - text is the text to be processed - extra is a dict like extra=dict(custom=lambda value: value) that process custom code as in " ``this is custom code``:custom " - allowed is a dictionary of list of allowed classes like allowed = dict(code=('python','cpp','java')) - sep can be 'p' to separate text in <p>...</p> or can be 'br' to separate text using <br /> >>> render('this is\\n# a section\\nparagraph') '<p>this is</p><h1>a section</h1><p>paragraph</p>' >>> render('this is\\n## a subsection\\nparagraph') '<p>this is</p><h2>a subsection</h2><p>paragraph</p>' >>> render('this is\\n### a subsubsection\\nparagraph') '<p>this is</p><h3>a subsubsection</h3><p>paragraph</p>' >>> render('**hello world**') '<p><b>hello world</b></p>' >>> render('``hello world``') '<code class="">hello world</code>' >>> render('``hello world``:python') '<code class="python">hello world</code>' >>> render('``\\nhello\\nworld\\n``:python') '<pre><code class="python">hello\\nworld</code></pre>' >>> render("''hello world''") '<p><i>hello world</i></p>' >>> render('** hello** **world**') '<p>** hello** <b>world</b></p>' >>> render('- this\\n- is\\n- a list\\n\\nand this\\n- is\\n- another') '<ul><li>this</li><li>is</li><li>a list</li></ul><p>and this</p><ul><li>is</li><li>another</li></ul>' >>> render('+ this\\n+ is\\n+ a list\\n\\nand this\\n+ is\\n+ another') '<ol><li>this</li><li>is</li><li>a list</li></ol><p>and this</p><ol><li>is</li><li>another</li></ol>' >>> render("----\\na | b\\nc | d\\n----\\n") '<table class=""><tr><td>a</td><td>b</td></tr><tr><td>c</td><td>d</td></tr></table>' >>> render("----\\nhello world\\n----\\n") '<blockquote class="">hello world</blockquote>' >>> render('[[this is a link http://example.com]]') '<p><a href="http://example.com">this is a 
link</a></p>' >>> render('[[this is an image http://example.com left]]') '<p><img src="http://example.com" alt="this is an image" align="left" /></p>' >>> render('[[this is an image http://example.com left 200px]]') '<p><img src="http://example.com" alt="this is an image" align="left" width="200px" /></p>' >>> render('[[this is an image http://example.com video]]') '<p><video src="http://example.com" controls></video></p>' >>> render('[[this is an image http://example.com audio]]') '<p><audio src="http://example.com" controls></audio></p>' >>> render('[[this is a **link** http://example.com]]') '<p><a href="http://example.com">this is a <b>link</b></a></p>' >>> render("``aaa``:custom",extra=dict(custom=lambda text: 'x'+text+'x')) 'xaaax' >>> render(r"$$\int_a^b sin(x)dx$$") '<code class="latex">\\\\int_a^b sin(x)dx</code>' """ text = str(text or '') ############################################################# # replace all blocks marked with ``...``:class with META # store them into segments they will be treated as code ############################################################# segments, i = [], 0 text = regex_dd.sub('``\g<latex>``:latex ',text) text = regex_newlines.sub('\n',text) while True: item = regex_code.search(text,i) if not item: break if item.group()==META: segments.append((None,None)) text = text[:item.start()]+META+text[item.end():] else: c = item.group('c') or '' if 'code' in allowed and not c in allowed['code']: c = '' code = item.group('t').replace('!`!','`') segments.append((code,c)) text = text[:item.start()]+META+text[item.end():] i=item.start()+3 ############################################################# # do h1,h2,h3,h4,h5,h6,b,i,ol,ul and normalize spaces ############################################################# text = '\n'.join(t.strip() for t in text.split('\n')) text = cgi.escape(text) for regex, sub in regex_maps: text = regex.sub(sub,text) ############################################################# # process tables and 
blockquotes ############################################################# while True: item = regex_table.search(text) if not item: break c = item.group('c') or '' if 'table' in allowed and not c in allowed['table']: c = '' content = item.group('t') if ' | ' in content: rows = content.replace('\n','</td></tr><tr><td>').replace(' | ','</td><td>') text = text[:item.start()] + '<<table class="%s"><tr><td>'%c + rows + '</td></tr></table>' + text[item.end():] else: text = text[:item.start()] + '<<blockquote class="%s">'%c + content + '</blockquote>' + text[item.end():] ############################################################# # deal with images, videos, audios and links ############################################################# text = regex_anchor.sub('<span id="\g<t>"><span>', text) text = regex_image_width.sub('<img src="\g<k>" alt="\g<t>" align="\g<p>" width="\g<w>" />', text) text = regex_image.sub('<img src="\g<k>" alt="\g<t>" align="\g<p>" />', text) text = regex_video.sub('<video src="\g<k>" controls></video>', text) text = regex_audio.sub('<audio src="\g<k>" controls></audio>', text) text = regex_link_popup.sub('<a href="\g<k>" target="_blank">\g<t></a>', text) text = regex_link_no_anchor.sub('<a href="\g<k>">\g<k></a>', text) text = regex_link.sub('<a href="\g<k>">\g<t></a>', text) text = regex_auto.sub('<a href="\g<k>">\g<k></a>', text) ############################################################# # deal with paragraphs (trick <<ul, <<ol, <<table, <<h1, etc) # the << indicates that there should NOT be a new paragraph # META indicates a code block therefore no new paragraph ############################################################# items = [item.strip() for item in text.split('\n\n')] if sep=='p': text = ''.join(p[:2]!='<<' and p!=META and '<p>%s</p>'%p or '%s'%p for p in items if p) elif sep=='br': text = '<br />'.join(items) ############################################################# # finally get rid of << 
############################################################# text=text.replace('<<','<') ############################################################# # process all code text ############################################################# parts = text.split(META) text = parts[0] for i,(code,b) in enumerate(segments): if code==None: html = META else: if b in extra: if code[:1]=='\n': code=code[1:] if code[-1:]=='\n': code=code[:-1] html = extra[b](code) elif b=='cite': html = '['+','.join('<a href="#%s" class="%s">%s</a>' \ % (d,b,d) \ for d in cgi.escape(code).split(','))+']' elif b=='latex': html = LATEX % code.replace('"','\"').replace('\n',' ') elif code[:1]=='\n' or code[-1:]=='\n': if code[:1]=='\n': code=code[1:] if code[-1:]=='\n': code=code[:-1] html = '<pre><code class="%s">%s</code></pre>' % (b,cgi.escape(code)) else: if code[:1]=='\n': code=code[1:] if code[-1:]=='\n': code=code[:-1] html = '<code class="%s">%s</code>' % (b,cgi.escape(code)) text = text+html+parts[i+1] return text def markmin2html(text,extra={},allowed={},sep='p'): return render(text,extra,allowed,sep) if __name__ == '__main__': import sys import doctest if sys.argv[1:2]==['-h']: print '<html><body>'+markmin2html(__doc__)+'</body></html>' elif len(sys.argv)>1: fargv = open(sys.argv[1],'r') try: print '<html><body>'+markmin2html(fargv.read())+'</body></html>' finally: fargv.close() else: doctest.testmod()
Python
#!/usr/bin/env python
# created by Massimo Di Pierro
# license MIT/BSD/GPL
import re
import cgi
import sys
import doctest
from optparse import OptionParser

__all__ = ['render', 'markmin2latex']

META = 'META'  # placeholder protecting ``...`` code blocks during rendering
regex_newlines = re.compile('(\n\r)|(\r\n)')
regex_dd = re.compile('\$\$(?P<latex>.*?)\$\$')
regex_code = re.compile('('+META+')|(``(?P<t>.*?)``(:(?P<c>\w+))?)', re.S)
# the first '# ...' heading becomes the \title of the document
regex_title = re.compile('^#{1} (?P<t>[^\n]+)', re.M)
# ordered (pattern, replacement) pairs mapping markmin markup to LaTeX.
# NOTE: every literal backslash in a re.sub replacement template must be
# written escaped ('\\\\' in source = '\\' in the template); unescaped forms
# such as '\\item' are passed through on Python 2 but are a hard
# "bad escape" error on Python 3.7+.  Output is unchanged.
regex_maps = [
    (re.compile('[ \t\r]+\n'), '\n'),
    (re.compile('[ \t\r]+\n'), '\n'),
    (re.compile('\*\*(?P<t>[^\s\*]+( +[^\s\*]+)*)\*\*'), '{\\\\bf \g<t>}'),
    (re.compile("''(?P<t>[^\s']+( +[^\s']+)*)''"), '{\\\\it \g<t>}'),
    (re.compile('^#{6} (?P<t>[^\n]+)', re.M), '\n\n{\\\\bf \g<t>}\n'),
    (re.compile('^#{5} (?P<t>[^\n]+)', re.M), '\n\n{\\\\bf \g<t>}\n'),
    (re.compile('^#{4} (?P<t>[^\n]+)', re.M), '\n\n\\\\goodbreak\\\\subsubsection{\g<t>}\n'),
    (re.compile('^#{3} (?P<t>[^\n]+)', re.M), '\n\n\\\\goodbreak\\\\subsection{\g<t>}\n'),
    (re.compile('^#{2} (?P<t>[^\n]+)', re.M), '\n\n\\\\goodbreak\\\\section{\g<t>}\n'),
    (re.compile('^#{1} (?P<t>[^\n]+)', re.M), ''),
    (re.compile('^\- +(?P<t>.*)', re.M), '\\\\begin{itemize}\n\\\\item \g<t>\n\\\\end{itemize}'),
    (re.compile('^\+ +(?P<t>.*)', re.M), '\\\\begin{itemize}\n\\\\item \g<t>\n\\\\end{itemize}'),
    (re.compile('\\\\end\{itemize\}\s+\\\\begin\{itemize\}'), '\n'),
    (re.compile('\n\s+\n'), '\n\n')]
regex_table = re.compile('^\-{4,}\n(?P<t>.*?)\n\-{4,}(:(?P<c>\w+))?\n', re.M|re.S)
regex_anchor = re.compile('\[\[(?P<t>\S+)\]\]')
regex_bibitem = re.compile('\-\s*\[\[(?P<t>\S+)\]\]')
regex_image_width = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +(?P<p>left|right|center) +(?P<w>\d+px)\]\]')
regex_image = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +(?P<p>left|right|center)\]\]')
#regex_video = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +video\]\]')
#regex_audio = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +audio\]\]')
regex_link = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+)\]\]')
regex_auto = re.compile('(?<!["\w])(?P<k>\w+://[\w\.\-\?&%]+)', re.M)
regex_commas = re.compile('[ ]+(?P<t>[,;\.])')
regex_noindent = re.compile('\n\n(?P<t>[a-z])')
regex_quote_left = re.compile('"(?=\w)')
regex_quote_right = re.compile('(?=\w\.)"')


def latex_escape(text, pound=True):
    """Escape LaTeX special characters in text; '#' only when pound=True."""
    text = text.replace('\\', '{\\textbackslash}')
    for c in '^_&$%{}':
        text = text.replace(c, '\\'+c)
    # undo the escaping this loop applied to the braces of {\textbackslash}
    text = text.replace('\\{\\textbackslash\\}', '{\\textbackslash}')
    if pound:
        text = text.replace('#', '\\#')
    return text


def render(text, extra={}, allowed={}, sep='p', image_mapper=lambda x: x):
    """
    Render markmin text to LaTeX.

    Returns (body, title, authors) where title is taken from the first
    '# ...' heading ('Title' if absent) and authors is collected from
    ``...``:author blocks.
    """
    #############################################################
    # replace all blocks marked with ``...``:class with META
    # store them into segments; they will be re-inserted at the end
    #############################################################
    text = str(text or '')
    segments, i = [], 0
    text = regex_dd.sub('``\g<latex>``:latex ', text)
    text = regex_newlines.sub('\n', text)
    while True:
        item = regex_code.search(text, i)
        if not item:
            break
        if item.group() == META:
            segments.append((None, None))
            text = text[:item.start()]+META+text[item.end():]
        else:
            c = item.group('c') or ''
            if 'code' in allowed and c not in allowed['code']:
                c = ''
            code = item.group('t').replace('!`!', '`')
            segments.append((code, c))
            text = text[:item.start()]+META+text[item.end():]
        i = item.start()+3

    #############################################################
    # extract the title, escape LaTeX specials, build bibliography
    #############################################################
    title = regex_title.search(text)
    if not title:
        title = 'Title'
    else:
        title = title.group('t')
    text = latex_escape(text, pound=False)
    texts = text.split('## References', 1)
    text = regex_anchor.sub('\\\\label{\g<t>}', texts[0])
    if len(texts) == 2:
        # a final "## References" section becomes a thebibliography block
        text += '\n\\begin{thebibliography}{999}\n'
        text += regex_bibitem.sub('\n\\\\bibitem{\g<t>}', texts[1])
        text += '\n\\end{thebibliography}\n'
    text = '\n'.join(t.strip() for t in text.split('\n'))
    for regex, sub in regex_maps:
        text = regex.sub(sub, text)
    text = text.replace('#', '\\#')
    text = text.replace('`', "'")

    #############################################################
    # process tables and blockquotes
    #############################################################
    while True:
        item = regex_table.search(text)
        if not item:
            break
        c = item.group('c') or ''
        if 'table' in allowed and c not in allowed['table']:
            c = ''
        content = item.group('t')
        if ' | ' in content:
            rows = content.replace('\n', '\\\\\n').replace(' | ', ' & ')
            row0, row2 = rows.split('\\\\\n', 1)
            cols = row0.count(' & ')+1
            cal = '{'+''.join('l' for j in range(cols))+'}'
            tabular = '\\begin{center}\n{\\begin{tabular}'+cal+'\\hline\n' + row0+'\\\\ \\hline\n'+row2 + ' \\\\ \\hline\n\\end{tabular}}\n\\end{center}'
            if row2.count('\n') > 20:
                # long tables get a page of their own
                tabular = '\\newpage\n'+tabular
            text = text[:item.start()] + tabular + text[item.end():]
        else:
            # a table without column separators is a blockquote
            text = text[:item.start()] + '\\begin{quote}' + content + '\\end{quote}' + text[item.end():]

    #############################################################
    # deal with images and links
    #############################################################
    def sub(x):
        # image_mapper translates the markup url to a local file; images it
        # rejects (returns falsy) are dropped from the output
        f = image_mapper(x.group('k'))
        if not f:
            return None
        return '\n\\begin{center}\\includegraphics[width=8cm]{%s}\\end{center}\n' % (f)
    text = regex_image_width.sub(sub, text)
    text = regex_image.sub(sub, text)
    text = regex_link.sub('{\\\\footnotesize\\\\href{\g<k>}{\g<t>}}', text)
    text = regex_commas.sub('\g<t>', text)
    text = regex_noindent.sub('\n\\\\noindent \g<t>', text)

    ### fix escaped underscores in image paths
    regex = re.compile('\\\\_[\w_]*\.(eps|png|jpg|gif)')
    while True:
        match = regex.search(text)
        if not match:
            break
        text = text[:match.start()]+text[match.start()+1:]
    text = regex_quote_left.sub('``', text)
    text = regex_quote_right.sub("''", text)

    #############################################################
    # process all stored code segments
    #############################################################
    parts = text.split(META)
    text = parts[0]
    authors = []
    for i, (code, b) in enumerate(segments):
        if code is None:
            html = META
        else:
            if b == 'hidden':
                html = ''
            elif b == 'author':
                author = latex_escape(code.strip())
                authors.append(author)
                html = ''
            elif b == 'inxx':
                html = '\inxx{%s}' % latex_escape(code)
            elif b == 'cite':
                html = '~\cite{%s}' % latex_escape(code.strip())
            elif b == 'ref':
                html = '~\ref{%s}' % latex_escape(code.strip())
            elif b == 'latex':
                if '\n' in code:
                    html = '\n\\begin{equation}\n%s\n\\end{equation}\n' % code.strip()
                else:
                    html = '$%s$' % code.strip()
            elif b == 'latex_eqnarray':
                code = code.strip()
                code = '\\\\'.join(x.replace('=', '&=&', 1) for x in code.split('\\\\'))
                html = '\n\\begin{eqnarray}\n%s\n\\end{eqnarray}\n' % code
            elif b.startswith('latex_'):
                key = b[6:]
                html = '\\begin{%s}%s\\end{%s}' % (key, code, key)
            elif b in extra:
                if code[:1] == '\n': code = code[1:]
                if code[-1:] == '\n': code = code[:-1]
                html = extra[b](code)
            # BUGFIX: was `code[:-1]=='\n'` (all-but-last-char), which is a
            # typo for `code[-1:]=='\n'`; blocks ending with a newline never
            # reached this lstlisting branch.
            elif code[:1] == '\n' or code[-1:] == '\n':
                if code[:1] == '\n': code = code[1:]
                if code[-1:] == '\n': code = code[:-1]
                if code.startswith('<') or code.startswith('{{') or code.startswith('http'):
                    # markup samples: disable keyword highlighting
                    html = '\\begin{lstlisting}[keywords={}]\n%s\n\\end{lstlisting}' % code
                else:
                    html = '\\begin{lstlisting}\n%s\n\\end{lstlisting}' % code
            else:
                if code[:1] == '\n': code = code[1:]
                if code[-1:] == '\n': code = code[:-1]
                html = '{\\ft %s}' % latex_escape(code)
        try:
            text = text+html+parts[i+1]
        except IndexError:
            # more segments than META placeholders survived processing
            text = text + '... WIKI PROCESSING ERROR ...'
            break
    text = text.replace(' ~\\cite', '~\\cite')
    return text, title, authors


WRAPPER = """
\\documentclass[12pt]{article}
\\usepackage{hyperref}
\\usepackage{listings}
\\usepackage{upquote}
\\usepackage{color}
\\usepackage{graphicx}
\\usepackage{grffile}
\\usepackage[utf8x]{inputenc}
\\definecolor{lg}{rgb}{0.9,0.9,0.9}
\\definecolor{dg}{rgb}{0.3,0.3,0.3}
\\def\\ft{\\small\\tt}
\\lstset{
   basicstyle=\\footnotesize,
   breaklines=true, basicstyle=\\ttfamily\\color{black}\\footnotesize,
   keywordstyle=\\bf\\ttfamily,
   commentstyle=\\it\\ttfamily,
   stringstyle=\\color{dg}\\it\\ttfamily,
   numbers=left, numberstyle=\\color{dg}\\tiny, stepnumber=1, numbersep=5pt,
   backgroundcolor=\\color{lg}, tabsize=4, showspaces=false,
   showstringspaces=false
}
\\title{%(title)s}
\\author{%(author)s}
\\begin{document}
\\maketitle
\\tableofcontents
\\newpage
%(body)s
\\end{document}
"""


def markmin2latex(data, image_mapper=lambda x: x, extra={}, wrapper=WRAPPER):
    """Render markmin data to a complete LaTeX document string."""
    body, title, authors = render(data, extra=extra, image_mapper=image_mapper)
    author = '\n\\and\n'.join(a.replace('\n', '\\\\\n\\footnotesize ') for a in authors)
    return wrapper % dict(title=title, author=author, body=body)


if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-i", "--info", dest="info", help="markmin help")
    parser.add_option("-t", "--test", dest="test", action="store_true", default=False)
    parser.add_option("-n", "--no_wrapper", dest="no_wrapper", action="store_true", default=False)
    parser.add_option("-1", "--one", dest="one", action="store_true", default=False,
                      help="switch section for chapter")
    parser.add_option("-w", "--wrapper", dest="wrapper", default=False,
                      help="latex file containing header and footer")
    (options, args) = parser.parse_args()
    if options.info:
        import markmin2html
        markmin2latex(markmin2html.__doc__)
    elif options.test:
        doctest.testmod()
    else:
        if options.wrapper:
            fwrapper = open(options.wrapper, 'rb')
            try:
                wrapper = fwrapper.read()
            finally:
                fwrapper.close()
        elif options.no_wrapper:
            wrapper = '%(body)s'
        else:
            wrapper = WRAPPER
        # BUGFIX: content_data used to be re-initialized inside the loop,
        # so only the last input file was kept; accumulate across all files.
        content_data = []
        for f in args:
            fargs = open(f, 'r')
            try:
                content_data.append(fargs.read())
            finally:
                fargs.close()
        content = '\n'.join(content_data)
        output = markmin2latex(content, wrapper=wrapper)
        if options.one:
            output = output.replace(r'\section*{', r'\chapter*{')
            output = output.replace(r'\section{', r'\chapter{')
            output = output.replace(r'subsection{', r'section{')
        print(output)
Python
""" Created by Massimo Di Pierro Licese BSD """ import subprocess import os import os.path import re import sys from tempfile import mkstemp, mkdtemp, NamedTemporaryFile from markmin2latex import markmin2latex __all__ = ['markmin2pdf'] def removeall(path): ERROR_STR= """Error removing %(path)s, %(error)s """ def rmgeneric(path, __func__): try: __func__(path) except OSError, (errno, strerror): print ERROR_STR % {'path' : path, 'error': strerror } files=[path] while files: file=files[0] if os.path.isfile(file): f=os.remove rmgeneric(file, os.remove) del files[0] elif os.path.isdir(file): nested = os.listdir(file) if not nested: rmgeneric(file, os.rmdir) del files[0] else: files = [os.path.join(file,x) for x in nested] + files def latex2pdf(latex, pdflatex='pdflatex', passes=3): """ calls pdflatex in a tempfolder Arguments: - pdflatex: path to the pdflatex command. Default is just 'pdflatex'. - passes: defines how often pdflates should be run in the texfile. """ pdflatex=pdflatex passes=passes warnings=[] # setup the envoriment tmpdir = mkdtemp() texfile = open(tmpdir+'/test.tex','wb') texfile.write(latex) texfile.seek(0) texfile.close() texfile = os.path.abspath(texfile.name) # start doing some work for i in range(0, passes): logfd,logname = mkstemp() outfile=os.fdopen(logfd) try: ret = subprocess.call([pdflatex, '-interaction=nonstopmode', '-output-format', 'pdf', '-output-directory', tmpdir, texfile], cwd=os.path.dirname(texfile), stdout=outfile, stderr=subprocess.PIPE) finally: outfile.close() re_errors=re.compile('^\!(.*)$',re.M) re_warnings=re.compile('^LaTeX Warning\:(.*)$',re.M) flog = open(logname) try: loglines = flog.read() finally: flog.close() errors=re_errors.findall(loglines) warnings=re_warnings.findall(loglines) os.unlink(logname) pdffile=texfile.rsplit('.',1)[0]+'.pdf' if os.path.isfile(pdffile): fpdf = open(pdffile, 'rb') try: data = fpdf.read() finally: fpdf.close() else: data = None removeall(tmpdir) return data, warnings, errors def 
markmin2pdf(text, image_mapper=lambda x: None, extra={}): return latex2pdf(markmin2latex(text,image_mapper=image_mapper, extra=extra)) if __name__ == '__main__': import sys import doctest import markmin2html if sys.argv[1:2]==['-h']: data, warnings, errors = markmin2pdf(markmin2html.__doc__) if errors: print 'ERRORS:'+'\n'.join(errors) print 'WARNGINS:'+'\n'.join(warnings) else: print data elif len(sys.argv)>1: fargv = open(sys.argv[1],'rb') try: data, warnings, errors = markmin2pdf(fargv.read()) finally: fargv.close() if errors: print 'ERRORS:'+'\n'.join(errors) print 'WARNGINS:'+'\n'.join(warnings) else: print data else: doctest.testmod()
Python
# this file exists for backward compatibility __all__ = ['DAL','Field','drivers','gae'] from gluon.dal import DAL, Field, Table, Query, Set, Expression, Row, Rows, drivers, BaseAdapter, SQLField, SQLTable, SQLXorable, SQLQuery, SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB, SQLALL, SQLCustomType, gae
Python
# -*- coding: utf-8 -*-
import struct
import re

try:
    import cStringIO as StringIO
except ImportError:
    import StringIO

from err import Warning, Error, InterfaceError, DataError, \
     DatabaseError, OperationalError, IntegrityError, InternalError, \
     NotSupportedError, ProgrammingError

# matches the VALUES(...) clause of an INSERT statement
# NOTE(review): not referenced inside this module; presumably used by
# sibling modules of the package -- confirm before removing.
insert_values = re.compile(r'\svalues\s*(\(.+\))', re.IGNORECASE)


class Cursor(object):
    '''
    This is the object you use to interact with the database.
    '''

    def __init__(self, connection):
        '''
        Do not create an instance of a Cursor yourself. Call
        connections.Connection.cursor().
        '''
        from weakref import proxy
        # weakref proxy avoids a Connection <-> Cursor reference cycle
        self.connection = proxy(connection)
        self.description = None
        self.rownumber = 0
        self.rowcount = -1
        self.arraysize = 1
        self._executed = None
        self.messages = []
        self.errorhandler = connection.errorhandler
        self._has_next = None
        self._rows = ()

    def __del__(self):
        '''
        When this gets GC'd close it.
        '''
        self.close()

    def close(self):
        '''
        Closing a cursor just exhausts all remaining data.
        '''
        if not self.connection:
            return
        try:
            # drain any remaining result sets so the connection stays usable
            while self.nextset():
                pass
        except:
            pass
        self.connection = None

    def _get_db(self):
        # Return the connection, or report "cursor closed" through the
        # errorhandler (which is expected to raise).
        if not self.connection:
            self.errorhandler(self, ProgrammingError, "cursor closed")
        return self.connection

    def _check_executed(self):
        if not self._executed:
            self.errorhandler(self, ProgrammingError, "execute() first")

    def setinputsizes(self, *args):
        """Does nothing, required by DB API."""

    def setoutputsizes(self, *args):
        """Does nothing, required by DB API."""

    # PEP 249 names the optional method setoutputsize(); keep the original
    # (misspelled) setoutputsizes() for backward compatibility and expose
    # the standard name as an alias.
    setoutputsize = setoutputsizes

    def nextset(self):
        ''' Get the next query set '''
        if self._executed:
            self.fetchall()
        del self.messages[:]
        if not self._has_next:
            return None
        connection = self._get_db()
        connection.next_result()
        self._do_get_result()
        return True

    def execute(self, query, args=None):
        ''' Execute a query '''
        from sys import exc_info
        conn = self._get_db()
        charset = conn.charset
        del self.messages[:]
        # TODO: make sure that conn.escape is correct
        if args is not None:
            query = query % conn.escape(args)
        if isinstance(query, unicode):
            query = query.encode(charset)
        result = 0
        try:
            result = self._query(query)
        except:
            # route every failure through the user-installed errorhandler
            exc, value, tb = exc_info()
            del tb
            self.messages.append((exc, value))
            self.errorhandler(self, exc, value)
        self._executed = query
        return result

    def executemany(self, query, args):
        ''' Run several data against one query '''
        del self.messages[:]
        conn = self._get_db()
        if not args:
            return
        charset = conn.charset
        if isinstance(query, unicode):
            query = query.encode(charset)
        self.rowcount = sum([self.execute(query, arg) for arg in args])
        return self.rowcount

    def callproc(self, procname, args=()):
        """Execute stored procedure procname with args

        procname -- string, name of procedure to execute on server

        args -- Sequence of parameters to use with procedure

        Returns the original args.

        Compatibility warning: PEP-249 specifies that any modified
        parameters must be returned. This is currently impossible
        as they are only available by storing them in a server
        variable and then retrieved by a query. Since stored
        procedures return zero or more result sets, there is no
        reliable way to get at OUT or INOUT parameters via callproc.
        The server variables are named @_procname_n, where procname
        is the parameter above and n is the position of the parameter
        (from zero). Once all result sets generated by the procedure
        have been fetched, you can issue a SELECT @_procname_0, ...
        query using .execute() to get any OUT or INOUT values.

        Compatibility warning: The act of calling a stored procedure
        itself creates an empty result set. This appears after any
        result sets generated by the procedure. This is non-standard
        behavior with respect to the DB-API. Be sure to use nextset()
        to advance through all result sets; otherwise you may get
        disconnected.
        """
        conn = self._get_db()
        # bind each argument into a server-side @_procname_n variable
        for index, arg in enumerate(args):
            q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
            if isinstance(q, unicode):
                q = q.encode(conn.charset)
            self._query(q)
        self.nextset()
        q = "CALL %s(%s)" % (procname,
                             ','.join(['@_%s_%d' % (procname, i)
                                       for i in range(len(args))]))
        if isinstance(q, unicode):
            q = q.encode(conn.charset)
        self._query(q)
        self._executed = q
        return args

    def fetchone(self):
        ''' Fetch the next row '''
        self._check_executed()
        if self._rows is None or self.rownumber >= len(self._rows):
            return None
        result = self._rows[self.rownumber]
        self.rownumber += 1
        return result

    def fetchmany(self, size=None):
        ''' Fetch several rows '''
        self._check_executed()
        # BUGFIX: the None check must come before slicing self._rows;
        # the original sliced first and would raise TypeError instead of
        # returning None for a rowless result.
        if self._rows is None:
            return None
        end = self.rownumber + (size or self.arraysize)
        result = self._rows[self.rownumber:end]
        self.rownumber = min(end, len(self._rows))
        return result

    def fetchall(self):
        ''' Fetch all the rows '''
        self._check_executed()
        if self._rows is None:
            return None
        if self.rownumber:
            result = self._rows[self.rownumber:]
        else:
            result = self._rows
        self.rownumber = len(self._rows)
        return result

    def scroll(self, value, mode='relative'):
        self._check_executed()
        if mode == 'relative':
            r = self.rownumber + value
        elif mode == 'absolute':
            r = value
        else:
            self.errorhandler(self, ProgrammingError,
                              "unknown scroll mode %s" % mode)
        if r < 0 or r >= len(self._rows):
            self.errorhandler(self, IndexError, "out of range")
        self.rownumber = r

    def _query(self, q):
        conn = self._get_db()
        self._last_executed = q
        conn.query(q)
        self._do_get_result()
        return self.rowcount

    def _do_get_result(self):
        # copy the state of the connection's freshly-read result set into
        # this cursor, then release it on the connection
        conn = self._get_db()
        self.rowcount = conn._result.affected_rows
        self.rownumber = 0
        self.description = conn._result.description
        self.lastrowid = conn._result.insert_id
        self._rows = conn._result.rows
        self._has_next = conn._result.has_next
        conn._result = None

    def __iter__(self):
        self._check_executed()
        result = self.rownumber and self._rows[self.rownumber:] or self._rows
        return iter(result)


# re-export the DB-API exception hierarchy at module level, as PEP 249
# requires them to be reachable from the cursor module as well
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
Python
# Conversion layer between Python values and MySQL wire data.
# escape_* functions render Python values as SQL literal strings (encoders);
# convert_* functions parse column bytes back into Python values (decoders).
import re
import datetime
import time
from constants import FIELD_TYPE, FLAG
from charset import charset_by_id

# Make sure a `set` type exists on pre-2.4 interpreters.
try:
    set
except NameError:
    try:
        from sets import BaseSet as set
    except ImportError:
        from sets import Set as set

# Characters that must be backslash-escaped inside MySQL string literals.
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
              '\'': '\\\'', '"': '\\"', '\\': '\\\\'}

def escape_item(val, charset):
    """Escape a single Python value into a SQL literal encoded in `charset`.

    Dispatches on the value's exact type via the `encoders` table.
    """
    if type(val) in [tuple, list, set]:
        return escape_sequence(val, charset)
    if type(val) is dict:
        return escape_dict(val, charset)
    if hasattr(val, "decode") and not isinstance(val, unicode):
        # deal with py3k bytes
        val = val.decode(charset)
    encoder = encoders[type(val)]
    val = encoder(val)
    if type(val) is str:
        return val
    val = val.encode(charset)
    return val

def escape_dict(val, charset):
    # Escape every value of the dict; keys are left untouched.
    n = {}
    for k, v in val.items():
        quoted = escape_item(v, charset)
        n[k] = quoted
    return n

def escape_sequence(val, charset):
    # Escape every item and return the results as a tuple.
    n = []
    for item in val:
        quoted = escape_item(item, charset)
        n.append(quoted)
    return tuple(n)

def escape_set(val, charset):
    # Render a set as a comma-joined list of escaped items.
    val = map(lambda x: escape_item(x, charset), val)
    return ','.join(val)

def escape_bool(value):
    # MySQL has no boolean literal; use 0/1.
    return str(int(value))

def escape_object(value):
    return str(value)

escape_int = escape_long = escape_object

def escape_float(value):
    # 15 significant digits round-trips a C double.
    return ('%.15g' % value)

def escape_string(value):
    # Quote and backslash-escape per ESCAPE_MAP.
    return ("'%s'" % ESCAPE_REGEX.sub(
            lambda match: ESCAPE_MAP.get(match.group(0)), value))

def escape_unicode(value):
    return escape_string(value)

def escape_None(value):
    return 'NULL'

def escape_timedelta(obj):
    # Days are folded into the hour count (MySQL TIME allows > 24 hours).
    seconds = int(obj.seconds) % 60
    minutes = int(obj.seconds // 60) % 60
    hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
    return escape_string('%02d:%02d:%02d' % (hours, minutes, seconds))

def escape_time(obj):
    s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute),
                            int(obj.second))
    if obj.microsecond:
        s += ".%f" % obj.microsecond
    return escape_string(s)

def escape_datetime(obj):
    return escape_string(obj.strftime("%Y-%m-%d %H:%M:%S"))

def escape_date(obj):
    return escape_string(obj.strftime("%Y-%m-%d"))

def escape_struct_time(obj):
    # time.struct_time -> datetime, then escape as DATETIME.
    return escape_datetime(datetime.datetime(*obj[:6]))

# NOTE(review): the doctests below still use the old MySQLdb helper names
# (datetime_or_None etc.) and single-argument signatures -- they do not
# match these functions' (connection, field, obj) signatures.
def convert_datetime(connection, field, obj):
    """Returns a DATETIME or TIMESTAMP column value as a datetime object:

      >>> datetime_or_None('2007-02-25 23:06:20')
      datetime.datetime(2007, 2, 25, 23, 6, 20)
      >>> datetime_or_None('2007-02-25T23:06:20')
      datetime.datetime(2007, 2, 25, 23, 6, 20)

    Illegal values are returned as None:

      >>> datetime_or_None('2007-02-31T23:06:20') is None
      True
      >>> datetime_or_None('0000-00-00 00:00:00') is None
      True

    """
    if not isinstance(obj, unicode):
        obj = obj.decode(connection.charset)
    # Accept either "YYYY-MM-DD HH:MM:SS" or ISO "T"-separated form;
    # anything else is treated as a bare DATE.
    if ' ' in obj:
        sep = ' '
    elif 'T' in obj:
        sep = 'T'
    else:
        return convert_date(connection, field, obj)

    try:
        ymd, hms = obj.split(sep, 1)
        return datetime.datetime(*[ int(x) for x in
                                    ymd.split('-')+hms.split(':') ])
    except ValueError:
        return convert_date(connection, field, obj)

def convert_timedelta(connection, field, obj):
    """Returns a TIME column as a timedelta object:

      >>> timedelta_or_None('25:06:17')
      datetime.timedelta(1, 3977)
      >>> timedelta_or_None('-25:06:17')
      datetime.timedelta(-2, 83177)

    Illegal values are returned as None:

      >>> timedelta_or_None('random crap') is None
      True

    Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
    can accept values as (+|-)DD HH:MM:SS. The latter format will not
    be parsed correctly by this function.
    """
    from math import modf
    try:
        if not isinstance(obj, unicode):
            obj = obj.decode(connection.charset)
        hours, minutes, seconds = tuple([int(x) for x in obj.split(':')])
        tdelta = datetime.timedelta(
            hours = int(hours),
            minutes = int(minutes),
            seconds = int(seconds),
            microseconds = int(modf(float(seconds))[0]*1000000),
            )
        return tdelta
    except ValueError:
        return None

def convert_time(connection, field, obj):
    """Returns a TIME column as a time object:

      >>> time_or_None('15:06:17')
      datetime.time(15, 6, 17)

    Illegal values are returned as None:

      >>> time_or_None('-25:06:17') is None
      True
      >>> time_or_None('random crap') is None
      True

    Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
    can accept values as (+|-)DD HH:MM:SS. The latter format will not
    be parsed correctly by this function.

    Also note that MySQL's TIME column corresponds more closely to
    Python's timedelta and not time. However if you want TIME columns
    to be treated as time-of-day and not a time offset, then you can
    use set this function as the converter for FIELD_TYPE.TIME.
    """
    from math import modf
    try:
        hour, minute, second = obj.split(':')
        return datetime.time(hour=int(hour), minute=int(minute),
                             second=int(second),
                             microsecond=int(modf(float(second))[0]*1000000))
    except ValueError:
        return None

def convert_date(connection, field, obj):
    """Returns a DATE column as a date object:

      >>> date_or_None('2007-02-26')
      datetime.date(2007, 2, 26)

    Illegal values are returned as None:

      >>> date_or_None('2007-02-31') is None
      True
      >>> date_or_None('0000-00-00') is None
      True

    """
    try:
        if not isinstance(obj, unicode):
            obj = obj.decode(connection.charset)
        return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
    except ValueError:
        return None

def convert_mysql_timestamp(connection, field, timestamp):
    """Convert a MySQL TIMESTAMP to a Timestamp object.

    MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:

      >>> mysql_timestamp_converter('2007-02-25 22:32:17')
      datetime.datetime(2007, 2, 25, 22, 32, 17)

    MySQL < 4.1 uses a big string of numbers:

      >>> mysql_timestamp_converter('20070225223217')
      datetime.datetime(2007, 2, 25, 22, 32, 17)

    Illegal values are returned as None:

      >>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
      True
      >>> mysql_timestamp_converter('00000000000000') is None
      True
    """
    if not isinstance(timestamp, unicode):
        timestamp = timestamp.decode(connection.charset)
    # A '-' in position 4 means the >= 4.1 DATETIME-style format.
    if timestamp[4] == '-':
        return convert_datetime(connection, field, timestamp)
    timestamp += "0"*(14-len(timestamp)) # padding
    year, month, day, hour, minute, second = \
        int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
        int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
    try:
        return datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None

def convert_set(s):
    # NOTE(review): unlike the other decoders this takes a single string,
    # yet it is registered in `decoders` for FIELD_TYPE.SET, where it
    # would be called as (connection, field, data) -- confirm intended
    # call path (convert_characters calls it correctly with one arg).
    return set(s.split(","))

def convert_bit(connection, field, b):
    #b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
    #return struct.unpack(">Q", b)[0]
    #
    # the snippet above is right, but MySQLdb doesn't process bits,
    # so we shouldn't either
    return b

def convert_characters(connection, field, data):
    """Decode a text/blob column according to its charset and flags."""
    field_charset = charset_by_id(field.charsetnr).name
    if field.flags & FLAG.SET:
        return convert_set(data.decode(field_charset))
    if field.flags & FLAG.BINARY:
        # Binary columns pass through untouched.
        return data

    if connection.use_unicode:
        data = data.decode(field_charset)
    elif connection.charset != field_charset:
        # Transcode into the connection's charset.
        data = data.decode(field_charset)
        data = data.encode(connection.charset)
    else:
        # NOTE(review): this branch decodes even when use_unicode is off,
        # so it returns unicode rather than bytes -- confirm intended.
        data = data.decode(connection.charset)
    return data

def convert_int(connection, field, data):
    return int(data)

def convert_long(connection, field, data):
    return long(data)

def convert_float(connection, field, data):
    return float(data)

# Python type -> escape function, used by escape_item.
encoders = {
        bool: escape_bool,
        int: escape_int,
        long: escape_long,
        float: escape_float,
        str: escape_string,
        unicode: escape_unicode,
        tuple: escape_sequence,
        list: escape_sequence,
        set: escape_sequence,
        dict: escape_dict,
        type(None): escape_None,
        datetime.date: escape_date,
        datetime.datetime: escape_datetime,
        datetime.timedelta: escape_timedelta,
        datetime.time: escape_time,
        time.struct_time: escape_struct_time,
        }

# MySQL field type -> conversion function, used when reading result rows.
decoders = {
        FIELD_TYPE.BIT: convert_bit,
        FIELD_TYPE.TINY: convert_int,
        FIELD_TYPE.SHORT: convert_int,
        FIELD_TYPE.LONG: convert_long,
        FIELD_TYPE.FLOAT: convert_float,
        FIELD_TYPE.DOUBLE: convert_float,
        FIELD_TYPE.DECIMAL: convert_float,
        FIELD_TYPE.NEWDECIMAL: convert_float,
        FIELD_TYPE.LONGLONG: convert_long,
        FIELD_TYPE.INT24: convert_int,
        FIELD_TYPE.YEAR: convert_int,
        FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
        FIELD_TYPE.DATETIME: convert_datetime,
        FIELD_TYPE.TIME: convert_timedelta,
        FIELD_TYPE.DATE: convert_date,
        FIELD_TYPE.SET: convert_set,
        FIELD_TYPE.BLOB: convert_characters,
        FIELD_TYPE.TINY_BLOB: convert_characters,
        FIELD_TYPE.MEDIUM_BLOB: convert_characters,
        FIELD_TYPE.LONG_BLOB: convert_characters,
        FIELD_TYPE.STRING: convert_characters,
        FIELD_TYPE.VAR_STRING: convert_characters,
        FIELD_TYPE.VARCHAR: convert_characters,
        #FIELD_TYPE.BLOB: str,
        #FIELD_TYPE.STRING: str,
        #FIELD_TYPE.VAR_STRING: str,
        #FIELD_TYPE.VARCHAR: str
        }
conversions = decoders # for MySQLdb compatibility

# Decimal support is optional: the decimal module appeared in Python 2.4.
try:
    # python version > 2.3
    from decimal import Decimal

    def convert_decimal(connection, field, data):
        return Decimal(data)
    decoders[FIELD_TYPE.DECIMAL] = convert_decimal
    decoders[FIELD_TYPE.NEWDECIMAL] = convert_decimal

    def escape_decimal(obj):
        return unicode(obj)
    encoders[Decimal] = escape_decimal

except ImportError:
    pass
Python
from time import localtime from datetime import date, datetime, time, timedelta Date = date Time = time TimeDelta = timedelta Timestamp = datetime def DateFromTicks(ticks): return date(*localtime(ticks)[:3]) def TimeFromTicks(ticks): return time(*localtime(ticks)[3:6]) def TimestampFromTicks(ticks): return datetime(*localtime(ticks)[:6])
Python
# DB-API 2.0 exception hierarchy (PEP 249) plus helpers that map MySQL
# server error numbers onto the right exception class.
import struct

# Make sure Exception/Warning are bound even on exotic interpreters that
# don't expose them as builtins.
# NOTE(review): referencing missing builtins raises NameError, not
# ImportError, so the fallbacks below may be unreachable -- confirm.
try:
    Exception, Warning
except ImportError:
    try:
        from exceptions import Exception, Warning
    except ImportError:
        import sys
        e = sys.modules['exceptions']
        Exception = e.Exception
        Warning = e.Warning

from constants import ER


class MySQLError(Exception):
    """Exception related to operation with MySQL."""


class Warning(Warning, MySQLError):
    """Exception raised for important warnings like data truncations
    while inserting, etc."""


class Error(MySQLError):
    """Exception that is the base class of all other error exceptions
    (not Warning)."""


class InterfaceError(Error):
    """Exception raised for errors that are related to the database
    interface rather than the database itself."""


class DatabaseError(Error):
    """Exception raised for errors that are related to the
    database."""


class DataError(DatabaseError):
    """Exception raised for errors that are due to problems with the
    processed data like division by zero, numeric value out of range,
    etc."""


class OperationalError(DatabaseError):
    """Exception raised for errors that are related to the database's
    operation and not necessarily under the control of the programmer,
    e.g. an unexpected disconnect occurs, the data source name is not
    found, a transaction could not be processed, a memory allocation
    error occurred during processing, etc."""


class IntegrityError(DatabaseError):
    """Exception raised when the relational integrity of the database
    is affected, e.g. a foreign key check fails, duplicate key,
    etc."""


class InternalError(DatabaseError):
    """Exception raised when the database encounters an internal
    error, e.g. the cursor is not valid anymore, the transaction is
    out of sync, etc."""


class ProgrammingError(DatabaseError):
    """Exception raised for programming errors, e.g. table not found
    or already exists, syntax error in the SQL statement, wrong number
    of parameters specified, etc."""


class NotSupportedError(DatabaseError):
    """Exception raised in case a method or database API was used
    which is not supported by the database, e.g. requesting a
    .rollback() on a connection that does not support transaction or
    has transactions turned off."""


# server errno -> exception class; populated below, consulted by
# _check_mysql_exception.
error_map = {}

def _map_error(exc, *errors):
    # Register every errno in `errors` as raising `exc`.
    for error in errors:
        error_map[error] = exc

_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR,
           ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME,
           ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE,
           ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION,
           ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION)
_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL,
           ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT,
           ER.PRIMARY_CANT_HAVE_NULL, ER.DATA_TOO_LONG,
           ER.DATETIME_FUNCTION_OVERFLOW)
_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW,
           ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED,
           ER.ROW_IS_REFERENCED_2, ER.CANNOT_ADD_FOREIGN)
_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK,
           ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED,
           ER.UNKNOWN_STORAGE_ENGINE)

# The helpers above are module-setup only; drop them from the namespace.
del _map_error, ER


def _get_error_info(data):
    """Parse (errno, sqlstate, message) out of a server error packet."""
    errno = struct.unpack('<h', data[1:3])[0]
    # A '#' marker after the errno means the 4.1-style packet carrying a
    # 5-byte SQLSTATE; otherwise it is the old 4.0 layout.
    if data[3] == "#":
        # version 4.1
        sqlstate = data[4:9].decode("utf8")
        errorvalue = data[9:].decode("utf8")
        return (errno, sqlstate, errorvalue)
    else:
        # version 4.0
        return (errno, None, data[3:].decode("utf8"))

def _check_mysql_exception(errinfo):
    # Raise the mapped exception class for this errno, or InternalError
    # when the errno is unknown.
    errno, sqlstate, errorvalue = errinfo
    errorclass = error_map.get(errno, None)
    if errorclass:
        raise errorclass, (errno,errorvalue)

    # couldn't find the right error number
    raise InternalError, (errno, errorvalue)

def raise_mysql_exception(data):
    """Parse a server error packet and raise the matching exception."""
    errinfo = _get_error_info(data)
    _check_mysql_exception(errinfo)
Python
import struct def byte2int(b): if isinstance(b, int): return b else: return struct.unpack("!B", b)[0] def int2byte(i): return struct.pack("!B", i) def join_bytes(bs): if len(bs) == 0: return "" else: rv = bs[0] for b in bs[1:]: rv += b return rv
Python
# Charset registry: maps MySQL charset/collation ids and names, as dumped
# from information_schema.collations (see the generator note below).

# charset id -> maximum bytes per character, for the multi-byte charsets
# the protocol code needs to know about.
MBLENGTH = {
        8:1,
        33:3,
        88:2,
        91:2
        }


class Charset:
    """One (id, charset name, collation) row; is_default marks the
    default collation for its charset."""
    def __init__(self, id, name, collation, is_default):
        self.id, self.name, self.collation = id, name, collation
        self.is_default = is_default == 'Yes'


class Charsets:
    """Lookup table of Charset entries, by numeric id or by name."""
    def __init__(self):
        self._by_id = {}

    def add(self, c):
        self._by_id[c.id] = c

    def by_id(self, id):
        # KeyError for unknown ids.
        return self._by_id[id]

    def by_name(self, name):
        # Returns the *default* collation for the named charset;
        # falls through (None) when no default entry matches.
        for c in self._by_id.values():
            if c.name == name and c.is_default:
                return c

_charsets = Charsets()
"""
Generated with:
mysql -N -s -e "select id, character_set_name, collation_name, is_default from information_schema.collations order by id;" | python -c "import sys
for l in sys.stdin.readlines():
        id, name, collation, is_default  = l.split(chr(9))
        print '_charsets.add(Charset(%s, \'%s\', \'%s\', \'%s\'))' \
                        % (id, name, collation, is_default.strip())
"
"""
_charsets.add(Charset(1, 'big5', 'big5_chinese_ci', 'Yes'))
_charsets.add(Charset(2, 'latin2', 'latin2_czech_cs', ''))
_charsets.add(Charset(3, 'dec8', 'dec8_swedish_ci', 'Yes'))
_charsets.add(Charset(4, 'cp850', 'cp850_general_ci', 'Yes'))
_charsets.add(Charset(5, 'latin1', 'latin1_german1_ci', ''))
_charsets.add(Charset(6, 'hp8', 'hp8_english_ci', 'Yes'))
_charsets.add(Charset(7, 'koi8r', 'koi8r_general_ci', 'Yes'))
_charsets.add(Charset(8, 'latin1', 'latin1_swedish_ci', 'Yes'))
_charsets.add(Charset(9, 'latin2', 'latin2_general_ci', 'Yes'))
_charsets.add(Charset(10, 'swe7', 'swe7_swedish_ci', 'Yes'))
_charsets.add(Charset(11, 'ascii', 'ascii_general_ci', 'Yes'))
_charsets.add(Charset(12, 'ujis', 'ujis_japanese_ci', 'Yes'))
_charsets.add(Charset(13, 'sjis', 'sjis_japanese_ci', 'Yes'))
_charsets.add(Charset(14, 'cp1251', 'cp1251_bulgarian_ci', ''))
_charsets.add(Charset(15, 'latin1', 'latin1_danish_ci', ''))
_charsets.add(Charset(16, 'hebrew', 'hebrew_general_ci', 'Yes'))
_charsets.add(Charset(18, 'tis620', 'tis620_thai_ci', 'Yes'))
_charsets.add(Charset(19, 'euckr', 'euckr_korean_ci', 'Yes'))
_charsets.add(Charset(20, 'latin7', 'latin7_estonian_cs', ''))
_charsets.add(Charset(21, 'latin2', 'latin2_hungarian_ci', ''))
_charsets.add(Charset(22, 'koi8u', 'koi8u_general_ci', 'Yes'))
_charsets.add(Charset(23, 'cp1251', 'cp1251_ukrainian_ci', ''))
_charsets.add(Charset(24, 'gb2312', 'gb2312_chinese_ci', 'Yes'))
_charsets.add(Charset(25, 'greek', 'greek_general_ci', 'Yes'))
_charsets.add(Charset(26, 'cp1250', 'cp1250_general_ci', 'Yes'))
_charsets.add(Charset(27, 'latin2', 'latin2_croatian_ci', ''))
_charsets.add(Charset(28, 'gbk', 'gbk_chinese_ci', 'Yes'))
_charsets.add(Charset(29, 'cp1257', 'cp1257_lithuanian_ci', ''))
_charsets.add(Charset(30, 'latin5', 'latin5_turkish_ci', 'Yes'))
_charsets.add(Charset(31, 'latin1', 'latin1_german2_ci', ''))
_charsets.add(Charset(32, 'armscii8', 'armscii8_general_ci', 'Yes'))
_charsets.add(Charset(33, 'utf8', 'utf8_general_ci', 'Yes'))
_charsets.add(Charset(34, 'cp1250', 'cp1250_czech_cs', ''))
_charsets.add(Charset(35, 'ucs2', 'ucs2_general_ci', 'Yes'))
_charsets.add(Charset(36, 'cp866', 'cp866_general_ci', 'Yes'))
_charsets.add(Charset(37, 'keybcs2', 'keybcs2_general_ci', 'Yes'))
_charsets.add(Charset(38, 'macce', 'macce_general_ci', 'Yes'))
_charsets.add(Charset(39, 'macroman', 'macroman_general_ci', 'Yes'))
_charsets.add(Charset(40, 'cp852', 'cp852_general_ci', 'Yes'))
_charsets.add(Charset(41, 'latin7', 'latin7_general_ci', 'Yes'))
_charsets.add(Charset(42, 'latin7', 'latin7_general_cs', ''))
_charsets.add(Charset(43, 'macce', 'macce_bin', ''))
_charsets.add(Charset(44, 'cp1250', 'cp1250_croatian_ci', ''))
_charsets.add(Charset(47, 'latin1', 'latin1_bin', ''))
_charsets.add(Charset(48, 'latin1', 'latin1_general_ci', ''))
_charsets.add(Charset(49, 'latin1', 'latin1_general_cs', ''))
_charsets.add(Charset(50, 'cp1251', 'cp1251_bin', ''))
_charsets.add(Charset(51, 'cp1251', 'cp1251_general_ci', 'Yes'))
_charsets.add(Charset(52, 'cp1251', 'cp1251_general_cs', ''))
_charsets.add(Charset(53, 'macroman', 'macroman_bin', ''))
_charsets.add(Charset(57, 'cp1256', 'cp1256_general_ci', 'Yes'))
_charsets.add(Charset(58, 'cp1257', 'cp1257_bin', ''))
_charsets.add(Charset(59, 'cp1257', 'cp1257_general_ci', 'Yes'))
_charsets.add(Charset(63, 'binary', 'binary', 'Yes'))
_charsets.add(Charset(64, 'armscii8', 'armscii8_bin', ''))
_charsets.add(Charset(65, 'ascii', 'ascii_bin', ''))
_charsets.add(Charset(66, 'cp1250', 'cp1250_bin', ''))
_charsets.add(Charset(67, 'cp1256', 'cp1256_bin', ''))
_charsets.add(Charset(68, 'cp866', 'cp866_bin', ''))
_charsets.add(Charset(69, 'dec8', 'dec8_bin', ''))
_charsets.add(Charset(70, 'greek', 'greek_bin', ''))
_charsets.add(Charset(71, 'hebrew', 'hebrew_bin', ''))
_charsets.add(Charset(72, 'hp8', 'hp8_bin', ''))
_charsets.add(Charset(73, 'keybcs2', 'keybcs2_bin', ''))
_charsets.add(Charset(74, 'koi8r', 'koi8r_bin', ''))
_charsets.add(Charset(75, 'koi8u', 'koi8u_bin', ''))
_charsets.add(Charset(77, 'latin2', 'latin2_bin', ''))
_charsets.add(Charset(78, 'latin5', 'latin5_bin', ''))
_charsets.add(Charset(79, 'latin7', 'latin7_bin', ''))
_charsets.add(Charset(80, 'cp850', 'cp850_bin', ''))
_charsets.add(Charset(81, 'cp852', 'cp852_bin', ''))
_charsets.add(Charset(82, 'swe7', 'swe7_bin', ''))
_charsets.add(Charset(83, 'utf8', 'utf8_bin', ''))
_charsets.add(Charset(84, 'big5', 'big5_bin', ''))
_charsets.add(Charset(85, 'euckr', 'euckr_bin', ''))
_charsets.add(Charset(86, 'gb2312', 'gb2312_bin', ''))
_charsets.add(Charset(87, 'gbk', 'gbk_bin', ''))
_charsets.add(Charset(88, 'sjis', 'sjis_bin', ''))
_charsets.add(Charset(89, 'tis620', 'tis620_bin', ''))
_charsets.add(Charset(90, 'ucs2', 'ucs2_bin', ''))
_charsets.add(Charset(91, 'ujis', 'ujis_bin', ''))
_charsets.add(Charset(92, 'geostd8', 'geostd8_general_ci', 'Yes'))
_charsets.add(Charset(93, 'geostd8', 'geostd8_bin', ''))
_charsets.add(Charset(94, 'latin1', 'latin1_spanish_ci', ''))
_charsets.add(Charset(95, 'cp932', 'cp932_japanese_ci', 'Yes'))
_charsets.add(Charset(96, 'cp932', 'cp932_bin', ''))
_charsets.add(Charset(97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes'))
_charsets.add(Charset(98, 'eucjpms', 'eucjpms_bin', ''))
_charsets.add(Charset(99, 'cp1250', 'cp1250_polish_ci', ''))
_charsets.add(Charset(128, 'ucs2', 'ucs2_unicode_ci', ''))
_charsets.add(Charset(129, 'ucs2', 'ucs2_icelandic_ci', ''))
_charsets.add(Charset(130, 'ucs2', 'ucs2_latvian_ci', ''))
_charsets.add(Charset(131, 'ucs2', 'ucs2_romanian_ci', ''))
_charsets.add(Charset(132, 'ucs2', 'ucs2_slovenian_ci', ''))
_charsets.add(Charset(133, 'ucs2', 'ucs2_polish_ci', ''))
_charsets.add(Charset(134, 'ucs2', 'ucs2_estonian_ci', ''))
_charsets.add(Charset(135, 'ucs2', 'ucs2_spanish_ci', ''))
_charsets.add(Charset(136, 'ucs2', 'ucs2_swedish_ci', ''))
_charsets.add(Charset(137, 'ucs2', 'ucs2_turkish_ci', ''))
_charsets.add(Charset(138, 'ucs2', 'ucs2_czech_ci', ''))
_charsets.add(Charset(139, 'ucs2', 'ucs2_danish_ci', ''))
_charsets.add(Charset(140, 'ucs2', 'ucs2_lithuanian_ci', ''))
_charsets.add(Charset(141, 'ucs2', 'ucs2_slovak_ci', ''))
_charsets.add(Charset(142, 'ucs2', 'ucs2_spanish2_ci', ''))
_charsets.add(Charset(143, 'ucs2', 'ucs2_roman_ci', ''))
_charsets.add(Charset(144, 'ucs2', 'ucs2_persian_ci', ''))
_charsets.add(Charset(145, 'ucs2', 'ucs2_esperanto_ci', ''))
_charsets.add(Charset(146, 'ucs2', 'ucs2_hungarian_ci', ''))
_charsets.add(Charset(192, 'utf8', 'utf8_unicode_ci', ''))
_charsets.add(Charset(193, 'utf8', 'utf8_icelandic_ci', ''))
_charsets.add(Charset(194, 'utf8', 'utf8_latvian_ci', ''))
_charsets.add(Charset(195, 'utf8', 'utf8_romanian_ci', ''))
_charsets.add(Charset(196, 'utf8', 'utf8_slovenian_ci', ''))
_charsets.add(Charset(197, 'utf8', 'utf8_polish_ci', ''))
_charsets.add(Charset(198, 'utf8', 'utf8_estonian_ci', ''))
_charsets.add(Charset(199, 'utf8', 'utf8_spanish_ci', ''))
_charsets.add(Charset(200, 'utf8', 'utf8_swedish_ci', ''))
_charsets.add(Charset(201, 'utf8', 'utf8_turkish_ci', ''))
_charsets.add(Charset(202, 'utf8', 'utf8_czech_ci', ''))
_charsets.add(Charset(203, 'utf8', 'utf8_danish_ci', ''))
_charsets.add(Charset(204, 'utf8', 'utf8_lithuanian_ci', ''))
_charsets.add(Charset(205, 'utf8', 'utf8_slovak_ci', ''))
_charsets.add(Charset(206, 'utf8', 'utf8_spanish2_ci', ''))
_charsets.add(Charset(207, 'utf8', 'utf8_roman_ci', ''))
_charsets.add(Charset(208, 'utf8', 'utf8_persian_ci', ''))
_charsets.add(Charset(209, 'utf8', 'utf8_esperanto_ci', ''))
_charsets.add(Charset(210, 'utf8', 'utf8_hungarian_ci', ''))


def charset_by_name(name):
    # Default collation entry for the named charset, or None.
    return _charsets.by_name(name)

def charset_by_id(id):
    # Charset entry for the numeric id; KeyError when unknown.
    return _charsets.by_id(id)
Python
import pymysql import unittest class PyMySQLTestCase(unittest.TestCase): databases = [ {"host":"localhost","user":"root", "passwd":"","db":"test_pymysql", "use_unicode": True}, {"host":"localhost","user":"root","passwd":"","db":"test_pymysql2"}] def setUp(self): self.connections = [] for params in self.databases: self.connections.append(pymysql.connect(**params)) def tearDown(self): for connection in self.connections: connection.close()
Python
from pymysql.tests.test_issues import * from pymysql.tests.test_example import * from pymysql.tests.test_basic import * if __name__ == "__main__": import unittest unittest.main()
Python
''' PyMySQL: A pure-Python drop-in replacement for MySQLdb. Copyright (c) 2010 PyMySQL contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
''' VERSION = (0, 4, None) from constants import FIELD_TYPE from converters import escape_dict, escape_sequence, escape_string from err import Warning, Error, InterfaceError, DataError, \ DatabaseError, OperationalError, IntegrityError, InternalError, \ NotSupportedError, ProgrammingError from times import Date, Time, Timestamp, \ DateFromTicks, TimeFromTicks, TimestampFromTicks import sys try: frozenset except NameError: from sets import ImmutableSet as frozenset try: from sets import BaseSet as set except ImportError: from sets import Set as set threadsafety = 1 apilevel = "2.0" paramstyle = "format" class DBAPISet(frozenset): def __ne__(self, other): if isinstance(other, set): return super(DBAPISet, self).__ne__(self, other) else: return other not in self def __eq__(self, other): if isinstance(other, frozenset): return frozenset.__eq__(self, other) else: return other in self def __hash__(self): return frozenset.__hash__(self) STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING]) BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB, FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB]) NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT, FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG, FIELD_TYPE.TINY, FIELD_TYPE.YEAR]) DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE]) TIME = DBAPISet([FIELD_TYPE.TIME]) TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME]) DATETIME = TIMESTAMP ROWID = DBAPISet() def Binary(x): """Return x as a binary type.""" return str(x) def Connect(*args, **kwargs): """ Connect to the database; see connections.Connection.__init__() for more information. 
""" from connections import Connection return Connection(*args, **kwargs) def get_client_info(): # for MySQLdb compatibility return '%s.%s.%s' % VERSION connect = Connection = Connect # we include a doctored version_info here for MySQLdb compatibility version_info = (1,2,2,"final",0) NULL = "NULL" __version__ = get_client_info() def thread_safe(): return True # match MySQLdb.thread_safe() def install_as_MySQLdb(): """ After this function is called, any application that imports MySQLdb or _mysql will unwittingly actually use """ sys.modules["MySQLdb"] = sys.modules["_mysql"] = sys.modules["pymysql"] __all__ = [ 'BINARY', 'Binary', 'Connect', 'Connection', 'DATE', 'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', 'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError', 'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER', 'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError', 'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel', 'connect', 'connections', 'constants', 'converters', 'cursors', 'debug', 'escape', 'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info', 'paramstyle', 'string_literal', 'threadsafety', 'version_info', "install_as_MySQLdb", "NULL","__version__", ]
Python
# Python implementation of the MySQL client-server protocol
#   http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol

# SHA-1 is needed for 4.1+ password scrambling; fall back to the old
# `sha` module on interpreters without hashlib.
try:
    import hashlib
    sha_new = lambda *args, **kwargs: hashlib.new("sha1", *args, **kwargs)
except ImportError:
    import sha
    sha_new = sha.new

import socket
try:
    import ssl
    SSL_ENABLED = True
except ImportError:
    SSL_ENABLED = False

import struct
import sys
import os
import ConfigParser

try:
    import cStringIO as StringIO
except ImportError:
    import StringIO

from charset import MBLENGTH, charset_by_name, charset_by_id
from cursors import Cursor
from constants import FIELD_TYPE, FLAG
from constants import SERVER_STATUS
from constants.CLIENT import *
from constants.COMMAND import *
from util import join_bytes, byte2int, int2byte
from converters import escape_item, encoders, decoders
from err import raise_mysql_exception, Warning, Error, \
     InterfaceError, DataError, DatabaseError, OperationalError, \
     IntegrityError, InternalError, NotSupportedError, ProgrammingError

DEBUG = False

# Length-coded-binary marker bytes from the MySQL wire protocol, and the
# number of bytes that follow each marker.
NULL_COLUMN = 251
UNSIGNED_CHAR_COLUMN = 251
UNSIGNED_SHORT_COLUMN = 252
UNSIGNED_INT24_COLUMN = 253
UNSIGNED_INT64_COLUMN = 254
UNSIGNED_CHAR_LENGTH = 1
UNSIGNED_SHORT_LENGTH = 2
UNSIGNED_INT24_LENGTH = 3
UNSIGNED_INT64_LENGTH = 8

DEFAULT_CHARSET = 'latin1'

# Largest payload a single packet can carry (3-byte length field).
MAX_PACKET_LENGTH = 256*256*256-1


def dump_packet(data):
    """Debug helper: hex/ascii dump of a packet plus the call stack."""

    def is_ascii(data):
        if byte2int(data) >= 65 and byte2int(data) <= 122: #data.isalnum():
            return data
        return '.'
    print "packet length %d" % len(data)
    print "method call[1]: %s" % sys._getframe(1).f_code.co_name
    print "method call[2]: %s" % sys._getframe(2).f_code.co_name
    print "method call[3]: %s" % sys._getframe(3).f_code.co_name
    print "method call[4]: %s" % sys._getframe(4).f_code.co_name
    print "method call[5]: %s" % sys._getframe(5).f_code.co_name
    print "-" * 88
    # 16 bytes per dump row.
    dump_data = [data[i:i+16] for i in xrange(len(data)) if i%16 == 0]
    for d in dump_data:
        print ' '.join(map(lambda x:"%02X" % byte2int(x), d)) + \
              ' ' * (16 - len(d)) + ' ' * 2 + \
              ' '.join(map(lambda x:"%s" % is_ascii(x), d))
    print "-" * 88
    print ""


def _scramble(password, message):
    """4.1+ challenge-response: scramble `password` with the server's
    `message` (salt) using double SHA-1."""
    if password == None or len(password) == 0:
        return int2byte(0)
    if DEBUG: print 'password=' + password
    stage1 = sha_new(password).digest()
    stage2 = sha_new(stage1).digest()
    s = sha_new()
    s.update(message)
    s.update(stage2)
    result = s.digest()
    return _my_crypt(result, stage1)

def _my_crypt(message1, message2):
    # Byte-wise XOR of the two digests, prefixed with the length byte.
    length = len(message1)
    result = struct.pack('B', length)
    for i in xrange(length):
        x = (struct.unpack('B', message1[i:i+1])[0] ^ \
             struct.unpack('B', message2[i:i+1])[0])
        result += struct.pack('B', x)
    return result

# old_passwords support ported from libmysql/password.c
SCRAMBLE_LENGTH_323 = 8


class RandStruct_323(object):
    """Pseudo-random generator matching MySQL's pre-4.1 randominit()."""
    def __init__(self, seed1, seed2):
        self.max_value = 0x3FFFFFFFL
        self.seed1 = seed1 % self.max_value
        self.seed2 = seed2 % self.max_value

    def my_rnd(self):
        self.seed1 = (self.seed1 * 3L + self.seed2) % self.max_value
        self.seed2 = (self.seed1 + self.seed2 + 33L) % self.max_value
        return float(self.seed1) / float(self.max_value)


def _scramble_323(password, message):
    """Pre-4.1 password scrambling (old_passwords)."""
    hash_pass = _hash_password_323(password)
    hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323])
    hash_pass_n = struct.unpack(">LL", hash_pass)
    hash_message_n = struct.unpack(">LL", hash_message)

    rand_st = RandStruct_323(hash_pass_n[0] ^ hash_message_n[0],
                             hash_pass_n[1] ^ hash_message_n[1])
    outbuf = StringIO.StringIO()
    for _ in xrange(min(SCRAMBLE_LENGTH_323, len(message))):
        outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64))
    extra = int2byte(int(rand_st.my_rnd() * 31))
    out = outbuf.getvalue()
    outbuf = StringIO.StringIO()
    # Final pass XORs every byte with the `extra` byte.
    for c in out:
        outbuf.write(int2byte(byte2int(c) ^ byte2int(extra)))
    return outbuf.getvalue()

def _hash_password_323(password):
    # Pre-4.1 hash_password(); whitespace in the password is skipped.
    nr = 1345345333L
    add = 7L
    nr2 = 0x12345671L

    for c in [byte2int(x) for x in password if x not in (' ', '\t')]:
        nr^= (((nr & 63)+add)*c)+ (nr << 8) & 0xFFFFFFFF
        nr2= (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF
        add= (add + c) & 0xFFFFFFFF

    r1 = nr & ((1L << 31) - 1L) # kill sign bits
    r2 = nr2 & ((1L << 31) - 1L)

    # pack
    return struct.pack(">LL", r1, r2)

def pack_int24(n):
    # Little-endian 3-byte integer.
    return struct.pack('BBB', n&0xFF, (n>>8)&0xFF, (n>>16)&0xFF)

def unpack_uint16(n):
    return struct.unpack('<H', n[0:2])[0]


# TODO: stop using bit-shifting in these functions...
# TODO: rename to "uint" to make it clear they're unsigned...
def unpack_int24(n):
    return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0] << 8) +\
        (struct.unpack('B',n[2])[0] << 16)

def unpack_int32(n):
    return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0] << 8) +\
        (struct.unpack('B',n[2])[0] << 16) + (struct.unpack('B', n[3])[0] << 24)

def unpack_int64(n):
    return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0]<<8) +\
        (struct.unpack('B',n[2])[0] << 16) + (struct.unpack('B',n[3])[0]<<24)+\
        (struct.unpack('B',n[4])[0] << 32) + (struct.unpack('B',n[5])[0]<<40)+\
        (struct.unpack('B',n[6])[0] << 48) + (struct.unpack('B',n[7])[0]<<56)


def defaulterrorhandler(connection, cursor, errorclass, errorvalue):
    """Default MySQLdb-style error handler: record the error on the
    cursor (or connection) messages list, then raise."""
    err = errorclass, errorvalue

    if DEBUG:
        raise

    if cursor:
        cursor.messages.append(err)
    else:
        connection.messages.append(err)
    del cursor
    del connection

    # Wrap non-Error exception classes so callers always see the
    # DB-API hierarchy.
    if not issubclass(errorclass, Error):
        raise Error(errorclass, errorvalue)
    else:
        raise errorclass, errorvalue


class MysqlPacket(object):
    """Representation of a MySQL response packet.  Reads in the packet
    from the network socket, removes packet header and provides an
    interface for reading/parsing the packet results."""

    def __init__(self, socket):
        self.__position = 0
        self.__recv_packet(socket)
        # Don't keep a reference to the socket alive on the packet.
        del socket

    def __recv_packet(self, socket):
        """Parse the packet header and read entire packet payload into buffer."""
        # The 4-byte header: 3-byte payload length + 1-byte sequence number.
        packet_header = socket.recv(4)
        while len(packet_header) < 4:
            d = socket.recv(4 - len(packet_header))
            if len(d) == 0:
                raise OperationalError(2013, "Lost connection to MySQL server during query")
            packet_header += d

        if DEBUG: dump_packet(packet_header)
        packet_length_bin = packet_header[:3]
        self.__packet_number = byte2int(packet_header[3])
        # TODO: check packet_num is correct (+1 from last packet)

        bin_length = packet_length_bin + int2byte(0) # pad little-endian number
        bytes_to_read = struct.unpack('<I', bin_length)[0]

        payload_buff = []  # this is faster than cStringIO
        while bytes_to_read > 0:
            recv_data = socket.recv(bytes_to_read)
            if len(recv_data) == 0:
                raise OperationalError(2013, "Lost connection to MySQL server during query")
            if DEBUG: dump_packet(recv_data)
            payload_buff.append(recv_data)
            bytes_to_read -= len(recv_data)
        self.__data = join_bytes(payload_buff)

    def packet_number(self):
        return self.__packet_number

    def get_all_data(self):
        return self.__data

    def read(self, size):
        """Read the first 'size' bytes in packet and advance cursor past them."""
        result = self.peek(size)
        self.advance(size)
        return result

    def read_all(self):
        """Read all remaining data in the packet.

        (Subsequent read() or peek() will return errors.)
        """
        result = self.__data[self.__position:]
        self.__position = None  # ensure no subsequent read() or peek()
        return result

    def advance(self, length):
        """Advance the cursor in data buffer 'length' bytes."""
        new_position = self.__position + length
        if new_position < 0 or new_position > len(self.__data):
            raise Exception('Invalid advance amount (%s) for cursor.  '
                            'Position=%s' % (length, new_position))
        self.__position = new_position

    def rewind(self, position=0):
        """Set the position of the data buffer cursor to 'position'."""
        if position < 0 or position > len(self.__data):
            raise Exception("Invalid position to rewind cursor to: %s." % position)
        self.__position = position

    def peek(self, size):
        """Look at the first 'size' bytes in packet without moving cursor."""
        result = self.__data[self.__position:(self.__position+size)]
        if len(result) != size:
            error = ('Result length not requested length:\n'
                     'Expected=%s.  Actual=%s.  Position: %s.  Data Length: %s'
                     % (size, len(result), self.__position, len(self.__data)))
            if DEBUG:
                print error
                self.dump()
            raise AssertionError(error)
        return result

    def get_bytes(self, position, length=1):
        """Get 'length' bytes starting at 'position'.

        Position is start of payload (first four packet header bytes are not
        included) starting at index '0'.

        No error checking is done.  If requesting outside end of buffer
        an empty string (or string shorter than 'length') may be returned!
        """
        return self.__data[position:(position+length)]

    def read_length_coded_binary(self):
        """Read a 'Length Coded Binary' number from the data buffer.

        Length coded numbers can be anywhere from 1 to 9 bytes depending
        on the value of the first byte.
        """
        c = byte2int(self.read(1))
        if c == NULL_COLUMN:
            return None
        if c < UNSIGNED_CHAR_COLUMN:
            return c
        elif c == UNSIGNED_SHORT_COLUMN:
            return unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH))
        elif c == UNSIGNED_INT24_COLUMN:
            return unpack_int24(self.read(UNSIGNED_INT24_LENGTH))
        elif c == UNSIGNED_INT64_COLUMN:
            # TODO: what was 'longlong'?  confirm it wasn't used?
            return unpack_int64(self.read(UNSIGNED_INT64_LENGTH))

    def read_length_coded_string(self):
        """Read a 'Length Coded String' from the data buffer.

        A 'Length Coded String' consists first of a length coded
        (unsigned, positive) integer represented in 1-9 bytes followed by
        that many bytes of binary data.  (For example "cat" would be "3cat".)
        """
        length = self.read_length_coded_binary()
        if length is None:
            return None
        return self.read(length)

    def is_ok_packet(self):
        return byte2int(self.get_bytes(0)) == 0

    def is_eof_packet(self):
        return byte2int(self.get_bytes(0)) == 254  # 'fe'

    def is_resultset_packet(self):
        field_count = byte2int(self.get_bytes(0))
        return field_count >= 1 and field_count <= 250

    def is_error_packet(self):
        return byte2int(self.get_bytes(0)) == 255

    def check_error(self):
        # Raise the mapped DB-API exception if this is an error packet.
        if self.is_error_packet():
            self.rewind()
            self.advance(1)  # field_count == error (we already know that)
            errno = unpack_uint16(self.read(2))
            if DEBUG: print "errno = %d" % errno
            raise_mysql_exception(self.__data)

    def dump(self):
        dump_packet(self.__data)


class FieldDescriptorPacket(MysqlPacket):
    """A MysqlPacket that represents a specific column's metadata in the result.

    Parsing is automatically done and the results are exported via public
    attributes on the class such as: db, table_name, name, length, type_code.
    """

    def __init__(self, *args):
        MysqlPacket.__init__(self, *args)
        self.__parse_field_descriptor()

    def __parse_field_descriptor(self):
        """Parse the 'Field Descriptor' (Metadata) packet.

        This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).
        """
        self.catalog = self.read_length_coded_string()
        self.db = self.read_length_coded_string()
        self.table_name = self.read_length_coded_string()
        self.org_table = self.read_length_coded_string()
        self.name = self.read_length_coded_string()
        self.org_name = self.read_length_coded_string()
        self.advance(1)  # non-null filler
        self.charsetnr = struct.unpack('<H', self.read(2))[0]
        self.length = struct.unpack('<I', self.read(4))[0]
        self.type_code = byte2int(self.read(1))
        self.flags = struct.unpack('<H', self.read(2))[0]
        self.scale = byte2int(self.read(1))  # "decimals"
        self.advance(2)  # filler (always 0x00)

        # 'default' is a length coded binary and is still in the buffer?
        # not used for normal result sets...
# --- FieldDescriptorPacket (continued; class header precedes this span) ---

    def description(self):
        """Provides a 7-item tuple compatible with the Python PEP249 DB Spec."""
        desc = []
        desc.append(self.name)
        desc.append(self.type_code)
        desc.append(None)  # TODO: display_length; should this be self.length?
        desc.append(self.get_column_length())  # 'internal_size'
        desc.append(self.get_column_length())  # 'precision'  # TODO: why!?!?
        desc.append(self.scale)

        # 'null_ok' -- can this be True/False rather than 1/0?
        #              if so just do: desc.append(bool(self.flags % 2 == 0))
        # (bit 0 of flags is the NOT_NULL flag, so an even value means NULLable)
        if self.flags % 2 == 0:
            desc.append(1)
        else:
            desc.append(0)
        return tuple(desc)

    def get_column_length(self):
        # VAR_STRING lengths are reported in bytes; divide by the charset's
        # max bytes-per-character to get a character count.
        if self.type_code == FIELD_TYPE.VAR_STRING:
            mblen = MBLENGTH.get(self.charsetnr, 1)
            return self.length // mblen
        return self.length

    def __str__(self):
        return ('%s %s.%s.%s, type=%s'
                % (self.__class__, self.db, self.table_name, self.name,
                   self.type_code))


class Connection(object):
    """
    Representation of a socket with a mysql server.

    The proper way to get an instance of this class is to call
    connect()."""

    # class-level default; instances may override
    errorhandler = defaulterrorhandler

    def __init__(self, host="localhost", user=None, passwd="",
                 db=None, port=3306, unix_socket=None,
                 charset='', sql_mode=None,
                 read_default_file=None, conv=decoders, use_unicode=None,
                 client_flag=0, cursorclass=Cursor, init_command=None,
                 connect_timeout=None, ssl=None, read_default_group=None,
                 compress=None, named_pipe=None):
        """
        Establish a connection to the MySQL database. Accepts several
        arguments:

        host: Host where the database server is located
        user: Username to log in as
        passwd: Password to use.
        db: Database to use, None to not use a particular one.
        port: MySQL port to use, default is usually OK.
        unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
        charset: Charset you want to use.
        sql_mode: Default SQL_MODE to use.
        read_default_file: Specifies my.cnf file to read these parameters
            from under the [client] section.
        conv: Decoders dictionary to use instead of the default one. This is
            used to provide custom marshalling of types. See converters.
        use_unicode: Whether or not to default to unicode strings. This
            option defaults to true for Py3k.
        client_flag: Custom flags to send to MySQL. Find potential values
            in constants.CLIENT.
        cursorclass: Custom cursor class to use.
        init_command: Initial SQL statement to run when connection is
            established.
        connect_timeout: Timeout before throwing an exception when connecting.
        ssl: A dict of arguments similar to mysql_ssl_set()'s parameters.
            For now the capath and cipher arguments are not supported.
        read_default_group: Group to read from in the configuration file.
        compress; Not supported
        named_pipe: Not supported
        """
        if use_unicode is None and sys.version_info[0] > 2:
            use_unicode = True

        if compress or named_pipe:
            raise NotImplementedError, "compress and named_pipe arguments are not supported"

        if ssl and (ssl.has_key('capath') or ssl.has_key('cipher')):
            raise NotImplementedError, 'ssl options capath and cipher are not supported'

        self.ssl = False
        if ssl:
            if not SSL_ENABLED:
                raise NotImplementedError, "ssl module not found"
            self.ssl = True
            client_flag |= SSL
            # stash key/cert/ca paths for the TLS handshake (None if absent)
            for k in ('key', 'cert', 'ca'):
                v = None
                if ssl.has_key(k):
                    v = ssl[k]
                setattr(self, k, v)

        if read_default_group and not read_default_file:
            if sys.platform.startswith("win"):
                read_default_file = "c:\\my.ini"
            else:
                read_default_file = "/etc/my.cnf"

        if read_default_file:
            if not read_default_group:
                read_default_group = "client"

            cfg = ConfigParser.RawConfigParser()
            cfg.read(os.path.expanduser(read_default_file))

            def _config(key, default):
                # read one option; fall back to the passed-in default
                try:
                    return cfg.get(read_default_group, key)
                except:
                    return default

            # NOTE(review): values from the config file are strings; in
            # particular 'port' is never converted to int here — confirm
            # socket.connect() tolerates that, or cast.
            user = _config("user", user)
            passwd = _config("password", passwd)
            host = _config("host", host)
            db = _config("db", db)
            unix_socket = _config("socket", unix_socket)
            port = _config("port", port)
            charset = _config("default-character-set", charset)

        self.host = host
        self.port = port
        self.user = user
        self.password = passwd
        self.db = db
        self.unix_socket = unix_socket
        if charset:
            self.charset = charset
            self.use_unicode = True
        else:
            self.charset = DEFAULT_CHARSET
            self.use_unicode = False

        if use_unicode:
            self.use_unicode = use_unicode

        client_flag |= CAPABILITIES
        client_flag |= MULTI_STATEMENTS
        if self.db:
            client_flag |= CONNECT_WITH_DB
        self.client_flag = client_flag

        self.cursorclass = cursorclass
        self.connect_timeout = connect_timeout

        self._connect()

        self.messages = []
        self.set_charset(charset)
        self.encoders = encoders
        self.decoders = conv

        self._affected_rows = 0
        self.host_info = "Not connected"

        # DB-API default: transactions are explicit
        self.autocommit(False)

        if sql_mode is not None:
            c = self.cursor()
            c.execute("SET sql_mode=%s", (sql_mode,))
            self.commit()

        if init_command is not None:
            c = self.cursor()
            c.execute(init_command)
            self.commit()

    def close(self):
        ''' Send the quit message and close the socket '''
        # '<i' packs the 4-byte header: payload length 1, sequence number 0
        send_data = struct.pack('<i', 1) + int2byte(COM_QUIT)
        self.socket.send(send_data)
        self.socket.close()
        self.socket = None

    def autocommit(self, value):
        ''' Set whether or not to commit after every execute() '''
        try:
            self._execute_command(COM_QUERY, "SET AUTOCOMMIT = %s" % \
                self.escape(value))
            self.read_packet()
        except:
            exc, value, tb = sys.exc_info()
            self.errorhandler(None, exc, value)

    def commit(self):
        ''' Commit changes to stable storage '''
        try:
            self._execute_command(COM_QUERY, "COMMIT")
            self.read_packet()
        except:
            exc, value, tb = sys.exc_info()
            self.errorhandler(None, exc, value)

    def rollback(self):
        ''' Roll back the current transaction '''
        try:
            self._execute_command(COM_QUERY, "ROLLBACK")
            self.read_packet()
        except:
            exc, value, tb = sys.exc_info()
            self.errorhandler(None, exc, value)

    def escape(self, obj):
        ''' Escape whatever value you pass to it '''
        return escape_item(obj, self.charset)

    def literal(self, obj):
        ''' Alias for escape() '''
        return escape_item(obj, self.charset)

    def cursor(self):
        ''' Create a new cursor to execute queries with '''
        return self.cursorclass(self)

    def __enter__(self):
        ''' Context manager that returns a Cursor '''
        return self.cursor()

    def __exit__(self, exc, value, traceback):
        ''' On successful exit, commit. On exception, rollback. '''
        if exc:
            self.rollback()
        else:
            self.commit()

    # The following methods are INTERNAL USE ONLY (called from Cursor)
    def query(self, sql):
        self._execute_command(COM_QUERY, sql)
        self._affected_rows = self._read_query_result()
        return self._affected_rows

    def next_result(self):
        # advance to the next result set of a multi-statement query
        self._affected_rows = self._read_query_result()
        return self._affected_rows

    def affected_rows(self):
        return self._affected_rows

    def kill(self, thread_id):
        # ask the server to kill the connection with the given thread id
        arg = struct.pack('<I', thread_id)
        try:
            self._execute_command(COM_PROCESS_KILL, arg)
        except:
            exc, value, tb = sys.exc_info()
            self.errorhandler(None, exc, value)
            return
        pkt = self.read_packet()
        return pkt.is_ok_packet()

    def ping(self, reconnect=True):
        ''' Check if the server is alive '''
        try:
            self._execute_command(COM_PING, "")
        except:
            if reconnect:
                # one reconnect attempt, then ping again without retrying
                self._connect()
                return self.ping(False)
            else:
                exc, value, tb = sys.exc_info()
                self.errorhandler(None, exc, value)
                return
        pkt = self.read_packet()
        return pkt.is_ok_packet()

    def set_charset(self, charset):
        # issue SET NAMES and remember the charset on success
        try:
            if charset:
                self._execute_command(COM_QUERY, "SET NAMES %s" % self.escape(charset))
                self.read_packet()
                self.charset = charset
        except:
            exc, value, tb = sys.exc_info()
            self.errorhandler(None, exc, value)

    def _connect(self):
        """Open the TCP or UNIX socket, then read the server greeting and
        authenticate."""
        try:
            if self.unix_socket and (self.host == 'localhost' or self.host == '127.0.0.1'):
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                # apply the connect timeout only for the connect() call itself
                t = sock.gettimeout()
                sock.settimeout(self.connect_timeout)
                sock.connect(self.unix_socket)
                sock.settimeout(t)
                self.host_info = "Localhost via UNIX socket"
                if DEBUG: print 'connected using unix_socket'
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                t = sock.gettimeout()
                sock.settimeout(self.connect_timeout)
                sock.connect((self.host, self.port))
                sock.settimeout(t)
                self.host_info = "socket %s:%d" % (self.host, self.port)
                if DEBUG: print 'connected using socket'
            self.socket = sock
            self._get_server_information()
            self._request_authentication()
        except socket.error, e:
            raise OperationalError(2003, "Can't connect to MySQL server on %r (%d)" % (self.host, e.args[0]))

    def read_packet(self, packet_type=MysqlPacket):
        """Read an entire "mysql packet" in its entirety from the network
        and return a MysqlPacket type that represents the results."""
        # TODO: is socket.recv(small_number) significantly slower than
        #       socket.recv(large_number)?  if so, maybe we should buffer
        #       the socket.recv() (though that obviously makes memory management
        #       more complicated.
        packet = packet_type(self.socket)
        packet.check_error()
        return packet

    def _read_query_result(self):
        result = MySQLResult(self)
        result.read()
        self._result = result
        return result.affected_rows

    def _send_command(self, command, sql):
        """Frame `command` + `sql` into MySQL packets and send them,
        splitting payloads larger than MAX_PACKET_LENGTH."""
        #send_data = struct.pack('<i', len(sql) + 1) + command + sql

        # could probably be more efficient, at least it's correct
        if not self.socket:
            self.errorhandler(None, InterfaceError, "(0, '')")

        if isinstance(sql, unicode):
            sql = sql.encode(self.charset)

        buf = int2byte(command) + sql
        pckt_no = 0
        while len(buf) >= MAX_PACKET_LENGTH:
            # '<i' gives 4 bytes; dropping the last yields the 3-byte length,
            # then the 1-byte sequence number is appended
            header = struct.pack('<i', MAX_PACKET_LENGTH)[:-1]+int2byte(pckt_no)
            send_data = header + buf[:MAX_PACKET_LENGTH]
            self.socket.send(send_data)
            if DEBUG: dump_packet(send_data)
            buf = buf[MAX_PACKET_LENGTH:]
            pckt_no += 1
        header = struct.pack('<i', len(buf))[:-1]+int2byte(pckt_no)
        self.socket.send(header+buf)

        #sock = self.socket
        #sock.send(send_data)
        #

    def _execute_command(self, command, sql):
        self._send_command(command, sql)

    def _request_authentication(self):
        self._send_authentication()

    def _send_authentication(self):
        """Send the client authentication packet (and, if requested,
        upgrade the socket to TLS first)."""
        sock = self.socket
        self.client_flag |= CAPABILITIES
        if self.server_version.startswith('5'):
            self.client_flag |= MULTI_RESULTS

        if self.user is None:
            raise ValueError, "Did not specify a username"

        charset_id = charset_by_name(self.charset).id
        self.user = self.user.encode(self.charset)

        # client flags, max packet size (1), charset id, 23 filler bytes
        data_init = struct.pack('<i', self.client_flag) + struct.pack("<I", 1) + \
                     int2byte(charset_id) + int2byte(0)*23

        next_packet = 1

        if self.ssl:
            # send an abbreviated handshake, then wrap the socket in TLS
            data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
            next_packet += 1

            if DEBUG: dump_packet(data)

            sock.send(data)
            # `ssl` here is the ssl *module* (the constructor's ssl dict only
            # shadowed it inside __init__)
            sock = self.socket = ssl.wrap_socket(sock, keyfile=self.key,
                                                 certfile=self.cert,
                                                 ssl_version=ssl.PROTOCOL_TLSv1,
                                                 cert_reqs=ssl.CERT_REQUIRED,
                                                 ca_certs=self.ca)

        data = data_init + self.user+int2byte(0) + _scramble(self.password.encode(self.charset), self.salt)

        if self.db:
            self.db = self.db.encode(self.charset)
            data += self.db + int2byte(0)

        data = pack_int24(len(data)) + int2byte(next_packet) + data
        next_packet += 2

        if DEBUG: dump_packet(data)

        sock.send(data)

        auth_packet = MysqlPacket(sock)
        auth_packet.check_error()
        if DEBUG: auth_packet.dump()

        # if old_passwords is enabled the packet will be 1 byte long and
        # have the octet 254
        if auth_packet.is_eof_packet():
            # send legacy handshake
            #raise NotImplementedError, "old_passwords are not supported. Check to see if mysqld was started with --old-passwords, if old-passwords=1 in a my.cnf file, or if there are some short hashes in your mysql.user table."
            # TODO: is this the correct charset?
            data = _scramble_323(self.password.encode(self.charset), self.salt.encode(self.charset)) + int2byte(0)
            data = pack_int24(len(data)) + int2byte(next_packet) + data

            sock.send(data)
            auth_packet = MysqlPacket(sock)
            auth_packet.check_error()
            if DEBUG: auth_packet.dump()

    # _mysql support
    def thread_id(self):
        return self.server_thread_id[0]

    def character_set_name(self):
        return self.charset

    def get_host_info(self):
        return self.host_info

    def get_proto_info(self):
        return self.protocol_version

    def _get_server_information(self):
        """Parse the initial handshake (greeting) packet sent by the server:
        protocol version, server version string, thread id, auth salt,
        capabilities and charset."""
        sock = self.socket
        i = 0
        packet = MysqlPacket(sock)
        data = packet.get_all_data()

        if DEBUG: dump_packet(data)

        #packet_len = byte2int(data[i:i+1])
        #i += 4
        self.protocol_version = byte2int(data[i:i+1])

        i += 1
        server_end = data.find(int2byte(0), i)
        # TODO: is this the correct charset? should it be default_charset?
        self.server_version = data[i:server_end].decode(self.charset)

        i = server_end + 1
        # NOTE(review): '<h' is a *signed* 16-bit unpack — thread ids and the
        # capability mask below come back negative for values >= 0x8000;
        # presumably these should be '<H'. Verify before changing.
        self.server_thread_id = struct.unpack('<h', data[i:i+2])

        i += 4
        self.salt = data[i:i+8]

        i += 9
        if len(data) >= i + 1:
            i += 1

        self.server_capabilities = struct.unpack('<h', data[i:i+2])[0]

        i += 1
        self.server_language = byte2int(data[i:i+1])
        self.server_charset = charset_by_id(self.server_language).name

        i += 16
        if len(data) >= i+12-1:
            # MySQL 4.1+ sends the remaining 12 salt bytes after the filler
            rest_salt = data[i:i+12]
            self.salt += rest_salt

    def get_server_info(self):
        return self.server_version

    # DB-API 2.0 requires the exception classes to be accessible as
    # attributes of the connection; these alias the module-level classes.
    Warning = Warning
    Error = Error
    InterfaceError = InterfaceError
    DatabaseError = DatabaseError
    DataError = DataError
    OperationalError = OperationalError
    IntegrityError = IntegrityError
    InternalError = InternalError
    ProgrammingError = ProgrammingError
    NotSupportedError = NotSupportedError


# TODO: move OK and EOF packet parsing/logic into a proper subclass
#       of MysqlPacket like has been done with FieldDescriptorPacket.
class MySQLResult(object):
    """Holds one result set read from the connection: affected rows,
    insert id, column descriptions and the row data."""

    def __init__(self, connection):
        # weakref proxy avoids a reference cycle Connection <-> MySQLResult
        from weakref import proxy
        self.connection = proxy(connection)
        self.affected_rows = None
        self.insert_id = None
        self.server_status = 0
        self.warning_count = 0
        self.message = None
        self.field_count = 0
        self.description = None
        self.rows = None
        self.has_next = None

    def read(self):
        self.first_packet = self.connection.read_packet()

        # TODO: use classes for different packet types?
        if self.first_packet.is_ok_packet():
            self._read_ok_packet()
        else:
            self._read_result_packet()

    def _read_ok_packet(self):
        self.first_packet.advance(1)  # field_count (always '0')
        self.affected_rows = self.first_packet.read_length_coded_binary()
        self.insert_id = self.first_packet.read_length_coded_binary()
        self.server_status = struct.unpack('<H', self.first_packet.read(2))[0]
        self.warning_count = struct.unpack('<H', self.first_packet.read(2))[0]
        self.message = self.first_packet.read_all()

    def _read_result_packet(self):
        self.field_count = byte2int(self.first_packet.read(1))
        self._get_descriptions()
        self._read_rowdata_packet()

    # TODO: implement this as an iteratable so that it is more
    #       memory efficient and lower-latency to client...
    def _read_rowdata_packet(self):
        """Read a rowdata packet for each data row in the result set."""
        rows = []
        while True:
            packet = self.connection.read_packet()
            if packet.is_eof_packet():
                # NOTE(review): stored as the raw 2 bytes here, unlike the
                # struct-unpacked value in _read_ok_packet — confirm intended.
                self.warning_count = packet.read(2)
                server_status = struct.unpack('<h', packet.read(2))[0]
                self.has_next = (server_status
                                 & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS)
                break

            row = []
            for field in self.fields:
                # NOTE(review): 'converter' is only (re)assigned when the
                # type_code has a decoder; for an unknown type_code this
                # silently reuses the previous column's converter (or raises
                # NameError on the very first column) — verify against the
                # decoders table.
                if field.type_code in self.connection.decoders:
                    converter = self.connection.decoders[field.type_code]
                if DEBUG: print "DEBUG: field=%s, converter=%s" % (field, converter)
                data = packet.read_length_coded_string()
                converted = None
                if data != None:
                    converted = converter(self.connection, field, data)
                row.append(converted)

            rows.append(tuple(row))

        self.affected_rows = len(rows)
        self.rows = tuple(rows)
        if DEBUG: self.rows  # NOTE(review): no-op; presumably meant to dump rows

    def _get_descriptions(self):
        """Read a column descriptor packet for each column in the result."""
        self.fields = []
        description = []
        for i in xrange(self.field_count):
            field = self.connection.read_packet(FieldDescriptorPacket)
            self.fields.append(field)
            description.append(field.description())

        # descriptors are terminated by an EOF packet
        eof_packet = self.connection.read_packet()
        assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
        self.description = tuple(description)
# --- embedded file boundary (original marker text: "Python") ---
# MySQL client/server protocol command codes (COM_*): the first payload
# byte of every command packet sent by the client.
COM_SLEEP = 0x00
COM_QUIT = 0x01
COM_INIT_DB = 0x02
COM_QUERY = 0x03
COM_FIELD_LIST = 0x04
COM_CREATE_DB = 0x05
COM_DROP_DB = 0x06
COM_REFRESH = 0x07
COM_SHUTDOWN = 0x08
COM_STATISTICS = 0x09
COM_PROCESS_INFO = 0x0a
COM_CONNECT = 0x0b
COM_PROCESS_KILL = 0x0c
COM_DEBUG = 0x0d
COM_PING = 0x0e
COM_TIME = 0x0f
COM_DELAYED_INSERT = 0x10
COM_CHANGE_USER = 0x11
COM_BINLOG_DUMP = 0x12
COM_TABLE_DUMP = 0x13
COM_CONNECT_OUT = 0x14
COM_REGISTER_SLAVE = 0x15
# --- embedded file boundary (original marker text: "Python") ---
# MySQL column type codes, as reported in the field-descriptor packet's
# type_code byte (constants.FIELD_TYPE module).
DECIMAL = 0
TINY = 1
SHORT = 2
LONG = 3
FLOAT = 4
DOUBLE = 5
NULL = 6
TIMESTAMP = 7
LONGLONG = 8
INT24 = 9
DATE = 10
TIME = 11
DATETIME = 12
YEAR = 13
NEWDATE = 14
VARCHAR = 15
BIT = 16
NEWDECIMAL = 246
ENUM = 247
SET = 248
TINY_BLOB = 249
MEDIUM_BLOB = 250
LONG_BLOB = 251
BLOB = 252
VAR_STRING = 253
STRING = 254
GEOMETRY = 255

# aliases
CHAR = TINY
INTERVAL = ENUM
# --- embedded file boundary (original marker text: "Python") ---
# Column flag bits, as reported in the field-descriptor packet's
# 16-bit flags word (constants.FLAG module).
NOT_NULL = 1
PRI_KEY = 2
UNIQUE_KEY = 4
MULTIPLE_KEY = 8
BLOB = 16
UNSIGNED = 32
ZEROFILL = 64
BINARY = 128
ENUM = 256
AUTO_INCREMENT = 512
TIMESTAMP = 1024
SET = 2048
PART_KEY = 16384
GROUP = 32767
UNIQUE = 65536
# --- embedded file boundary (original marker text: "Python") ---
# MySQL server error codes (ER_* numbers from mysqld_error.h), used to map
# errno values from ERR packets to exceptions (constants.ER module).
ERROR_FIRST = 1000
HASHCHK = 1000
NISAMCHK = 1001
NO = 1002
YES = 1003
CANT_CREATE_FILE = 1004
CANT_CREATE_TABLE = 1005
CANT_CREATE_DB = 1006
DB_CREATE_EXISTS = 1007
DB_DROP_EXISTS = 1008
DB_DROP_DELETE = 1009
DB_DROP_RMDIR = 1010
CANT_DELETE_FILE = 1011
CANT_FIND_SYSTEM_REC = 1012
CANT_GET_STAT = 1013
CANT_GET_WD = 1014
CANT_LOCK = 1015
CANT_OPEN_FILE = 1016
FILE_NOT_FOUND = 1017
CANT_READ_DIR = 1018
CANT_SET_WD = 1019
CHECKREAD = 1020
DISK_FULL = 1021
DUP_KEY = 1022
ERROR_ON_CLOSE = 1023
ERROR_ON_READ = 1024
ERROR_ON_RENAME = 1025
ERROR_ON_WRITE = 1026
FILE_USED = 1027
FILSORT_ABORT = 1028
FORM_NOT_FOUND = 1029
GET_ERRNO = 1030
ILLEGAL_HA = 1031
KEY_NOT_FOUND = 1032
NOT_FORM_FILE = 1033
NOT_KEYFILE = 1034
OLD_KEYFILE = 1035
OPEN_AS_READONLY = 1036
OUTOFMEMORY = 1037
OUT_OF_SORTMEMORY = 1038
UNEXPECTED_EOF = 1039
CON_COUNT_ERROR = 1040
OUT_OF_RESOURCES = 1041
BAD_HOST_ERROR = 1042
HANDSHAKE_ERROR = 1043
DBACCESS_DENIED_ERROR = 1044
ACCESS_DENIED_ERROR = 1045
NO_DB_ERROR = 1046
UNKNOWN_COM_ERROR = 1047
BAD_NULL_ERROR = 1048
BAD_DB_ERROR = 1049
TABLE_EXISTS_ERROR = 1050
BAD_TABLE_ERROR = 1051
NON_UNIQ_ERROR = 1052
SERVER_SHUTDOWN = 1053
BAD_FIELD_ERROR = 1054
WRONG_FIELD_WITH_GROUP = 1055
WRONG_GROUP_FIELD = 1056
WRONG_SUM_SELECT = 1057
WRONG_VALUE_COUNT = 1058
TOO_LONG_IDENT = 1059
DUP_FIELDNAME = 1060
DUP_KEYNAME = 1061
DUP_ENTRY = 1062
WRONG_FIELD_SPEC = 1063
PARSE_ERROR = 1064
EMPTY_QUERY = 1065
NONUNIQ_TABLE = 1066
INVALID_DEFAULT = 1067
MULTIPLE_PRI_KEY = 1068
TOO_MANY_KEYS = 1069
TOO_MANY_KEY_PARTS = 1070
TOO_LONG_KEY = 1071
KEY_COLUMN_DOES_NOT_EXITS = 1072
BLOB_USED_AS_KEY = 1073
TOO_BIG_FIELDLENGTH = 1074
WRONG_AUTO_KEY = 1075
READY = 1076
NORMAL_SHUTDOWN = 1077
GOT_SIGNAL = 1078
SHUTDOWN_COMPLETE = 1079
FORCING_CLOSE = 1080
IPSOCK_ERROR = 1081
NO_SUCH_INDEX = 1082
WRONG_FIELD_TERMINATORS = 1083
BLOBS_AND_NO_TERMINATED = 1084
TEXTFILE_NOT_READABLE = 1085
FILE_EXISTS_ERROR = 1086
LOAD_INFO = 1087
ALTER_INFO = 1088
WRONG_SUB_KEY = 1089
CANT_REMOVE_ALL_FIELDS = 1090
CANT_DROP_FIELD_OR_KEY = 1091
INSERT_INFO = 1092
UPDATE_TABLE_USED = 1093
NO_SUCH_THREAD = 1094
KILL_DENIED_ERROR = 1095
NO_TABLES_USED = 1096
TOO_BIG_SET = 1097
NO_UNIQUE_LOGFILE = 1098
TABLE_NOT_LOCKED_FOR_WRITE = 1099
TABLE_NOT_LOCKED = 1100
BLOB_CANT_HAVE_DEFAULT = 1101
WRONG_DB_NAME = 1102
WRONG_TABLE_NAME = 1103
TOO_BIG_SELECT = 1104
UNKNOWN_ERROR = 1105
UNKNOWN_PROCEDURE = 1106
WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
WRONG_PARAMETERS_TO_PROCEDURE = 1108
UNKNOWN_TABLE = 1109
FIELD_SPECIFIED_TWICE = 1110
INVALID_GROUP_FUNC_USE = 1111
UNSUPPORTED_EXTENSION = 1112
TABLE_MUST_HAVE_COLUMNS = 1113
RECORD_FILE_FULL = 1114
UNKNOWN_CHARACTER_SET = 1115
TOO_MANY_TABLES = 1116
TOO_MANY_FIELDS = 1117
TOO_BIG_ROWSIZE = 1118
STACK_OVERRUN = 1119
WRONG_OUTER_JOIN = 1120
NULL_COLUMN_IN_INDEX = 1121
CANT_FIND_UDF = 1122
CANT_INITIALIZE_UDF = 1123
UDF_NO_PATHS = 1124
UDF_EXISTS = 1125
CANT_OPEN_LIBRARY = 1126
CANT_FIND_DL_ENTRY = 1127
FUNCTION_NOT_DEFINED = 1128
HOST_IS_BLOCKED = 1129
HOST_NOT_PRIVILEGED = 1130
PASSWORD_ANONYMOUS_USER = 1131
PASSWORD_NOT_ALLOWED = 1132
PASSWORD_NO_MATCH = 1133
UPDATE_INFO = 1134
CANT_CREATE_THREAD = 1135
WRONG_VALUE_COUNT_ON_ROW = 1136
CANT_REOPEN_TABLE = 1137
INVALID_USE_OF_NULL = 1138
REGEXP_ERROR = 1139
MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
NONEXISTING_GRANT = 1141
TABLEACCESS_DENIED_ERROR = 1142
COLUMNACCESS_DENIED_ERROR = 1143
ILLEGAL_GRANT_FOR_TABLE = 1144
GRANT_WRONG_HOST_OR_USER = 1145
NO_SUCH_TABLE = 1146
NONEXISTING_TABLE_GRANT = 1147
NOT_ALLOWED_COMMAND = 1148
SYNTAX_ERROR = 1149
DELAYED_CANT_CHANGE_LOCK = 1150
TOO_MANY_DELAYED_THREADS = 1151
ABORTING_CONNECTION = 1152
NET_PACKET_TOO_LARGE = 1153
NET_READ_ERROR_FROM_PIPE = 1154
NET_FCNTL_ERROR = 1155
NET_PACKETS_OUT_OF_ORDER = 1156
NET_UNCOMPRESS_ERROR = 1157
NET_READ_ERROR = 1158
NET_READ_INTERRUPTED = 1159
NET_ERROR_ON_WRITE = 1160
NET_WRITE_INTERRUPTED = 1161
TOO_LONG_STRING = 1162
TABLE_CANT_HANDLE_BLOB = 1163
TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
DELAYED_INSERT_TABLE_LOCKED = 1165
WRONG_COLUMN_NAME = 1166
WRONG_KEY_COLUMN = 1167
WRONG_MRG_TABLE = 1168
DUP_UNIQUE = 1169
BLOB_KEY_WITHOUT_LENGTH = 1170
PRIMARY_CANT_HAVE_NULL = 1171
TOO_MANY_ROWS = 1172
REQUIRES_PRIMARY_KEY = 1173
NO_RAID_COMPILED = 1174
UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
KEY_DOES_NOT_EXITS = 1176
CHECK_NO_SUCH_TABLE = 1177
CHECK_NOT_IMPLEMENTED = 1178
CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
ERROR_DURING_COMMIT = 1180
ERROR_DURING_ROLLBACK = 1181
ERROR_DURING_FLUSH_LOGS = 1182
ERROR_DURING_CHECKPOINT = 1183
NEW_ABORTING_CONNECTION = 1184
DUMP_NOT_IMPLEMENTED = 1185
FLUSH_MASTER_BINLOG_CLOSED = 1186
INDEX_REBUILD = 1187
MASTER = 1188
MASTER_NET_READ = 1189
MASTER_NET_WRITE = 1190
FT_MATCHING_KEY_NOT_FOUND = 1191
LOCK_OR_ACTIVE_TRANSACTION = 1192
UNKNOWN_SYSTEM_VARIABLE = 1193
CRASHED_ON_USAGE = 1194
CRASHED_ON_REPAIR = 1195
WARNING_NOT_COMPLETE_ROLLBACK = 1196
TRANS_CACHE_FULL = 1197
SLAVE_MUST_STOP = 1198
SLAVE_NOT_RUNNING = 1199
BAD_SLAVE = 1200
MASTER_INFO = 1201
SLAVE_THREAD = 1202
TOO_MANY_USER_CONNECTIONS = 1203
SET_CONSTANTS_ONLY = 1204
LOCK_WAIT_TIMEOUT = 1205
LOCK_TABLE_FULL = 1206
READ_ONLY_TRANSACTION = 1207
DROP_DB_WITH_READ_LOCK = 1208
CREATE_DB_WITH_READ_LOCK = 1209
WRONG_ARGUMENTS = 1210
NO_PERMISSION_TO_CREATE_USER = 1211
UNION_TABLES_IN_DIFFERENT_DIR = 1212
LOCK_DEADLOCK = 1213
TABLE_CANT_HANDLE_FT = 1214
CANNOT_ADD_FOREIGN = 1215
NO_REFERENCED_ROW = 1216
ROW_IS_REFERENCED = 1217
CONNECT_TO_MASTER = 1218
QUERY_ON_MASTER = 1219
ERROR_WHEN_EXECUTING_COMMAND = 1220
WRONG_USAGE = 1221
WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
CANT_UPDATE_WITH_READLOCK = 1223
MIXING_NOT_ALLOWED = 1224
DUP_ARGUMENT = 1225
USER_LIMIT_REACHED = 1226
SPECIFIC_ACCESS_DENIED_ERROR = 1227
LOCAL_VARIABLE = 1228
GLOBAL_VARIABLE = 1229
NO_DEFAULT = 1230
WRONG_VALUE_FOR_VAR = 1231
WRONG_TYPE_FOR_VAR = 1232
VAR_CANT_BE_READ = 1233
CANT_USE_OPTION_HERE = 1234
NOT_SUPPORTED_YET = 1235
MASTER_FATAL_ERROR_READING_BINLOG = 1236
SLAVE_IGNORED_TABLE = 1237
INCORRECT_GLOBAL_LOCAL_VAR = 1238
WRONG_FK_DEF = 1239
KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
OPERAND_COLUMNS = 1241
SUBQUERY_NO_1_ROW = 1242
UNKNOWN_STMT_HANDLER = 1243
CORRUPT_HELP_DB = 1244
CYCLIC_REFERENCE = 1245
AUTO_CONVERT = 1246
ILLEGAL_REFERENCE = 1247
DERIVED_MUST_HAVE_ALIAS = 1248
SELECT_REDUCED = 1249
TABLENAME_NOT_ALLOWED_HERE = 1250
NOT_SUPPORTED_AUTH_MODE = 1251
SPATIAL_CANT_HAVE_NULL = 1252
COLLATION_CHARSET_MISMATCH = 1253
SLAVE_WAS_RUNNING = 1254
SLAVE_WAS_NOT_RUNNING = 1255
TOO_BIG_FOR_UNCOMPRESS = 1256
ZLIB_Z_MEM_ERROR = 1257
ZLIB_Z_BUF_ERROR = 1258
ZLIB_Z_DATA_ERROR = 1259
CUT_VALUE_GROUP_CONCAT = 1260
WARN_TOO_FEW_RECORDS = 1261
WARN_TOO_MANY_RECORDS = 1262
WARN_NULL_TO_NOTNULL = 1263
WARN_DATA_OUT_OF_RANGE = 1264
WARN_DATA_TRUNCATED = 1265
WARN_USING_OTHER_HANDLER = 1266
CANT_AGGREGATE_2COLLATIONS = 1267
DROP_USER = 1268
REVOKE_GRANTS = 1269
CANT_AGGREGATE_3COLLATIONS = 1270
CANT_AGGREGATE_NCOLLATIONS = 1271
VARIABLE_IS_NOT_STRUCT = 1272
UNKNOWN_COLLATION = 1273
SLAVE_IGNORED_SSL_PARAMS = 1274
SERVER_IS_IN_SECURE_AUTH_MODE = 1275
WARN_FIELD_RESOLVED = 1276
BAD_SLAVE_UNTIL_COND = 1277
MISSING_SKIP_SLAVE = 1278
UNTIL_COND_IGNORED = 1279
WRONG_NAME_FOR_INDEX = 1280
WRONG_NAME_FOR_CATALOG = 1281
WARN_QC_RESIZE = 1282
BAD_FT_COLUMN = 1283
UNKNOWN_KEY_CACHE = 1284
WARN_HOSTNAME_WONT_WORK = 1285
UNKNOWN_STORAGE_ENGINE = 1286
WARN_DEPRECATED_SYNTAX = 1287
NON_UPDATABLE_TABLE = 1288
FEATURE_DISABLED = 1289
OPTION_PREVENTS_STATEMENT = 1290
DUPLICATED_VALUE_IN_TYPE = 1291
TRUNCATED_WRONG_VALUE = 1292
TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
INVALID_ON_UPDATE = 1294
UNSUPPORTED_PS = 1295
GET_ERRMSG = 1296
GET_TEMPORARY_ERRMSG = 1297
UNKNOWN_TIME_ZONE = 1298
WARN_INVALID_TIMESTAMP = 1299
INVALID_CHARACTER_STRING = 1300
WARN_ALLOWED_PACKET_OVERFLOWED = 1301
CONFLICTING_DECLARATIONS = 1302
SP_NO_RECURSIVE_CREATE = 1303
SP_ALREADY_EXISTS = 1304
SP_DOES_NOT_EXIST = 1305
SP_DROP_FAILED = 1306
SP_STORE_FAILED = 1307
SP_LILABEL_MISMATCH = 1308
SP_LABEL_REDEFINE = 1309
SP_LABEL_MISMATCH = 1310
SP_UNINIT_VAR = 1311
SP_BADSELECT = 1312
SP_BADRETURN = 1313
SP_BADSTATEMENT = 1314
UPDATE_LOG_DEPRECATED_IGNORED = 1315
UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
QUERY_INTERRUPTED = 1317
SP_WRONG_NO_OF_ARGS = 1318
SP_COND_MISMATCH = 1319
SP_NORETURN = 1320
SP_NORETURNEND = 1321
SP_BAD_CURSOR_QUERY = 1322
SP_BAD_CURSOR_SELECT = 1323
SP_CURSOR_MISMATCH = 1324
SP_CURSOR_ALREADY_OPEN = 1325
SP_CURSOR_NOT_OPEN = 1326
SP_UNDECLARED_VAR = 1327
SP_WRONG_NO_OF_FETCH_ARGS = 1328
SP_FETCH_NO_DATA = 1329
SP_DUP_PARAM = 1330
SP_DUP_VAR = 1331
SP_DUP_COND = 1332
SP_DUP_CURS = 1333
SP_CANT_ALTER = 1334
SP_SUBSELECT_NYI = 1335
STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
SP_VARCOND_AFTER_CURSHNDLR = 1337
SP_CURSOR_AFTER_HANDLER = 1338
SP_CASE_NOT_FOUND = 1339
FPARSER_TOO_BIG_FILE = 1340
FPARSER_BAD_HEADER = 1341
FPARSER_EOF_IN_COMMENT = 1342
FPARSER_ERROR_IN_PARAMETER = 1343
FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
VIEW_NO_EXPLAIN = 1345
FRM_UNKNOWN_TYPE = 1346
WRONG_OBJECT = 1347
NONUPDATEABLE_COLUMN = 1348
VIEW_SELECT_DERIVED = 1349
VIEW_SELECT_CLAUSE = 1350
VIEW_SELECT_VARIABLE = 1351
VIEW_SELECT_TMPTABLE = 1352
VIEW_WRONG_LIST = 1353
WARN_VIEW_MERGE = 1354
WARN_VIEW_WITHOUT_KEY = 1355
VIEW_INVALID = 1356
SP_NO_DROP_SP = 1357
SP_GOTO_IN_HNDLR = 1358
TRG_ALREADY_EXISTS = 1359
TRG_DOES_NOT_EXIST = 1360
TRG_ON_VIEW_OR_TEMP_TABLE = 1361
TRG_CANT_CHANGE_ROW = 1362
TRG_NO_SUCH_ROW_IN_TRG = 1363
NO_DEFAULT_FOR_FIELD = 1364
DIVISION_BY_ZERO = 1365
TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
ILLEGAL_VALUE_FOR_TYPE = 1367
VIEW_NONUPD_CHECK = 1368
VIEW_CHECK_FAILED = 1369
PROCACCESS_DENIED_ERROR = 1370
RELAY_LOG_FAIL = 1371
PASSWD_LENGTH = 1372
UNKNOWN_TARGET_BINLOG = 1373
IO_ERR_LOG_INDEX_READ = 1374
BINLOG_PURGE_PROHIBITED = 1375
FSEEK_FAIL = 1376
BINLOG_PURGE_FATAL_ERR = 1377
LOG_IN_USE = 1378
LOG_PURGE_UNKNOWN_ERR = 1379
RELAY_LOG_INIT = 1380
NO_BINARY_LOGGING = 1381
RESERVED_SYNTAX = 1382
WSAS_FAILED = 1383
DIFF_GROUPS_PROC = 1384
NO_GROUP_FOR_PROC = 1385
ORDER_WITH_PROC = 1386
LOGGING_PROHIBIT_CHANGING_OF = 1387
NO_FILE_MAPPING = 1388
WRONG_MAGIC = 1389
PS_MANY_PARAM = 1390
KEY_PART_0 = 1391
VIEW_CHECKSUM = 1392
VIEW_MULTIUPDATE = 1393
VIEW_NO_INSERT_FIELD_LIST = 1394
VIEW_DELETE_MERGE_VIEW = 1395
CANNOT_USER = 1396
XAER_NOTA = 1397
XAER_INVAL = 1398
XAER_RMFAIL = 1399
XAER_OUTSIDE = 1400
XAER_RMERR = 1401
XA_RBROLLBACK = 1402
NONEXISTING_PROC_GRANT = 1403
PROC_AUTO_GRANT_FAIL = 1404
PROC_AUTO_REVOKE_FAIL = 1405
DATA_TOO_LONG = 1406
SP_BAD_SQLSTATE = 1407
STARTUP = 1408
LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
CANT_CREATE_USER_WITH_GRANT = 1410
WRONG_VALUE_FOR_TYPE = 1411
TABLE_DEF_CHANGED = 1412
SP_DUP_HANDLER = 1413
SP_NOT_VAR_ARG = 1414
SP_NO_RETSET = 1415
CANT_CREATE_GEOMETRY_OBJECT = 1416
FAILED_ROUTINE_BREAK_BINLOG = 1417
BINLOG_UNSAFE_ROUTINE = 1418
BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
EXEC_STMT_WITH_OPEN_CURSOR = 1420
STMT_HAS_NO_OPEN_CURSOR = 1421
COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
NO_DEFAULT_FOR_VIEW_FIELD = 1423
SP_NO_RECURSION = 1424
TOO_BIG_SCALE = 1425
TOO_BIG_PRECISION = 1426
M_BIGGER_THAN_D = 1427
WRONG_LOCK_OF_SYSTEM_TABLE = 1428
CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
QUERY_ON_FOREIGN_DATA_SOURCE = 1430
FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
FOREIGN_DATA_STRING_INVALID = 1433
CANT_CREATE_FEDERATED_TABLE = 1434
TRG_IN_WRONG_SCHEMA = 1435
STACK_OVERRUN_NEED_MORE = 1436
TOO_LONG_BODY = 1437
WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
TOO_BIG_DISPLAYWIDTH = 1439
XAER_DUPID = 1440
DATETIME_FUNCTION_OVERFLOW = 1441
CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
VIEW_PREVENT_UPDATE = 1443
PS_NO_RECURSION = 1444
SP_CANT_SET_AUTOCOMMIT = 1445
MALFORMED_DEFINER = 1446
VIEW_FRM_NO_USER = 1447
VIEW_OTHER_USER = 1448
NO_SUCH_USER = 1449
FORBID_SCHEMA_CHANGE = 1450
ROW_IS_REFERENCED_2 = 1451
NO_REFERENCED_ROW_2 = 1452
SP_BAD_VAR_SHADOW = 1453
TRG_NO_DEFINER = 1454
OLD_FILE_FORMAT = 1455
SP_RECURSION_LIMIT = 1456
SP_PROC_TABLE_CORRUPT = 1457
SP_WRONG_NAME = 1458
TABLE_NEEDS_UPGRADE = 1459
SP_NO_AGGREGATE = 1460
MAX_PREPARED_STMT_COUNT_REACHED = 1461
VIEW_RECURSIVE = 1462
NON_GROUPING_FIELD_USED = 1463
TABLE_CANT_HANDLE_SPKEYS = 1464
NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
USERNAME = 1466
HOSTNAME = 1467
WRONG_STRING_LENGTH = 1468
ERROR_LAST = 1468
Python
# SERVER_STATUS_* flags: individual bits of the server-status bitmask.
# Each constant is a distinct power of two so values can be OR-ed together
# and tested independently.  Names follow the MySQL client/server protocol's
# SERVER_STATUS_* symbols.  NOTE(review): no constant is defined here for
# bit value 4 -- confirm against the protocol headers whether that bit is
# intentionally omitted.
SERVER_STATUS_IN_TRANS = 1
SERVER_STATUS_AUTOCOMMIT = 2
SERVER_MORE_RESULTS_EXISTS = 8
SERVER_QUERY_NO_GOOD_INDEX_USED = 16
SERVER_QUERY_NO_INDEX_USED = 32
SERVER_STATUS_CURSOR_EXISTS = 64
SERVER_STATUS_LAST_ROW_SENT = 128
SERVER_STATUS_DB_DROPPED = 256
SERVER_STATUS_NO_BACKSLASH_ESCAPES = 512
SERVER_STATUS_METADATA_CHANGED = 1024
Python
# Client capability flags (the CLIENT_* bits of the MySQL client/server
# protocol): each constant is a single bit that a client sets during the
# connection handshake to negotiate a protocol feature.
LONG_PASSWORD = 1
FOUND_ROWS = 1 << 1
LONG_FLAG = 1 << 2
CONNECT_WITH_DB = 1 << 3
NO_SCHEMA = 1 << 4
COMPRESS = 1 << 5
ODBC = 1 << 6
LOCAL_FILES = 1 << 7
IGNORE_SPACE = 1 << 8
PROTOCOL_41 = 1 << 9
INTERACTIVE = 1 << 10
SSL = 1 << 11
IGNORE_SIGPIPE = 1 << 12
TRANSACTIONS = 1 << 13
# NOTE(review): no flag is defined here for 1 << 14 -- presumably a
# reserved bit; confirm against the protocol's CLIENT_* flag list.
SECURE_CONNECTION = 1 << 15
MULTI_STATEMENTS = 1 << 16
MULTI_RESULTS = 1 << 17

# Default capability set advertised by this client implementation.
CAPABILITIES = LONG_PASSWORD|LONG_FLAG|TRANSACTIONS| \
               PROTOCOL_41|SECURE_CONNECTION
Python
#!/usr/bin/env python """ client module for memcached (memory cache daemon) Overview ======== See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached. Usage summary ============= This should give you a feel for how this module operates:: import memcache mc = memcache.Client(['127.0.0.1:11211'], debug=0) mc.set("some_key", "Some value") value = mc.get("some_key") mc.set("another_key", 3) mc.delete("another_key") mc.set("key", "1") # note that the key used for incr/decr must be a string. mc.incr("key") mc.decr("key") The standard way to use memcache with a database is like this:: key = derive_key(obj) obj = mc.get(key) if not obj: obj = backend_api.get(...) mc.set(obj) # we now have obj, and future passes through this code # will use the object from the cache. Detailed Documentation ====================== More detailed documentation is available in the L{Client} class. """ import sys import socket import time import os import re import types try: import cPickle as pickle except ImportError: import pickle try: from zlib import compress, decompress _supports_compress = True except ImportError: _supports_compress = False # quickly define a decompress just in case we recv compressed data. def decompress(val): raise _Error("received compressed data but I don't support compession (import error)") try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from binascii import crc32 # zlib version is not cross-platform serverHashFunction = crc32 __author__ = "Evan Martin <martine@danga.com>" __version__ = "1.44" __copyright__ = "Copyright (C) 2003 Danga Interactive" __license__ = "Python" SERVER_MAX_KEY_LENGTH = 250 # Storing values larger than 1MB requires recompiling memcached. If you do, # this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N" # after importing this module. 
SERVER_MAX_VALUE_LENGTH = 1024*1024 class _Error(Exception): pass try: # Only exists in Python 2.4+ from threading import local except ImportError: # TODO: add the pure-python local implementation class local(object): pass class Client(local): """ Object representing a pool of memcache servers. See L{memcache} for an overview. In all cases where a key is used, the key can be either: 1. A simple hashable type (string, integer, etc.). 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog @group Insertion: set, add, replace, set_multi @group Retrieval: get, get_multi @group Integers: incr, decr @group Removal: delete, delete_multi @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\ set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi """ _FLAG_PICKLE = 1<<0 _FLAG_INTEGER = 1<<1 _FLAG_LONG = 1<<2 _FLAG_COMPRESSED = 1<<3 _SERVER_RETRIES = 10 # how many times to try finding a free server. # exceptions for Client class MemcachedKeyError(Exception): pass class MemcachedKeyLengthError(MemcachedKeyError): pass class MemcachedKeyCharacterError(MemcachedKeyError): pass class MemcachedKeyNoneError(MemcachedKeyError): pass class MemcachedKeyTypeError(MemcachedKeyError): pass class MemcachedStringEncodingError(Exception): pass def __init__(self, servers, debug=0, pickleProtocol=0, pickler=pickle.Pickler, unpickler=pickle.Unpickler, pload=None, pid=None): """ Create a new Client object with the given list of servers. @param servers: C{servers} is passed to L{set_servers}. @param debug: whether to display error messages when a server can't be contacted. @param pickleProtocol: number to mandate protocol used by (c)Pickle. 
@param pickler: optional override of default Pickler to allow subclassing. @param unpickler: optional override of default Unpickler to allow subclassing. @param pload: optional persistent_load function to call on pickle loading. Useful for cPickle since subclassing isn't allowed. @param pid: optional persistent_id function to call on pickle storing. Useful for cPickle since subclassing isn't allowed. """ local.__init__(self) self.set_servers(servers) self.debug = debug self.stats = {} # Allow users to modify pickling/unpickling behavior self.pickleProtocol = pickleProtocol self.pickler = pickler self.unpickler = unpickler self.persistent_load = pload self.persistent_id = pid # figure out the pickler style file = StringIO() try: pickler = self.pickler(file, protocol = self.pickleProtocol) self.picklerIsKeyword = True except TypeError: self.picklerIsKeyword = False def set_servers(self, servers): """ Set the pool of servers used by this client. @param servers: an array of servers. Servers can be passed in two forms: 1. Strings of the form C{"host:port"}, which implies a default weight of 1. 2. Tuples of the form C{("host:port", weight)}, where C{weight} is an integer weight value. """ self.servers = [_Host(s, self.debuglog) for s in servers] self._init_buckets() def get_stats(self): '''Get statistics from each of the servers. @return: A list of tuples ( server_identifier, stats_dictionary ). The dictionary contains a number of name/value pairs specifying the name of the status field and the string value associated with it. The values are not converted from strings. 
''' data = [] for s in self.servers: if not s.connect(): continue if s.family == socket.AF_INET: name = '%s:%s (%s)' % ( s.ip, s.port, s.weight ) else: name = 'unix:%s (%s)' % ( s.address, s.weight ) s.send_cmd('stats') serverData = {} data.append(( name, serverData )) readline = s.readline while 1: line = readline() if not line or line.strip() == 'END': break stats = line.split(' ', 2) serverData[stats[1]] = stats[2] return(data) def get_slabs(self): data = [] for s in self.servers: if not s.connect(): continue if s.family == socket.AF_INET: name = '%s:%s (%s)' % ( s.ip, s.port, s.weight ) else: name = 'unix:%s (%s)' % ( s.address, s.weight ) serverData = {} data.append(( name, serverData )) s.send_cmd('stats items') readline = s.readline while 1: line = readline() if not line or line.strip() == 'END': break item = line.split(' ', 2) #0 = STAT, 1 = ITEM, 2 = Value slab = item[1].split(':', 2) #0 = items, 1 = Slab #, 2 = Name if not serverData.has_key(slab[1]): serverData[slab[1]] = {} serverData[slab[1]][slab[2]] = item[2] return data def flush_all(self): 'Expire all data currently in the memcache servers.' for s in self.servers: if not s.connect(): continue s.send_cmd('flush_all') s.expect("OK") def debuglog(self, str): if self.debug: sys.stderr.write("MemCached: %s\n" % str) def _statlog(self, func): if not self.stats.has_key(func): self.stats[func] = 1 else: self.stats[func] += 1 def forget_dead_hosts(self): """ Reset every host in the pool to an "alive" state. 
""" for s in self.servers: s.deaduntil = 0 def _init_buckets(self): self.buckets = [] for server in self.servers: for i in range(server.weight): self.buckets.append(server) def _get_server(self, key): if type(key) == types.TupleType: serverhash, key = key else: serverhash = serverHashFunction(key) for i in range(Client._SERVER_RETRIES): server = self.buckets[serverhash % len(self.buckets)] if server.connect(): #print "(using server %s)" % server, return server, key serverhash = serverHashFunction(str(serverhash) + str(i)) return None, None def disconnect_all(self): for s in self.servers: s.close_socket() def delete_multi(self, keys, time=0, key_prefix=''): ''' Delete multiple keys in the memcache doing just one query. >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}) >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'} 1 >>> mc.delete_multi(['key1', 'key2']) 1 >>> mc.get_multi(['key1', 'key2']) == {} 1 This method is recommended over iterated regular L{delete}s as it reduces total latency, since your app doesn't have to wait for each round-trip of L{delete} before sending the next one. @param keys: An iterable of keys to clear @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. @param key_prefix: Optional string to prepend to each key when sending to memcache. See docs for L{get_multi} and L{set_multi}. @return: 1 if no failure in communication with any memcacheds. 
@rtype: int ''' self._statlog('delete_multi') server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix) # send out all requests on each server before reading anything dead_servers = [] rc = 1 for server in server_keys.iterkeys(): bigcmd = [] write = bigcmd.append if time != None: for key in server_keys[server]: # These are mangled keys write("delete %s %d\r\n" % (key, time)) else: for key in server_keys[server]: # These are mangled keys write("delete %s\r\n" % key) try: server.send_cmds(''.join(bigcmd)) except socket.error, msg: rc = 0 if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) dead_servers.append(server) # if any servers died on the way, don't expect them to respond. for server in dead_servers: del server_keys[server] notstored = [] # original keys. for server, keys in server_keys.iteritems(): try: for key in keys: server.expect("DELETED") except socket.error, msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) rc = 0 return rc def delete(self, key, time=0): '''Deletes a key from the memcache. @return: Nonzero on success. @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. @rtype: int ''' check_key(key) server, key = self._get_server(key) if not server: return 0 self._statlog('delete') if time != None: cmd = "delete %s %d" % (key, time) else: cmd = "delete %s" % key try: server.send_cmd(cmd) server.expect("DELETED") except socket.error, msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) return 0 return 1 def incr(self, key, delta=1): """ Sends a command to the server to atomically increment the value for C{key} by C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't exist on server, otherwise it returns the new value after incrementing. Note that the value for C{key} must already exist in the memcache, and it must be the string representation of an integer. 
>>> mc.set("counter", "20") # returns 1, indicating success 1 >>> mc.incr("counter") 21 >>> mc.incr("counter") 22 Overflow on server is not checked. Be aware of values approaching 2**32. See L{decr}. @param delta: Integer amount to increment by (should be zero or greater). @return: New value after incrementing. @rtype: int """ return self._incrdecr("incr", key, delta) def decr(self, key, delta=1): """ Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and new values are capped at 0. If server value is 1, a decrement of 2 returns 0, not -1. @param delta: Integer amount to decrement by (should be zero or greater). @return: New value after decrementing. @rtype: int """ return self._incrdecr("decr", key, delta) def _incrdecr(self, cmd, key, delta): check_key(key) server, key = self._get_server(key) if not server: return 0 self._statlog(cmd) cmd = "%s %s %d" % (cmd, key, delta) try: server.send_cmd(cmd) line = server.readline() return int(line) except ValueError: return None except socket.error, msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) return None def add(self, key, val, time = 0, min_compress_len = 0): ''' Add new key with value. Like L{set}, but only stores in memcache if the key doesn't already exist. @return: Nonzero on success. @rtype: int ''' return self._set("add", key, val, time, min_compress_len) def append(self, key, val, time=0, min_compress_len=0): '''Append the value to the end of the existing key's value. Only stores in memcache if key already exists. Also see L{prepend}. @return: Nonzero on success. @rtype: int ''' return self._set("append", key, val, time, min_compress_len) def prepend(self, key, val, time=0, min_compress_len=0): '''Prepend the value to the beginning of the existing key's value. Only stores in memcache if key already exists. Also see L{append}. @return: Nonzero on success. 
@rtype: int ''' return self._set("prepend", key, val, time, min_compress_len) def replace(self, key, val, time=0, min_compress_len=0): '''Replace existing key with value. Like L{set}, but only stores in memcache if the key already exists. The opposite of L{add}. @return: Nonzero on success. @rtype: int ''' return self._set("replace", key, val, time, min_compress_len) def set(self, key, val, time=0, min_compress_len=0): '''Unconditionally sets a key to a given value in the memcache. The C{key} can optionally be an tuple, with the first element being the server hash value and the second being the key. If you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @return: Nonzero on success. @rtype: int @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param min_compress_len: The threshold length to kick in auto-compression of the value using the zlib.compress() routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yeilds a larger string than the input, then it is discarded. For backwards compatability, this parameter defaults to 0, indicating don't ever try to compress. ''' return self._set("set", key, val, time, min_compress_len) def _map_and_prefix_keys(self, key_iterable, key_prefix): """Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of prefixed key -> original key. """ # Check it just once ... 
key_extra_len=len(key_prefix) if key_prefix: check_key(key_prefix) # server (_Host) -> list of unprefixed server keys in mapping server_keys = {} prefixed_to_orig_key = {} # build up a list for each server of all the keys we want. for orig_key in key_iterable: if type(orig_key) is types.TupleType: # Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on. # Ensure call to _get_server gets a Tuple as well. str_orig_key = str(orig_key[1]) server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key. else: str_orig_key = str(orig_key) # set_multi supports int / long keys. server, key = self._get_server(key_prefix + str_orig_key) # Now check to make sure key length is proper ... check_key(str_orig_key, key_extra_len=key_extra_len) if not server: continue if not server_keys.has_key(server): server_keys[server] = [] server_keys[server].append(key) prefixed_to_orig_key[key] = orig_key return (server_keys, prefixed_to_orig_key) def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0): ''' Sets multiple keys in the memcache doing just one query. >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}) >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'} 1 This method is recommended over regular L{set} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{set} before sending the next one. @param mapping: A dict of key/value pairs to set. @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param key_prefix: Optional string to prepend to each key when sending to memcache. 
Allows you to efficiently stuff these keys into a pseudo-namespace in memcache: >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_') >>> len(notset_keys) == 0 True >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'} True Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache. In this case, the return result would be the list of notset original keys, prefix not applied. @param min_compress_len: The threshold length to kick in auto-compression of the value using the zlib.compress() routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yeilds a larger string than the input, then it is discarded. For backwards compatability, this parameter defaults to 0, indicating don't ever try to compress. @return: List of keys which failed to be stored [ memcache out of memory, etc. ]. @rtype: list ''' self._statlog('set_multi') server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix) # send out all requests on each server before reading anything dead_servers = [] for server in server_keys.iterkeys(): bigcmd = [] write = bigcmd.append try: for key in server_keys[server]: # These are mangled keys store_info = self._val_to_store_info(mapping[prefixed_to_orig_key[key]], min_compress_len) write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0], time, store_info[1], store_info[2])) server.send_cmds(''.join(bigcmd)) except socket.error, msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) dead_servers.append(server) # if any servers died on the way, don't expect them to respond. 
for server in dead_servers: del server_keys[server] # short-circuit if there are no servers, just return all keys if not server_keys: return(mapping.keys()) notstored = [] # original keys. for server, keys in server_keys.iteritems(): try: for key in keys: line = server.readline() if line == 'STORED': continue else: notstored.append(prefixed_to_orig_key[key]) #un-mangle. except (_Error, socket.error), msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) return notstored def _val_to_store_info(self, val, min_compress_len): """ Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself. """ flags = 0 if isinstance(val, str): pass elif isinstance(val, int): flags |= Client._FLAG_INTEGER val = "%d" % val # force no attempt to compress this silly string. min_compress_len = 0 elif isinstance(val, long): flags |= Client._FLAG_LONG val = "%d" % val # force no attempt to compress this silly string. min_compress_len = 0 else: flags |= Client._FLAG_PICKLE file = StringIO() if self.picklerIsKeyword: pickler = self.pickler(file, protocol = self.pickleProtocol) else: pickler = self.pickler(file, self.pickleProtocol) if self.persistent_id: pickler.persistent_id = self.persistent_id pickler.dump(val) val = file.getvalue() lv = len(val) # We should try to compress if min_compress_len > 0 and we could # import zlib and this string is longer than our min threshold. if min_compress_len and _supports_compress and lv > min_compress_len: comp_val = compress(val) # Only retain the result if the compression result is smaller # than the original. 
if len(comp_val) < lv: flags |= Client._FLAG_COMPRESSED val = comp_val # silently do not store if value length exceeds maximum if len(val) >= SERVER_MAX_VALUE_LENGTH: return(0) return (flags, len(val), val) def _set(self, cmd, key, val, time, min_compress_len = 0): check_key(key) server, key = self._get_server(key) if not server: return 0 self._statlog(cmd) store_info = self._val_to_store_info(val, min_compress_len) if not store_info: return(0) fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, store_info[0], time, store_info[1], store_info[2]) try: server.send_cmd(fullcmd) return(server.expect("STORED") == "STORED") except socket.error, msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) return 0 def get(self, key): '''Retrieves a key from the memcache. @return: The value or None. ''' check_key(key) server, key = self._get_server(key) if not server: return None self._statlog('get') try: server.send_cmd("get %s" % key) rkey, flags, rlen, = self._expectvalue(server) if not rkey: return None value = self._recv_value(server, flags, rlen) server.expect("END") except (_Error, socket.error), msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) return None return value def get_multi(self, keys, key_prefix=''): ''' Retrieves multiple keys from the memcache doing just one query. >>> success = mc.set("foo", "bar") >>> success = mc.set("baz", 42) >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42} 1 >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == [] 1 This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'. >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} 1 get_mult [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields. They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix. 
In this mode, the key_prefix could be a table name, and the key itself a db primary key number. >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == [] 1 >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'} 1 This method is recommended over regular L{get} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{get} before sending the next one. See also L{set_multi}. @param keys: An array of keys. @param key_prefix: A string to prefix each key when we communicate with memcache. Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix. @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the retured dictionary will not have it present. ''' self._statlog('get_multi') server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix) # send out all requests on each server before reading anything dead_servers = [] for server in server_keys.iterkeys(): try: server.send_cmd("get %s" % " ".join(server_keys[server])) except socket.error, msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) dead_servers.append(server) # if any servers died on the way, don't expect them to respond. for server in dead_servers: del server_keys[server] retvals = {} for server in server_keys.iterkeys(): try: line = server.readline() while line and line != 'END': rkey, flags, rlen = self._expectvalue(server, line) # Bo Yang reports that this can sometimes be None if rkey is not None: val = self._recv_value(server, flags, rlen) retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key. 
line = server.readline() except (_Error, socket.error), msg: if type(msg) is types.TupleType: msg = msg[1] server.mark_dead(msg) return retvals def _expectvalue(self, server, line=None): if not line: line = server.readline() if line[:5] == 'VALUE': resp, rkey, flags, len = line.split() flags = int(flags) rlen = int(len) return (rkey, flags, rlen) else: return (None, None, None) def _recv_value(self, server, flags, rlen): rlen += 2 # include \r\n buf = server.recv(rlen) if len(buf) != rlen: raise _Error("received %d bytes when expecting %d" % (len(buf), rlen)) if len(buf) == rlen: buf = buf[:-2] # strip \r\n if flags & Client._FLAG_COMPRESSED: buf = decompress(buf) if flags == 0 or flags == Client._FLAG_COMPRESSED: # Either a bare string or a compressed string now decompressed... val = buf elif flags & Client._FLAG_INTEGER: val = int(buf) elif flags & Client._FLAG_LONG: val = long(buf) elif flags & Client._FLAG_PICKLE: try: file = StringIO(buf) unpickler = self.unpickler(file) if self.persistent_load: unpickler.persistent_load = self.persistent_load val = unpickler.load() except Exception, e: self.debuglog('Pickle error: %s\n' % e) val = None else: self.debuglog("unknown flags on get: %x\n" % flags) return val class _Host: _DEAD_RETRY = 30 # number of seconds before retrying a dead server. _SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout. 
def __init__(self, host, debugfunc=None): if isinstance(host, types.TupleType): host, self.weight = host else: self.weight = 1 # parse the connection string m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host) if not m: m = re.match(r'^(?P<proto>inet):' r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host) if not m: m = re.match(r'^(?P<host>[^:]+):(?P<port>[0-9]+)$', host) if not m: raise ValueError('Unable to parse connection string: "%s"' % host) hostData = m.groupdict() if hostData.get('proto') == 'unix': self.family = socket.AF_UNIX self.address = hostData['path'] else: self.family = socket.AF_INET self.ip = hostData['host'] self.port = int(hostData.get('port', 11211)) self.address = ( self.ip, self.port ) if not debugfunc: debugfunc = lambda x: x self.debuglog = debugfunc self.deaduntil = 0 self.socket = None self.buffer = '' def _check_dead(self): if self.deaduntil and self.deaduntil > time.time(): return 1 self.deaduntil = 0 return 0 def connect(self): if self._get_socket(): return 1 return 0 def mark_dead(self, reason): self.debuglog("MemCache: %s: %s. Marking dead." 
% (self, reason)) self.deaduntil = time.time() + _Host._DEAD_RETRY self.close_socket() def _get_socket(self): if self._check_dead(): return None if self.socket: return self.socket s = socket.socket(self.family, socket.SOCK_STREAM) if hasattr(s, 'settimeout'): s.settimeout(self._SOCKET_TIMEOUT) try: s.connect(self.address) except socket.timeout, msg: self.mark_dead("connect: %s" % msg) return None except socket.error, msg: if type(msg) is types.TupleType: msg = msg[1] self.mark_dead("connect: %s" % msg[1]) return None self.socket = s self.buffer = '' return s def close_socket(self): if self.socket: self.socket.close() self.socket = None def send_cmd(self, cmd): self.socket.sendall(cmd + '\r\n') def send_cmds(self, cmds): """ cmds already has trailing \r\n's applied """ self.socket.sendall(cmds) def readline(self): buf = self.buffer recv = self.socket.recv while True: index = buf.find('\r\n') if index >= 0: break data = recv(4096) if not data: self.mark_dead('Connection closed while reading from %s' % repr(self)) break buf += data if index >= 0: self.buffer = buf[index+2:] buf = buf[:index] else: self.buffer = '' return buf def expect(self, text): line = self.readline() if line != text: self.debuglog("while expecting '%s', got unexpected response '%s'" % (text, line)) return line def recv(self, rlen): self_socket_recv = self.socket.recv buf = self.buffer while len(buf) < rlen: foo = self_socket_recv(4096) buf += foo if len(foo) == 0: raise _Error, ( 'Read %d bytes, expecting %d, ' 'read returned 0 length bytes' % ( len(buf), rlen )) self.buffer = buf[rlen:] return buf[:rlen] def __str__(self): d = '' if self.deaduntil: d = " (dead until %d)" % self.deaduntil if self.family == socket.AF_INET: return "inet:%s:%d%s" % (self.address[0], self.address[1], d) else: return "unix:%s%s" % (self.address, d) def check_key(key, key_extra_len=0): """Checks sanity of key. Fails if: Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength). 
Contains control characters (Raises MemcachedKeyCharacterError). Is not a string (Raises MemcachedStringEncodingError) Is an unicode string (Raises MemcachedStringEncodingError) Is not a string (Raises MemcachedKeyError) Is None (Raises MemcachedKeyError) """ if type(key) == types.TupleType: key = key[1] if not key: raise Client.MemcachedKeyNoneError, ("Key is None") if isinstance(key, unicode): raise Client.MemcachedStringEncodingError, ("Keys must be str()'s, not " "unicode. Convert your unicode strings using " "mystring.encode(charset)!") if not isinstance(key, str): raise Client.MemcachedKeyTypeError, ("Key must be str()'s") if isinstance(key, basestring): if len(key) + key_extra_len > SERVER_MAX_KEY_LENGTH: raise Client.MemcachedKeyLengthError, ("Key length is > %s" % SERVER_MAX_KEY_LENGTH) for char in key: if ord(char) < 32 or ord(char) == 127: raise Client.MemcachedKeyCharacterError, "Control characters not allowed" def _doctest(): import doctest, memcache servers = ["127.0.0.1:11211"] mc = Client(servers, debug=1) globs = {"mc": mc} return doctest.testmod(memcache, globs=globs) if __name__ == "__main__": print "Testing docstrings..." _doctest() print "Running tests:" print serverList = [["127.0.0.1:11211"]] if '--do-unix' in sys.argv: serverList.append([os.path.join(os.getcwd(), 'memcached.socket')]) for servers in serverList: mc = Client(servers, debug=1) def to_s(val): if not isinstance(val, types.StringTypes): return "%s (%s)" % (val, type(val)) return "%s" % val def test_setget(key, val): print "Testing set/get {'%s': %s} ..." 
% (to_s(key), to_s(val)), mc.set(key, val) newval = mc.get(key) if newval == val: print "OK" return 1 else: print "FAIL" return 0 class FooStruct: def __init__(self): self.bar = "baz" def __str__(self): return "A FooStruct" def __eq__(self, other): if isinstance(other, FooStruct): return self.bar == other.bar return 0 test_setget("a_string", "some random string") test_setget("an_integer", 42) if test_setget("long", long(1<<30)): print "Testing delete ...", if mc.delete("long"): print "OK" else: print "FAIL" print "Testing get_multi ...", print mc.get_multi(["a_string", "an_integer"]) print "Testing get(unknown value) ...", print to_s(mc.get("unknown_value")) f = FooStruct() test_setget("foostruct", f) print "Testing incr ...", x = mc.incr("an_integer", 1) if x == 43: print "OK" else: print "FAIL" print "Testing decr ...", x = mc.decr("an_integer", 1) if x == 42: print "OK" else: print "FAIL" # sanity tests print "Testing sending spaces...", try: x = mc.set("this has spaces", 1) except Client.MemcachedKeyCharacterError, msg: print "OK" else: print "FAIL" print "Testing sending control characters...", try: x = mc.set("this\x10has\x11control characters\x02", 1) except Client.MemcachedKeyCharacterError, msg: print "OK" else: print "FAIL" print "Testing using insanely long key...", try: x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'aaaa', 1) except Client.MemcachedKeyLengthError, msg: print "OK" else: print "FAIL" print "Testing sending a unicode-string key...", try: x = mc.set(u'keyhere', 1) except Client.MemcachedStringEncodingError, msg: print "OK", else: print "FAIL", try: x = mc.set((u'a'*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1) except: print "FAIL", else: print "OK", import pickle s = pickle.loads('V\\u4f1a\np0\n.') try: x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1) except Client.MemcachedKeyLengthError: print "OK" else: print "FAIL" print "Testing using a value larger than the memcached value limit...", x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH) if 
mc.get('keyhere') == None: print "OK", else: print "FAIL", x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa') if mc.get('keyhere') == None: print "OK" else: print "FAIL" print "Testing set_multi() with no memcacheds running", mc.disconnect_all() errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'}) if errors != []: print "FAIL" else: print "OK" print "Testing delete_multi() with no memcacheds running", mc.disconnect_all() ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'}) if ret != 1: print "FAIL" else: print "OK" # vim: ts=4 sw=4 et :
Python
from gluon.contrib.memcache.memcache import Client
from gluon.cache import CacheAbstract
import time

"""
Example of usage:

    cache.memcache = MemcacheClient(request, ['127.0.0.1:11211'], debug=True)
"""

import cPickle as pickle
import thread

# Guards lazy construction of the process-wide singleton below.
locker = thread.allocate_lock()


def MemcacheClient(*a, **b):
    """Thread-safe singleton factory for ``_MemcacheClient``.

    The first call constructs the client with the given arguments; every
    later call returns that same instance (arguments are then ignored).
    """
    locker.acquire()
    try:
        if not hasattr(MemcacheClient, '__mc_instance'):
            MemcacheClient.__mc_instance = _MemcacheClient(*a, **b)
    finally:
        locker.release()
    return MemcacheClient.__mc_instance


class _MemcacheClient(Client):
    """memcache-backed cache client for web2py/gluon.

    Keys are namespaced per application via ``__keyFormat__`` and cache
    statistics are kept per application in the class-level ``meta_storage``.
    """

    # Shared across instances: application name -> stats dictionary.
    meta_storage = {}

    def __init__(self, request, servers, debug=0, pickleProtocol=0,
                 pickler=pickle.Pickler, unpickler=pickle.Unpickler,
                 pload=None, pid=None):
        """
        :param request: current web2py request (may be None, in which case
            the empty-string application namespace is used)
        :param servers: list of memcached server addresses
        """
        self.request = request
        app = request.application if request else ''
        Client.__init__(self, servers, debug, pickleProtocol,
                        pickler, unpickler, pload, pid)
        if app not in self.meta_storage:
            self.storage = self.meta_storage[app] = {
                CacheAbstract.cache_stats_name: {
                    'hit_total': 0,
                    'misses': 0,
                }}
        else:
            self.storage = self.meta_storage[app]

    def __call__(self, key, f, time_expire=300):
        """Return the cached value for ``key``, computing it with ``f()``
        on a miss.  Passing ``f=None`` clears the cached item (web2py
        CacheAbstract convention).

        FIX(review): the original ``elif f is None: if obj: self.delete(key)``
        was dead code -- ``obj`` was always falsy on that branch, so
        ``cache(key, None)`` never cleared an existing entry.
        """
        if time_expire is None:
            time_expire = 10 ** 10  # effectively "never expire"
        # NOTE: no __keyFormat__ here -- the redefined get/set/delete
        # below already namespace the key.
        if f is None:
            self.delete(key)
            return None
        obj = self.get(key)
        if obj:
            return obj
        value = f()
        self.set(key, value, time_expire)
        return value

    def increment(self, key, value=1, time_expire=300):
        """Increment the cached integer at ``key`` by ``value``, seeding it
        with ``value`` if absent.

        FIX(review): call ``Client.get``/``Client.set`` on the formatted
        key -- the original called the overridden ``self.get``/``self.set``,
        which re-applied ``__keyFormat__`` and double-prefixed the key, so
        the read and the ``Client.incr`` targeted different keys.
        """
        newKey = self.__keyFormat__(key)
        obj = Client.get(self, newKey)
        if obj is not None:
            return Client.incr(self, newKey, value)
        Client.set(self, newKey, value, time_expire)
        return value

    def set(self, key, value, time_expire=300):
        """Store ``value`` under the application-namespaced key."""
        newKey = self.__keyFormat__(key)
        return Client.set(self, newKey, value, time_expire)

    def get(self, key):
        """Fetch the value stored under the application-namespaced key."""
        newKey = self.__keyFormat__(key)
        return Client.get(self, newKey)

    def delete(self, key):
        """Remove the value stored under the application-namespaced key."""
        newKey = self.__keyFormat__(key)
        return Client.delete(self, newKey)

    def __keyFormat__(self, key):
        # Namespace by application; spaces are illegal in memcached keys.
        # FIX(review): tolerate request=None, matching __init__'s fallback.
        app = self.request.application if self.request else ''
        return '%s/%s' % (app, key.replace(' ', '_'))
Python
# (c) 2007 Chris AtLee <chris@atlee.ca> # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license.php """ PAM module for python Provides an authenticate function that will allow the caller to authenticate a user against the Pluggable Authentication Modules (PAM) on the system. Implemented using ctypes, so no compilation is necessary. """ __all__ = ['authenticate'] from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int from ctypes.util import find_library LIBPAM = CDLL(find_library("pam")) LIBC = CDLL(find_library("c")) CALLOC = LIBC.calloc CALLOC.restype = c_void_p CALLOC.argtypes = [c_uint, c_uint] STRDUP = LIBC.strdup STRDUP.argstypes = [c_char_p] STRDUP.restype = POINTER(c_char) # NOT c_char_p !!!! # Various constants PAM_PROMPT_ECHO_OFF = 1 PAM_PROMPT_ECHO_ON = 2 PAM_ERROR_MSG = 3 PAM_TEXT_INFO = 4 class PamHandle(Structure): """wrapper class for pam_handle_t""" _fields_ = [ ("handle", c_void_p) ] def __init__(self): Structure.__init__(self) self.handle = 0 class PamMessage(Structure): """wrapper class for pam_message structure""" _fields_ = [ ("msg_style", c_int), ("msg", c_char_p), ] def __repr__(self): return "<PamMessage %i '%s'>" % (self.msg_style, self.msg) class PamResponse(Structure): """wrapper class for pam_response structure""" _fields_ = [ ("resp", c_char_p), ("resp_retcode", c_int), ] def __repr__(self): return "<PamResponse %i '%s'>" % (self.resp_retcode, self.resp) CONV_FUNC = CFUNCTYPE(c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p) class PamConv(Structure): """wrapper class for pam_conv structure""" _fields_ = [ ("conv", CONV_FUNC), ("appdata_ptr", c_void_p) ] PAM_START = LIBPAM.pam_start PAM_START.restype = c_int PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)] PAM_AUTHENTICATE = LIBPAM.pam_authenticate PAM_AUTHENTICATE.restype = c_int PAM_AUTHENTICATE.argtypes = 
[PamHandle, c_int] def authenticate(username, password, service='login'): """Returns True if the given username and password authenticate for the given service. Returns False otherwise ``username``: the username to authenticate ``password``: the password in plain text ``service``: the PAM service to authenticate against. Defaults to 'login'""" @CONV_FUNC def my_conv(n_messages, messages, p_response, app_data): """Simple conversation function that responds to any prompt where the echo is off with the supplied password""" # Create an array of n_messages response objects addr = CALLOC(n_messages, sizeof(PamResponse)) p_response[0] = cast(addr, POINTER(PamResponse)) for i in range(n_messages): if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF: pw_copy = STRDUP(str(password)) p_response.contents[i].resp = cast(pw_copy, c_char_p) p_response.contents[i].resp_retcode = 0 return 0 handle = PamHandle() conv = PamConv(my_conv, 0) retval = PAM_START(service, username, pointer(conv), pointer(handle)) if retval != 0: # TODO: This is not an authentication error, something # has gone wrong starting up PAM return False retval = PAM_AUTHENTICATE(handle, 0) return retval == 0 if __name__ == "__main__": import getpass print authenticate(getpass.getuser(), getpass.getpass())
Python