repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
pjg101/SickRage | lib/pgi/codegen/cffi_backend.py | 19 | 12193 | # Copyright 2012,2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
import textwrap
from cffi import FFI
from pgi.clib.gir import GIRepository, GITypeTag, GIInfoType
from .backend import Backend
from .utils import CodeBlock, parse_with_objects, VariableFactory
from .utils import TypeTagRegistry
from .. import _compat
_glib_defs = """
typedef char gchar;
typedef const void * gconstpointer;
typedef double gdouble;
typedef float gfloat;
typedef int gboolean;
typedef int16_t gint16;
typedef int32_t gint32;
typedef int64_t gint64;
typedef int8_t gint8;
typedef int gint;
typedef long glong;
typedef short gshort;
typedef size_t gsize;
typedef uint16_t guint16;
typedef uint32_t guint32;
typedef uint64_t guint64;
typedef uint8_t guint8;
typedef unsigned int guint;
typedef unsigned long gulong;
typedef unsigned short gushort;
typedef intptr_t gpointer;
typedef gulong GType;
"""
def typeinfo_to_cffi(info):
    """Translate a GITypeInfo into the matching cffi/glib type name.

    Parameters
    ----------
    info : GITypeInfo
        The type info to translate (tag + pointer flag).

    Returns
    -------
    str
        A type name declared in ``_glib_defs`` (e.g. "guint32", "gchar*").

    Raises
    ------
    NotImplementedError
        If the tag (or the interface's info type) has no cffi mapping yet.
    """
    tag = info.tag.value
    ptr = info.is_pointer
    if not ptr:
        if tag == GITypeTag.UINT32:
            return "guint32"
        elif tag == GITypeTag.INT32:
            return "gint32"
        elif tag == GITypeTag.BOOLEAN:
            return "gboolean"
        elif tag == GITypeTag.VOID:
            return "void"
        elif tag == GITypeTag.GTYPE:
            return "GType"
        elif tag == GITypeTag.INTERFACE:
            iface = info.get_interface()
            iface_type = iface.type.value
            if iface_type == GIInfoType.STRUCT:
                # structs are handed around by reference
                return "gpointer"
            elif iface_type == GIInfoType.ENUM:
                # enums are plain 32 bit unsigned values on the C side
                return "guint32"
            # BUG FIX: the original message said "interface ptr" although
            # this is the non-pointer branch, which made failures here look
            # like failures of the pointer branch below.
            raise NotImplementedError(
                "Couldn't convert non-pointer interface %r to cffi type" %
                iface.type)
    else:
        if tag == GITypeTag.UTF8 or tag == GITypeTag.FILENAME:
            return "gchar*"
        elif tag == GITypeTag.INTERFACE:
            iface = info.get_interface()
            iface_type = iface.type.value
            if iface_type == GIInfoType.ENUM:
                return "guint32"
            elif iface_type == GIInfoType.OBJECT:
                return "gpointer"
            elif iface_type == GIInfoType.STRUCT:
                return "gpointer"
            raise NotImplementedError(
                "Couldn't convert interface %r to cffi type" % iface.type)
    raise NotImplementedError("Couldn't convert %r to cffi type" % info.tag)
registry = TypeTagRegistry()
def get_type(type_, gen, desc, may_be_null, may_return_null):
    """Look up the marshaller class registered for *type_* and instantiate it.

    Raises NotImplementedError when no marshaller is registered for the
    type's tag.
    """
    try:
        type_cls = registry.get_type(type_)
    except LookupError as e:
        raise NotImplementedError(e)
    return type_cls(gen, type_, desc, may_be_null, may_return_null)
class BaseType(object):
    """Base class for all cffi type marshallers.

    A marshaller owns a CodeBlock into which ``parse()`` appends generated
    code, and carries the null-ability flags of the value it marshals.
    """

    # GITypeTag value this marshaller handles; set by subclasses/registry.
    GI_TYPE_TAG = None
    type = None
    py_type = None

    def __init__(self, gen, type_, desc, may_be_null, may_return_null):
        self._gen = gen
        self.block = CodeBlock()
        self.type = type_
        self.may_be_null = may_be_null
        self.may_return_null = may_return_null
        self.desc = desc

    def get_type(self, type_, may_be_null=False, may_return_null=False):
        """Create a marshaller for a related (e.g. element) type.

        BUG FIX: the module-level get_type() signature is
        (type_, gen, desc, may_be_null, may_return_null); the original call
        omitted the desc argument, which shifted may_be_null into desc and
        raised a TypeError for the missing last argument. Reuse this
        marshaller's description for the derived type.
        """
        return get_type(type_, self._gen, self.desc, may_be_null,
                        may_return_null)

    def var(self):
        # Fresh unique variable name from the shared factory.
        return self._gen.var()

    @classmethod
    def get_class(cls, type_):
        return cls

    def parse(self, code, **kwargs):
        """Parse a code template and append the result to self.block.

        $DESC is always available in templates and expands to self.desc.
        Returns the mapping of template names to generated variable names.
        """
        assert "DESC" not in kwargs
        kwargs["DESC"] = self.desc
        code = textwrap.dedent(code)
        block, var = self._gen.parse(code, **kwargs)
        block.write_into(self.block)
        return var

    def get_reference(self, value):
        raise NotImplementedError

    def free(self, name):
        raise NotImplementedError

    def __getattribute__(self, name):
        # Python-version dispatch: an access to e.g. self._check resolves to
        # _check_py3 under Python 3 or _check_py2 under Python 2 when such a
        # variant exists, falling back to the plain name otherwise.
        try:
            if _compat.PY3:
                return object.__getattribute__(self, name + "_py3")
            else:
                return object.__getattribute__(self, name + "_py2")
        except AttributeError:
            return object.__getattribute__(self, name)
class BasicType(BaseType):
    """Interface for simple scalar marshallers.

    Concrete subclasses override these hooks; each one raises
    NotImplementedError here so that missing support fails loudly instead
    of producing wrong code.
    """

    def new(self):
        raise NotImplementedError

    def pack_in(self, value):
        raise NotImplementedError

    def pack_out(self, value):
        raise NotImplementedError

    def unpack_out(self, value):
        raise NotImplementedError

    def unpack_return(self, value):
        raise NotImplementedError
@registry.register(GITypeTag.BOOLEAN)
class Boolean(BasicType):
    """Marshaller for gboolean: values are coerced with bool() in every
    direction (pack and unpack share the exact same conversion)."""

    def _to_bool(self, name):
        # Single shared conversion used by all four pack/unpack hooks.
        return self.parse("""
            $bool = $_.bool($value)
            """, value=name)["bool"]

    pack_in = _to_bool
    pack_out = _to_bool
    unpack_return = _to_bool
    unpack_out = _to_bool

    def new(self):
        """Return a zero-initialised gboolean variable."""
        return self.parse("""
            $value = $ffi.cast("gboolean", 0)
            """)["value"]
class BaseInt(BasicType):
    """Shared packing/unpacking logic for all fixed-width integer types.

    Subclasses only set ``CTYPE`` (e.g. "int16" or "uint32"); concrete
    classes are stamped out and registered in the loop that follows this
    class definition.
    """

    # glib C type name without the leading "g", e.g. "uint32"; set by subclass.
    CTYPE = None

    def _check(self, value):
        # Reject strings outright (int("3") would otherwise succeed),
        # then coerce everything else with int().
        int_ = self.parse("""
            if not $_.isinstance($value, $basestring):
                $int = $_.int($value)
            else:
                raise $_.TypeError("$DESC: not a number")
            """, value=value, basestring=_compat.string_types)["int"]

        if self.CTYPE.startswith("u"):
            # unsigned: "uintNN"[4:] -> NN, valid range is [0, 2**NN)
            bits = int(self.CTYPE[4:])
            return self.parse("""
                if not 0 <= $int < 2**$bits:
                    raise $_.OverflowError("$DESC: %r not in range" % $int)
                """, int=int_, bits=bits)["int"]
        else:
            # signed: "intNN"[3:] -> NN, valid range is [-2**(NN-1), 2**(NN-1))
            bits = int(self.CTYPE[3:])
            return self.parse("""
                if not -2**$bits <= $int < 2**$bits:
                    raise $_.OverflowError("$DESC: %r not in range" % $int)
                """, int=int_, bits=bits - 1)["int"]

    def pack_in(self, value):
        # in-arguments only need validation; cffi converts plain ints
        return self._check(value)

    def pack_out(self, value):
        # out-arguments additionally need an explicit cast to the C type
        checked = self._check(value)
        return self.parse("""
            $value = $ffi.cast("g$ctype", $value)
            """, ctype=self.CTYPE, value=checked)["value"]

    def unpack_return(self, value):
        # return values already come back as Python ints
        return value

    def unpack_out(self, value):
        # dereferenced out-values are cdata; convert to a Python int
        return self.parse("""
            $value = int($value)
            """, value=value)["value"]

    def new(self):
        """Return a zero-initialised variable of this integer type."""
        return self.parse("""
            $value = $ffi.cast("g$ctype", 0)
            """, ctype=self.CTYPE)["value"]
# Register marshallers for every fixed-width integer type by stamping out a
# BaseInt subclass per width (CTYPE "int16", "uint16", ...) under its tag.
for name in ("Int16", "UInt16", "Int32", "UInt32", "Int64", "UInt64"):
    int_cls = type(name, (BaseInt,), {"CTYPE": name.lower()})
    registry.register(getattr(GITypeTag, name.upper()))(int_cls)
@registry.register(GITypeTag.UTF8)
class Utf8(BasicType):
    """Marshaller for utf-8 strings.

    The *_py2/*_py3 method variants are selected at attribute-access time by
    BaseType.__getattribute__, so e.g. ``self._check`` resolves to
    ``_check_py2`` under Python 2 and ``_check_py3`` under Python 3.
    """

    def _check_py3(self, name):
        # Python 3: str is encoded to utf-8 bytes, bytes pass through.
        # None is only allowed when may_be_null is set; otherwise anything
        # that is neither str nor bytes raises TypeError.
        if self.may_be_null:
            return self.parse("""
                if $value is not None:
                    if isinstance($value, $_.str):
                        $string = $value.encode("utf-8")
                    else:
                        $string = $value
                else:
                    $string = None
                """, value=name)["string"]
        return self.parse("""
            if isinstance($value, $_.str):
                $string = $value.encode("utf-8")
            elif not isinstance($value, $_.bytes):
                raise TypeError
            else:
                $string = $value
            """, value=name)["string"]

    def _check_py2(self, name):
        # Python 2: unicode is encoded to utf-8, str passes through.
        if self.may_be_null:
            return self.parse("""
                if $value is not $none:
                    if isinstance($value, $_.unicode):
                        $string = $value.encode("utf-8")
                    elif not isinstance($value, $_.str):
                        raise $_.TypeError("$DESC: %r not a string or None" % $value)
                    else:
                        $string = $value
                else:
                    $string = $none
                """, value=name, none=None)["string"]
        return self.parse("""
            if $_.isinstance($value, $_.unicode):
                $string = $value.encode("utf-8")
            elif not $_.isinstance($value, $_.str):
                raise $_.TypeError("$DESC: %r not a string" % $value)
            else:
                $string = $value
            """, value=name)["string"]

    def pack_in_py2(self, value):
        # Falsy values (None / "") become a NULL char*.
        value = self._check(value)
        return self.parse("""
            if $value:
                $c_value = $value
            else:
                $c_value = $ffi.cast("char*", 0)
            """, value=value)["c_value"]

    def pack_in_py3(self, value):
        # Only None becomes NULL here, so the empty string stays "".
        value = self._check(value)
        return self.parse("""
            if $value is not None:
                $c_value = $value
            else:
                $c_value = $ffi.cast("char*", 0)
            """, value=value)["c_value"]

    def dup(self, name):
        raise NotImplementedError

    def unpack_out(self, name):
        raise NotImplementedError

    def unpack_return_py2(self, value):
        # NULL -> None, otherwise a Python 2 byte string.
        return self.parse("""
            if $value == $ffi.NULL:
                $value = None
            else:
                $value = $ffi.string($value)
            """, value=value)["value"]

    def unpack_return_py3(self, value):
        # NULL -> None, otherwise decoded back to a Python 3 str.
        return self.parse("""
            if $value == $ffi.NULL:
                $value = None
            else:
                $value = $ffi.string($value).decode("utf-8")
            """, value=value)["value"]

    def new(self):
        raise NotImplementedError
class CFFICodeGen(object):
    """Bundles a variable-name factory with an FFI instance and injects the
    reserved template objects ($ffi and $_) into every parse() call."""

    def __init__(self, var, ffi):
        self.var = var
        self._ffi = ffi

    def parse(self, code, **kwargs):
        """Expand *code* via parse_with_objects; returns (block, var_map)."""
        # "ffi" and "_" are reserved names provided by the generator itself,
        # so callers must not pass them.
        for reserved, obj in (("ffi", self._ffi), ("_", _compat.builtins)):
            assert reserved not in kwargs
            kwargs[reserved] = obj
        return parse_with_objects(code, self.var, **kwargs)
class CFFIBackend(Backend):
    """Code-generation backend built on top of cffi.

    The FFI instance and the dlopen()ed libraries are class attributes and
    therefore shared by all backend instances.
    """

    NAME = "cffi"

    # namespace -> dlopen()ed library; cache shared across instances
    _libs = {}
    _ffi = FFI()
    _ffi.cdef(_glib_defs)

    def __init__(self):
        Backend.__init__(self)
        self._gen = CFFICodeGen(VariableFactory(), self._ffi)

    @property
    def var(self):
        # variable-name factory used by generated code
        return self._gen.var

    def get_library(self, namespace):
        """Load (and cache) the shared library for *namespace*."""
        if namespace not in self._libs:
            paths = GIRepository().get_shared_library(namespace)
            if not paths:
                raise NotImplementedError("No shared library")
            # the repository may return several comma separated libraries;
            # only the first one is loaded
            path = paths.split(",")[0]
            self._libs[namespace] = self._ffi.dlopen(path)
        return self._libs[namespace]

    def get_function(self, lib, symbol, args, ret, method=False, throws=False):
        """Declare *symbol* via cdef and fetch the callable from *lib*.

        Returns (CodeBlock, callable); the block only carries the generated
        cdef as a comment, for debugging the generated code.

        Raises NotImplementedError if the library lacks the symbol.
        """
        block = CodeBlock()
        cdef_types = []
        if method:
            # methods receive the instance pointer as implicit first argument
            cdef_types.append("gpointer")
        for arg in args:
            cdef_types.append(typeinfo_to_cffi(arg.type))
        if ret:
            cffi_ret = typeinfo_to_cffi(ret.type)
        else:
            cffi_ret = "void"
        cdef = "%s %s(%s);" % (cffi_ret, symbol, ", ".join(cdef_types))
        self._ffi.cdef(cdef, override=True)
        block.write_line("# " + cdef)
        try:
            func = getattr(lib, symbol)
        except (KeyError, AttributeError):
            raise NotImplementedError(
                "Library doesn't provide symbol: %s" % symbol)
        return block, func

    def get_type(self, type_, desc="", may_be_null=False,
                 may_return_null=False):
        return get_type(type_, self._gen, desc, may_be_null, may_return_null)

    def parse(self, code, **kwargs):
        return self._gen.parse(code, **kwargs)

    def cast_pointer(self, name, type_):
        # NOTE(review): the template rebinds $value but the original *name*
        # (not the freshly parsed variable) is returned -- presumably
        # intentional since the cast shadows the same name; confirm against
        # callers before changing.
        block, var = self.parse("""
            $value = $ffi.cast("$type*", $value)
            """, value=name, type=typeinfo_to_cffi(type_))
        return block, name

    def assign_pointer(self, ptr, value):
        raise NotImplementedError

    def deref_pointer(self, name):
        """Return a block dereferencing *name* plus the result variable."""
        block, var = self.parse("""
            $value = $value[0]
            """, value=name)
        return block, var["value"]
| gpl-3.0 |
hottwaj/django | django/forms/utils.py | 241 | 6131 | from __future__ import unicode_literals
import json
import sys
from django.conf import settings
from django.core.exceptions import ValidationError # backwards compatibility
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, format_html, format_html_join, html_safe
from django.utils.translation import ugettext_lazy as _
try:
from collections import UserList
except ImportError: # Python 2
from UserList import UserList
def pretty_name(name):
    """Converts 'first_name' to 'First name'"""
    # Empty/None field names render as an empty string.
    return name.replace('_', ' ').capitalize() if name else ''
def flatatt(attrs):
    """
    Convert a dictionary of attributes to a single string.
    The returned string will contain a leading space followed by key="value",
    XML-style pairs. In the case of a boolean value, the key will appear
    without a value. It is assumed that the keys do not need to be
    XML-escaped. If the passed dictionary is empty, then return an empty
    string.
    The result is passed through 'mark_safe' (by way of 'format_html_join').
    """
    key_value_attrs = []
    boolean_attrs = []
    for attr, value in attrs.items():
        if not isinstance(value, bool):
            key_value_attrs.append((attr, value))
        elif value:
            # True booleans render as a bare attribute; False ones are dropped
            boolean_attrs.append((attr,))
    rendered_pairs = format_html_join('', ' {}="{}"', sorted(key_value_attrs))
    rendered_flags = format_html_join('', ' {}', sorted(boolean_attrs))
    return rendered_pairs + rendered_flags
@html_safe
@python_2_unicode_compatible
class ErrorDict(dict):
    """
    A collection of errors that knows how to display itself in various
    formats. Keys are field names; values are ErrorList instances.
    """

    def as_data(self):
        """Map each field name to its raw ValidationError list."""
        data = {}
        for field, errors in self.items():
            data[field] = errors.as_data()
        return data

    def as_json(self, escape_html=False):
        """Serialize the errors as a JSON object keyed by field name."""
        payload = {
            field: errors.get_json_data(escape_html)
            for field, errors in self.items()
        }
        return json.dumps(payload)

    def as_ul(self):
        """Render the errors as an HTML <ul>, or '' when there are none."""
        if not self:
            return ''
        items = format_html_join(
            '', '<li>{}{}</li>',
            ((field, force_text(errors)) for field, errors in self.items()))
        return format_html('<ul class="errorlist">{}</ul>', items)

    def as_text(self):
        """Render the errors as indented plain text, one field per section."""
        lines = []
        for field, errors in self.items():
            lines.append('* %s' % field)
            lines.append('\n'.join(' * %s' % e for e in errors))
        return '\n'.join(lines)

    def __str__(self):
        return self.as_ul()
@html_safe
@python_2_unicode_compatible
class ErrorList(UserList, list):
    """
    A collection of errors that knows how to display itself in various formats.
    """

    def __init__(self, initlist=None, error_class=None):
        super(ErrorList, self).__init__(initlist)
        # CSS class used by as_ul(); an extra class is appended to the
        # default "errorlist".
        if error_class is None:
            self.error_class = 'errorlist'
        else:
            self.error_class = 'errorlist {}'.format(error_class)

    def as_data(self):
        # Return the underlying ValidationError instances.
        return ValidationError(self.data).error_list

    def get_json_data(self, escape_html=False):
        """Return the errors as a list of {'message', 'code'} dicts."""
        errors = []
        for error in self.as_data():
            # list(error) yields the error's messages; take the first one
            message = list(error)[0]
            errors.append({
                'message': escape(message) if escape_html else message,
                'code': error.code or '',
            })
        return errors

    def as_json(self, escape_html=False):
        return json.dumps(self.get_json_data(escape_html))

    def as_ul(self):
        if not self.data:
            return ''
        return format_html(
            '<ul class="{}">{}</ul>',
            self.error_class,
            format_html_join('', '<li>{}</li>', ((force_text(e),) for e in self))
        )

    def as_text(self):
        return '\n'.join('* %s' % e for e in self)

    def __str__(self):
        return self.as_ul()

    def __repr__(self):
        return repr(list(self))

    def __contains__(self, item):
        # Compare against the flattened message strings (see __getitem__).
        return item in list(self)

    def __eq__(self, other):
        return list(self) == other

    def __ne__(self, other):
        return list(self) != other

    def __getitem__(self, i):
        # Flatten ValidationError entries to their first message string.
        error = self.data[i]
        if isinstance(error, ValidationError):
            return list(error)[0]
        return force_text(error)

    def __reduce_ex__(self, *args, **kwargs):
        # The `list` reduce function returns an iterator as the fourth element
        # that is normally used for repopulating. Since we only inherit from
        # `list` for `isinstance` backward compatibility (Refs #17413) we
        # nullify this iterator as it would otherwise result in duplicate
        # entries. (Refs #23594)
        info = super(UserList, self).__reduce_ex__(*args, **kwargs)
        return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.

    Raises ValidationError (code 'ambiguous_timezone') when the naive
    datetime cannot be interpreted in the current time zone, e.g. around
    DST transitions.
    """
    if settings.USE_TZ and value is not None and timezone.is_naive(value):
        current_timezone = timezone.get_current_timezone()
        try:
            return timezone.make_aware(value, current_timezone)
        except Exception:
            message = _(
                '%(datetime)s couldn\'t be interpreted '
                'in time zone %(current_timezone)s; it '
                'may be ambiguous or it may not exist.'
            )
            params = {'datetime': value, 'current_timezone': current_timezone}
            # Re-raise as ValidationError while preserving the original
            # traceback (Python 2/3 compatible via six.reraise).
            six.reraise(ValidationError, ValidationError(
                message,
                code='ambiguous_timezone',
                params=params,
            ), sys.exc_info()[2])
    return value
def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
    """
    # Guard clause: pass through untouched unless TZ support is on and the
    # value is an aware datetime.
    if not settings.USE_TZ or value is None or not timezone.is_aware(value):
        return value
    return timezone.make_naive(value, timezone.get_current_timezone())
| bsd-3-clause |
sillvan/hyperspy | hyperspy/drawing/_markers/horizontal_line_segment.py | 1 | 3320 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The Hyperspy developers
#
# This file is part of Hyperspy.
#
# Hyperspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hyperspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hyperspy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class HorizontalLineSegment(MarkerBase):
    """Horizontal line segment marker that can be added to the signal figure

    Parameters
    ---------
    x1: array or float
        The position of the start of the line segment in x.
        If float, the marker is fixed.
        If array, the marker will be updated when navigating. The array should
        have the same dimensions in the nagivation axes.
    x2: array or float
        The position of the end of the line segment in x.
        see x1 arguments
    y: array or float
        The position of line segment in y.
        see x1 arguments
    kwargs:
        Kewywords argument of axvline valid properties (i.e. recognized by
        mpl.plot).

    Example
    -------
    >>> import numpy as np
    >>> im = signals.Image(np.zeros((100, 100)))
    >>> m = utils.plot.markers.horizontal_line_segment(
    >>>     x1=20, x2=70, y=70, linewidth=4, color='red', linestyle='dotted')
    >>> im.add_marker(m)
    """

    def __init__(self, x1, x2, y, **kwargs):
        MarkerBase.__init__(self)
        # Default line properties; user kwargs may override them below.
        lp = {}
        lp['color'] = 'black'
        lp['linewidth'] = 1
        self.marker_properties = lp
        self.set_data(x1=x1, x2=x2, y1=y)
        self.set_marker_properties(**kwargs)

    def update(self):
        """Refresh the segment geometry when navigating (if auto_update)."""
        if self.auto_update is False:
            return
        self._update_segment()

    def plot(self):
        """Draw the marker on the axes previously assigned via add_marker."""
        if self.ax is None:
            raise AttributeError(
                "To use this method the marker needs to be first add to a " +
                "figure using `s._plot.signal_plot.add_marker(m)` or " +
                "`s._plot.navigator_plot.add_marker(m)`")
        # Create a placeholder vline collection; the real coordinates are
        # filled in by _update_segment().
        self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
        self._update_segment()
        self.marker.set_animated(True)
        try:
            self.ax.hspy_fig._draw_animated()
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit. The redraw is best-effort (not
            # every figure provides hspy_fig), so only catch Exception.
            pass

    def _update_segment(self):
        """Write the current x1/x2/y1 data into the line collection.

        Missing x coordinates fall back to the axes' current x-limits so the
        segment spans the full width.
        """
        segments = self.marker.get_segments()
        segments[0][0, 1] = self.get_data_position('y1')
        segments[0][1, 1] = segments[0][0, 1]
        if self.get_data_position('x1') is None:
            segments[0][0, 0] = plt.getp(self.marker.axes, 'xlim')[0]
        else:
            segments[0][0, 0] = self.get_data_position('x1')
        if self.get_data_position('x2') is None:
            segments[0][1, 0] = plt.getp(self.marker.axes, 'xlim')[1]
        else:
            segments[0][1, 0] = self.get_data_position('x2')
        self.marker.set_segments(segments)
| gpl-3.0 |
dgsantana/arsenalsuite | cpp/lib/PyQt4/examples/widgets/spinboxes.py | 20 | 8877 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
class Window(QtGui.QWidget):
    """Demo widget showing three groups of Qt spin boxes side by side:
    integer spin boxes, date/time edits and double-precision spin boxes.
    """

    def __init__(self):
        super(Window, self).__init__()

        self.createSpinBoxes()
        self.createDateTimeEdits()
        self.createDoubleSpinBoxes()

        # Lay the three group boxes out horizontally.
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.spinBoxesGroup)
        layout.addWidget(self.editsGroup)
        layout.addWidget(self.doubleSpinBoxesGroup)
        self.setLayout(layout)

        self.setWindowTitle("Spin Boxes")

    def createSpinBoxes(self):
        """Build the integer spin box group (plain, zoom %, price $)."""
        self.spinBoxesGroup = QtGui.QGroupBox("Spinboxes")

        integerLabel = QtGui.QLabel("Enter a value between %d and %d:" % (-20, 20))
        integerSpinBox = QtGui.QSpinBox()
        integerSpinBox.setRange(-20, 20)
        integerSpinBox.setSingleStep(1)
        integerSpinBox.setValue(0)

        # Zoom box: suffix '%', and 0 displays as the special text "Automatic".
        zoomLabel = QtGui.QLabel("Enter a zoom value between %d and %d:" % (0, 1000))
        zoomSpinBox = QtGui.QSpinBox()
        zoomSpinBox.setRange(0, 1000)
        zoomSpinBox.setSingleStep(10)
        zoomSpinBox.setSuffix('%')
        zoomSpinBox.setSpecialValueText("Automatic")
        zoomSpinBox.setValue(100)

        priceLabel = QtGui.QLabel("Enter a price between %d and %d:" % (0, 999))
        priceSpinBox = QtGui.QSpinBox()
        priceSpinBox.setRange(0, 999)
        priceSpinBox.setSingleStep(1)
        priceSpinBox.setPrefix('$')
        priceSpinBox.setValue(99)

        spinBoxLayout = QtGui.QVBoxLayout()
        spinBoxLayout.addWidget(integerLabel)
        spinBoxLayout.addWidget(integerSpinBox)
        spinBoxLayout.addWidget(zoomLabel)
        spinBoxLayout.addWidget(zoomSpinBox)
        spinBoxLayout.addWidget(priceLabel)
        spinBoxLayout.addWidget(priceSpinBox)
        self.spinBoxesGroup.setLayout(spinBoxLayout)

    def createDateTimeEdits(self):
        """Build the date/time group, including a format-string combo box
        that reconfigures the meeting QDateTimeEdit via setFormatString()."""
        self.editsGroup = QtGui.QGroupBox("Date and time spin boxes")

        dateLabel = QtGui.QLabel()
        dateEdit = QtGui.QDateEdit(QtCore.QDate.currentDate())
        dateEdit.setDateRange(QtCore.QDate(2005, 1, 1), QtCore.QDate(2010, 12, 31))
        dateLabel.setText("Appointment date (between %s and %s):" %
                          (dateEdit.minimumDate().toString(QtCore.Qt.ISODate),
                           dateEdit.maximumDate().toString(QtCore.Qt.ISODate)))

        timeLabel = QtGui.QLabel()
        timeEdit = QtGui.QTimeEdit(QtCore.QTime.currentTime())
        timeEdit.setTimeRange(QtCore.QTime(9, 0, 0, 0), QtCore.QTime(16, 30, 0, 0))
        timeLabel.setText("Appointment time (between %s and %s):" %
                          (timeEdit.minimumTime().toString(QtCore.Qt.ISODate),
                           timeEdit.maximumTime().toString(QtCore.Qt.ISODate)))

        # Kept as attributes because setFormatString() updates them later.
        self.meetingLabel = QtGui.QLabel()
        self.meetingEdit = QtGui.QDateTimeEdit(QtCore.QDateTime.currentDateTime())

        formatLabel = QtGui.QLabel("Format string for the meeting date and time:")
        formatComboBox = QtGui.QComboBox()
        formatComboBox.addItem('yyyy-MM-dd hh:mm:ss (zzz \'ms\')')
        formatComboBox.addItem('hh:mm:ss MM/dd/yyyy')
        formatComboBox.addItem('hh:mm:ss dd/MM/yyyy')
        formatComboBox.addItem('hh:mm:ss')
        formatComboBox.addItem('hh:mm ap')

        # [str] selects the QString overload of the activated signal.
        formatComboBox.activated[str].connect(self.setFormatString)

        self.setFormatString(formatComboBox.currentText())

        editsLayout = QtGui.QVBoxLayout()
        editsLayout.addWidget(dateLabel)
        editsLayout.addWidget(dateEdit)
        editsLayout.addWidget(timeLabel)
        editsLayout.addWidget(timeEdit)
        editsLayout.addWidget(self.meetingLabel)
        editsLayout.addWidget(self.meetingEdit)
        editsLayout.addWidget(formatLabel)
        editsLayout.addWidget(formatComboBox)
        self.editsGroup.setLayout(editsLayout)

    def setFormatString(self, formatString):
        """Apply *formatString* to the meeting edit and switch between a
        date range or a time range depending on which sections it shows."""
        self.meetingEdit.setDisplayFormat(formatString)
        if self.meetingEdit.displayedSections() & QtGui.QDateTimeEdit.DateSections_Mask:
            self.meetingEdit.setDateRange(QtCore.QDate(2004, 11, 1), QtCore.QDate(2005, 11, 30))
            self.meetingLabel.setText("Meeting date (between %s and %s):" %
                                      (self.meetingEdit.minimumDate().toString(QtCore.Qt.ISODate),
                                       self.meetingEdit.maximumDate().toString(QtCore.Qt.ISODate)))
        else:
            self.meetingEdit.setTimeRange(QtCore.QTime(0, 7, 20, 0), QtCore.QTime(21, 0, 0, 0))
            self.meetingLabel.setText("Meeting time (between %s and %s):" %
                                      (self.meetingEdit.minimumTime().toString(QtCore.Qt.ISODate),
                                       self.meetingEdit.maximumTime().toString(QtCore.Qt.ISODate)))

    def createDoubleSpinBoxes(self):
        """Build the double spin box group; a precision spin box drives the
        number of decimals shown via changePrecision()."""
        self.doubleSpinBoxesGroup = QtGui.QGroupBox("Double precision spinboxes")

        precisionLabel = QtGui.QLabel("Number of decimal places to show:")
        precisionSpinBox = QtGui.QSpinBox()
        precisionSpinBox.setRange(0, 100)
        precisionSpinBox.setValue(2)

        doubleLabel = QtGui.QLabel("Enter a value between %d and %d:" % (-20, 20))
        self.doubleSpinBox = QtGui.QDoubleSpinBox()
        self.doubleSpinBox.setRange(-20.0, 20.0)
        self.doubleSpinBox.setSingleStep(1.0)
        self.doubleSpinBox.setValue(0.0)

        scaleLabel = QtGui.QLabel("Enter a scale factor between %d and %d:" % (0, 1000))
        self.scaleSpinBox = QtGui.QDoubleSpinBox()
        self.scaleSpinBox.setRange(0.0, 1000.0)
        self.scaleSpinBox.setSingleStep(10.0)
        self.scaleSpinBox.setSuffix('%')
        self.scaleSpinBox.setSpecialValueText("No scaling")
        self.scaleSpinBox.setValue(100.0)

        priceLabel = QtGui.QLabel("Enter a price between %d and %d:" % (0, 1000))
        self.priceSpinBox = QtGui.QDoubleSpinBox()
        self.priceSpinBox.setRange(0.0, 1000.0)
        self.priceSpinBox.setSingleStep(1.0)
        self.priceSpinBox.setPrefix('$')
        self.priceSpinBox.setValue(99.99)

        precisionSpinBox.valueChanged.connect(self.changePrecision)

        spinBoxLayout = QtGui.QVBoxLayout()
        spinBoxLayout.addWidget(precisionLabel)
        spinBoxLayout.addWidget(precisionSpinBox)
        spinBoxLayout.addWidget(doubleLabel)
        spinBoxLayout.addWidget(self.doubleSpinBox)
        spinBoxLayout.addWidget(scaleLabel)
        spinBoxLayout.addWidget(self.scaleSpinBox)
        spinBoxLayout.addWidget(priceLabel)
        spinBoxLayout.addWidget(self.priceSpinBox)
        self.doubleSpinBoxesGroup.setLayout(spinBoxLayout)

    def changePrecision(self, decimals):
        # Slot for precisionSpinBox.valueChanged: update all three doubles.
        self.doubleSpinBox.setDecimals(decimals)
        self.scaleSpinBox.setDecimals(decimals)
        self.priceSpinBox.setDecimals(decimals)
if __name__ == '__main__':

    import sys

    # Standard Qt bootstrap: create the application, show the demo window
    # and hand control to the event loop until the window is closed.
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
| gpl-2.0 |
giacomov/lclike | lclike/duration_computation.py | 1 | 12141 | __author__ = 'giacomov'
# !/usr/bin/env python
# add |^| to the top line to run the script without needing 'python' to run it at cmd
# importing modules1
import numpy as np
# cant use 'show' inside the farm
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os
import argparse
import decayLikelihood
import warnings
####################################################################
# Command-line interface: four required positional arguments.
mycmd = argparse.ArgumentParser()
for _arg_name, _arg_help in (
        ('triggername',
         "The name of the GRB in YYMMDDXXX format (ex. bn080916009)"),
        ('redshift', "Redshift for object."),
        ('function', "Function to model. (ex. crystalball2, band)"),
        ('directory', "Directory containing the file produced by gtburst")):
    mycmd.add_argument(_arg_name, help=_arg_help)
if __name__ == "__main__":
args = mycmd.parse_args()
os.chdir(args.directory)
##############################################################################
textfile = os.path.join(args.directory, '%s_res.txt' % (args.triggername))
tbin = np.recfromtxt(textfile, names=True)
textfile = os.path.join(args.directory, '%s_MCsamples_%s.txt' % (args.triggername, args.function))
samples = np.recfromtxt(textfile, names=True)
# function for returning 1 and 2 sigma errors from sample median
def getErr(sampleArr):
    """Return the median with its 1-sigma (16/84) and 2-sigma-ish (3/97)
    percentile bounds of *sampleArr* as
    (lo1, median, hi1, lo2, median, hi2)."""
    one_lo, med, one_hi = np.percentile(sampleArr, [16, 50, 84])
    two_lo, med2, two_hi = np.percentile(sampleArr, [3, 50, 97])
    return one_lo, med, one_hi, two_lo, med2, two_hi
# prepare for plotting and LOOP
t = np.logspace(0, 4, 100)
t = np.append(t, np.linspace(0, 1, 10))
t.sort()
t = np.unique(t)
print('NUMBER OF times to iterate: %s' % (len(t)))
x = decayLikelihood.DecayLikelihood()
if args.function == 'crystalball2':
crystal = decayLikelihood.CrystalBall2() # declaring instance of DecayLikelihood using POWER LAW FIT
x.setDecayFunction(crystal)
# CrystalBall DiffFlux####################################################
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0])
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
# NORMALIZATION IS THE FLUX AT THE PEAK
pB = parameters[3] # decay time is independent of scale # (y*.001) # scale =0.001, for all xml files
fBe = pB / np.e
# t = (fBe/N)**(-1/a) defined to be 1
mu = parameters[0]
tP = mu
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
teP = mu + (fBe / parameters[3]) ** (
-1 / parameters[2]) # sometimes 'RuntimeWarning: overflow encountered in double_scalars'
except Warning:
print('RuntimeWarning Raised! mu,sigma,decayIndex,and N:', parameters)
teP = parameters[0] + (fBe / parameters[3]) ** (-1 / parameters[2])
Peak[i] = pB
ePeak[i] = fBe
# redshift correcting t/(1+z)
tPeak[i] = tP / (1 + float(args.redshift)) ################################
tePeak[i] = teP / (1 + float(args.redshift)) ################################
elif args.function == 'band':
band = decayLikelihood.DecayBand() # declaring instance of DecayLikelihood using POWER LAW FIT
x.setDecayFunction(band)
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0]) # fractional brightness used in calcuating char-time, but not needed otherwise
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0]) # characteristic time
T05 = np.zeros(samples.shape[0])
T90 = np.zeros(samples.shape[0])
T95 = np.zeros(samples.shape[0])
T25 = np.zeros(samples.shape[0])
T50 = np.zeros(samples.shape[0])
T75 = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
tc = band.getCharacteristicTime() # get the characteristic time.
# T50/T90 TAKING TOO LONG (1/4)
# t90, t05, t95 = band.getTsomething( 90 ) # if the argument is 90, returns the T90 as well as the T05 and the T95. If the argument is 50, returns the T50 as well as the T25 and T75, and so on.
# t50, t25, t75 = band.getTsomething( 50 )
tp, fp = band.getPeakTimeAndFlux() # returns the time of the peak, as well as the peak flux
tePeak[i] = tc / (1 + float(args.redshift)) ################################
tPeak[i] = tp / (1 + float(args.redshift))
Peak[i] = fp
# T50/T90 TAKING TOO LONG (2/4)
# T05[i] = t05/(1+float(args.redshift))
# T90[i] = t90/(1+float(args.redshift))
# T95[i] = t95/(1+float(args.redshift))
# T50/T90 TAKING TOO LONG (3/4)
# T25[i] = t25/(1+float(args.redshift))
# T50[i] = t50/(1+float(args.redshift))
# T75[i] = t75/(1+float(args.redshift))
# Defining sigma bands
print('ENTERING Percentile LOOP')
upper = np.zeros(t.shape[0])
lower = np.zeros(t.shape[0])
upper2 = np.zeros(t.shape[0])
lower2 = np.zeros(t.shape[0])
meas = np.zeros(t.shape[0])
fluxMatrix = np.zeros([samples.shape[0], t.shape[0]])
for i, s in enumerate(samples):
x.decayFunction.setParameters(*s)
fluxes = map(x.decayFunction.getDifferentialFlux, t)
fluxMatrix[i, :] = np.array(fluxes)
for i, tt in enumerate(t):
allFluxes = fluxMatrix[:, i]
m, p = np.percentile(allFluxes, [16, 84])
lower[i] = m
upper[i] = p
m2, p2 = np.percentile(allFluxes, [2.5, 97.5])
lower2[i] = m2
upper2[i] = p2
wdir = '%s' % (args.directory)
# save TXT files instead of .npy
placeFile = os.path.join(wdir, "%s_tBrightness_%s" % (args.triggername, args.function))
with open(placeFile, 'w+') as f:
f.write("Peak tPeak ePeak tePeak\n")
for i, s in enumerate(Peak):
f.write("%s %s %s %s\n" % (Peak[i], tPeak[i], ePeak[i], tePeak[i]))
# CALCULATING T50/T90 TAKES TOO LONG
# T50/T90 TAKING TOO LONG (4/4)
# if args.function == 'band':
# #compute percentiles for 1 sigma
# m90,c90,p90 = np.percentile(T90,[16,50,84])
# m50,c50,p50 = np.percentile(T50,[16,50,84])
# #compute percentiles for 1 and 2 sigma
# #90m,90c,90p,90m2,90c2,90p2 = getErr(T90)
# #50m,50c,50p,50m2,50c2,50p2 = getErr(T50)
# #print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
#
# placeFile=os.path.join(wdir,"%s_t90_t50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 90minus 90plus t50 50minus 50plus\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s %s %s %s\n" % (m90,m90-c90,p90-c90,c50,m50-c50,p50-c50)) #c,m-c,p-c
#
# placeFile=os.path.join(wdir,"%s_samplesT90_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 t05 t95\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s\n" % (T90[i],T05[i],T95[i]))
# placeFile=os.path.join(wdir,"%s_samplesT50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t50 t25 t25\n")
# for i,s in enumerate(T50):
# f.write("%s %s %s\n" % (T50[i],T25[i],T75[i]))
# compute char-time percentiles for 1 and 2 sigma
# NOTE(review): presumably getErr returns the (16, 50, 84, 2.5, 50, 97.5)
# percentiles of its argument — verify against getErr's definition; only the
# 1-sigma triple (m, c, p) is used below.
m, c, p, m2, c2, p2 = getErr(tePeak)
# saves txt file
wkdir = '%s' % (args.directory)
fileDir = os.path.join(wkdir, '%s_timeRes_%s' % (args.triggername, args.function))
with open(fileDir, 'w+') as f:
    f.write('%s %s %s\n' % ('median', 'minus', 'plus'))
    # Median of the 1/e-time distribution with asymmetric 1-sigma offsets.
    f.write('%s %s %s\n' % (c, m - c, p - c))
# PLOTTING BINS AND SIGMA BAND
print("PLOTTING...")
fig = plt.figure()
# median is your "x"
# Y is your "y"
# DY is the array containing the errors
# DY==0 filters only the zero error
data = tbin
# redshift correction /(1+args.redshif)
# Bin centers and edges shifted into the burst rest frame.
median = (data["tstart"] + data["tstop"]) / 2 / (1 + float(args.redshift))
start = data['tstart'] / (1 + float(args.redshift))  ##
stop = data['tstop'] / (1 + float(args.redshift))  ##
y = data["photonFlux"]
Dy = data["photonFluxError"]
# Upper limits are stored as "<value>": strip the marker so float() succeeds.
# NOTE(review): bare except silences any failure here, not only the
# no-upper-limits case — narrowing the exception type would be safer.
try:
    y = np.core.defchararray.replace(y, "<", "", count=None)  # runs through array and removes strings
except:
    print('No Upper-Limits Found in %s.' % (args.triggername))
try:
    Dy = np.core.defchararray.replace(Dy, "n.a.", "0",
                                      count=None)  ## 0 error is nonphysical, and will be checked for in plotting
except:
    print('No 0-Error Found in %s.' % (args.triggername))
# bar: fractional arrow length used when drawing upper limits (DY == 0 rows).
bar = 0.5
color = "blue"
Y = np.empty(0, dtype=float)  # makes empty 1-D array for float values
for i in y:
    Y = np.append(Y, float(i))
DY = np.empty(0, dtype=float)
for i in Dy:
    DY = np.append(DY, float(i))
plt.clf()
# Points with a real (non-zero) error: draw as normal error bars.
if (DY > 0).sum() > 0:  # if sum() gives a non-zero value then there are error values
    plt.errorbar(median[DY > 0], Y[DY > 0],
                 xerr=[median[DY > 0] - start[DY > 0], stop[DY > 0] - median[DY > 0]],
                 yerr=DY[DY > 0], ls='None', marker='o', mfc=color, mec=color, ecolor=color, lw=2, label=None)
# Points with zero error are upper limits: draw a downward arrow instead.
if (DY == 0).sum() > 0:
    plt.errorbar(median[DY == 0], Y[DY == 0],
                 xerr=[median[DY == 0] - start[DY == 0], stop[DY == 0] - median[DY == 0]],
                 yerr=[bar * Y[DY == 0], 0.0 * Y[DY == 0]], lolims=True, ls='None', marker='', mfc=color, mec=color,
                 ecolor=color, lw=2, label=None)
plt.suptitle('%s photonFlux per Time' % (args.triggername))
plt.xlabel('Rest Frame Time(s)')
plt.ylabel('Photon Flux')
plt.xscale('symlog')
plt.yscale('log')
plt.grid(True)
# NOTE(review): SCALE is only assigned for 'crystalball2' and 'band'; any
# other args.function value raises NameError at the ylim computation below.
if args.function == 'crystalball2':
    SCALE = 0.001
elif args.function == 'band':
    SCALE = 1.0  # 0.1 # shouldn't need a scale anymore for Band function
ylo = 1e-7  # min(lower2*SCALE)*1e-1 # CANT GET THIS TO WORK YET DYNAMICALLY
yup = max(upper2 * SCALE) * 10
plt.ylim([ylo, yup])
# correcting for redshift t/(1+args.redshift)
# Shaded 1-sigma (blue) and 2-sigma (green) model bands from the sample stack.
plt.fill_between(t / (1 + float(args.redshift)), lower * SCALE, upper * SCALE, alpha=0.5, color='blue')
plt.fill_between(t / (1 + float(args.redshift)), lower2 * SCALE, upper2 * SCALE, alpha=0.3, color='green')
# y = map(x.decayFunction.getDifferentialFlux, t) # maps infinitesimal values of flux at time t to y
# raw_input("Press ENTER")
# PowerLaw
# plt.plot(t,,'o')
# saves plots
# Save the fitted light-curve figure, then histogram the 1/e-time samples.
wdir = '%s' % (args.directory)
imsave = os.path.join(wdir, '%s_objFit_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
# histograms of 1/e and save
print("Making histograms")
fig = plt.figure(figsize=(10, 6))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
bins = np.linspace(min(tePeak), np.max(tePeak), 100)
# Left panel: zoomed view (min to twice the median); right panel: full range.
ax0 = plt.subplot(gs[0])
# NOTE(review): hist(..., normed=True) was removed in matplotlib >= 3.1;
# modern versions need density=True — confirm the pinned matplotlib version.
ax0.hist(tePeak, bins, normed=True)
plt.title('1/e (min to medx2)')
plt.xlabel('1/e time (s)')
plt.xlim([min(tePeak), np.median(tePeak) * 2])
ax1 = plt.subplot(gs[1])
ax1.hist(tePeak, bins, normed=True)
plt.title('1/e (min to max)')
plt.xlabel('time (s)')
plt.tight_layout()
imsave = os.path.join(wdir, '%s_hist_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
print("Finished Potting/Saving!")
| bsd-3-clause |
kingvuplus/boom | lib/python/Components/Button.py | 2 | 1147 | from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from VariableText import VariableText
from enigma import eButton
class Button(VariableText, HTMLComponent, GUIComponent):
    """A push-button component backed by an enigma eButton widget.

    Holds a list of zero-argument callbacks (``onClick``) that are invoked,
    in order, whenever the button is pushed.
    """

    # Widget class instantiated by the GUI layer for this component.
    GUI_WIDGET = eButton

    def __init__(self, text='', onClick=None):
        GUIComponent.__init__(self)
        VariableText.__init__(self)
        self.setText(text)
        # A falsy argument (None, empty list) yields a fresh callback list.
        self.onClick = onClick if onClick else []

    def push(self):
        """Invoke every registered click callback; always returns 0."""
        for callback in self.onClick:
            callback()
        return 0

    def disable(self):
        pass

    def enable(self):
        pass

    def connectDownstream(self, downstream):
        pass

    def checkSuspend(self):
        pass

    def disconnectDownstream(self, downstream):
        pass

    def produceHTML(self):
        """Render the button as an HTML submit input."""
        return '<input type="submit" text="' + self.getText() + '">\n'

    def postWidgetCreate(self, instance):
        # Mirror the component text onto the widget and hook its click signal.
        instance.setText(self.text)
        instance.selected.get().append(self.push)

    def preWidgetRemove(self, instance):
        instance.selected.get().remove(self.push)
| gpl-2.0 |
trianam/tkLayoutTests | TestRouting/test21/conf/xml/longbarrel_cmsIdealGeometryXML_cff.py | 43 | 6122 | import FWCore.ParameterSet.Config as cms
from Geometry.CMSCommonData.cmsIdealGeometryXML_cfi import *
XMLIdealGeometryESSource.geomXMLFiles = cms.vstring(
'SLHCUpgradeSimulations/Geometry/data/longbarrel/materials.xml',
'Geometry/CMSCommonData/data/rotations.xml',
'Geometry/CMSCommonData/data/normal/cmsextent.xml',
'Geometry/CMSCommonData/data/cms.xml',
'Geometry/CMSCommonData/data/cmsMother.xml',
'Geometry/CMSCommonData/data/cmsTracker.xml',
'Geometry/CMSCommonData/data/caloBase.xml',
'Geometry/CMSCommonData/data/cmsCalo.xml',
'Geometry/CMSCommonData/data/muonBase.xml',
'Geometry/CMSCommonData/data/cmsMuon.xml',
'Geometry/CMSCommonData/data/mgnt.xml',
'Geometry/CMSCommonData/data/beampipe.xml',
'Geometry/CMSCommonData/data/cmsBeam.xml',
'Geometry/CMSCommonData/data/muonMB.xml',
'Geometry/CMSCommonData/data/muonMagnet.xml',
'Geometry/TrackerCommonData/data/pixfwdMaterials.xml',
'Geometry/TrackerCommonData/data/pixfwdCommon.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq1x2.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq1x5.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x3.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x4.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x5.xml',
'Geometry/TrackerCommonData/data/pixfwdPanelBase.xml',
'Geometry/TrackerCommonData/data/pixfwdPanel.xml',
'Geometry/TrackerCommonData/data/pixfwdBlade.xml',
'Geometry/TrackerCommonData/data/pixfwdNipple.xml',
'Geometry/TrackerCommonData/data/pixfwdDisk.xml',
'Geometry/TrackerCommonData/data/pixfwdCylinder.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/pixfwd.xml',
'Geometry/TrackerCommonData/data/pixbarmaterial.xml',
'Geometry/TrackerCommonData/data/pixbarladder.xml',
'Geometry/TrackerCommonData/data/pixbarladderfull.xml',
'Geometry/TrackerCommonData/data/pixbarladderhalf.xml',
'Geometry/TrackerCommonData/data/pixbarlayer.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/pixbarlayer0.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/pixbarlayer1.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/pixbarlayer2.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/pixbarlayer3.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/pixbar.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/newtracker.xml',
'Geometry/TrackerCommonData/data/trackermaterial.xml',
'Geometry/TrackerCommonData/data/tracker.xml',
'Geometry/TrackerCommonData/data/trackerpixbar.xml',
'Geometry/TrackerCommonData/data/trackerpixfwd.xml',
'Geometry/TrackerCommonData/data/trackerother.xml',
'Geometry/EcalCommonData/data/eregalgo.xml',
'Geometry/EcalCommonData/data/ebalgo.xml',
'Geometry/EcalCommonData/data/ebcon.xml',
'Geometry/EcalCommonData/data/ebrot.xml',
'Geometry/EcalCommonData/data/eecon.xml',
'Geometry/EcalCommonData/data/eefixed.xml',
'Geometry/EcalCommonData/data/eehier.xml',
'Geometry/EcalCommonData/data/eealgo.xml',
'Geometry/EcalCommonData/data/escon.xml',
'Geometry/EcalCommonData/data/esalgo.xml',
'Geometry/EcalCommonData/data/eeF.xml',
'Geometry/EcalCommonData/data/eeB.xml',
'Geometry/HcalCommonData/data/hcalrotations.xml',
'Geometry/HcalCommonData/data/hcalalgo.xml',
'Geometry/HcalCommonData/data/hcalbarrelalgo.xml',
'Geometry/HcalCommonData/data/hcalendcapalgo.xml',
'Geometry/HcalCommonData/data/hcalouteralgo.xml',
'Geometry/HcalCommonData/data/hcalforwardalgo.xml',
'Geometry/HcalCommonData/data/hcalforwardfibre.xml',
'Geometry/HcalCommonData/data/hcalforwardmaterial.xml',
'Geometry/MuonCommonData/data/mbCommon.xml',
'Geometry/MuonCommonData/data/mb1.xml',
'Geometry/MuonCommonData/data/mb2.xml',
'Geometry/MuonCommonData/data/mb3.xml',
'Geometry/MuonCommonData/data/mb4.xml',
'Geometry/MuonCommonData/data/muonYoke.xml',
'Geometry/MuonCommonData/data/mf.xml',
'Geometry/ForwardCommonData/data/forward.xml',
'Geometry/ForwardCommonData/data/forwardshield.xml',
'Geometry/ForwardCommonData/data/brmrotations.xml',
'Geometry/ForwardCommonData/data/brm.xml',
'Geometry/ForwardCommonData/data/totemMaterials.xml',
'Geometry/ForwardCommonData/data/totemRotations.xml',
'Geometry/ForwardCommonData/data/totemt1.xml',
'Geometry/ForwardCommonData/data/totemt2.xml',
'Geometry/ForwardCommonData/data/ionpump.xml',
'Geometry/MuonCommonData/data/muonNumbering.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/trackerStructureTopology.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/trackersens.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/trackerRecoMaterial.xml',
'Geometry/EcalSimData/data/ecalsens.xml',
'Geometry/HcalCommonData/data/hcalsens.xml',
'Geometry/HcalSimData/data/CaloUtil.xml',
'Geometry/MuonSimData/data/muonSens.xml',
'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml',
'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml',
'Geometry/CSCGeometryBuilder/data/cscSpecs.xml',
'Geometry/RPCGeometryBuilder/data/RPCSpecs.xml',
'Geometry/ForwardCommonData/data/brmsens.xml',
'Geometry/HcalSimData/data/HcalProdCuts.xml',
'Geometry/EcalSimData/data/EcalProdCuts.xml',
'SLHCUpgradeSimulations/Geometry/data/longbarrel/trackerProdCuts.xml',
'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml',
'Geometry/MuonSimData/data/muonProdCuts.xml',
'Geometry/CMSCommonData/data/FieldParameters.xml'
)
| gpl-2.0 |
horizontracy/rpi_tool | api/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcssm.py | 1783 | 19590 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
Big5CharLenTable = (0, 1, 1, 2, 0)
Big5SMModel = {'classTable': BIG5_cls,
'classFactor': 5,
'stateTable': BIG5_st,
'charLenTable': Big5CharLenTable,
'name': 'Big5'}
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949SMModel = {'classTable': CP949_cls,
'classFactor': 10,
'stateTable': CP949_st,
'charLenTable': CP949CharLenTable,
'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validing
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages does
#contain such byte. We need to be more error forgiven.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
| mit |
davidcoallier/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/qt.py | 61 | 13252 |
"""SCons.Tool.qt
Tool-specific initialization for Qt.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/qt.py 5134 2010/08/16 23:02:40 bdeegan"
import os.path
import re
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
class ToolQtWarning(SCons.Warnings.Warning):
    """Base class for all warnings emitted by the qt tool."""
    pass
class GeneratedMocFileNotIncluded(ToolQtWarning):
    """Warns when a generated moc file is not #included by its cpp source."""
    pass
class QtdirNotFound(ToolQtWarning):
    """Warns when the Qt installation directory could not be located."""
    pass
SCons.Warnings.enableWarningClass(ToolQtWarning)
# Header suffixes probed when looking for a Q_OBJECT declaration; '.H' is
# only meaningful on case-sensitive filesystems.
header_extensions = [".h", ".hxx", ".hpp", ".hh"]
if SCons.Util.case_sensitive_suffixes('.h', '.H'):
    header_extensions.append('.H')
# 'c++' is not a valid identifier, hence __import__ instead of an import stmt.
cplusplus = __import__('c++', globals(), locals(), [])
cxx_suffixes = cplusplus.CXXSuffixes
def checkMocIncluded(target, source, env):
    """Emit GeneratedMocFileNotIncluded when *source*'s cpp does not include
    the generated moc file (first node of *target*)."""
    moc_node = target[0]
    cpp_node = source[0]
    # cpp_node.includes is cleared before the build stage, so rescan here.
    # (Not fully sure about the path transformations: moc.cwd vs cpp.cwd.)
    scan_path = SCons.Defaults.CScan.path(env, moc_node.cwd)
    scanned_includes = SCons.Defaults.CScan(cpp_node, env, scan_path)
    if moc_node not in scanned_includes:
        SCons.Warnings.warn(
            GeneratedMocFileNotIncluded,
            "Generated moc file '%s' is not included by '%s'" %
            (str(moc_node), str(cpp_node)))
def find_file(filename, paths, node_factory):
    """Return the first existing node named *filename* under *paths*.

    For each directory in *paths* a node is built via
    ``node_factory(filename, directory)``; the first node whose
    ``rexists()`` is true is returned, or ``None`` when the file exists
    in none of the directories.
    """
    # Loop variable renamed from 'dir', which shadowed the builtin.
    for directory in paths:
        node = node_factory(filename, directory)
        if node.rexists():
            return node
    return None
class _Automoc(object):
    """
    Callable class, which works as an emitter for Programs, SharedLibraries and
    StaticLibraries.

    Scans each cxx source (and its same-directory header) for the Q_OBJECT
    macro and, where found, wires up Moc builders so the generated files are
    compiled/included as Qt requires.
    """
    def __init__(self, objBuilderName):
        # Name of the object builder ('SharedObject' or 'StaticObject') to
        # fetch from the construction environment at emit time.
        self.objBuilderName = objBuilderName
    def __call__(self, target, source, env):
        """
        Smart autoscan function. Gets the list of objects for the Program
        or Lib. Adds objects and builders for the special qt files.
        """
        # $QT_AUTOSCAN == 0 disables the scan entirely; a non-integer value
        # (ValueError) falls through and scanning proceeds.
        try:
            if int(env.subst('$QT_AUTOSCAN')) == 0:
                return target, source
        except ValueError:
            pass
        try:
            debug = int(env.subst('$QT_DEBUG'))
        except ValueError:
            debug = 0
        # some shortcuts used in the scanner
        splitext = SCons.Util.splitext
        objBuilder = getattr(env, self.objBuilderName)
        # some regular expressions:
        # Q_OBJECT detection (guarded by non-identifier chars on both sides)
        q_object_search = re.compile(r'[^A-Za-z0-9]Q_OBJECT[^A-Za-z0-9]')
        # cxx and c comment 'eater'
        #comment = re.compile(r'(//.*)|(/\*(([^*])|(\*[^/]))*\*/)')
        # CW: something must be wrong with the regexp. See also bug #998222
        # CURRENTLY THERE IS NO TEST CASE FOR THAT
        # The following is kind of hacky to get builders working properly (FIXME)
        # NOTE(review): the builder envs are swapped here and restored at the
        # bottom of this method — keep that pairing intact.
        objBuilderEnv = objBuilder.env
        objBuilder.env = env
        mocBuilderEnv = env.Moc.env
        env.Moc.env = env
        # make a deep copy for the result; MocH objects will be appended
        out_sources = source[:]
        for obj in source:
            if not obj.has_builder():
                # binary obj file provided
                if debug:
                    print "scons: qt: '%s' seems to be a binary. Discarded." % str(obj)
                continue
            cpp = obj.sources[0]
            if not splitext(str(cpp))[1] in cxx_suffixes:
                if debug:
                    print "scons: qt: '%s' is no cxx file. Discarded." % str(cpp)
                # c or fortran source
                continue
            #cpp_contents = comment.sub('', cpp.get_text_contents())
            cpp_contents = cpp.get_text_contents()
            h=None
            for h_ext in header_extensions:
                # try to find the header file in the corresponding source
                # directory
                hname = splitext(cpp.name)[0] + h_ext
                h = find_file(hname, (cpp.get_dir(),), env.File)
                if h:
                    if debug:
                        print "scons: qt: Scanning '%s' (header of '%s')" % (str(h), str(cpp))
                    #h_contents = comment.sub('', h.get_text_contents())
                    h_contents = h.get_text_contents()
                    break
            if not h and debug:
                print "scons: qt: no header for '%s'." % (str(cpp))
            if h and q_object_search.search(h_contents):
                # h file with the Q_OBJECT macro found -> add moc_cpp
                # (moc output of the header is compiled and linked in)
                moc_cpp = env.Moc(h)
                moc_o = objBuilder(moc_cpp)
                out_sources.append(moc_o)
                #moc_cpp.target_scanner = SCons.Defaults.CScan
                if debug:
                    print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(moc_cpp))
            if cpp and q_object_search.search(cpp_contents):
                # cpp file with Q_OBJECT macro found -> add moc
                # (to be included in cpp, hence Ignore: not compiled separately)
                moc = env.Moc(cpp)
                env.Ignore(moc, moc)
                if debug:
                    print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc))
                #moc.source_scanner = SCons.Defaults.CScan
        # restore the original env attributes (FIXME)
        objBuilder.env = objBuilderEnv
        env.Moc.env = mocBuilderEnv
        return (target, out_sources)
# Pre-built automoc emitters: one scanning sources destined for shared
# objects, one for static objects.  Attached below in generate() to the
# Program / Library emitters so every build gets the Q_OBJECT scan.
AutomocShared = _Automoc('SharedObject')
AutomocStatic = _Automoc('StaticObject')
def _detect(env):
    """Not really safe, but fast method to detect the QT library.

    Looks for QTDIR first in the construction environment, then in the
    process environment, and finally guesses it from the location of the
    ``moc`` executable.  Returns the detected directory or None.
    """
    qtdir = env.get('QTDIR', None) or os.environ.get('QTDIR', None)
    if qtdir:
        return qtdir
    moc = env.WhereIs('moc')
    if moc:
        # moc normally lives in $QTDIR/bin, so go up two levels.
        qtdir = os.path.dirname(os.path.dirname(moc))
        SCons.Warnings.warn(
            QtdirNotFound,
            "Could not detect qt, using moc executable as a hint (QTDIR=%s)" % qtdir)
        return qtdir
    SCons.Warnings.warn(
        QtdirNotFound,
        "Could not detect qt, using empty QTDIR")
    return None
def uicEmitter(target, source, env):
    """Emitter for the Uic builder: add the generated implementation
    file and the moc file as extra targets of a .ui source."""
    adjustixes = SCons.Util.adjustixes
    # Base name of the .ui file, placed in the target directory.
    base = SCons.Util.splitext(str(source[0].name))[0]
    base = os.path.join(str(target[0].get_dir()), base)
    # first target (header) is automatically added by builder
    if len(target) < 2:
        # second target is implementation
        target.append(adjustixes(base,
                                 env.subst('$QT_UICIMPLPREFIX'),
                                 env.subst('$QT_UICIMPLSUFFIX')))
    if len(target) < 3:
        # third target is moc file
        target.append(adjustixes(base,
                                 env.subst('$QT_MOCHPREFIX'),
                                 env.subst('$QT_MOCHSUFFIX')))
    return target, source
def uicScannerFunc(node, env, path):
    """Scanner for .ui files: report files referenced via <include> tags
    that can be located on CPPPATH or next to the .ui file itself."""
    search_dirs = []
    search_dirs.extend(env['CPPPATH'])
    search_dirs.append(str(node.rfile().dir))
    includes = re.findall("<include.*?>(.*?)</include>", node.get_text_contents())
    found = [env.FindFile(inc, search_dirs) for inc in includes]
    # FindFile returns None for unresolved includes; drop those.
    return [dep for dep in found if dep]
# Source scanner for the Uic builder: finds <include> dependencies in .ui
# files so SCons rebuilds when an included file changes.
uicScanner = SCons.Scanner.Base(uicScannerFunc,
                                name = "UicScanner", 
                                node_class = SCons.Node.FS.File,
                                node_factory = SCons.Node.FS.File,
                                recursive = 0)
def generate(env):
    """Add Builders and construction variables for qt to an Environment."""
    CLVar = SCons.Util.CLVar
    Action = SCons.Action.Action
    Builder = SCons.Builder.Builder

    # SetDefault only fills in variables the user has not already set,
    # so a caller-provided QTDIR / QT_LIB etc. wins over detection.
    env.SetDefault(QTDIR  = _detect(env),
                   QT_BINPATH = os.path.join('$QTDIR', 'bin'),
                   QT_CPPPATH = os.path.join('$QTDIR', 'include'),
                   QT_LIBPATH = os.path.join('$QTDIR', 'lib'),
                   QT_MOC = os.path.join('$QT_BINPATH','moc'),
                   QT_UIC = os.path.join('$QT_BINPATH','uic'),
                   QT_LIB = 'qt', # may be set to qt-mt
                   QT_AUTOSCAN = 1, # scan for moc'able sources

                   # Some QT specific flags. I don't expect someone wants to
                   # manipulate those ...
                   QT_UICIMPLFLAGS = CLVar(''),
                   QT_UICDECLFLAGS = CLVar(''),
                   QT_MOCFROMHFLAGS = CLVar(''),
                   QT_MOCFROMCXXFLAGS = CLVar('-i'),

                   # suffixes/prefixes for the headers / sources to generate
                   QT_UICDECLPREFIX = '',
                   QT_UICDECLSUFFIX = '.h',
                   QT_UICIMPLPREFIX = 'uic_',
                   QT_UICIMPLSUFFIX = '$CXXFILESUFFIX',
                   QT_MOCHPREFIX = 'moc_',
                   QT_MOCHSUFFIX = '$CXXFILESUFFIX',
                   QT_MOCCXXPREFIX = '',
                   QT_MOCCXXSUFFIX = '.moc',
                   QT_UISUFFIX = '.ui',

                   # Commands for the qt support ...
                   # command to generate header, implementation and moc-file
                   # from a .ui file
                   QT_UICCOM = [
                    CLVar('$QT_UIC $QT_UICDECLFLAGS -o ${TARGETS[0]} $SOURCE'),
                    CLVar('$QT_UIC $QT_UICIMPLFLAGS -impl ${TARGETS[0].file} '
                          '-o ${TARGETS[1]} $SOURCE'),
                    CLVar('$QT_MOC $QT_MOCFROMHFLAGS -o ${TARGETS[2]} ${TARGETS[0]}')],
                   # command to generate meta object information for a class
                   # declarated in a header
                   QT_MOCFROMHCOM = (
                          '$QT_MOC $QT_MOCFROMHFLAGS -o ${TARGETS[0]} $SOURCE'),
                   # command to generate meta object information for a class
                   # declarated in a cpp file
                   QT_MOCFROMCXXCOM = [
                    CLVar('$QT_MOC $QT_MOCFROMCXXFLAGS -o ${TARGETS[0]} $SOURCE'),
                    Action(checkMocIncluded,None)])

    # ... and the corresponding builders
    uicBld = Builder(action=SCons.Action.Action('$QT_UICCOM', '$QT_UICCOMSTR'),
                     emitter=uicEmitter,
                     src_suffix='$QT_UISUFFIX',
                     suffix='$QT_UICDECLSUFFIX',
                     prefix='$QT_UICDECLPREFIX',
                     source_scanner=uicScanner)
    # The moc builder chooses its action, prefix and suffix per source
    # suffix: headers get moc_<name>.cc, cpp sources get <name>.moc.
    mocBld = Builder(action={}, prefix={}, suffix={})
    for h in header_extensions:
        act = SCons.Action.Action('$QT_MOCFROMHCOM', '$QT_MOCFROMHCOMSTR')
        mocBld.add_action(h, act)
        mocBld.prefix[h] = '$QT_MOCHPREFIX'
        mocBld.suffix[h] = '$QT_MOCHSUFFIX'
    for cxx in cxx_suffixes:
        act = SCons.Action.Action('$QT_MOCFROMCXXCOM', '$QT_MOCFROMCXXCOMSTR')
        mocBld.add_action(cxx, act)
        mocBld.prefix[cxx] = '$QT_MOCCXXPREFIX'
        mocBld.suffix[cxx] = '$QT_MOCCXXSUFFIX'

    # register the builders 
    env['BUILDERS']['Uic'] = uicBld
    env['BUILDERS']['Moc'] = mocBld
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    static_obj.add_src_builder('Uic')
    shared_obj.add_src_builder('Uic')

    # We use the emitters of Program / StaticLibrary / SharedLibrary
    # to scan for moc'able files
    # We can't refer to the builders directly, we have to fetch them
    # as Environment attributes because that sets them up to be called
    # correctly later by our emitter.
    env.AppendUnique(PROGEMITTER =[AutomocStatic],
                     SHLIBEMITTER=[AutomocShared],
                     LIBEMITTER  =[AutomocStatic],
                     # Of course, we need to link against the qt libraries
                     CPPPATH=["$QT_CPPPATH"],
                     LIBPATH=["$QT_LIBPATH"],
                     LIBS=['$QT_LIB'])
def exists(env):
    """Tool availability hook: true when a QTDIR can be detected."""
    return _detect(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
blighj/django | django/conf/locale/hr/formats.py | 65 | 2039 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax) for the Croatian locale.
DATE_FORMAT = 'j. E Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. E Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d',                     # '2006-10-25'
    '%d.%m.%Y.', '%d.%m.%y.',       # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.',   # '25. 10. 2006.', '25. 10. 06.'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',        # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',     # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',           # '2006-10-25 14:30'
    '%Y-%m-%d',                 # '2006-10-25'
    '%d.%m.%Y. %H:%M:%S',       # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M:%S.%f',    # '25.10.2006. 14:30:59.000200'
    '%d.%m.%Y. %H:%M',          # '25.10.2006. 14:30'
    '%d.%m.%Y.',                # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',       # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M:%S.%f',    # '25.10.06. 14:30:59.000200'
    '%d.%m.%y. %H:%M',          # '25.10.06. 14:30'
    '%d.%m.%y.',                # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S',     # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M:%S.%f',  # '25. 10. 2006. 14:30:59.000200'
    '%d. %m. %Y. %H:%M',        # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',              # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S',     # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M:%S.%f',  # '25. 10. 06. 14:30:59.000200'
    '%d. %m. %y. %H:%M',        # '25. 10. 06. 14:30'
    '%d. %m. %y.',              # '25. 10. 06.'
]
# Number formatting: '1.234,56' style.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
orchidinfosys/odoo | addons/crm/wizard/crm_merge_opportunities.py | 47 | 3563 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_merge_opportunity(osv.osv_memory):
    """
    Merge opportunities together.
    If we're talking about opportunities, it's just because it makes more sense
    to merge opps than leads, because the leads are more ephemeral objects.
    But since opportunities are leads, it's also possible to merge leads
    together (resulting in a new lead), or leads and opps together (resulting
    in a new opp).
    """
    _name = 'crm.merge.opportunity'
    _description = 'Merge opportunities'
    _columns = {
        'opportunity_ids': fields.many2many('crm.lead', rel='merge_opportunity_rel', id1='merge_id', id2='opportunity_id', string='Leads/Opportunities'),
        'user_id': fields.many2one('res.users', 'Salesperson', select=True),
        'team_id': fields.many2one('crm.team', 'Sales Team', oldname='section_id', select=True),
    }
    def action_merge(self, cr, uid, ids, context=None):
        """Merge the selected leads/opps into one record and open it.

        Returns an act_window dict pointing at the merged record, using
        the opportunity or lead form view depending on the result type.
        """
        context = dict(context or {})
        lead_obj = self.pool.get('crm.lead')
        wizard = self.browse(cr, uid, ids[0], context=context)
        opportunity2merge_ids = wizard.opportunity_ids
        #TODO: why is this passed through the context ?
        context['lead_ids'] = [opportunity2merge_ids[0].id]
        merge_id = lead_obj.merge_opportunity(cr, uid, [x.id for x in opportunity2merge_ids], wizard.user_id.id, wizard.team_id.id, context=context)
        # The newly created lead might be a lead or an opp: redirect toward the right view
        merge_result = lead_obj.browse(cr, uid, merge_id, context=context)
        if merge_result.type == 'opportunity':
            return lead_obj.redirect_opportunity_view(cr, uid, merge_id, context=context)
        else:
            return lead_obj.redirect_lead_view(cr, uid, merge_id, context=context)
    def default_get(self, cr, uid, fields, context=None):
        """
        Use active_ids from the context to fetch the leads/opps to merge.
        In order to get merged, these leads/opps can't be in 'Dead' or 'Closed'
        """
        if context is None:
            context = {}
        record_ids = context.get('active_ids', False)
        res = super(crm_merge_opportunity, self).default_get(cr, uid, fields, context=context)
        if record_ids:
            opp_ids = []
            opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)
            for opp in opps:
                # probability == 100 means Won/Closed: excluded from merging.
                if opp.probability < 100:
                    opp_ids.append(opp.id)
            if 'opportunity_ids' in fields:
                res.update({'opportunity_ids': opp_ids})
        return res
    def on_change_user(self, cr, uid, ids, user_id, team_id, context=None):
        """ When changing the user, also set a team_id or restrict team id
            to the ones user_id is member of. """
        if user_id:
            if team_id:
                # Keep team_id only if the chosen user leads or belongs to it.
                user_in_team = self.pool.get('crm.team').search(cr, uid, [('id', '=', team_id), '|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context, count=True)
            else:
                user_in_team = False
            if not user_in_team:
                team_id = False
                team_ids = self.pool.get('crm.team').search(cr, uid, ['|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context)
                if team_ids:
                    team_id = team_ids[0]
        return {'value': {'team_id': team_id}}
| gpl-3.0 |
alingse/jsoncsv | jsoncsv/dumptool.py | 1 | 3539 | # coding=utf-8
# author@alingse
# 2015.10.09
import json
import unicodecsv as csv
import xlwt
class Dump(object):
    """Base class for dumping a stream of JSON lines to another format.

    Subclasses override the template hooks ``initialize``, ``prepare``,
    ``dump_file`` and ``on_finish``; :meth:`dump` runs them in order.
    """
    def __init__(self, fin, fout, **kwargs):
        # fin: iterable of JSON lines; fout: writable output file object.
        self.fin = fin
        self.fout = fout
        self.initialize(**kwargs)
    def initialize(self, **kwargs):
        """Hook for subclasses to consume extra keyword options."""
        pass
    def prepare(self):
        """Hook run before any output is written."""
        pass
    def dump_file(self):
        """Write the whole output; must be overridden by subclasses.

        Fixed signature: this was declared as ``dump_file(self, obj)``,
        but dump() always calls it with no argument and every subclass
        overrides it without ``obj``.
        """
        raise NotImplementedError
    def on_finish(self):
        """Hook run after dump_file completes (e.g. to save/close)."""
        pass
    def dump(self):
        self.prepare()
        self.dump_file()
        self.on_finish()
class ReadHeadersMixin(object):
    """Mixin that pre-reads JSON lines to collect the union of keys."""
    @staticmethod
    def load_headers(fin, read_row=None, sort_type=None):
        """Read up to *read_row* JSON lines from *fin*.

        Returns ``(headers, datas)`` where *headers* is the sorted union
        of keys seen and *datas* holds the parsed objects.  A falsy or
        negative *read_row* means "read everything".
        """
        keys = set()
        parsed = []
        # Normalise the row budget: anything < 1 becomes "unlimited".
        remaining = read_row if read_row and read_row >= 1 else -1
        for line in fin:
            obj = json.loads(line)
            keys.update(obj.keys())
            parsed.append(obj)
            remaining -= 1
            if remaining == 0:
                break
        # TODO: apply sort_type here once sorting strategies are defined
        return (sorted(keys), parsed)
class DumpExcel(Dump, ReadHeadersMixin):
    """Tabular dumper: detects headers up front, then writes rows."""
    def initialize(self, **kwargs):
        super(DumpExcel, self).initialize(**kwargs)
        # how many rows to pre-read for header detection (None = all)
        self._read_row = kwargs.get('read_row')
        self._sort_type = kwargs.get('sort_type')
    def prepare(self):
        self._headers, self._datas = self.load_headers(
            self.fin, self._read_row, self._sort_type)
    def write_headers(self):
        """Write the header row; must be overridden."""
        raise NotImplementedError
    def write_obj(self):
        """Write one data row; must be overridden."""
        raise NotImplementedError
    def dump_file(self):
        self.write_headers()
        # First the rows buffered during header detection ...
        for obj in self._datas:
            self.write_obj(obj)
        # ... then the rest of the stream, parsed lazily.
        for line in self.fin:
            self.write_obj(json.loads(line))
class DumpCSV(DumpExcel):
    """Write the objects out as CSV, one column per detected header."""
    def initialize(self, **kwargs):
        super(DumpCSV, self).initialize(**kwargs)
        # created lazily in write_headers, once the headers are known
        self.csv_writer = None
    def write_headers(self):
        self.csv_writer = csv.DictWriter(self.fout, self._headers)
        self.csv_writer.writeheader()
    def write_obj(self, obj):
        row = dict(
            (key, self.patch_value(value))
            for key, value in obj.items()
        )
        self.csv_writer.writerow(row)
    def patch_value(self, value):
        """Render None and empty containers as an empty CSV cell."""
        if value in (None, {}, []):
            return ""
        return value
class DumpXLS(DumpExcel):
    """Write the objects out as an Excel (.xls) workbook via xlwt."""
    def initialize(self, **kwargs):
        super(DumpXLS, self).initialize(**kwargs)
        self.sheet = kwargs.get('sheet', 'Sheet1')
        self.wb = xlwt.Workbook(encoding='utf-8')
        self.ws = self.wb.add_sheet(self.sheet)
        # cursor: current cell position in the sheet
        self.row = 0
        self.cloumn = 0  # (sic) name kept for compatibility
    def write_headers(self):
        for col, head in enumerate(self._headers):
            self.ws.write(self.row, col, head)
        self.cloumn = len(self._headers)
        self.row += 1
    def write_obj(self, obj):
        self.cloumn = 0
        for head in self._headers:
            value = obj.get(head)
            # empty dicts would otherwise come out as blank cells
            if value == {}:
                value = "{}"
            self.ws.write(self.row, self.cloumn, value)
            self.cloumn += 1
        self.row += 1
    def on_finish(self):
        self.wb.save(self.fout)
def dump_excel(fin, fout, klass, **kwargs):
    """Instantiate *klass* (a DumpExcel subclass) and run the dump.

    Raises ValueError when *klass* is not a DumpExcel subclass.
    """
    is_dump_class = isinstance(klass, type) and issubclass(klass, DumpExcel)
    if not is_dump_class:
        raise ValueError("unknow dumpexcel type")
    klass(fin, fout, **kwargs).dump()
| apache-2.0 |
shaftoe/home-assistant | tests/components/light/test_mqtt_template.py | 8 | 20067 | """The tests for the MQTT Template light platform.
Configuration example with all features:
light:
platform: mqtt_template
name: mqtt_template_light_1
state_topic: 'home/rgb1'
command_topic: 'home/rgb1/set'
command_on_template: >
on,{{ brightness|d }},{{ red|d }}-{{ green|d }}-{{ blue|d }}
command_off_template: 'off'
state_template: '{{ value.split(",")[0] }}'
brightness_template: '{{ value.split(",")[1] }}'
color_temp_template: '{{ value.split(",")[2] }}'
white_value_template: '{{ value.split(",")[3] }}'
red_template: '{{ value.split(",")[4].split("-")[0] }}'
green_template: '{{ value.split(",")[4].split("-")[1] }}'
blue_template: '{{ value.split(",")[4].split("-")[2] }}'
If your light doesn't support brightness feature, omit `brightness_template`.
If your light doesn't support color temp feature, omit `color_temp_template`.
If your light doesn't support white value feature, omit `white_value_template`.
If your light doesn't support RGB feature, omit `(red|green|blue)_template`.
"""
import unittest
from homeassistant.setup import setup_component
from homeassistant.const import STATE_ON, STATE_OFF, ATTR_ASSUMED_STATE
import homeassistant.components.light as light
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message,
assert_setup_component)
class TestLightMQTTTemplate(unittest.TestCase):
    """Test the MQTT Template light."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.mock_publish = mock_mqtt_component(self.hass)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def test_setup_fails(self): \
            # pylint: disable=invalid-name
        """Test that setup fails with missing required configuration items."""
        # command_topic and the command templates are required; with only a
        # name the platform must refuse to create an entity.
        with assert_setup_component(0):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt_template',
                    'name': 'test',
                }
            })
        self.assertIsNone(self.hass.states.get('light.test'))
    def test_state_change_via_topic(self): \
            # pylint: disable=invalid-name
        """Test state change via topic."""
        with assert_setup_component(1):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt_template',
                    'name': 'test',
                    'state_topic': 'test_light_rgb',
                    'command_topic': 'test_light_rgb/set',
                    'command_on_template': 'on,'
                                           '{{ brightness|d }},'
                                           '{{ color_temp|d }},'
                                           '{{ white_value|d }},'
                                           '{{ red|d }}-'
                                           '{{ green|d }}-'
                                           '{{ blue|d }}',
                    'command_off_template': 'off',
                    'state_template': '{{ value.split(",")[0] }}'
                }
            })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('color_temp'))
        self.assertIsNone(state.attributes.get('white_value'))
        self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on')
        self.hass.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('color_temp'))
        self.assertIsNone(state.attributes.get('white_value'))
    def test_state_brightness_color_effect_temp_white_change_via_topic(self): \
            # pylint: disable=invalid-name
        """Test state, bri, color, effect, color temp, white val change."""
        with assert_setup_component(1):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt_template',
                    'name': 'test',
                    'effect_list': ['rainbow', 'colorloop'],
                    'state_topic': 'test_light_rgb',
                    'command_topic': 'test_light_rgb/set',
                    'command_on_template': 'on,'
                                           '{{ brightness|d }},'
                                           '{{ color_temp|d }},'
                                           '{{ white_value|d }},'
                                           '{{ red|d }}-'
                                           '{{ green|d }}-'
                                           '{{ blue|d }},'
                                           '{{ effect|d }}',
                    'command_off_template': 'off',
                    'state_template': '{{ value.split(",")[0] }}',
                    'brightness_template': '{{ value.split(",")[1] }}',
                    'color_temp_template': '{{ value.split(",")[2] }}',
                    'white_value_template': '{{ value.split(",")[3] }}',
                    'red_template': '{{ value.split(",")[4].'
                                    'split("-")[0] }}',
                    'green_template': '{{ value.split(",")[4].'
                                      'split("-")[1] }}',
                    'blue_template': '{{ value.split(",")[4].'
                                     'split("-")[2] }}',
                    'effect_template': '{{ value.split(",")[5] }}'
                }
            })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('effect'))
        self.assertIsNone(state.attributes.get('color_temp'))
        self.assertIsNone(state.attributes.get('white_value'))
        self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
        # turn on the light, full white
        fire_mqtt_message(self.hass, 'test_light_rgb',
                          'on,255,145,123,255-255-255,')
        self.hass.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual([255, 255, 255], state.attributes.get('rgb_color'))
        self.assertEqual(255, state.attributes.get('brightness'))
        self.assertEqual(145, state.attributes.get('color_temp'))
        self.assertEqual(123, state.attributes.get('white_value'))
        self.assertIsNone(state.attributes.get('effect'))
        # turn the light off
        fire_mqtt_message(self.hass, 'test_light_rgb', 'off')
        self.hass.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        # lower the brightness
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,100')
        self.hass.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual(100, light_state.attributes['brightness'])
        # change the color temp
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,,195')
        self.hass.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual(195, light_state.attributes['color_temp'])
        # change the color
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,,,,41-42-43')
        self.hass.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.assertEqual([41, 42, 43], light_state.attributes.get('rgb_color'))
        # change the white value
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,,,134')
        self.hass.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual(134, light_state.attributes['white_value'])
        # change the effect
        fire_mqtt_message(self.hass, 'test_light_rgb',
                          'on,,,,41-42-43,rainbow')
        self.hass.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.assertEqual('rainbow', light_state.attributes.get('effect'))
    def test_optimistic(self): \
            # pylint: disable=invalid-name
        """Test optimistic mode."""
        # No state_topic configured, so the platform assumes state after
        # each command instead of waiting for device feedback.
        with assert_setup_component(1):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt_template',
                    'name': 'test',
                    'command_topic': 'test_light_rgb/set',
                    'command_on_template': 'on,'
                                           '{{ brightness|d }},'
                                           '{{ color_temp|d }},'
                                           '{{ white_value|d }},'
                                           '{{ red|d }}-'
                                           '{{ green|d }}-'
                                           '{{ blue|d }}',
                    'command_off_template': 'off',
                    'qos': 2
                }
            })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertTrue(state.attributes.get(ATTR_ASSUMED_STATE))
        # turn on the light
        light.turn_on(self.hass, 'light.test')
        self.hass.block_till_done()
        self.assertEqual(('test_light_rgb/set', 'on,,,,--', 2, False),
                         self.mock_publish.mock_calls[-2][1])
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        # turn the light off
        light.turn_off(self.hass, 'light.test')
        self.hass.block_till_done()
        self.assertEqual(('test_light_rgb/set', 'off', 2, False),
                         self.mock_publish.mock_calls[-2][1])
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        # turn on the light with brightness, color, color temp and white val
        light.turn_on(self.hass, 'light.test', brightness=50,
                      rgb_color=[75, 75, 75], color_temp=200, white_value=139)
        self.hass.block_till_done()
        self.assertEqual('test_light_rgb/set',
                         self.mock_publish.mock_calls[-2][1][0])
        self.assertEqual(2, self.mock_publish.mock_calls[-2][1][2])
        self.assertEqual(False, self.mock_publish.mock_calls[-2][1][3])
        # check the payload
        payload = self.mock_publish.mock_calls[-2][1][1]
        self.assertEqual('on,50,200,139,75-75-75', payload)
        # check the state
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual((75, 75, 75), state.attributes['rgb_color'])
        self.assertEqual(50, state.attributes['brightness'])
        self.assertEqual(200, state.attributes['color_temp'])
        self.assertEqual(139, state.attributes['white_value'])
    def test_flash(self): \
            # pylint: disable=invalid-name
        """Test flash."""
        with assert_setup_component(1):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt_template',
                    'name': 'test',
                    'command_topic': 'test_light_rgb/set',
                    'command_on_template': 'on,{{ flash }}',
                    'command_off_template': 'off',
                    'qos': 0
                }
            })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        # short flash
        light.turn_on(self.hass, 'light.test', flash='short')
        self.hass.block_till_done()
        self.assertEqual('test_light_rgb/set',
                         self.mock_publish.mock_calls[-2][1][0])
        self.assertEqual(0, self.mock_publish.mock_calls[-2][1][2])
        self.assertEqual(False, self.mock_publish.mock_calls[-2][1][3])
        # check the payload
        payload = self.mock_publish.mock_calls[-2][1][1]
        self.assertEqual('on,short', payload)
        # long flash
        light.turn_on(self.hass, 'light.test', flash='long')
        self.hass.block_till_done()
        self.assertEqual('test_light_rgb/set',
                         self.mock_publish.mock_calls[-2][1][0])
        self.assertEqual(0, self.mock_publish.mock_calls[-2][1][2])
        self.assertEqual(False, self.mock_publish.mock_calls[-2][1][3])
        # check the payload
        payload = self.mock_publish.mock_calls[-2][1][1]
        self.assertEqual('on,long', payload)
    def test_transition(self):
        """Test for transition time being sent when included."""
        with assert_setup_component(1):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt_template',
                    'name': 'test',
                    'command_topic': 'test_light_rgb/set',
                    'command_on_template': 'on,{{ transition }}',
                    'command_off_template': 'off,{{ transition|d }}'
                }
            })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        # transition on
        light.turn_on(self.hass, 'light.test', transition=10)
        self.hass.block_till_done()
        self.assertEqual('test_light_rgb/set',
                         self.mock_publish.mock_calls[-2][1][0])
        self.assertEqual(0, self.mock_publish.mock_calls[-2][1][2])
        self.assertEqual(False, self.mock_publish.mock_calls[-2][1][3])
        # check the payload
        payload = self.mock_publish.mock_calls[-2][1][1]
        self.assertEqual('on,10', payload)
        # transition off
        light.turn_off(self.hass, 'light.test', transition=4)
        self.hass.block_till_done()
        self.assertEqual('test_light_rgb/set',
                         self.mock_publish.mock_calls[-2][1][0])
        self.assertEqual(0, self.mock_publish.mock_calls[-2][1][2])
        self.assertEqual(False, self.mock_publish.mock_calls[-2][1][3])
        # check the payload
        payload = self.mock_publish.mock_calls[-2][1][1]
        self.assertEqual('off,4', payload)
    def test_invalid_values(self): \
            # pylint: disable=invalid-name
        """Test that invalid values are ignored."""
        # Each malformed payload below must leave the previously-known
        # attribute value untouched rather than crash or reset it.
        with assert_setup_component(1):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt_template',
                    'name': 'test',
                    'effect_list': ['rainbow', 'colorloop'],
                    'state_topic': 'test_light_rgb',
                    'command_topic': 'test_light_rgb/set',
                    'command_on_template': 'on,'
                                           '{{ brightness|d }},'
                                           '{{ color_temp|d }},'
                                           '{{ red|d }}-'
                                           '{{ green|d }}-'
                                           '{{ blue|d }},'
                                           '{{ effect|d }}',
                    'command_off_template': 'off',
                    'state_template': '{{ value.split(",")[0] }}',
                    'brightness_template': '{{ value.split(",")[1] }}',
                    'color_temp_template': '{{ value.split(",")[2] }}',
                    'white_value_template': '{{ value.split(",")[3] }}',
                    'red_template': '{{ value.split(",")[4].'
                                    'split("-")[0] }}',
                    'green_template': '{{ value.split(",")[4].'
                                      'split("-")[1] }}',
                    'blue_template': '{{ value.split(",")[4].'
                                     'split("-")[2] }}',
                    'effect_template': '{{ value.split(",")[5] }}',
                }
            })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('color_temp'))
        self.assertIsNone(state.attributes.get('effect'))
        self.assertIsNone(state.attributes.get('white_value'))
        self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
        # turn on the light, full white
        fire_mqtt_message(self.hass, 'test_light_rgb',
                          'on,255,215,222,255-255-255,rainbow')
        self.hass.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(255, state.attributes.get('brightness'))
        self.assertEqual(215, state.attributes.get('color_temp'))
        self.assertEqual([255, 255, 255], state.attributes.get('rgb_color'))
        self.assertEqual(222, state.attributes.get('white_value'))
        self.assertEqual('rainbow', state.attributes.get('effect'))
        # bad state value
        fire_mqtt_message(self.hass, 'test_light_rgb', 'offf')
        self.hass.block_till_done()
        # state should not have changed
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        # bad brightness values
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,off,255-255-255')
        self.hass.block_till_done()
        # brightness should not have changed
        state = self.hass.states.get('light.test')
        self.assertEqual(255, state.attributes.get('brightness'))
        # bad color temp values
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,,off,255-255-255')
        self.hass.block_till_done()
        # color temp should not have changed
        state = self.hass.states.get('light.test')
        self.assertEqual(215, state.attributes.get('color_temp'))
        # bad color values
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,255,a-b-c')
        self.hass.block_till_done()
        # color should not have changed
        state = self.hass.states.get('light.test')
        self.assertEqual([255, 255, 255], state.attributes.get('rgb_color'))
        # bad white value values
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,,,off,255-255-255')
        self.hass.block_till_done()
        # white value should not have changed
        state = self.hass.states.get('light.test')
        self.assertEqual(222, state.attributes.get('white_value'))
        # bad effect value
        fire_mqtt_message(self.hass, 'test_light_rgb', 'on,255,a-b-c,white')
        self.hass.block_till_done()
        # effect should not have changed
        state = self.hass.states.get('light.test')
        self.assertEqual('rainbow', state.attributes.get('effect'))
| apache-2.0 |
adamtheturtle/flocker | flocker/route/functional/test_iptables_create.py | 15 | 17993 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for :py:mod:`flocker.route._iptables`.
"""
from __future__ import print_function
from time import sleep
from errno import ECONNREFUSED
from os import getuid, getpid
from socket import error, socket
from unittest import skipUnless
from subprocess import check_call
from ipaddr import IPAddress, IPNetwork
from eliot.testing import LoggedAction, validateLogging, assertHasAction
from twisted.trial.unittest import TestCase
from twisted.python.procutils import which
from ...testtools import if_root
from .. import make_host_network, OpenPort
from .._logging import (
CREATE_PROXY_TO, DELETE_PROXY, IPTABLES, DELETE_OPEN_PORT
)
from .networktests import make_network_tests
try:
from .iptables import create_network_namespace, get_iptables_rules
NOMENCLATURE_INSTALLED = True
except ImportError:
NOMENCLATURE_INSTALLED = False
def connect_nonblocking(ip, port):
    """
    Attempt a TCP connection to the given address without blocking.

    Uses connect_ex so an in-progress connection does not raise; the
    (non-blocking) client socket is returned to the caller.
    """
    sock = socket()
    sock.setblocking(False)
    sock.connect_ex((ip.exploded, port))
    return sock
def create_user_rule():
    """
    Create an iptables rule which simulates an existing (or otherwise
    configured beyond flocker's control) rule on the system and needs to be
    ignored by :py:func:`enumerate_proxies`.
    """
    # Requires root; runs the real iptables binary.
    check_call([
        b"iptables",
        # Stick it in the PREROUTING chain based on our knowledge that the
        # implementation inspects this chain to enumerate proxies.
        b"--table", b"nat", b"--append", b"PREROUTING",
        b"--protocol", b"tcp", b"--dport", b"12345",
        b"--match", b"addrtype", b"--dst-type", b"LOCAL",
        b"--jump", b"DNAT", b"--to-destination", b"10.7.8.9",
    ])
def is_environment_configured():
    """
    Determine whether it is possible to exercise the proxy setup functionality
    in the current execution environment.

    :return: :obj:`True` if the proxy setup functionality could work given the
        underlying system and the privileges of this process, :obj:`False`
        otherwise.
    """
    # Manipulating iptables requires root privileges.
    return getuid() == 0
def some_iptables_logged(parent_action_type):
    """
    Create a validator which assert that some ``IPTABLES`` actions got logged.
    They should be logged as children of a ``parent_action_type`` action (but
    this function will not verify that).  No other assertions are made about
    the particulars of the message because that would be difficult (by virtue
    of requiring we duplicate the exact iptables commands from the
    implementation here, in the tests, which is tedious and produces fragile
    tests).
    """
    def validate(case, logger):
        assertHasAction(case, logger, parent_action_type, succeeded=True)
        # Ideally this would inspect the children of the action returned by
        # assertHasAction, but the interfaces don't seem to line up; just
        # require that at least one IPTABLES action appears anywhere.
        logged = LoggedAction.ofType(logger.messages, IPTABLES)
        case.assertNotEqual(logged, [])
    return validate
# Shared skip decorators: the tests below need root, the local
# `nomenclature` helpers, and the iptables-save binary respectively.
_environment_skip = skipUnless(
    is_environment_configured(),
    "Cannot test port forwarding without suitable test environment.")
_dependency_skip = skipUnless(
    NOMENCLATURE_INSTALLED,
    "Cannot test port forwarding without nomenclature installed.")
_iptables_skip = skipUnless(
    which(b"iptables-save"),
    "Cannot set up isolated environment without iptables-save.")
class GetIPTablesTests(TestCase):
    """
    Tests for the iptables rule preserving helper.
    """
    @_dependency_skip
    @_environment_skip
    def test_get_iptables_rules(self):
        """
        :py:code:`get_iptables_rules()` returns the same list of
        bytes as long as no rules have changed.
        """
        first = get_iptables_rules()
        # The most likely reason the result might change is that
        # `iptables-save` includes timestamps with one-second resolution in its
        # output.
        sleep(1.0)
        second = get_iptables_rules()
        self.assertEqual(first, second)
class IPTablesNetworkTests(make_network_tests(make_host_network)):
    """
    Apply the generic ``INetwork`` test suite to the implementation which
    manipulates the actual system configuration.
    """
    @_dependency_skip
    @_environment_skip
    def setUp(self):
        """
        Arrange for the tests to not corrupt the system network configuration.
        """
        # Run inside a dedicated network namespace and restore it afterwards
        # so real iptables rules on the host are untouched.
        self.namespace = create_network_namespace()
        self.addCleanup(self.namespace.restore)
        super(IPTablesNetworkTests, self).setUp()
class CreateTests(TestCase):
    """
    Tests for the creation of new external routing rules.
    """
    @_dependency_skip
    @_environment_skip
    def setUp(self):
        """
        Select some addresses between which to proxy and set up a server to act
        as the target of the proxying.
        """
        # Do the work in a disposable network namespace so the iptables rules
        # created by these tests never touch the real host configuration.
        self.namespace = create_network_namespace()
        self.addCleanup(self.namespace.restore)
        self.network = make_host_network()

        # https://clusterhq.atlassian.net/browse/FLOC-135
        # Don't hardcode addresses in the created namespace
        self.server_ip = self.namespace.ADDRESSES[0]
        self.proxy_ip = self.namespace.ADDRESSES[1]

        # This is the target of the proxy which will be created.  Binding to
        # port 0 lets the kernel pick a free ephemeral port.
        self.server = socket()
        self.server.bind((self.server_ip.exploded, 0))
        self.server.listen(1)

        # This is used to accept connections over the local network stack.
        # They should be nearly instantaneous.  If they are not then something
        # is *probably* wrong (and hopefully it isn't just an instance of the
        # machine being so loaded the local network stack can't complete a TCP
        # handshake in under one second...).
        self.server.settimeout(1)

        self.port = self.server.getsockname()[1]

    def test_setup(self):
        """
        A connection attempt to the server created in ``setUp`` is successful.
        """
        client = connect_nonblocking(self.server_ip, self.port)
        accepted, client_address = self.server.accept()
        self.assertEqual(client.getsockname(), client_address)

    @validateLogging(some_iptables_logged(CREATE_PROXY_TO))
    def test_connection(self, logger):
        """
        A connection attempt is forwarded to the specified destination address.
        """
        # Capture the network implementation's log output so the validator
        # passed to validateLogging can inspect it.
        self.patch(self.network, "logger", logger)

        self.network.create_proxy_to(self.server_ip, self.port)

        # Connect to the *proxy* address; the new rule should forward the
        # connection to the server created in setUp.
        client = connect_nonblocking(self.proxy_ip, self.port)
        accepted, client_address = self.server.accept()
        self.assertEqual(client.getsockname(), client_address)

    def test_client_to_server(self):
        """
        A proxied connection will deliver bytes from the client side to the
        server side.
        """
        self.network.create_proxy_to(self.server_ip, self.port)

        client = connect_nonblocking(self.proxy_ip, self.port)
        accepted, client_address = self.server.accept()

        client.send(b"x")
        self.assertEqual(b"x", accepted.recv(1))

    def test_server_to_client(self):
        """
        A proxied connection will deliver bytes from the server side to the
        client side.
        """
        self.network.create_proxy_to(self.server_ip, self.port)

        client = connect_nonblocking(self.proxy_ip, self.port)
        accepted, client_address = self.server.accept()

        accepted.send(b"x")
        self.assertEqual(b"x", client.recv(1))

    def test_remote_connections_unaffected(self):
        """
        A connection attempt to an IP not assigned to this host on the proxied
        port is not proxied.
        """
        network = IPNetwork("172.16.0.0/12")
        gateway = network[1]
        address = network[2]

        # The strategy taken by this test is to create a new, clean network
        # stack and then treat it like a foreign host.  A connection to that
        # foreign host should not be proxied.  This is possible because Linux
        # supports the creation of an arbitrary number of instances of its
        # network stack, all isolated from each other.
        #
        # To learn more, here are some links:
        #
        # http://man7.org/linux/man-pages/man8/ip-netns.8.html
        # http://blog.scottlowe.org/2013/09/04/introducing-linux-network-namespaces/
        #
        # Note also that Linux network namespaces are how Docker creates
        # isolated network environments.

        # Create a remote "host" that the test can reliably fail a connection
        # attempt to.
        pid = getpid()
        # NOTE(review): the bytes + str concatenations below (hex() and
        # self.id() return str) only work on Python 2, where str is bytes.
        veth0 = b"veth_" + hex(pid)
        veth1 = b"veth1"
        network_namespace = b"%s.%s" % (self.id(), getpid())

        def run(cmd):
            # Run a system command, raising CalledProcessError on failure.
            check_call(cmd.split())

        # Destroy whatever system resources we go on to allocate in this test.
        # We set this up first so even if one of the operations encounters an
        # error after a resource has been allocated we'll still clean it up.
        # It's not an error to try to delete things that don't exist
        # (conveniently).
        self.addCleanup(run, b"ip netns delete " + network_namespace)
        self.addCleanup(run, b"ip link delete " + veth0)

        ops = [
            # Create a new network namespace where we can assign a non-local
            # address to use as the target of a connection attempt.
            b"ip netns add %(netns)s",

            # Create a virtual ethernet pair so there is a network link between
            # the host and the new network namespace.
            b"ip link add %(veth0)s type veth peer name %(veth1)s",

            # Assign an address to the virtual ethernet interface that will
            # remain on the host.  This will be our "gateway" into the network
            # namespace.
            b"ip address add %(gateway)s dev %(veth0)s",

            # Bring it up.
            b"ip link set dev %(veth0)s up",

            # Put the other virtual ethernet interface into the network
            # namespace.  Now it will only affect networking behavior for code
            # running in that network namespace, not for code running directly
            # on the host network (like the code in this test and whatever
            # iptables rules we created).
            b"ip link set %(veth1)s netns %(netns)s",

            # Assign to that virtual ethernet interface an address on the same
            # (private, unused) network as the address we gave to the gateway
            # interface.
            b"ip netns exec %(netns)s ip address add %(address)s "
            b"dev %(veth1)s",

            # And bring it up.
            b"ip netns exec %(netns)s ip link set dev %(veth1)s up",

            # Add a route into the network namespace via the virtual interface
            # for traffic bound for addresses on that network.
            b"ip route add %(network)s dev %(veth0)s scope link",

            # And add a reciprocal route so traffic generated inside the
            # network namespace (like TCP RST packets) can get back to us.
            b"ip netns exec %(netns)s ip route add default dev %(veth1)s",
        ]
        params = dict(
            netns=network_namespace, veth0=veth0, veth1=veth1,
            address=address, gateway=gateway, network=network,
        )
        for op in ops:
            run(op % params)

        # Create the proxy which we expect not to be invoked.
        self.network.create_proxy_to(self.server_ip, self.port)

        client = socket()
        client.settimeout(1)

        # Try to connect to an address hosted inside that network namespace.
        # It should fail.  It should not be proxied to the server created in
        # setUp.
        exception = self.assertRaises(
            error, client.connect, (str(address), self.port))
        self.assertEqual(ECONNREFUSED, exception.errno)
class EnumerateTests(TestCase):
    """
    Tests for the enumerate of Flocker-managed external routing rules.
    """
    @_dependency_skip
    @_environment_skip
    def setUp(self):
        # Isolate iptables changes in a throw-away network namespace.
        self.addCleanup(create_network_namespace().restore)
        self.network = make_host_network()

    def test_unrelated_iptables_rules(self):
        """
        If there are rules in NAT table which aren't related to flocker then
        :py:func:`enumerate_proxies` does not include information about them in
        its return value.
        """
        # Add a rule that flocker did not create, then verify only the
        # flocker-created proxy is reported.
        create_user_rule()
        proxy = self.network.create_proxy_to(IPAddress("10.1.2.3"), 1234)
        self.assertEqual([proxy], self.network.enumerate_proxies())
class DeleteTests(TestCase):
    """
    Tests for the deletion of Flocker-managed external routing rules.
    """
    @_dependency_skip
    @_environment_skip
    def setUp(self):
        # Isolate the iptables mutations made below in a disposable network
        # namespace so the real host configuration is untouched.
        self.addCleanup(create_network_namespace().restore)
        self.network = make_host_network()

    @validateLogging(some_iptables_logged(DELETE_PROXY))
    def test_created_rules_deleted(self, logger):
        """
        After a route created using :py:func:`flocker.route.create_proxy_to` is
        deleted using :py:meth:`delete_proxy` the iptables rules which were
        added by the former are removed.
        """
        # Snapshot the rules before the proxy exists.
        original_rules = get_iptables_rules()

        proxy = self.network.create_proxy_to(IPAddress("10.1.2.3"), 12345)
        # Only interested in logging behavior of delete_proxy here.
        self.patch(self.network, "logger", logger)
        self.network.delete_proxy(proxy)

        # Capture the new rules
        new_rules = get_iptables_rules()

        # And compare them against the rules when we started.
        self.assertEqual(
            original_rules,
            new_rules)

    def test_only_specified_proxy_deleted(self):
        """
        Only the rules associated with the proxy specified by the object passed
        to :py:func:`delete_proxy` are deleted.
        """
        self.network.create_proxy_to(IPAddress("10.1.2.3"), 12345)
        # Capture the rules that exist now for comparison later.
        expected = get_iptables_rules()

        # Create and immediately delete a second proxy.
        delete = self.network.create_proxy_to(IPAddress("10.1.2.4"), 23456)
        self.network.delete_proxy(delete)

        # Capture the new rules
        actual = get_iptables_rules()

        # They should match because only the second proxy should have been torn
        # down.
        self.assertEqual(
            expected,
            actual)
class UsedPortsTests(TestCase):
    """
    Tests for enumeration of used ports.
    """
    @if_root
    @_iptables_skip
    def setUp(self):
        # No shared fixtures are needed; the decorators exist only to skip
        # this whole class when the environment cannot support it.
        pass

    def _listening_test(self, interface):
        """
        Verify that a socket listening on the given interface has its port
        number included in the result of ``HostNetwork.enumerate_used_ports``.

        :param str interface: A native string giving the address of the
            interface to which the listening socket will be bound.

        :raise: If the port number is not indicated as used, a failure
            exception is raised.
        """
        network = make_host_network()
        listener = socket()
        self.addCleanup(listener.close)
        # Bind to port 0 so the kernel picks a free ephemeral port.
        listener.bind((interface, 0))
        listener.listen(3)
        self.assertIn(
            listener.getsockname()[1], network.enumerate_used_ports())

    def test_listening_ports(self):
        """
        If a socket is bound to a port and listening the port number is
        included in the result of ``HostNetwork.enumerate_used_ports``.
        """
        # '' binds to all interfaces.
        self._listening_test('')

    def test_localhost_listening_ports(self):
        """
        If a socket is bound to a port on localhost only the port number is
        included in the result of ``HostNetwork.enumerate_used_ports``.
        """
        self._listening_test('127.0.0.1')

    def test_client_ports(self):
        """
        If a socket is bound to a port and connected to a server then the
        client port is included in ``HostNetwork.enumerate_used_ports``'s
        return value.
        """
        network = make_host_network()
        listener = socket()
        self.addCleanup(listener.close)
        # listen() without an explicit bind() assigns an ephemeral port.
        listener.listen(3)

        client = socket()
        self.addCleanup(client.close)
        # Non-blocking so connect_ex returns immediately; the connection does
        # not need to complete for the client port to be allocated.
        client.setblocking(False)
        try:
            client.connect_ex(listener.getsockname())
        except error:
            pass

        self.assertIn(
            client.getsockname()[1], network.enumerate_used_ports())
class DeleteOpenPortTests(TestCase):
    """
    Tests for ``HostNetwork.delete_open_port``.
    """
    # Port used by both tests; it is a class attribute because it is also
    # referenced from the validateLogging decorator arguments below.
    expected_port = 12345

    @_dependency_skip
    @_environment_skip
    def setUp(self):
        # Keep iptables changes out of the real host configuration.
        self.addCleanup(create_network_namespace().restore)
        self.network = make_host_network()

    @validateLogging(
        assertHasAction,
        DELETE_OPEN_PORT,
        succeeded=True,
        startFields={'target_port': expected_port}
    )
    def test_success(self, logger):
        """
        ``HostNetwork.delete_open_port`` logs a successful ``DELETE_OPEN_PORT``
        action when the requested port has been opened.
        """
        port = self.network.open_port(self.expected_port)
        self.patch(self.network, "logger", logger)
        self.network.delete_open_port(port)

    @validateLogging(
        assertHasAction,
        DELETE_OPEN_PORT,
        succeeded=False,
        startFields={'target_port': expected_port}
    )
    def test_failure(self, logger):
        """
        ``HostNetwork.delete_open_port`` logs a failed ``DELETE_OPEN_PORT``
        action when the requested port has not been opened.
        """
        self.patch(self.network, "logger", logger)
        # Deleting a port that was never opened should raise.
        self.assertRaises(
            Exception,
            self.network.delete_open_port, OpenPort(port=self.expected_port)
        )
| apache-2.0 |
armani-dev/android_kernel_xiaomi_armani | scripts/gcc-wrapper.py | 181 | 3495 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale.  TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.

# Warnings at these exact "file:line" locations are tolerated; any other
# gcc warning aborts the build (see interpret_warning below).
allowed_warnings = set([
    "return_address.c:62",
    "hci_conn.c:407",
    "cpufreq_interactive.c:804",
    "cpufreq_interactive.c:847",
    "ene_ub6250.c:2118",
 ])

# Capture the name of the object file, can find it.
# (Set by run_gcc() from the -o argument; used to delete a partially built
# object file when a forbidden warning is detected.)
ofile = None

# Matches gcc diagnostics of the form "path/file.c:LINE[:COL]: warning:";
# group 2 is the "file.c:LINE" key tested against allowed_warnings.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc. The messages we care about have a filename, and a warning.

    If the warning is not in ``allowed_warnings``, delete the partially
    built object file (if any) and exit the process with status 1 so the
    build fails.  (Python 2 source: uses print statements.)
    """
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        sys.exit(1)
def run_gcc():
    """Invoke the real compiler with this wrapper's arguments, echoing and
    scanning its stderr for warnings.

    Returns the compiler's exit status, or the errno value if the compiler
    could not be launched at all.
    """
    args = sys.argv[1:]
    # Look for -o so interpret_warning() knows which object file to delete
    # on a forbidden warning.
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass

    # NOTE(review): `compiler` is assigned but never used.
    compiler = sys.argv[0]

    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        # Echo each stderr line (trailing comma suppresses the extra
        # newline) and check it for forbidden warnings.
        for line in proc.stderr:
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        # Launch failed entirely; report why and propagate the errno.
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)

    return result
if __name__ == '__main__':
    # Exit with the compiler's status so the build system sees failures.
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/examples/openmdao.examples.bar3simulation/openmdao/examples/bar3simulation/bar3_optimization.py | 1 | 4444 | """
bar3_optimization.py - Top level assembly for the example problem.
"""
# Optimize the bar3 design using the CONMIN optimizer.
# pylint: disable-msg=E0611,F0401
from openmdao.lib.drivers.api import CONMINdriver
from openmdao.main.api import Assembly
from openmdao.main.datatypes.api import Float
# from openmdao.examples.bar3simulation.bar3 import Bar3Truss
from openmdao.examples.bar3simulation.bar3_wrap_f import Bar3Truss
class Bar3Optimization(Assembly):
    """ Optimization of a three bar truss.

    Wires a CONMIN optimizer around a Bar3Truss component: the objective is
    the truss weight, the design variables are the three bar cross-sectional
    areas, and the constraints bound stresses, displacements, and frequency.
    """

    # set up interface to the framework
    # pylint: disable-msg=E1101

    # Constraint allowables (inputs the constraints below refer to).
    bar1_stress_allowable = Float(20., iotype='in',
                                  units='lb/(inch*inch)',
                                  desc='Stress allowable in bar 1')
    bar2_stress_allowable = Float(20., iotype='in',
                                  units='lb/(inch*inch)',
                                  desc='Stress allowable in bar 2')
    bar3_stress_allowable = Float(20., iotype='in',
                                  units='lb/(inch*inch)',
                                  desc='Stress allowable in bar 3')
    displacement_x_dir_allowable = Float(0.20, iotype='in', units='inch',
                                         desc='Displacement limitation in x-direction')
    displacement_y_dir_allowable = Float(0.05, iotype='in', units='inch',
                                         desc='Displacement limitation in y-direction')
    frequency_allowable = Float(14.1421, iotype='in', units='Hz',
                                desc='Frequency limitation in Hertz')

    def configure(self):
        """Set up the driver, component, objective, parameters and
        constraints of the assembly."""
        # Create CONMIN Optimizer instance
        self.add('driver', CONMINdriver())

        # Create Bar3_Truss component instances
        self.add('bar3_truss', Bar3Truss())

        self.driver.workflow.add('bar3_truss')

        # CONMIN Flags (iteration limit, finite-difference step sizes,
        # constraint thickness).
        self.driver.iprint = 0
        self.driver.itmax = 30
        self.driver.fdch = .00001
        self.driver.fdchm = .00001
        self.driver.ct = -.001

        # CONMIN Objective: the truss weight.
        self.driver.add_objective('bar3_truss.weight')

        # CONMIN Design Variables: the three bar cross-sectional areas,
        # each bounded to the same range.
        for param, low, high in zip(['bar3_truss.bar1_area',
                                     'bar3_truss.bar2_area',
                                     'bar3_truss.bar3_area'],
                                    [0.001, 0.001, 0.001],
                                    [10000.0, 10000.0, 10000.0]):
            self.driver.add_parameter(param, low=low, high=high)

        # CONMIN Constraints
        constraints = [
            'abs(bar3_truss.bar1_stress/bar1_stress_allowable) <= 1.0',
            'abs(bar3_truss.bar2_stress/bar2_stress_allowable) <= 1.0',
            'abs(bar3_truss.bar3_stress/bar3_stress_allowable) <= 1.0',
            'abs(bar3_truss.displacement_x_dir/displacement_x_dir_allowable) <= 1.0',
            'abs(bar3_truss.displacement_y_dir/displacement_y_dir_allowable) <= 1.0',
            'frequency_allowable**2 <= bar3_truss.frequency**2']
        # NOTE: relies on Python 2's eager map() for the side effect of
        # registering every constraint.
        map(self.driver.add_constraint, constraints)
if __name__ == "__main__":  # pragma: no cover

    import time

    # pylint: disable-msg=E1101

    opt_bar3 = Bar3Optimization()

    def prz(title):
        """ Print before and after"""
        print '---------------------------------'
        print title
        print '---------------------------------'
        print 'Bar3: Weight = ', opt_bar3.bar3_truss.weight
        print 'DV1: Bar1_area = ', opt_bar3.bar3_truss.bar1_area
        print 'DV2: Bar2_area = ', opt_bar3.bar3_truss.bar2_area
        print 'Dv3: Bar3_area = ', opt_bar3.bar3_truss.bar3_area
        print '---------------------------------'
        print 'Con1: Bar1_stress = ', opt_bar3.bar3_truss.bar1_stress
        print 'Con2: Bar2_stress = ', opt_bar3.bar3_truss.bar2_stress
        print 'Con3: Bar3_stress = ', opt_bar3.bar3_truss.bar3_stress
        print 'Con4: Displ_u = ', opt_bar3.bar3_truss.displacement_x_dir
        print 'Con5: Displ_v = ', opt_bar3.bar3_truss.displacement_y_dir
        print 'Con6: Frequency = ', opt_bar3.bar3_truss.frequency
        print '\n'

    # Evaluate and report the initial (unoptimized) design.
    opt_bar3.bar3_truss.run()
    prz('Old Design')

    # Run the CONMIN-driven optimization, timing the whole process.
    time1 = time.time()
    opt_bar3.run()
    prz('New Design')
    print "CONMIN Iterations: ", opt_bar3.driver.iter_count
    print ""
    print "Elapsed time: ", time.time() - time1

# end bar3_optimization.py
| mit |
agriffis/django-allauth | allauth/socialaccount/providers/amazon/views.py | 73 | 1304 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import AmazonProvider
class AmazonOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter for "Login with Amazon".

    Declares the Amazon endpoints and converts a profile response into the
    data structure django-allauth expects.
    """
    provider_id = AmazonProvider.id
    access_token_url = 'https://api.amazon.com/auth/o2/token'
    # Fix: the authorization endpoint must be HTTPS.  Amazon's Login with
    # Amazon service only serves the /ap/oa authorization page over HTTPS;
    # the previous plain-http URL sent users through an insecure redirect.
    authorize_url = 'https://www.amazon.com/ap/oa'
    profile_url = 'https://www.amazon.com/ap/user/profile'
    supports_state = False
    redirect_uri_protocol = 'https'

    def complete_login(self, request, app, token, **kwargs):
        """Fetch the user's Amazon profile with ``token`` and return a
        ``SocialLogin`` built from it.

        :param request: The HTTP request completing the login.
        :param app: The configured social application (unused here).
        :param token: The access token obtained from Amazon.
        """
        response = requests.get(
            self.profile_url,
            params={'access_token': token})
        extra_data = response.json()
        # Newer profile responses nest the fields under a 'Profile' key;
        # flatten them to the keys the provider's population code expects.
        if 'Profile' in extra_data:
            extra_data = {
                'user_id': extra_data['Profile']['CustomerId'],
                'name': extra_data['Profile']['Name'],
                'email': extra_data['Profile']['PrimaryEmail']
            }
        return self.get_provider().sociallogin_from_response(
            request,
            extra_data)
# Module-level view callables referenced by the provider's urlconf to start
# and to complete the Amazon OAuth2 flow.
oauth2_login = OAuth2LoginView.adapter_view(AmazonOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(AmazonOAuth2Adapter)
| mit |
hall1467/wikidata_usage_tracking | python_analysis_scripts/edit_analyses/session_stats.py | 1 | 2861 | """
Selects number of distinct revisions.
Usage:
session_stats (-h|--help)
session_stats <input> <output>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input> Path to input file to process.
<output> Where output will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import logging
import operator
import sys
import mysqltsv
from collections import defaultdict
logger = logging.getLogger(__name__)
def main(argv=None):
    """
    Command-line entry point: parse the docopt arguments, configure logging,
    open the input TSV and the output file, then delegate to ``run``.

    :param argv: Unused; docopt reads sys.argv directly.
    """
    args = docopt.docopt(__doc__)

    logging.basicConfig(
        level=logging.INFO if not args['--debug'] else logging.DEBUG,
        format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
    )

    # Headered TSV input; the declared column types make mysqltsv coerce
    # the sixth column to int.  NOTE(review): presumably an edit count --
    # confirm against the data schema.
    input_file = mysqltsv.Reader(
        open(args['<input>'],'rt'), headers=True,
        types=[str, str, str, str, str, int, str, str, str, str, str, str,
               str, str])
    output_file = open(args['<output>'], "w")
    verbose = args['--verbose']

    run(input_file, output_file, verbose)
def run(input_file, output_file, verbose):
    """
    Tally revision and session statistics from ``input_file`` rows and write
    a human-readable summary to ``output_file``.

    :param input_file: Iterable of row mappings with at least the keys
        ``user``, ``session_start``, ``namespace`` and ``edit_type``.
    :param output_file: File-like object with a ``write`` method.
    :param verbose: When true, progress is reported to stderr every
        10,000 rows.
    """
    # Map each user to the set of distinct session start times observed,
    # overall and broken down by editor type.  (Sets replace the original
    # nested dicts-of-1s, which existed only to deduplicate.)
    sessions = defaultdict(set)
    bot_sessions = defaultdict(set)
    human_sessions = defaultdict(set)
    # Count of revisions seen per namespace.
    revision_namespaces = defaultdict(int)
    bot_revisions_sum = 0
    human_revisions_sum = 0

    for i, line in enumerate(input_file):
        sessions[line["user"]].add(line["session_start"])
        revision_namespaces[line["namespace"]] += 1

        # Any edit not explicitly marked 'bot' counts as human.
        if line["edit_type"] == 'bot':
            bot_revisions_sum += 1
            bot_sessions[line["user"]].add(line["session_start"])
        else:
            human_revisions_sum += 1
            human_sessions[line["user"]].add(line["session_start"])

        if verbose and i % 10000 == 0 and i != 0:
            sys.stderr.write("Revisions analyzed: {0}\n".format(i))
            sys.stderr.flush()

    # A user's session count is the number of distinct start times, so the
    # totals are just sums of per-user set sizes (the original re-counted
    # them with nested loops).
    session_sum = sum(len(starts) for starts in sessions.values())
    bot_session_sum = sum(len(starts) for starts in bot_sessions.values())
    human_session_sum = sum(len(starts) for starts in human_sessions.values())

    output_file.write("Sessions: {0}\n".format(session_sum))
    output_file.write("Bot sessions: {0}\n".format(bot_session_sum))
    output_file.write("Bot revisions: {0}\n".format(bot_revisions_sum))
    output_file.write("Human sessions: {0}\n".format(human_session_sum))
    output_file.write("Human revisions: {0}\n".format(human_revisions_sum))
    output_file.write("Revision namespaces: {0}\n".format(revision_namespaces))
main()
| mit |
barentsen/csvkit | csvkit/convert/xls.py | 21 | 4661 | #!/usr/bin/env python
import datetime
import six
import xlrd
from csvkit import table
from csvkit.exceptions import XLSDataError
def normalize_empty(values, **kwargs):
    """
    Normalize a column which contains only empty cells.

    Returns ``(None, nulls)`` where ``nulls`` has one ``None`` per input
    cell.
    """
    return None, [None for _ in values]
def normalize_text(values, **kwargs):
    """
    Normalize a column of text cells.

    Blank (falsy) cells become ``None``; everything else is coerced to
    unicode text.
    """
    normalized = []
    for value in values:
        normalized.append(six.text_type(value) if value else None)
    return six.text_type, normalized
def normalize_numbers(values, **kwargs):
    """
    Normalize a column of numeric cells.

    If every non-empty value is a whole number the column is coerced to
    ``int``; otherwise it remains ``float``.  Blank cells become ``None``.
    """
    # A single fractional (non-empty) value forces the column to float.
    integral = all(v % 1 == 0 for v in values if v)

    if integral:
        return int, [int(v) if v != '' else None for v in values]

    # Convert blanks to None, leave the floats untouched.
    return float, [v if v else None for v in values]
def normalize_dates(values, datemode=0, **kwargs):
    """
    Normalize a column of date cells.

    :param values: Raw xlrd date serial numbers ('' for blank cells).
    :param datemode: The workbook's datemode, passed through to
        ``xlrd.xldate_as_tuple`` to decode the serial numbers.
    :returns: A ``(type, values)`` pair where type is one of
        ``datetime.date``, ``datetime.time`` or ``datetime.datetime``.
    :raises XLSDataError: If the column mixes times with dates or datetimes.
    """
    normal_values = []
    normal_types_set = set()

    for v in values:
        # Convert blanks to None
        if v == '':
            normal_values.append(None)
            continue

        # Decode the serial number into (y, m, d, H, M, S).
        v_tuple = xlrd.xldate_as_tuple(v, datemode)

        if v_tuple == (0, 0, 0, 0, 0, 0):
            # Midnight (all-zero tuple is ambiguous; treated as a time).
            normal_values.append(datetime.time(*v_tuple[3:]))
            normal_types_set.add(datetime.time)
        elif v_tuple[3:] == (0, 0, 0):
            # Date only
            normal_values.append(datetime.date(*v_tuple[:3]))
            normal_types_set.add(datetime.date)
        elif v_tuple[:3] == (0, 0, 0):
            # Time only
            normal_values.append(datetime.time(*v_tuple[3:]))
            normal_types_set.add(datetime.time)
        else:
            # Date and time
            normal_values.append(datetime.datetime(*v_tuple))
            normal_types_set.add(datetime.datetime)

    if len(normal_types_set) == 1:
        # No special handling if column contains only one type
        pass
    elif normal_types_set == set([datetime.datetime, datetime.date]):
        # If a mix of dates and datetimes, up-convert dates to datetimes
        for i, v in enumerate(normal_values):
            if v.__class__ == datetime.date:
                normal_values[i] = datetime.datetime.combine(v, datetime.time())

        normal_types_set.remove(datetime.date)
    elif normal_types_set == set([datetime.datetime, datetime.time]):
        # Datetimes and times don't mix
        raise XLSDataError('Column contains a mix of times and datetimes (this is not supported).')
    elif normal_types_set == set([datetime.date, datetime.time]):
        # Dates and times don't mix
        raise XLSDataError('Column contains a mix of dates and times (this is not supported).')

    # Natural serialization of dates and times by csv.writer is insufficent so they get converted back to strings at this point
    return normal_types_set.pop(), normal_values
def normalize_booleans(values, **kwargs):
    """
    Normalize a column of boolean cells.

    Blank cells become ``None``; all other cells are coerced with ``bool``.
    """
    normalized = []
    for value in values:
        normalized.append(None if value == '' else bool(value))
    return bool, normalized
# Dispatch table mapping xlrd cell-type constants to the normalizer that
# converts a column of cells of that type into Python values.
NORMALIZERS = {
    xlrd.biffh.XL_CELL_EMPTY: normalize_empty,
    xlrd.biffh.XL_CELL_TEXT: normalize_text,
    xlrd.biffh.XL_CELL_NUMBER: normalize_numbers,
    xlrd.biffh.XL_CELL_DATE: normalize_dates,
    xlrd.biffh.XL_CELL_BOOLEAN: normalize_booleans
}
def determine_column_type(types):
    """
    Determine the correct type for a column from a list of cell types.

    Empty cells are ignored; a column mixing multiple non-empty types is
    treated as text.
    """
    distinct = set(types) - set([xlrd.biffh.XL_CELL_EMPTY])

    # Normalize mixed types to text
    if len(distinct) > 1:
        return xlrd.biffh.XL_CELL_TEXT

    if distinct:
        return distinct.pop()

    # Only empty cells (or no cells at all) were present.
    return xlrd.biffh.XL_CELL_EMPTY
def xls2csv(f, **kwargs):
    """
    Convert an Excel .xls file to csv.

    :param f: An open file-like object containing the .xls data.
    :param kwargs: ``sheet`` may name the worksheet to convert; otherwise
        the first sheet is used.
    :returns: The CSV data as a string.
    """
    book = xlrd.open_workbook(file_contents=f.read())

    if 'sheet' in kwargs:
        sheet = book.sheet_by_name(kwargs['sheet'])
    else:
        sheet = book.sheet_by_index(0)

    tab = table.Table()

    for i in range(sheet.ncols):
        # Trim headers
        column_name = sheet.col_values(i)[0]

        values = sheet.col_values(i)[1:]
        types = sheet.col_types(i)[1:]

        column_type = determine_column_type(types)
        t, normal_values = NORMALIZERS[column_type](values,
                                                    datemode=book.datemode)

        column = table.Column(i, column_name, normal_values, normal_type=t)
        tab.append(column)

    o = six.StringIO()
    # to_csv renders into the buffer; the original assigned its return value
    # only to overwrite it with the buffer contents immediately, so the dead
    # store has been removed.
    tab.to_csv(o)
    output = o.getvalue()
    o.close()

    return output
| mit |
asen6/amartyasenguptadotcom | django/test/_doctest.py | 152 | 100621 | # This is a slightly modified version of the doctest.py that shipped with Python 2.4
# It incorporates changes that have been submitted to the Python ticket tracker
# as ticket #1521051. These changes allow for a DoctestRunner and Doctest base
# class to be specified when constructing a DoctestSuite.
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
'is_private',
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
if sys.platform.startswith('java'):
    # On Jython, isclass() reports some modules as classes. Patch it.
    def patch_isclass(isclass):
        # Wrap the original predicate so objects without a __module__
        # attribute (i.e. the misreported modules) are rejected.
        def patched_isclass(obj):
            return isclass(obj) and hasattr(obj, '__module__')
        return patched_isclass
    inspect.isclass = patch_isclass(inspect.isclass)

# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
                        __name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
# Registry of all known doctest option flags, keyed by name.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """
    Register a doctest option flag called ``name`` and return its unique
    power-of-two bit value.
    """
    # Each new flag occupies the next free bit.
    value = 1 << len(OPTIONFLAGS_BY_NAME)
    OPTIONFLAGS_BY_NAME[name] = value
    return value
# Comparison flags: each one controls an aspect of how a test's actual
# output is matched against its expected output.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

# Union of all comparison-related flags.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    IGNORE_EXCEPTION_DETAIL)

# Reporting flags: each one controls how failures are reported.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')

# Union of all reporting-related flags.
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
    """prefix, base -> true iff name prefix + "." + base is "private".

    Prefix may be an empty string, and base does not contain a period.
    Prefix is ignored (although functions you write conforming to this
    protocol may make use of it).

    Return true iff base begins with an (at least one) underscore, but
    does not both begin and end with (at least) two underscores.

    >>> is_private("a.b", "my_func")
    False
    >>> is_private("____", "_my_func")
    True
    >>> is_private("someclass", "__init__")
    False
    >>> is_private("sometypo", "__init_")
    True
    >>> is_private("x.y.z", "_")
    True
    >>> is_private("_x.y.z", "__")
    False
    >>> is_private("", "") # senseless but consistent
    False
    """
    warnings.warn("is_private is deprecated; it wasn't useful; "
                  "examine DocTestFinder.find() lists instead",
                  DeprecationWarning, stacklevel=2)
    # Names that both start and end with a double underscore (dunders) are
    # public by convention; anything else with a leading underscore is
    # considered private.
    starts_dunder = base[:2] == "__"
    ends_dunder = base[-2:] == "__"
    return base[:1] == "_" and not (starts_dunder and ends_dunder)
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Return True iff `got` matches `want`, where each ELLIPSIS_MARKER in
    `want` may stand for any (possibly empty) substring of `got`.

    Essentially the only subtle case:

        >>> _ellipsis_match('aa...aa', 'aaa')
        False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # The literal fragments that must appear, in order.
    pieces = want.split(ELLIPSIS_MARKER)
    assert len(pieces) >= 2

    # The window of `got` still available for matching.
    lo, hi = 0, len(got)

    # A non-empty leading piece must match exactly at the start.
    head = pieces[0]
    if head:
        if not got.startswith(head):
            return False
        lo = len(head)
        del pieces[0]

    # A non-empty trailing piece must match exactly at the end.
    tail = pieces[-1]
    if tail:
        if not got.endswith(tail):
            return False
        hi -= len(tail)
        del pieces[-1]

    if lo > hi:
        # The exact end matches required more characters than we have,
        # as in _ellipsis_match('aa...aa', 'aaa').
        return False

    # For the remaining pieces, the leftmost non-overlapping match is
    # always good enough: if no overall match exists that way, none
    # exists at all.  A piece may be '' (consecutive ellipses, or an
    # ellipsis at either end); find('') succeeds without moving `lo`.
    for piece in pieces:
        lo = got.find(piece, lo, hi)
        if lo < 0:
            return False
        lo += len(piece)

    return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # Stream that receives the debugger's own output (prompts and
        # command results).  Set before Pdb.__init__ so it exists by the
        # time any inherited machinery runs.
        self.__out = out
        # Becomes True the first time set_trace() is entered; see
        # set_continue() for why this is tracked.
        self.__debugger_used = False
        pdb.Pdb.__init__(self)

    def set_trace(self):
        self.__debugger_used = True
        pdb.Pdb.set_trace(self)

    def set_continue(self):
        # Calling set_continue unconditionally would break unit test coverage
        # reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream, restoring the real stdout
        # even if dispatch raises.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError('Expected a module: %r' % module)
if path.startswith('/'):
raise ValueError('Module-relative files may not have absolute paths')
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a <source, want> pair plus bookkeeping
    about where it came from.  `Example` defines these attributes:

      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline if needed.

      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline if needed.

      - exc_msg: The exception message generated by the example, if
        the example is expected to generate an exception; or `None` if
        it is not expected to generate an exception.  This exception
        message is compared against the return value of
        `traceback.format_exception_only()`.  `exc_msg` ends with a
        newline unless it's `None`.  The constructor adds a newline
        if needed.

      - lineno: The line number within the DocTest string containing
        this Example where the Example begins.  This line number is
        zero-based, with respect to the beginning of the DocTest.

      - indent: The example's indentation in the DocTest string, i.e.
        the number of space characters that precede the example's
        first prompt.

      - options: A dictionary mapping from option flags to True or
        False, which is used to override default options for this
        example.  Any option flags not contained in this dictionary
        are left at their default value (as specified by the
        DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: every non-empty text attribute ends with "\n".
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        # Store properties.  A caller-supplied options dict is stored
        # as-is (not copied), matching historical behavior.
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
        self.exc_msg = exc_msg
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of examples.

      - globs: The namespace (aka globals) that the examples should
        be run in.

      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).

      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.

      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.

      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Run examples in a *copy* so they can't pollute the caller's
        # namespace.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            examples = 'no examples'
        elif count == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % count
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    # This lets us sort tests by name (Python 2 three-way comparison):
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
                     (?![ ]*>>>)  # Not a line starting with PS1
                     .*$\n?       # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)

    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    #    - the traceback header line (`hdr`)
    #    - the traceback stack (`stack`)
    #    - the exception message (`msg`), as generated by
    #      traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header.  Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            |   innermost\ last
            ) \) :
        )
        \s* $                # toss trailing whitespace on the header.
        (?P<stack> .*?)      # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)

    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match

    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])

        output = []
        # charno tracks our position in `string`; lineno the 0-based
        # line number corresponding to that position.
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.  Blank or
            # comment-only "examples" are parsed (for error checking)
            # but not kept.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                    lineno=lineno,
                                    indent=min_indent+len(m.group('indent')),
                                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output

    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.

        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)

    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.

        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        # parse() returns alternating text and Examples; keep only the
        # Examples.
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]

    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-character prompt
        # ('>>> ' or '... ').
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg

    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)

    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            # Directives look like "# doctest: +FLAG, -OTHERFLAG".
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        # An option directive on a line with no actual example is
        # almost certainly a mistake; reject it loudly.
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options

    # This regular expression finds the indentation of every non-blank
    # line in a string.
    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)

    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0

    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            # indent+3 is the last character of the 3-character prompt
            # ('>>>' or '...'); it must be followed by a space.
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))

    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, _namefilter=None, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        # NOTE: the default `parser` is a single instance shared by all
        # DocTestFinders created without an explicit parser; this is
        # safe because DocTestParser keeps no per-parse state.
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
        # _namefilter is undocumented, and exists only for temporary backward-
        # compatibility support of testmod's deprecated isprivate mess.
        self._namefilter = _namefilter

    def find(self, obj, name=None, module=None, globs=None,
             extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            # getsourcefile/getfile raise TypeError for built-ins.
            source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)

        # Recursively explore `obj`, extracting DocTests.  The final
        # dict argument (`seen`) guards against revisiting objects.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        return tests

    def _filter(self, obj, prefix, base):
        """
        Return true if the given object should not be examined.
        """
        return (self._namefilter is not None and
                self._namefilter(prefix, base))

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            # Python 2: a function's defining module owns its globals.
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print 'Finding tests in %s' % name

        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                # Special handling for staticmethod/classmethod: unwrap
                # them so the underlying function is examined.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).im_func

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                      isinstance(val, property)) and
                      self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Point at the .py source, not the compiled .pyc/.pyo.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)

    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        """
        lineno = None

        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break

        # Find the line number for functions & methods.  Each step
        # unwraps one layer until we reach a code object, which records
        # its own (1-based) first line.
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = getattr(obj, 'co_firstlineno', None)-1

        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> for test in tests:
... print runner.run(test)
(0, 2)
(0, 1)
(0, 2)
(0, 2)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`. Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`. `compileflags` is the set of compiler
        flags that should be used to execute examples. Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed. The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        # Bind the checker's comparison method once; it is called for
        # every example (and for expected-exception messages below).
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options (reset to the originals
            # first so per-example directives do not leak forward).
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised. (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink! This is where the user's code gets run.
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                # Any other exception becomes part of the outcome and
                # is compared against the expected exception (if any).
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            got = self._fakeout.getvalue() # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception: check if it was expected.
            else:
                exc_info = sys.exc_info()
                # Last line of the formatted exception, e.g.
                # "KeyError: 'x'\n".
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    # Compare only the exception class prefixes
                    # (everything up to the first colon).
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        """Replacement for linecache.getlines, installed while a test
        runs: when `filename` is one of this runner's synthetic
        '<doctest NAME[NUM]>' names, serve that example's source;
        otherwise delegate to the saved original getlines."""
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            # linecache.getlines grew the module_globals parameter in
            # Python 2.5; call the saved original accordingly.
            if sys.version_info < (2, 5, 0):
                return self.save_linecache_getlines(filename)
            else:
                return self.save_linecache_getlines(filename, module_globals)
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`. If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection. If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples. If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        # Remember the active test so __patched_linecache_getlines can
        # recognize its synthetic example filenames.
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        # Redirect stdout so everything the examples print is captured.
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Undo every monkey-patch above, even if __run raised.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is. If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        # Partition every recorded name into: ran nothing, all passed,
        # or had failures; accumulate the grand totals along the way.
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print " ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        # Failures are always reported, even when not verbose.
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return totalf, totalt
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """Fold `other`'s per-test (failures, tries) totals into this
        runner; part of the legacy `doctest.master` protocol."""
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                # The same test name ran under both runners: warn and
                # sum the outcomes rather than overwrite.
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                      " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output. `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`). These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible. See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        # NOTE: the relaxed checks below each mutate want/got, so
        # their order matters; later checks see the rewritten strings.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings. Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        """Return True if `optionflags` request a difflib-style report
        and the outputs are substantial enough for one to help."""
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not. In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ## return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`). `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True) # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """A DocTest example has failed in debugging mode.
    The exception instance has variables:
    - test: the DocTest object being run
    - example: the Example object that failed
    - got: the actual output
    """
    # (Fixed docstring typo: "excample" -> "example".)
    def __init__(self, test, example, got):
        self.test = test
        self.example = example
        self.got = got
    def __str__(self):
        # Identify the failure by its DocTest's string form.
        return str(self.test)
class UnexpectedException(Exception):
    """A DocTest example has encountered an unexpected exception
    The exception instance has variables:
    - test: the DocTest object being run
    - example: the Example object that failed
    - exc_info: the exception info
    """
    # (Fixed docstring typo: "excample" -> "example".)
    def __init__(self, test, example, exc_info):
        self.test = test
        self.example = example
        self.exc_info = exc_info
    def __str__(self):
        # Identify the failure by its DocTest's string form.
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ...                                    {}, 'foo', 'foo.py', 0)
    >>> try:
    ...     runner.run(test)
    ... except UnexpectedException, failure:
    ...     pass
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[0], exc_info[1], exc_info[2]
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 1
    ... >>> x
    ... 2
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ...     runner.run(test)
    ... except DocTestFailure, failure:
    ...     pass
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 2
    ... >>> raise KeyError
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 2
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    (0, 1)
    >>> test.globs
    {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Run with clear_globs=False so a raised DocTestFailure /
        # UnexpectedException leaves test.globs intact for post-mortem
        # debugging; clear them here only after a clean run.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Fail fast instead of recording and continuing.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Fail fast instead of recording and continuing.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod (and testfile). Call
# doctest.master.summarize() for a combined report across calls.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """Test examples in docstrings reachable from module `m`.

    If `m` is None, the `__main__` module is used.  Examples are
    collected starting with `m.__doc__`, from functions and classes
    reachable from `m`, and from the `m.__test__` mapping if it exists
    and is not None.  Returns a tuple `(#failures, #tests)`.

    Keyword arguments:
      name         name for the module (default: `m.__name__`)
      globs        dict used as the globals when executing examples
                   (default: `m.__dict__`); each docstring runs in a
                   fresh copy so tests start with a clean slate
      extraglobs   extra globals merged into `globs` for each test
      verbose      print everything if true, only failures if false
                   (default: true iff "-v" is in sys.argv)
      report       print a summary at the end (default: True); in
                   verbose mode the summary is detailed, else brief
      optionflags  or'ed option constants (DONT_ACCEPT_TRUE_FOR_1,
                   DONT_ACCEPT_BLANKLINE, NORMALIZE_WHITESPACE,
                   ELLIPSIS, IGNORE_EXCEPTION_DETAIL, REPORT_UDIFF,
                   REPORT_CDIFF, REPORT_NDIFF,
                   REPORT_ONLY_FIRST_FAILURE)
      raise_on_error
                   raise on the first unexpected exception or failure
                   so it can be post-mortem debugged (default: False)
      exclude_empty
                   skip objects with empty docstrings (default: False)
      isprivate    deprecated (Python 2.4) name filter; see
                   DocTestFinder.find() instead

    Results are merged into the module-global `master` runner, so
    doctest.master.summarize(verbose) can report across several calls.
    """
    global master
    if isprivate is not None:
        warnings.warn("the isprivate argument is deprecated; "
                      "examine DocTestFinder.find() lists instead",
                      DeprecationWarning)
    # Default to the interactive/script module when none was given.
    if m is None:
        m = sys.modules.get('__main__')
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))
    if name is None:
        name = m.__name__
    # Collect the tests, pick a runner, and run every test found.
    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for found_test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(found_test)
    if report:
        runner.summarize()
    # Maintain the legacy module-wide `master` runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
    """Test examples in the given file.  Return (#failures, #tests).

    `filename` is interpreted according to `module_relative`:
    - True (the default): an os-independent module-relative path,
      resolved against the calling module's directory, or against
      `package` if one is given.  It must use "/" separators and may
      not be absolute.
    - False: an ordinary os-specific path, absolute or relative to
      the current working directory; `package` may not be used.

    Keyword arguments:
      name           name of the test (default: the file's basename)
      package        package (or package name) anchoring a
                     module-relative `filename`; error if given with
                     module_relative=False
      globs          dict of globals for the examples (default: {});
                     a copy is used so the caller's dict is untouched
      extraglobs     extra globals merged into `globs`
      verbose        print everything if true, only failures if false
                     (default: true iff "-v" is in sys.argv)
      report         print a summary at the end (default: True)
      optionflags    or'ed doctest option constants (ELLIPSIS,
                     REPORT_UDIFF, ...)
      raise_on_error raise on the first unexpected exception or
                     failure so it can be post-mortem debugged
      parser         DocTestParser (or subclass) used to extract tests

    Results are merged into the module-global `master` runner, as with
    testmod().
    """
    global master
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path
    if module_relative:
        package = _normalize_module(package)
        filename = _module_relative_path(package, filename)
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    # Read the file, convert it to a test, and run it.
    # BUGFIX: close the file explicitly instead of leaking the open
    # file object until garbage collection.
    doc_file = open(filename)
    try:
        s = doc_file.read()
    finally:
        doc_file.close()
    test = parser.get_doctest(s, globs, name, filename, 0)
    runner.run(test)
    if report:
        runner.summarize()
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """Run the doctest examples found in `f`'s docstring.

    The examples execute with `globs` as their global namespace, and
    `name` labels the test in failure messages.  When `verbose` is
    true, output is produced even if every example passes.
    `compileflags` defaults to the future-import flags that apply to
    `globs`; `optionflags` is passed through to the DocTestRunner
    (see testmod for the available flags).
    """
    # recurse=False: only f's own docstring is searched; objects
    # contained in f are not examined for further docstrings.
    finder = DocTestFinder(verbose=verbose, recurse=False)
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for found_test in finder.find(f, name, globs=globs):
        runner.run(found_test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    """Deprecated pre-2.4 testing interface, kept only for backward
    compatibility; use DocTestFinder / DocTestRunner directly.  A
    DeprecationWarning is issued on instantiation."""
    def __init__(self, mod=None, globs=None, verbose=None,
                 isprivate=None, optionflags=0):
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.isprivate = isprivate
        self.optionflags = optionflags
        # One shared finder/runner pair accumulates results across
        # all run* calls, so summarize() reports the combined totals.
        self.testfinder = DocTestFinder(_namefilter=isprivate)
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)
    def runstring(self, s, name):
        # Parse examples out of the literal string `s` and run them.
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return (f,t)
    def rundoc(self, object, name=None, module=None):
        # Run every doctest reachable from `object`, summing results.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)
    def rundict(self, d, name, module=None):
        # Wrap dict `d` in a synthetic module so rundoc can walk it.
        import new
        m = new.module(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)
    def run__test__(self, d, name):
        # Run a __test__-style mapping via a synthetic module.
        import new
        m = new.module(name)
        m.__test__ = d
        return self.rundoc(m, name)
    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)
    def merge(self, other):
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
# Default reporting flags merged into a DocTestCase's optionflags when
# the caller supplied no reporting flags; configured through
# set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> old = _unittest_reportflags
    >>> set_unittest_reportflags(REPORT_NDIFF |
    ...                          REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> import doctest
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags
    # Only reporting-category flags may be installed as defaults.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    old = _unittest_reportflags
    _unittest_reportflags = flags
    return old
class DocTestCase(unittest.TestCase):
    """A unittest.TestCase that runs a single DocTest object through a
    DocTestRunner and fails if any of its examples fail."""
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, runner=DocTestRunner):
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown
        self._dt_runner = runner
    def setUp(self):
        test = self._dt_test
        if self._dt_setUp is not None:
            # The hook receives the DocTest; its globals are reachable
            # as test.globs.
            self._dt_setUp(test)
    def tearDown(self):
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        # Free the (possibly large) example namespace.
        test.globs.clear()
    def runTest(self):
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = self._dt_runner(optionflags=optionflags,
                                 checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-"*70
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            # NOTE(review): DocTestRunner.run saves/restores sys.stdout
            # itself; this restore is defensive.
            sys.stdout = old
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))
    def format_failure(self, err):
        """Build the message for a failed case: test name, location,
        and the captured runner output `err`."""
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )
    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging. The test code
        is run in such a way that errors are not caught. This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
        exception:
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ...                                    {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except UnexpectedException, failure:
        ...     pass
        The UnexpectedException contains the test, the example, and
        the original exception:
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[0], exc_info[1], exc_info[2]
        Traceback (most recent call last):
        ...
        KeyError
        If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ... >>> x = 1
        ... >>> x
        ... 2
        ... ''', {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except DocTestFailure, failure:
        ...     pass
        DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
        As well as to the example:
        >>> failure.example.want
        '2\n'
        and the actual output:
        >>> failure.got
        '1\n'
        """
        self.setUp()
        # DebugRunner raises on the first failure instead of recording.
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()
    def id(self):
        return self._dt_test.name
    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__
    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 test_class=DocTestCase, **options):
    """Build a unittest test suite from the doctests in `module`.

    Each documentation string containing doctest examples becomes one
    test case; the case fails if any of its examples fail, reporting
    the file and (sometimes approximate) line number.

    `module` may be a module object or a module name; if omitted, the
    calling module is used.

    Keyword options:
      setUp        function called before each test, passed the
                   DocTest object (globals reachable via test.globs)
      tearDown     function called after each test, same argument
      globs        dict of initial globals for the tests (default:
                   the module's __dict__)
      extraglobs   extra globals merged in for each test
      optionflags  doctest option flags expressed as an integer

    Raises ValueError when the module defines no tests at all.
    """
    if test_finder is None:
        test_finder = DocTestFinder()
    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Surfacing "no tests" loudly reveals collection bugs that a
        # silently-empty suite would otherwise hide.
        raise ValueError(module, "has no tests")
    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            # Point the case at the module's source file, not the
            # compiled .pyc/.pyo.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(test_class(test, **options))
    return suite
class DocFileCase(DocTestCase):
    """A DocTestCase specialised for doctests loaded from a text file."""
    def id(self):
        # File-based tests use underscores instead of dots in their id.
        return '_'.join(self._dt_test.name.split('.'))
    def __repr__(self):
        return self._dt_test.filename
    __str__ = __repr__
    def format_failure(self, err):
        """Describe a failure, pointing at the source file rather than a line."""
        template = 'Failed doctest test for %s\n File "%s", line 0\n\n%s'
        return template % (self._dt_test.name, self._dt_test.filename, err)
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(), **options):
    """Create a DocFileCase for the doctest file at *path*.

    If *module_relative* is true (the default), *path* is interpreted
    relative to *package* (or the calling module's package).  *globs*
    supplies initial globals for the test; *options* are forwarded to
    DocFileCase (setUp, tearDown, optionflags, ...).
    """
    if globs is None:
        globs = {}
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path.
    if module_relative:
        package = _normalize_module(package)
        path = _module_relative_path(package, path)
    # Find the file and read it.  Close the handle deterministically
    # instead of leaking it to the garbage collector (the previous
    # version never closed the file object returned by open()).
    name = os.path.basename(path)
    doc_file = open(path)
    try:
        doc = doc_file.read()
    finally:
        doc_file.close()
    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.

    Each path is a string; with the keyword argument ``module_relative``
    true (the default) paths are os-independent, '/'-separated,
    non-absolute and resolved against the calling module's directory
    (or against ``package`` when given).  With ``module_relative``
    false they are plain os-specific paths, and ``package`` may not be
    supplied.

    Further keyword arguments are forwarded to each DocFileTest:
    ``setUp`` and ``tearDown`` callables (each receives the DocTest,
    whose globals are available as its ``globs`` attribute), ``globs``
    (initial global variables), ``optionflags`` (doctest option flags
    as an integer) and ``parser`` (a DocTestParser or subclass used to
    extract the tests).
    """
    # Resolve the package at this call level: _normalize_module walks the
    # stack, and doing this inside DocFileTest would misidentify the
    # caller's package.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))
    suite = unittest.TestSuite()
    suite.addTests(DocFileTest(p, **kw) for p in paths)
    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script. Example input is
    converted to regular code. Example output and all other words
    are converted to comments:
    >>> text = '''
    ... Here are examples of simple math.
    ...
    ... Python has super accurate integer addition
    ...
    ... >>> 2 + 2
    ... 5
    ...
    ... And very friendly error messages:
    ...
    ... >>> 1/0
    ... To Infinity
    ... And
    ... Beyond
    ...
    ... You can use logic if you want:
    ...
    ... >>> if 0:
    ... ... blah
    ... ... blah
    ... ...
    ...
    ... Ho hum
    ... '''
    >>> print script_from_examples(text)
    # Here are examples of simple math.
    #
    # Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    # And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    # You can use logic if you want:
    #
    if 0:
       blah
       blah
    #
    # Ho hum
    """
    output = []
    # DocTestParser.parse yields a mix of Example objects and plain
    # prose strings, in document order.
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Add the example's source code (strip trailing NL)
            output.append(piece.source[:-1])
            # Add the expected output:
            want = piece.want
            if want:
                output.append('# Expected:')
                output += ['## '+l for l in want.split('\n')[:-1]]
        else:
            # Add non-example text.
            output += [_comment_line(l)
                       for l in piece.split('\n')[:-1]]
    # Trim junk on both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)
    # Combine the output, and return it.
    return '\n'.join(output)
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    *module* is the module (or dotted module name) containing the test;
    *name* is the name, within the module, of the object whose doc
    string holds the tests.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring given as the string *src*."""
    # Convert the doctest text to a runnable script, then debug that.
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
# Note that tempfile.NameTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    *module* is the module (or dotted module name) containing the test;
    *name* is the name, within the module, of the object whose
    docstring holds the tests to be debugged.
    """
    module = _normalize_module(module)
    # Run the extracted script with the module's globals available.
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    # Run this module's own doctests through the unittest machinery.
    runner = unittest.TextTestRunner()
    runner.run(DocTestSuite())
if __name__ == "__main__":
    _test()
| bsd-3-clause |
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from ..cache import Cache
from ..exceptions import DownloadFailedError, ServiceError
from ..language import language_set, Language
from ..subtitles import EXTENSIONS
import logging
import os
import requests
import threading
import zipfile
# Public API of the services package.
__all__ = ['ServiceBase', 'ServiceConfig']
# Shared logger for every subtitle service implementation.
logger = logging.getLogger("subliminal")
class ServiceBase(object):
    """Service base class
    :param config: service configuration
    :type config: :class:`ServiceConfig`
    """
    #: URL to the service server
    server_url = ''
    #: User Agent for any HTTP-based requests
    user_agent = 'subliminal v0.6'
    #: Whether based on an API or not
    api_based = False
    #: Timeout for web requests
    timeout = 5
    #: :class:`~subliminal.language.language_set` of available languages
    languages = language_set()
    #: Map between language objects and language codes used in the service
    language_map = {}
    #: Default attribute of a :class:`~subliminal.language.Language` to get with :meth:`get_code`
    language_code = 'alpha2'
    #: Accepted video classes (:class:`~subliminal.videos.Episode`, :class:`~subliminal.videos.Movie`, :class:`~subliminal.videos.UnknownVideo`)
    videos = []
    #: Whether the video has to exist or not
    require_video = False
    #: List of required features for BeautifulSoup
    required_features = None
    def __init__(self, config=None):
        """Store the configuration (a default ServiceConfig when omitted)."""
        self.config = config or ServiceConfig()
        # Created lazily by init(); None until then.
        self.session = None
    def __enter__(self):
        """Context-manager entry: open the connection and return self."""
        self.init()
        return self
    def __exit__(self, *args):
        """Context-manager exit: always terminate the connection."""
        self.terminate()
    def init(self):
        """Initialize connection"""
        logger.debug(u'Initializing %s' % self.__class__.__name__)
        # NOTE(review): requests.session() accepting keyword arguments is
        # the pre-1.0 requests API; with requests >= 1.0 this call fails
        # and timeouts must be passed per request -- confirm the pinned
        # requests version shipped with this project.
        self.session = requests.session(timeout=10, headers={'User-Agent': self.user_agent})
    def init_cache(self):
        """Initialize cache, make sure it is loaded from disk"""
        if not self.config or not self.config.cache:
            raise ServiceError('Cache directory is required')
        self.config.cache.load(self.__class__.__name__)
    def save_cache(self):
        """Persist this service's region of the cache to disk."""
        self.config.cache.save(self.__class__.__name__)
    def clear_cache(self):
        """Drop every cached entry belonging to this service."""
        self.config.cache.clear(self.__class__.__name__)
    def cache_for(self, func, args, result):
        """Store *result* for the call ``func(*args)`` in this service's cache."""
        return self.config.cache.cache_for(self.__class__.__name__, func, args, result)
    def cached_value(self, func, args):
        """Return the cached result for the call ``func(*args)``."""
        return self.config.cache.cached_value(self.__class__.__name__, func, args)
    def terminate(self):
        """Terminate connection"""
        logger.debug(u'Terminating %s' % self.__class__.__name__)
    def get_code(self, language):
        """Get the service code for a :class:`~subliminal.language.Language`
        It uses the :data:`language_map` and if there's no match, falls back
        on the :data:`language_code` attribute of the given :class:`~subliminal.language.Language`
        """
        if language in self.language_map:
            return self.language_map[language]
        if self.language_code is None:
            raise ValueError('%r has no matching code' % language)
        return getattr(language, self.language_code)
    def get_language(self, code):
        """Get a :class:`~subliminal.language.Language` from a service code
        It uses the :data:`language_map` and if there's no match, uses the
        given code as ``language`` parameter for the :class:`~subliminal.language.Language`
        constructor
        .. note::
            A warning is emitted if the generated :class:`~subliminal.language.Language`
            is "Undetermined"
        """
        if code in self.language_map:
            return self.language_map[code]
        language = Language(code, strict=False)
        if language == Language('Undetermined'):
            logger.warning(u'Code %s could not be identified as a language for %s' % (code, self.__class__.__name__))
        return language
    def query(self, *args):
        """Make the actual query"""
        raise NotImplementedError()
    def list(self, video, languages):
        """List subtitles
        As a service writer, you can either override this method or implement
        :meth:`list_checked` instead to have the languages pre-filtered for you
        """
        if not self.check_validity(video, languages):
            return []
        return self.list_checked(video, languages)
    def list_checked(self, video, languages):
        """List subtitles without having to check parameters for validity"""
        raise NotImplementedError()
    def download(self, subtitle):
        """Download a subtitle"""
        self.download_file(subtitle.link, subtitle.path)
        return subtitle
    @classmethod
    def check_validity(cls, video, languages):
        """Check for video and languages validity in the Service
        :param video: the video to check
        :type video: :class:`~subliminal.videos.video`
        :param languages: languages to check
        :type languages: :class:`~subliminal.language.Language`
        :rtype: bool
        """
        # Drop 'Undetermined' and anything the service cannot serve.
        languages = (languages & cls.languages) - language_set(['Undetermined'])
        if not languages:
            logger.debug(u'No language available for service %s' % cls.__name__.lower())
            return False
        # Precedence: (require_video and not exists) or wrong video type.
        if cls.require_video and not video.exists or not isinstance(video, tuple(cls.videos)):
            logger.debug(u'%r is not valid for service %s' % (video, cls.__name__.lower()))
            return False
        return True
    def download_file(self, url, filepath):
        """Attempt to download a file and remove it in case of failure
        :param string url: URL to download
        :param string filepath: destination path
        """
        logger.info(u'Downloading %s in %s' % (url, filepath))
        try:
            r = self.session.get(url, headers={'Referer': url, 'User-Agent': self.user_agent})
            with open(filepath, 'wb') as f:
                f.write(r.content)
        except Exception as e:
            logger.error(u'Download failed: %s' % e)
            # Never leave a partial file behind on failure.
            if os.path.exists(filepath):
                os.remove(filepath)
            raise DownloadFailedError(str(e))
        logger.debug(u'Download finished')
    def download_zip_file(self, url, filepath):
        """Attempt to download a zip file and extract any subtitle file from it, if any.
        This cleans up after itself if anything fails.
        :param string url: URL of the zip file to download
        :param string filepath: destination path for the subtitle
        """
        logger.info(u'Downloading %s in %s' % (url, filepath))
        try:
            zippath = filepath + '.zip'
            r = self.session.get(url, headers={'Referer': url, 'User-Agent': self.user_agent})
            with open(zippath, 'wb') as f:
                f.write(r.content)
            if not zipfile.is_zipfile(zippath):
                # TODO: could check if maybe we already have a text file and
                # download it directly
                raise DownloadFailedError('Downloaded file is not a zip file')
            zipsub = zipfile.ZipFile(zippath)
            # Extract the first member whose extension looks like a subtitle.
            for subfile in zipsub.namelist():
                if os.path.splitext(subfile)[1] in EXTENSIONS:
                    with open(filepath, 'wb') as f:
                        f.write(zipsub.open(subfile).read())
                    break
            else:
                zipsub.close()
                raise DownloadFailedError('No subtitles found in zip file')
            zipsub.close()
            os.remove(zippath)
        except Exception as e:
            logger.error(u'Download %s failed: %s' % (url, e))
            # NOTE(review): zippath is unbound here only if the very first
            # statement of the try block raised -- presumably acceptable,
            # but would surface as a NameError; confirm.
            if os.path.exists(zippath):
                os.remove(zippath)
            if os.path.exists(filepath):
                os.remove(filepath)
            raise DownloadFailedError(str(e))
        logger.debug(u'Download finished')
class ServiceConfig(object):
    """Configuration for any :class:`Service`

    :param bool multi: whether to download one subtitle per language or not
    :param string cache_dir: cache directory

    """
    def __init__(self, multi=False, cache_dir=None):
        self.multi = multi
        self.cache_dir = cache_dir
        # The Cache is only created when a directory is supplied.
        self.cache = None
        if cache_dir is not None:
            self.cache = Cache(cache_dir)

    def __repr__(self):
        # Guard against self.cache being None (no cache_dir given): the
        # previous implementation read self.cache.cache_dir unconditionally
        # and raised AttributeError for an uncached configuration.
        cache_dir = self.cache.cache_dir if self.cache is not None else None
        return 'ServiceConfig(%r, %s)' % (self.multi, cache_dir)
| mit |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: introduces the ``Subscription`` model
    # (table ``detective_subscription``).  Auto-generated by South --
    # do not hand-edit the frozen ``models`` dict below.
    def forwards(self, orm):
        # Adding model 'Subscription'
        db.create_table(u'detective_subscription', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=100, null=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=7)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('address', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('country', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('siret', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
            ('vat', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
            ('identification', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
            ('plan', self.gf('django.db.models.fields.CharField')(max_length=8)),
            ('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal(u'detective', ['Subscription'])
    def backwards(self, orm):
        # Deleting model 'Subscription'
        db.delete_table(u'detective_subscription')
    # Frozen ORM state at the time this migration was generated; South
    # uses it to build the ``orm`` object passed to forwards/backwards.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'detective.article': {
            'Meta': {'object_name': 'Article'},
            'content': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '250'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
        },
        u'detective.detectiveprofileuser': {
            'Meta': {'object_name': 'DetectiveProfileUser'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'plan': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '10'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'detective.quoterequest': {
            'Meta': {'object_name': 'QuoteRequest'},
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'domain': ('django.db.models.fields.TextField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
            'employer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'public': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'records': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'users': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'detective.searchterm': {
            'Meta': {'object_name': 'SearchTerm'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_literal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'subject': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
        },
        u'detective.subscription': {
            'Meta': {'object_name': 'Subscription'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identification': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'plan': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'siret': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'vat': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
        },
        u'detective.topic': {
            'Meta': {'unique_together': "(('slug', 'author'),)", 'object_name': 'Topic'},
            'about': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'contributor_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ontology_as_json': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'ontology_as_mod': ('django.db.models.fields.SlugField', [], {'max_length': '250', 'blank': 'True'}),
            'ontology_as_owl': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'skeleton_title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
        },
        u'detective.topicskeleton': {
            'Meta': {'object_name': 'TopicSkeleton'},
            'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ontology': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'picture_credits': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'schema_picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'target_plans': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
        },
        u'detective.topictoken': {
            'Meta': {'unique_together': "(('topic', 'email'),)", 'object_name': 'TopicToken'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
        }
    }
    complete_apps = ['detective']
#!/usr/bin/env python
# coding:utf-8
'''
Author: Steven C. Howell --<steven.howell@nist.gov>
Purpose: calculating the Guinier fit
Created: 12/21/2016
00000000011111111112222222222333333333344444444445555555555666666666677777777778
12345678901234567890123456789012345678901234567890123456789012345678901234567890
'''
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from scipy import optimize
# Module-wide logging setup: terse "LEVEL: message" records at DEBUG verbosity.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
def fit_line_v0(x, y, dy):
    '''
    Weighted linear fit of y = m * x + b using scipy.optimize.leastsq.

    Residuals are weighted by 1 / dy.  Returns (m, b, m_err, b_err),
    where the parameter errors are the square roots of the diagonal of
    the covariance estimate (cov_x) returned by leastsq.

    http://scipy-cookbook.readthedocs.io/items/FittingData.html#id2
    error estimate seems reasonable compared to input data
    '''
    weights = 1 / dy

    def line(params, x_values):
        # params = [slope, intercept]
        return params[0] * x_values + params[1]

    def weighted_residuals(params, x_values, y_values, w):
        return (y_values - line(params, x_values)) * w

    # Seed the optimizer with the line through the last two points.
    slope_guess = (y[-2] - y[-1]) / (x[-2] - x[-1])
    intercept_guess = y[-1] - slope_guess * x[-1]

    fit = optimize.leastsq(weighted_residuals, [slope_guess, intercept_guess],
                           args=(x, y, weights), full_output=1)
    m, b = fit[0]
    # cov_x is the curvature-based covariance estimate around the solution
    # (see scipy.optimize.leastsq docs); 1-sigma errors from its diagonal.
    cov = fit[1]
    m_err = np.sqrt(cov[0, 0])
    b_err = np.sqrt(cov[1, 1])
    return m, b, m_err, b_err
def fit_line_v1(x, y, dy):
    '''
    Weighted linear fit of y = m * x + b using np.linalg.lstsq.

    The design matrix and data are scaled by w = 1 / dy**2, so residuals
    are weighted by the inverse variances.  lstsq's diagnostics
    (residual sum, rank, singular values) carry no parameter covariance,
    so no error estimates are available: m_err and b_err are 0.0.
    '''
    w = 1 / dy ** 2
    A = np.vstack([x * w, 1.0 * w]).T
    # rcond=None selects the modern machine-precision-based cutoff and
    # silences the FutureWarning that the legacy default raises.
    p, residuals, _, _ = np.linalg.lstsq(A, y * w, rcond=None)
    m = p[0]
    b = p[1]
    # No covariance available from lstsq -> errors reported as zero.
    m_err = 0.0
    b_err = 0.0
    return m, b, m_err, b_err
def fit_line_v2(x, y, dy):
    '''
    Weighted linear fit of y = m * x + b via np.polynomial.polynomial.polyfit.

    essentially the same results as fit_line_v0.  The full=True
    diagnostics (residual sum, rank, singular values, rcond) contain no
    covariance matrix, so no error estimates are available: m_err and
    b_err are 0.0.
    '''
    weights = 1 / dy ** 2
    # polyfit returns coefficients ordered from low degree to high.
    coefs = np.polynomial.polynomial.polyfit(x, y, 1, w=weights, full=True)[0]
    b, m = coefs
    return m, b, 0.0, 0.0
def fit_line_v3(x, y, dy):
    '''
    Fit data for y = mx + b
    return m and b
    method taken from SasView:
    github.com/SasView/sasview/blob/master/src/sas/sascalc/invariant/invariant.py
    error estimate seems reasonable

    Parameters: x, y, dy -- 1-D arrays of abscissa, ordinate and
    ordinate uncertainty. Returns (m, b, m_err, b_err).
    '''
    # Whiten the system: divide each equation by its uncertainty.
    A = np.vstack([x / dy, 1.0 / dy]).T
    # rcond=None selects numpy's modern machine-precision cutoff and
    # silences the FutureWarning emitted for the legacy default.
    p, residuals, _, _ = np.linalg.lstsq(A, y / dy, rcond=None)
    m = p[0]
    b = p[1]
    # Get the covariance matrix, defined as inv_cov = a_transposed * a
    inv_cov = np.dot(A.transpose(), A)
    cov = np.linalg.pinv(inv_cov)
    # Scale by the summed squared residuals to estimate parameter variances.
    err_matrix = np.abs(residuals) * cov
    m_err, b_err = np.sqrt(np.diag(err_matrix))
    return m, b, m_err, b_err
def fit_line_v4(x, y, dy):
    '''
    Fit data for y = mx + b
    return m and b
    error estimate seems much too small
    '''
    weights = 1 / dy ** 2
    coeffs, cov = np.polyfit(x, y, 1, w=weights, cov=True)
    m, b = coeffs
    # np.polyfit documents the diagonal of ``cov`` as the variance
    # estimate of each coefficient; take square roots for standard
    # deviations.
    std_devs = np.sqrt(np.diag(cov))
    m_err, b_err = std_devs
    return m, b, m_err, b_err
def fit_line_v5(x, y, dy):
    '''
    Fit data for y = mx + b
    return m and b
    method taken from wikipedia:
    https://en.wikipedia.org/wiki/Linear_least_squares_(mathematics)#Python
    error estimate seems reasonable comared to input data
    This result is identical to v0 and v7

    Normal-equation solution with W = diag(1/dy**2); the parameter
    covariance reduces to (X^T W X)^-1 because the data covariance
    matrix is W^-1.
    '''
    weights = 1 / dy ** 2
    n_pts = len(x)
    # Design matrix: one column for the slope, one for the intercept.
    design = np.array([x, np.ones(n_pts)]).T
    target = np.array(y).reshape(-1, 1)
    weight_matrix = np.eye(n_pts) * weights  # inverse-variance weighting
    # Parameter covariance and the parameters themselves.
    cov = np.linalg.inv(design.T.dot(weight_matrix).dot(design))
    m, b = cov.dot(design.T).dot(weight_matrix).dot(target).reshape(2)
    m_err, b_err = np.sqrt(np.diag(cov))
    return m, b, m_err, b_err
def fit_line_v6(x, y, dy):
    '''
    Fit data for y = mx + b
    return m and b
    method taken from Baird's "Experimentation": pg 138-140
    The dy's in the derivation are not the same as the error of the y values
    This method does not propagate the error
    '''
    var = dy ** 2  # variance, when dy is the standard deviation
    wx = x / var
    wy = y / var
    sum_xy = np.sum(wx * wy)
    sum_x = np.sum(wx)
    sum_y = np.sum(wy)
    sum_x_dy_inv = np.sum(wx / var)
    sum_dy_inv = np.sum(1 / var)
    # NOTE(review): wx is already x/var, so this computes sum((x/var)**2);
    # the textbook weighted formula uses sum(x**2/var).  Looks like a
    # transcription slip, consistent with the docstring's caveat -- confirm.
    sum_x2 = np.sum(wx ** 2)
    den = sum_dy_inv * sum_x2 - sum_x * sum_x_dy_inv
    m_num = sum_dy_inv * sum_xy - sum_x_dy_inv * sum_y
    m = m_num / den
    b_num = sum_x2 * sum_y - sum_x * sum_xy
    b = b_num / den
    n = len(x)
    # Error estimated from the scatter of the residuals about the fitted
    # line (n - 2 degrees of freedom); the input dy's are NOT propagated.
    y_fit = m * x + b
    delta_y = y - y_fit
    y_err = np.sqrt(np.sum(delta_y ** 2) / (n - 2))
    m_err = y_err * np.sqrt(n / den)
    b_err = y_err * np.sqrt(sum_x2 / den)
    return m, b, m_err, b_err
def fit_line_v7(x, y, dy):
    '''
    Fit data for y = mx + b
    return m and b
    from Huges & Hase "Measurements and their Uncertainties", pg 69-70
    and Press et al. "Numerical Recipes 3rd Edition", pg 781-783
    '''
    weight = 1 / dy ** 2  # inverse-variance weights
    # Weighted sums entering the closed-form normal equations.
    s_w = np.sum(weight)
    s_x = np.sum(weight * x)
    s_y = np.sum(weight * y)
    s_xx = np.sum(weight * x ** 2)
    s_xy = np.sum(weight * x * y)
    delta = s_w * s_xx - s_x ** 2
    m = (s_w * s_xy - s_x * s_y) / delta
    b = (s_xx * s_y - s_x * s_xy) / delta
    # Analytic one-sigma uncertainties of slope and intercept.
    m_err = np.sqrt(s_w / delta)
    b_err = np.sqrt(s_xx / delta)
    return m, b, m_err, b_err
def fit_line_v8(x, y, dy):
    '''
    Fit data for y = mx + b
    return m and b
    from Press et al. "Numerical Recipes 3rd Edition", pg 781-783
    using numerically robust formalism
    '''
    weights = 1 / dy ** 2  # inverse-variance weights
    sum_w = np.sum(weights)
    sum_wx = np.sum(weights * x)
    sum_wy = np.sum(weights * y)
    # Shift x by its weighted mean before scaling by 1/dy; this is the
    # "t" variable of Numerical Recipes and keeps the sums well
    # conditioned.
    t = 1 / dy * (x - sum_wx / sum_w)
    stt = np.sum(t ** 2)
    m = np.sum(t * y / dy) / stt
    b = (sum_wy - sum_wx * m) / sum_w
    m_err = np.sqrt(1 / stt)
    b_err = np.sqrt((1 + sum_wx ** 2 / (sum_w * stt)) / sum_w)
    return m, b, m_err, b_err
def guinier_fit(q, iq, diq, dq=None, q_min=0.0, q_max=0.1, view_fit=False,
                fit_method=fit_line_v5, save_fname='guiner_fit.html',
                refine=False):
    '''
    perform Guinier fit
    return I(0) and Rg

    Fits ln(I) vs q**2 over [q_min, q_max] using ``fit_method`` and maps
    the slope/intercept to Rg = sqrt(-3*m) and I(0) = exp(b).

    Returns (i0, rg, i0_err, rg_err) -- with the figure appended as a
    fifth element when ``view_fit`` is True, so callers must unpack five
    values in that case.
    '''
    # Identify the range for the fit
    id_x = (q >= q_min) & (q <= q_max)
    q2 = q[id_x] ** 2
    log_iq = np.log(iq[id_x])
    # Relative error propagates through the log: d(ln I) = dI / I.
    dlog_iq = diq[id_x] / iq[id_x]
    if dq is not None:
        # NOTE(review): dq2 is computed but never used below -- the
        # q-resolution is currently ignored by the fit; confirm intent.
        dq2 = 2 * q[id_x] * dq[id_x]
    m, b, m_err, b_err = fit_method(q2, log_iq, dlog_iq)
    rg = np.sqrt(-3 * m)
    rg_err = 3 / (2 * rg) * m_err  # error propagation through sqrt(-3*m)
    rg, rg_err = round_error(rg, rg_err)
    i0 = np.exp(b)
    i0_err = i0 * b_err  # error propagation through exp(b)
    i0, i0_err = round_error(i0, i0_err)
    # Guinier validity criterion: q_max * Rg should not exceed ~1.3.
    rg_q_max = 1.3 / rg
    if rg_q_max < q[id_x][-1]:
        logging.warning('initial q-max too high, 1.3/Rg={} < {}'.format(
            rg_q_max, q[id_x][-1]))
        if refine:
            # Re-fit with the tightened q-max.  ``refine`` is not passed
            # down, so at most one refinement pass is performed.
            logging.warning('repeating fit with q-max={}'.format(rg_q_max))
            return guinier_fit(q, iq, diq, dq=dq, q_min=q_min, q_max=rg_q_max,
                               view_fit=view_fit, fit_method=fit_method,
                               save_fname=save_fname)
    if view_fit:
        from sas_modeling import make_figures
        # Prepend the q=0 intercept point so the fit line extends to the
        # axis in the plot.
        q2 = np.insert(q2, 0, 0.0)
        log_iq = np.insert(log_iq, 0, b)
        dlog_iq = np.insert(dlog_iq, 0, b_err)
        fit_line = m * q2 + b
        q_range = q[id_x][[0, -1]]
        fig = make_figures.plot_guinier_fit(q2, log_iq, fit_line, i0, i0_err,
                                            rg, rg_err, dlog_iq, q_range,
                                            save_fname=save_fname)
        return i0, rg, i0_err, rg_err, fig
    return i0, rg, i0_err, rg_err
def round_error(val, val_err, sig_figs=2):
    '''
    Round a value and its error estimate to a certain number
    of significant figures (on the error estimate). By default 2
    significant figures are used.

    Parameters: val -- the value; val_err -- its uncertainty;
    sig_figs -- significant figures to keep on the uncertainty.
    Returns the rounded (val, val_err) pair.
    '''
    if val_err <= 0:
        # log10 is undefined here (np.log10(0) -> -inf makes the int()
        # cast below raise OverflowError); there is nothing sensible to
        # round against, so return the inputs unchanged.
        return val, val_err
    # round number to a certain number of significant figures
    n = int(np.log10(val_err))  # truncated decade of the error
    if val_err >= 1:
        n += 1
    scale = 10 ** (sig_figs - n)
    val = round(val * scale) / scale
    val_err = round(val_err * scale) / scale
    return val, val_err
def compare_guinier_fit(q, iq, diq, **args):
    '''
    perform Guinier fit
    return I(0) and Rg

    Runs guinier_fit once per fit_line_v* implementation in this module
    and saves one comparison figure per method.  The fitted values are
    not returned; the saved figures are the output.
    '''
    # All line-fit variants to compare (fit_line_v0 is defined earlier
    # in this module).
    fit_methods = [
        fit_line_v0,
        fit_line_v1,
        fit_line_v2,
        fit_line_v3,
        fit_line_v4,
        fit_line_v5,
        fit_line_v6,
        fit_line_v7,
        fit_line_v8,
    ]
    for fit_method in fit_methods:
        # Label the output file with the trailing 'vN' of the function name.
        save_fname = 'fit_{}_comparison.html'.format(fit_method.__name__[-2:])
        # guinier_fit returns FIVE values when view_fit=True (the figure
        # is appended); the original unpacked four, which raised
        # "ValueError: too many values to unpack" on every iteration.
        i0, rg, i0_err, rg_err, fig = guinier_fit(q, iq, diq,
                                                  fit_method=fit_method,
                                                  save_fname=save_fname,
                                                  view_fit=True, **args)
def covariance(x, y):
    '''Sample covariance of the paired arrays x and y (n - 1 norm).'''
    assert len(x) == len(y)
    # Mean-centre both series, then average the products of deviations.
    x_dev = x - x.mean()
    y_dev = y - y.mean()
    return (x_dev * y_dev).sum() / (len(x) - 1)
def bayesian():
    '''
    Placeholder for a Bayesian line fit.

    Raises:
        NotImplementedError: always; the method has not been written.
    '''
    # The original body was the bare expression ``NotImplemented``,
    # which evaluates the constant and silently discards it (the call
    # returned None).  Raising makes the missing implementation explicit.
    raise NotImplementedError('bayesian line fitting is not implemented')
if __name__ == '__main__':
    # Demo driver: load a SANS data set, plot I(q), and run a refined
    # Guinier fit with fit_line_v8.
    import os
    import make_figures
    # data_fname = 'data/1mgml_LysoSANS.sub'; skiprows = 1
    skiprows = 0
    data_fname = 'data/1mgml_lys_sans.dat'; q_max = 0.091  # lys
    # data_fname = 'data/5mgml_nist_mab_sans.dat'; q_max = 0.0296  # mab
    assert os.path.exists(data_fname)
    data = np.asfortranarray(np.loadtxt(data_fname, skiprows=skiprows))
    # data[:, 1:3] *= 1 / data[0, 1]
    # column 4 is the effective q-values, accounting for the beam spread
    if True:
        plot_fname = 'I(q)_and_guinier-no_scale.html'
        make_figures.plot_iq_and_guinier(data[:, 0], data[:, 1], data[:, 2],
                                         save_fname=plot_fname)
    # scale the data
    # data[:, 1:3] *= 1 / data[0, 1]  # set the first measured point to 1
    # data[:, 1:3] *= 10 / data[0, 1]  # set the first measured point to 10
    # data[:, 1:3] *= 100 / data[0, 1]  # set the first measured point to 100
    # data[:, 1:3] *= 1000 / data[0, 1]  # set the first measured point to 1000
    # compare_guinier_fit(data[:, 0], data[:, 1], data[:, 2], q_max=q_max,
    #                     refine=True)
    save_fname = data_fname.replace('.dat', '.html')
    # guinier_fit returns FIVE values when view_fit=True (the figure is
    # appended); the original unpacked four, which raised ValueError.
    i0, rg, i0_err, rg_err, fig = guinier_fit(data[:, 0], data[:, 1],
                                              data[:, 2], dq=data[:, 3],
                                              q_max=q_max, view_fit=True,
                                              fit_method=fit_line_v8,
                                              refine=True,
                                              save_fname=save_fname)
    # Raw string: '\m' is an invalid escape sequence (DeprecationWarning
    # on Python 3.6+); r'...' preserves the exact same runtime bytes.
    logging.debug(r'\m/ >.< \m/')
| gpl-3.0 |
ChenJunor/hue | desktop/core/ext-py/Django-1.6.10/tests/comment_tests/tests/test_app_api.py | 58 | 2664 | from __future__ import absolute_import
from django.conf import settings
from django.contrib import comments
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from django.utils import six
from . import CommentTestCase
class CommentAppAPITests(CommentTestCase):
    """Tests for the "comment app" API"""
    def testGetCommentApp(self):
        self.assertEqual(comments.get_comment_app(), comments)
    @override_settings(
        COMMENTS_APP='missing_app',
        INSTALLED_APPS=list(settings.INSTALLED_APPS) + ['missing_app'],
    )
    def testGetMissingCommentApp(self):
        with six.assertRaisesRegex(self, ImproperlyConfigured, 'missing_app'):
            _ = comments.get_comment_app()
    def testGetForm(self):
        self.assertEqual(comments.get_form(), CommentForm)
    def testGetFormTarget(self):
        self.assertEqual(comments.get_form_target(), "/post/")
    def testGetFlagURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_flag_url(c), "/flag/12345/")
    def testGetDeleteURL(self):
        # Renamed from ``getGetDeleteURL``: the old name did not match
        # the ``test*`` discovery pattern, so this assertion never ran.
        c = Comment(id=12345)
        self.assertEqual(comments.get_delete_url(c), "/delete/12345/")
    def testGetApproveURL(self):
        # Renamed from ``getGetApproveURL`` for the same reason.
        c = Comment(id=12345)
        self.assertEqual(comments.get_approve_url(c), "/approve/12345/")
@override_settings(
    COMMENTS_APP='comment_tests.custom_comments',
    INSTALLED_APPS=list(settings.INSTALLED_APPS) + [
        'comment_tests.custom_comments'],
)
class CustomCommentTest(CommentTestCase):
    """Same API checks as CommentAppAPITests, but against the
    ``custom_comments`` app configured via COMMENTS_APP."""
    urls = 'comment_tests.urls'
    def testGetCommentApp(self):
        from comment_tests import custom_comments
        self.assertEqual(comments.get_comment_app(), custom_comments)
    def testGetModel(self):
        from comment_tests.custom_comments.models import CustomComment
        self.assertEqual(comments.get_model(), CustomComment)
    def testGetForm(self):
        from comment_tests.custom_comments.forms import CustomCommentForm
        self.assertEqual(comments.get_form(), CustomCommentForm)
    def testGetFormTarget(self):
        self.assertEqual(comments.get_form_target(), "/post/")
    def testGetFlagURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_flag_url(c), "/flag/12345/")
    def testGetDeleteURL(self):
        # Renamed from ``getGetDeleteURL``: the old name did not match
        # the ``test*`` discovery pattern, so this assertion never ran.
        c = Comment(id=12345)
        self.assertEqual(comments.get_delete_url(c), "/delete/12345/")
    def testGetApproveURL(self):
        # Renamed from ``getGetApproveURL`` for the same reason.
        c = Comment(id=12345)
        self.assertEqual(comments.get_approve_url(c), "/approve/12345/")
| apache-2.0 |
scripnichenko/glance | glance/tests/unit/common/test_wsgi.py | 5 | 26226 | # Copyright 2010-2011 OpenStack Foundation
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import gettext
import socket
from babel import localedata
import eventlet.patcher
import fixtures
import mock
from oslo_serialization import jsonutils
import routes
import six
import webob
from glance.api.v1 import router as router_v1
from glance.api.v2 import router as router_v2
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance import i18n
from glance.tests import utils as test_utils
class RequestTest(test_utils.BaseTestCase):
    """Behaviour of glance.common.wsgi.Request: Content-Range parsing,
    content-type negotiation, Accept-Language matching, and HTTP-method
    routing checks against the v1/v2 API routers."""
    def _set_expected_languages(self, all_locales=None, avail_locales=None):
        # Stub babel/gettext so ``all_locales`` appear to exist and
        # (optionally) only ``avail_locales`` have translation catalogs.
        if all_locales is None:
            all_locales = []
        # Override localedata.locale_identifiers to return some locales.
        def returns_some_locales(*args, **kwargs):
            return all_locales
        self.stubs.Set(localedata, 'locale_identifiers', returns_some_locales)
        # Override gettext.find to return other than None for some languages.
        def fake_gettext_find(lang_id, *args, **kwargs):
            found_ret = '/glance/%s/LC_MESSAGES/glance.mo' % lang_id
            if avail_locales is None:
                # All locales are available.
                return found_ret
            languages = kwargs['languages']
            if languages[0] in avail_locales:
                return found_ret
            return None
        self.stubs.Set(gettext, 'find', fake_gettext_find)
    def test_content_range(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Range"] = 'bytes 10-99/*'
        range_ = request.get_content_range()
        self.assertEqual(10, range_.start)
        self.assertEqual(100, range_.stop)  # non-inclusive
        self.assertIsNone(range_.length)
    def test_content_range_invalid(self):
        request = wsgi.Request.blank('/tests/123')
        # 'bytes=0-99' is Range syntax, not Content-Range -> 400.
        request.headers["Content-Range"] = 'bytes=0-99'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          request.get_content_range)
    def test_content_type_missing(self):
        request = wsgi.Request.blank('/tests/123')
        self.assertRaises(exception.InvalidContentType,
                          request.get_content_type, ('application/xml',))
    def test_content_type_unsupported(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "text/html"
        self.assertRaises(exception.InvalidContentType,
                          request.get_content_type, ('application/xml',))
    def test_content_type_with_charset(self):
        # The charset parameter is stripped before matching.
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "application/json; charset=UTF-8"
        result = request.get_content_type(('application/json',))
        self.assertEqual("application/json", result)
    # Glance only serves JSON, so every Accept header below -- including
    # XML-only ones -- negotiates down to application/json.
    def test_content_type_from_accept_xml(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/xml"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)
    def test_content_type_from_accept_json(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/json"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)
    def test_content_type_from_accept_xml_json(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/xml, application/json"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)
    def test_content_type_from_accept_json_xml_quality(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = ("application/json; q=0.3, "
                                     "application/xml; q=0.9")
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)
    def test_content_type_accept_default(self):
        request = wsgi.Request.blank('/tests/123.unsupported')
        request.headers["Accept"] = "application/unsupported1"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)
    def test_language_accept_default(self):
        # Unsupported languages match nothing -> None.
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept-Language"] = "zz-ZZ,zz;q=0.8"
        result = request.best_match_language()
        self.assertIsNone(result)
    def test_language_accept_none(self):
        request = wsgi.Request.blank('/tests/123')
        result = request.best_match_language()
        self.assertIsNone(result)
    def test_best_match_language_expected(self):
        # If Accept-Language is a supported language, best_match_language()
        # returns it.
        self._set_expected_languages(all_locales=['it'])
        req = wsgi.Request.blank('/', headers={'Accept-Language': 'it'})
        self.assertEqual('it', req.best_match_language())
    def test_request_match_language_unexpected(self):
        # If Accept-Language is a language we do not support,
        # best_match_language() returns None.
        self._set_expected_languages(all_locales=['it'])
        req = wsgi.Request.blank('/', headers={'Accept-Language': 'zh'})
        self.assertIsNone(req.best_match_language())
    @mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match')
    def test_best_match_language_unknown(self, mock_best_match):
        # Test that we are actually invoking language negotiation by webop
        request = wsgi.Request.blank('/')
        accepted = 'unknown-lang'
        request.headers = {'Accept-Language': accepted}
        mock_best_match.return_value = None
        self.assertIsNone(request.best_match_language())
        # If Accept-Language is missing or empty, match should be None
        request.headers = {'Accept-Language': ''}
        self.assertIsNone(request.best_match_language())
        request.headers.pop('Accept-Language')
        self.assertIsNone(request.best_match_language())
    def test_http_error_response_codes(self):
        sample_id, member_id, tag_val, task_id = 'abc', '123', '1', '2'
        # NOTE: the bare string literals below are no-op expressions that
        # act as section markers between the v1 and v2 checks.
        """Makes sure v1 unallowed methods return 405"""
        unallowed_methods = [
            ('/images', ['PUT', 'DELETE', 'HEAD', 'PATCH']),
            ('/images/detail', ['POST', 'PUT', 'DELETE', 'PATCH']),
            ('/images/%s' % sample_id, ['POST', 'PATCH']),
            ('/images/%s/members' % sample_id,
                ['POST', 'DELETE', 'HEAD', 'PATCH']),
            ('/images/%s/members/%s' % (sample_id, member_id),
                ['POST', 'HEAD', 'PATCH']),
        ]
        api = test_utils.FakeAuthMiddleware(router_v1.API(routes.Mapper()))
        for uri, methods in unallowed_methods:
            for method in methods:
                req = webob.Request.blank(uri)
                req.method = method
                res = req.get_response(api)
                self.assertEqual(405, res.status_int)
        """Makes sure v2 unallowed methods return 405"""
        unallowed_methods = [
            ('/schemas/image', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/schemas/images', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/schemas/member', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/schemas/members', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/schemas/task', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/schemas/tasks', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/images', ['PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/images/%s' % sample_id, ['POST', 'PUT', 'HEAD']),
            ('/images/%s/file' % sample_id,
                ['POST', 'DELETE', 'PATCH', 'HEAD']),
            ('/images/%s/tags/%s' % (sample_id, tag_val),
                ['GET', 'POST', 'PATCH', 'HEAD']),
            ('/images/%s/members' % sample_id,
                ['PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/images/%s/members/%s' % (sample_id, member_id),
                ['POST', 'PATCH', 'HEAD']),
            ('/tasks', ['PUT', 'DELETE', 'PATCH', 'HEAD']),
            ('/tasks/%s' % task_id, ['POST', 'PUT', 'PATCH', 'HEAD']),
        ]
        api = test_utils.FakeAuthMiddleware(router_v2.API(routes.Mapper()))
        for uri, methods in unallowed_methods:
            for method in methods:
                req = webob.Request.blank(uri)
                req.method = method
                res = req.get_response(api)
                self.assertEqual(405, res.status_int)
        """Makes sure not implemented methods return 501"""
        req = webob.Request.blank('/schemas/image')
        req.method = 'NonexistentMethod'
        res = req.get_response(api)
        self.assertEqual(501, res.status_int)
class ResourceTest(test_utils.BaseTestCase):
    """Tests for wsgi.Resource: extraction of routing action args,
    dispatching to controller methods, and error translation."""
    def test_get_action_args(self):
        env = {
            'wsgiorg.routing_args': [
                None,
                {
                    'controller': None,
                    'format': None,
                    'action': 'update',
                    'id': 12,
                },
            ],
        }
        # 'controller' and 'format' keys are stripped from the result.
        expected = {'action': 'update', 'id': 12}
        actual = wsgi.Resource(None, None, None).get_action_args(env)
        self.assertEqual(expected, actual)
    def test_get_action_args_invalid_index(self):
        # Missing routing-args entry degrades to an empty dict.
        env = {'wsgiorg.routing_args': []}
        expected = {}
        actual = wsgi.Resource(None, None, None).get_action_args(env)
        self.assertEqual(expected, actual)
    def test_get_action_args_del_controller_error(self):
        # No 'controller' key present: deletion error is swallowed.
        actions = {'format': None,
                   'action': 'update',
                   'id': 12}
        env = {'wsgiorg.routing_args': [None, actions]}
        expected = {'action': 'update', 'id': 12}
        actual = wsgi.Resource(None, None, None).get_action_args(env)
        self.assertEqual(expected, actual)
    def test_get_action_args_del_format_error(self):
        # No 'format' key present: deletion error is swallowed.
        actions = {'action': 'update', 'id': 12}
        env = {'wsgiorg.routing_args': [None, actions]}
        expected = {'action': 'update', 'id': 12}
        actual = wsgi.Resource(None, None, None).get_action_args(env)
        self.assertEqual(expected, actual)
    def test_dispatch(self):
        class Controller(object):
            def index(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(None, None, None)
        actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
        expected = ('on', 'off')
        self.assertEqual(expected, actual)
    def test_dispatch_default(self):
        # Controller has no 'index', so dispatch falls back to default().
        class Controller(object):
            def default(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(None, None, None)
        actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
        expected = ('on', 'off')
        self.assertEqual(expected, actual)
    def test_dispatch_no_default(self):
        # Neither the requested action nor default() exists -> AttributeError.
        class Controller(object):
            def show(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(None, None, None)
        self.assertRaises(AttributeError, resource.dispatch, Controller(),
                          'index', 'on', pants='off')
    def test_call(self):
        class FakeController(object):
            def index(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(FakeController(), None, None)
        # Stubbed dispatch: the deserializer returns an empty arg list
        # and the serializer raises, so __call__ must surface the 403.
        def dispatch(self, obj, action, *args, **kwargs):
            if isinstance(obj, wsgi.JSONRequestDeserializer):
                return []
            if isinstance(obj, wsgi.JSONResponseSerializer):
                raise webob.exc.HTTPForbidden()
        self.stubs.Set(wsgi.Resource, 'dispatch', dispatch)
        request = wsgi.Request.blank('/')
        response = resource.__call__(request)
        self.assertIsInstance(response, webob.exc.HTTPForbidden)
        self.assertEqual(403, response.status_code)
    def test_call_raises_exception(self):
        class FakeController(object):
            def index(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(FakeController(), None, None)
        # Unexpected exceptions become a 500 response, not a crash.
        def dispatch(self, obj, action, *args, **kwargs):
            raise Exception("test exception")
        self.stubs.Set(wsgi.Resource, 'dispatch', dispatch)
        request = wsgi.Request.blank('/')
        response = resource.__call__(request)
        self.assertIsInstance(response, webob.exc.HTTPInternalServerError)
        self.assertEqual(500, response.status_code)
    @mock.patch.object(wsgi, 'translate_exception')
    def test_resource_call_error_handle_localized(self,
                                                  mock_translate_exception):
        # The raised HTTPBadRequest is passed through translate_exception
        # and the translated message is what the caller sees.
        class Controller(object):
            def delete(self, req, identity):
                raise webob.exc.HTTPBadRequest(explanation='Not Found')
        actions = {'action': 'delete', 'identity': 12}
        env = {'wsgiorg.routing_args': [None, actions]}
        request = wsgi.Request.blank('/tests/123', environ=env)
        message_es = 'No Encontrado'
        resource = wsgi.Resource(Controller(),
                                 wsgi.JSONRequestDeserializer(),
                                 None)
        translated_exc = webob.exc.HTTPBadRequest(message_es)
        mock_translate_exception.return_value = translated_exc
        e = self.assertRaises(webob.exc.HTTPBadRequest,
                              resource, request)
        self.assertEqual(message_es, str(e))
    @mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match')
    @mock.patch.object(i18n, 'translate')
    def test_translate_exception(self, mock_translate, mock_best_match):
        # translate_exception rewrites the explanation using the
        # negotiated language ('de' here, mocked).
        mock_translate.return_value = 'No Encontrado'
        mock_best_match.return_value = 'de'
        req = wsgi.Request.blank('/tests/123')
        req.headers["Accept-Language"] = "de"
        e = webob.exc.HTTPNotFound(explanation='Not Found')
        e = wsgi.translate_exception(req, e)
        self.assertEqual('No Encontrado', e.explanation)
class JSONResponseSerializerTest(test_utils.BaseTestCase):
    """Tests for wsgi.JSONResponseSerializer: JSON encoding of plain,
    nested, datetime and set values, and the default() response setup."""
    def test_to_json(self):
        fixture = {"key": "value"}
        expected = '{"key": "value"}'
        actual = wsgi.JSONResponseSerializer().to_json(fixture)
        self.assertEqual(expected, actual)
    def test_to_json_with_date_format_value(self):
        # datetimes serialize to ISO-8601 with microseconds.
        fixture = {"date": datetime.datetime(1901, 3, 8, 2)}
        expected = '{"date": "1901-03-08T02:00:00.000000"}'
        actual = wsgi.JSONResponseSerializer().to_json(fixture)
        self.assertEqual(expected, actual)
    def test_to_json_with_more_deep_format(self):
        fixture = {"is_public": True, "name": [{"name1": "test"}]}
        expected = {"is_public": True, "name": [{"name1": "test"}]}
        actual = wsgi.JSONResponseSerializer().to_json(fixture)
        actual = jsonutils.loads(actual)
        for k in expected:
            self.assertEqual(expected[k], actual[k])
    def test_to_json_with_set(self):
        # Sets serialize as JSON arrays.
        fixture = set(["foo"])
        expected = '["foo"]'
        actual = wsgi.JSONResponseSerializer().to_json(fixture)
        self.assertEqual(expected, actual)
    def test_default(self):
        fixture = {"key": "value"}
        response = webob.Response()
        wsgi.JSONResponseSerializer().default(response, fixture)
        self.assertEqual(200, response.status_int)
        # List comprehension instead of filter(): on Python 3 filter()
        # returns a lazy iterator and len() on it raises TypeError.
        content_types = [h for h in response.headerlist
                         if h[0] == 'Content-Type']
        self.assertEqual(1, len(content_types))
        self.assertEqual('application/json', response.content_type)
        self.assertEqual('{"key": "value"}', response.body)
class JSONRequestDeserializerTest(test_utils.BaseTestCase):
    """Tests for wsgi.JSONRequestDeserializer: body detection via
    Content-Length / Transfer-Encoding and JSON decoding.

    NOTE(review): request.body is assigned native ``str`` literals,
    which assumes Python 2 (webob on Python 3 requires bytes) --
    confirm the targeted interpreter before porting.
    """
    def test_has_body_no_content_length(self):
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = 'asdf'
        request.headers.pop('Content-Length')
        self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
    def test_has_body_zero_content_length(self):
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = 'asdf'
        request.headers['Content-Length'] = 0
        self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
    def test_has_body_has_content_length(self):
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = 'asdf'
        # webob sets Content-Length automatically when a body is assigned.
        self.assertIn('Content-Length', request.headers)
        self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
    def test_no_body_no_content_length(self):
        request = wsgi.Request.blank('/')
        self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
    def test_from_json(self):
        fixture = '{"key": "value"}'
        expected = {"key": "value"}
        actual = wsgi.JSONRequestDeserializer().from_json(fixture)
        self.assertEqual(expected, actual)
    def test_from_json_malformed(self):
        # Garbage input maps to a 400, not a raw JSON decode error.
        fixture = 'kjasdklfjsklajf'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          wsgi.JSONRequestDeserializer().from_json, fixture)
    def test_default_no_body(self):
        request = wsgi.Request.blank('/')
        actual = wsgi.JSONRequestDeserializer().default(request)
        expected = {}
        self.assertEqual(expected, actual)
    def test_default_with_body(self):
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = '{"key": "value"}'
        actual = wsgi.JSONRequestDeserializer().default(request)
        expected = {"body": {"key": "value"}}
        self.assertEqual(expected, actual)
    def test_has_body_has_transfer_encoding(self):
        self.assertTrue(self._check_transfer_encoding(
            transfer_encoding='chunked'))
    def test_has_body_multiple_transfer_encoding(self):
        self.assertTrue(self._check_transfer_encoding(
            transfer_encoding='chunked, gzip'))
    def test_has_body_invalid_transfer_encoding(self):
        self.assertFalse(self._check_transfer_encoding(
            transfer_encoding='invalid', content_length=0))
    def test_has_body_invalid_transfer_encoding_with_content_length(self):
        self.assertTrue(self._check_transfer_encoding(
            transfer_encoding='invalid', content_length=5))
    def test_has_body_valid_transfer_encoding_with_content_length(self):
        self.assertTrue(self._check_transfer_encoding(
            transfer_encoding='chunked', content_length=0))
    def _check_transfer_encoding(self, transfer_encoding=None,
                                 content_length=None):
        # Helper: build a POST with the given transfer-encoding /
        # content-length headers and report has_body()'s verdict.
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = 'fake_body'
        request.headers['transfer-encoding'] = transfer_encoding
        if content_length is not None:
            request.headers['content-length'] = content_length
        return wsgi.JSONRequestDeserializer().has_body(request)
    def test_get_bind_addr_default_value(self):
        # NOTE(review): exercises wsgi.get_bind_addr, not the
        # deserializer; note the default port is the *string* '123456'.
        expected = ('0.0.0.0', '123456')
        actual = wsgi.get_bind_addr(default_port="123456")
        self.assertEqual(expected, actual)
class ServerTest(test_utils.BaseTestCase):
    """Tests for wsgi.Server: greenthread pool creation and eventlet
    server invocation with the configured keepalive settings."""
    def test_create_pool(self):
        """Ensure the wsgi thread pool is an eventlet.greenpool.GreenPool."""
        actual = wsgi.Server(threads=1).create_pool()
        self.assertIsInstance(actual, eventlet.greenpool.GreenPool)
    @mock.patch.object(wsgi.Server, 'configure_socket')
    def test_http_keepalive(self, mock_configure_socket):
        self.config(http_keepalive=False)
        self.config(workers=0)
        server = wsgi.Server(threads=1)
        server.sock = 'fake_socket'
        # mocking eventlet.wsgi server method to check it is called with
        # configured 'http_keepalive' value.
        with mock.patch.object(eventlet.wsgi,
                               'server') as mock_server:
            fake_application = "fake-application"
            server.start(fake_application, 0)
            server.wait()
            mock_server.assert_called_once_with('fake_socket',
                                                fake_application,
                                                log=server._logger,
                                                debug=False,
                                                custom_pool=server.pool,
                                                keepalive=False,
                                                socket_timeout=900)
class TestHelpers(test_utils.BaseTestCase):
    """Round-trip tests for the image-meta <-> HTTP-header helpers in
    glance.common.utils."""
    def test_headers_are_unicode(self):
        """
        Verifies that the headers returned by conversion code are unicode.
        Headers are passed via http in non-testing mode, which automatically
        converts them to unicode. Verifying that the method does the
        conversion proves that we aren't passing data that works in tests
        but will fail in production.
        """
        fixture = {'name': 'fake public image',
                   'is_public': True,
                   'size': 19,
                   'location': "file:///tmp/glance-tests/2",
                   'properties': {'distro': 'Ubuntu 10.04 LTS'}}
        headers = utils.image_meta_to_http_headers(fixture)
        for k, v in six.iteritems(headers):
            self.assertIsInstance(v, six.text_type)
    def test_data_passed_properly_through_headers(self):
        """
        Verifies that data is the same after being passed through headers
        """
        # The original literal listed 'name' twice ('fake public image'
        # then None); Python keeps only the last value, so the dead
        # first entry has been removed -- behavior is unchanged.
        fixture = {'is_public': True,
                   'deleted': False,
                   'name': None,
                   'size': 19,
                   'location': "file:///tmp/glance-tests/2",
                   'properties': {'distro': 'Ubuntu 10.04 LTS'}}
        headers = utils.image_meta_to_http_headers(fixture)
        class FakeResponse(object):
            pass
        response = FakeResponse()
        response.headers = headers
        result = utils.get_image_meta_from_headers(response)
        for k, v in six.iteritems(fixture):
            if v is not None:
                self.assertEqual(v, result[k])
            else:
                # None-valued keys must not survive the round trip.
                self.assertNotIn(k, result)
class GetSocketTestCase(test_utils.BaseTestCase):
    """Tests for wsgi.get_socket / Server.configure_socket with the
    network layer (getaddrinfo, eventlet.listen, ssl, time) all
    monkeypatched out."""
    def setUp(self):
        super(GetSocketTestCase, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            "glance.common.wsgi.get_bind_addr",
            lambda x: ('192.168.0.13', 1234)))
        # Three address-family tuples as socket.getaddrinfo would return.
        addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)),
                          (2, 2, 17, '', ('192.168.0.13', 80)),
                          (2, 3, 0, '', ('192.168.0.13', 80))]
        self.useFixture(fixtures.MonkeyPatch(
            "glance.common.wsgi.socket.getaddrinfo",
            lambda *x: addr_info_list))
        # Scripted clock so the bind-retry loop's timeout elapses
        # deterministically.
        self.useFixture(fixtures.MonkeyPatch(
            "glance.common.wsgi.time.time",
            mock.Mock(side_effect=[0, 1, 5, 10, 20, 35])))
        self.useFixture(fixtures.MonkeyPatch(
            "glance.common.wsgi.utils.validate_key_cert",
            lambda *x: None))
        wsgi.CONF.cert_file = '/etc/ssl/cert'
        wsgi.CONF.key_file = '/etc/ssl/key'
        wsgi.CONF.ca_file = '/etc/ssl/ca_cert'
        wsgi.CONF.tcp_keepidle = 600
    def test_correct_configure_socket(self):
        mock_socket = mock.Mock()
        self.useFixture(fixtures.MonkeyPatch(
            'glance.common.wsgi.ssl.wrap_socket',
            mock_socket))
        self.useFixture(fixtures.MonkeyPatch(
            'glance.common.wsgi.eventlet.listen',
            lambda *x, **y: mock_socket))
        server = wsgi.Server()
        server.default_port = 1234
        server.configure_socket()
        # SO_REUSEADDR and SO_KEEPALIVE must always be set.
        self.assertIn(mock.call.setsockopt(
            socket.SOL_SOCKET,
            socket.SO_REUSEADDR,
            1), mock_socket.mock_calls)
        self.assertIn(mock.call.setsockopt(
            socket.SOL_SOCKET,
            socket.SO_KEEPALIVE,
            1), mock_socket.mock_calls)
        # TCP_KEEPIDLE is platform-dependent, so only assert when the
        # constant exists.
        if hasattr(socket, 'TCP_KEEPIDLE'):
            self.assertIn(mock.call().setsockopt(
                socket.IPPROTO_TCP,
                socket.TCP_KEEPIDLE,
                wsgi.CONF.tcp_keepidle), mock_socket.mock_calls)
    def test_get_socket_without_all_ssl_reqs(self):
        # cert_file set but key_file missing -> misconfiguration error.
        wsgi.CONF.key_file = None
        self.assertRaises(RuntimeError, wsgi.get_socket, 1234)
    def test_get_socket_with_bind_problems(self):
        # EADDRINUSE three times exhausts the retry window -> RuntimeError.
        self.useFixture(fixtures.MonkeyPatch(
            'glance.common.wsgi.eventlet.listen',
            mock.Mock(side_effect=(
                [wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None]))))
        self.useFixture(fixtures.MonkeyPatch(
            'glance.common.wsgi.ssl.wrap_socket',
            lambda *x, **y: None))
        self.assertRaises(RuntimeError, wsgi.get_socket, 1234)
    def test_get_socket_with_unexpected_socket_errno(self):
        # Errors other than EADDRINUSE must propagate unchanged.
        self.useFixture(fixtures.MonkeyPatch(
            'glance.common.wsgi.eventlet.listen',
            mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM))))
        self.useFixture(fixtures.MonkeyPatch(
            'glance.common.wsgi.ssl.wrap_socket',
            lambda *x, **y: None))
        self.assertRaises(wsgi.socket.error, wsgi.get_socket, 1234)
| apache-2.0 |
tijme/not-your-average-web-crawler | test/test_helpers_url_helper.py | 1 | 5609 | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from nyawc.helpers.URLHelper import URLHelper
class TestUrlHelper(unittest.TestCase):
    """Unit tests covering the helper methods of ``URLHelper``."""

    def test_make_absolute(self):
        """Relative URLs must resolve correctly against an absolute host."""
        base = "https://example.ltd/current"
        cases = [
            ("https://example.ltd/new.html", "new.html"),
            ("https://example.ltd/new", "new"),
            ("https://example.ltd/new1/new2", "new1/new2"),
            ("https://example.ltd/new1/new3", "/new1/new3"),
            ("https://example.ltd/current?a=a", "?a=a")
        ]
        for expected, relative in cases:
            self.assertEqual(URLHelper.make_absolute(base, relative), expected)

    def test_make_absolute_with_base(self):
        """Relative URLs must resolve correctly against a base (trailing-slash) URL."""
        base = "https://example.ltd/base/"
        cases = [
            ("https://example.ltd/base/new.html", "new.html"),
            ("https://example.ltd/base/new", "new"),
            ("https://example.ltd/base/new1/new2", "new1/new2"),
            ("https://example.ltd/new1/new2", "/new1/new2"),
            ("https://example.ltd/base/?a=a", "?a=a")
        ]
        for expected, relative in cases:
            self.assertEqual(URLHelper.make_absolute(base, relative), expected)

    def test_make_absolute_folder_traversal(self):
        """Parent-folder traversal (``../``) must resolve correctly and clamp at the root."""
        base = "https://example.ltd/dir1/dir2/dir3"
        cases = [
            ("https://example.ltd/dir1/dir2", "../"),
            ("https://example.ltd/dir1", "../../"),
            ("https://example.ltd", "../../../"),
            ("https://example.ltd", "../../../../"),
            ("https://example.ltd", "../../../../../")
        ]
        for expected, relative in cases:
            self.assertEqual(URLHelper.make_absolute(base, relative), expected)

    def test_get_protocol(self):
        """The protocol (scheme) must be extracted from a URL."""
        cases = [
            ("", "domain.tld"),
            ("http", "http://domain.tld"),
            ("arbitrary", "arbitrary://omain.tld")
        ]
        for expected, url in cases:
            self.assertEqual(URLHelper.get_protocol(url), expected)

    def test_get_subdomain(self):
        """The subdomain portion (possibly nested) must be extracted from a URL."""
        cases = [
            ("", ""),
            ("", "http://"),
            ("", "http://domain"),
            ("", "http://domain.tld"),
            ("sub1", "http://sub1.domain.tld"),
            ("sub2.sub1", "http://sub2.sub1.domain.tld"),
            ("sub3.sub2.sub1", "http://sub3.sub2.sub1.domain.tld")
        ]
        for expected, url in cases:
            self.assertEqual(URLHelper.get_subdomain(url), expected)

    def test_get_hostname(self):
        """The bare hostname (without subdomains or TLD) must be extracted from a URL."""
        cases = [
            ("", ""),
            ("", "http://"),
            ("domain", "http://domain"),
            ("domain", "http://domain.tld"),
            ("domain", "http://sub1.domain.tld"),
            ("domain", "http://sub2.sub1.domain.tld")
        ]
        for expected, url in cases:
            self.assertEqual(URLHelper.get_hostname(url), expected)

    def test_get_tld(self):
        """The top-level domain must be extracted from a URL."""
        cases = [
            ("", ""),
            ("", "http://"),
            ("", "http://domain"),
            ("tld", "http://domain.tld"),
            ("tld", "http://sub1.domain.tld"),
            ("tld", "http://sub2.sub1.domain.tld")
        ]
        for expected, url in cases:
            self.assertEqual(URLHelper.get_tld(url), expected)

    def test_get_ordered_params(self):
        """Query parameters must be returned in a canonical order regardless of input order."""
        first = URLHelper.get_ordered_params("http://example.tld?a=a&c=c&b=b&d=d")
        second = URLHelper.get_ordered_params("http://sub.domain.ltd?c=c&b=b&a=a&d=d")
        self.assertEqual(first, second)

    def test_append_with_data_encoded_and_decoded(self):
        """Appended values must be passed through verbatim, neither encoded nor decoded."""
        plain = URLHelper.append_with_data("http://example.tld/", {"val": "{{aaaa}}"})
        encoded = URLHelper.append_with_data("http://example.tld/", {"val": "%7B%7Baaaa%7D%7D"})
        self.assertEqual(plain, "http://example.tld/?val={{aaaa}}")
        self.assertEqual(encoded, "http://example.tld/?val=%7B%7Baaaa%7D%7D")
| mit |
vishnugonela/boto | boto/kms/exceptions.py | 135 | 1523 | # The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InvalidGrantTokenException(BotoServerError):
    """The specified grant token is not valid."""
    pass
class DisabledException(BotoServerError):
    """The targeted resource is disabled and cannot be used."""
    pass
class LimitExceededException(BotoServerError):
    """A service request limit or quota was exceeded."""
    pass
class DependencyTimeoutException(BotoServerError):
    """The request timed out waiting on a service dependency."""
    pass
class InvalidMarkerException(BotoServerError):
    """The supplied pagination marker is not valid."""
    pass
class AlreadyExistsException(BotoServerError):
    """The resource being created already exists."""
    pass
class InvalidCiphertextException(BotoServerError):
    """The supplied ciphertext could not be decrypted."""
    pass
class KeyUnavailableException(BotoServerError):
    """The requested key is not available."""
    pass
class InvalidAliasNameException(BotoServerError):
    """The specified alias name is not valid."""
    pass
class UnsupportedOperationException(BotoServerError):
    """The requested operation is not supported."""
    pass
class InvalidArnException(BotoServerError):
    """The supplied Amazon Resource Name (ARN) is not valid."""
    pass
class KMSInternalException(BotoServerError):
    """An internal error occurred in the KMS service."""
    pass
class InvalidKeyUsageException(BotoServerError):
    """The key's configured usage does not permit the requested operation."""
    pass
class MalformedPolicyDocumentException(BotoServerError):
    """The supplied policy document is malformed."""
    pass
class NotFoundException(BotoServerError):
    """The requested entity could not be found."""
    pass
SeedScientific/polio | source_data/migrations/0053_auto__chg_field_sourcedatapoint_error_msg.py | 1 | 71250 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: widen SourceDataPoint.error_msg to a nullable TextField."""
        # Changing field 'SourceDataPoint.error_msg'
        db.alter_column(u'source_data_sourcedatapoint', 'error_msg', self.gf('django.db.models.fields.TextField')(null=True))
    def backwards(self, orm):
        """Revert the migration: restore error_msg to a nullable CharField(max_length=255)."""
        # Changing field 'SourceDataPoint.error_msg'
        db.alter_column(u'source_data_sourcedatapoint', 'error_msg', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datapoints.campaign': {
'Meta': {'object_name': 'Campaign', 'db_table': "'campaign'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'get_full_name'"}),
'start_date': ('django.db.models.fields.DateField', [], {'unique': 'True'})
},
u'datapoints.indicator': {
'Meta': {'object_name': 'Indicator', 'db_table': "'indicator'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '55', 'populate_from': "'name'", 'unique_with': '()'})
},
u'datapoints.office': {
'Meta': {'object_name': 'Office', 'db_table': "'office'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.region': {
'Meta': {'unique_together': "(('source', 'source_guid'),)", 'object_name': 'Region', 'db_table': "'region'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '10', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '13', 'decimal_places': '10', 'blank': 'True'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'settlement_code': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'shape_file_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '55', 'populate_from': "'full_name'"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'datapoints.source': {
'Meta': {'object_name': 'Source', 'db_table': "'source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'source_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
'source_data.activityreport': {
'Meta': {'object_name': 'ActivityReport'},
'activity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_hh_pending_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_local_leadership_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_hh_affected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_pro_opv_cd': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_caregiver_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_husband_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_positive': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_sett': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipds_community_leader_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_reported': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_children': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_hh': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_other_issue': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.campaignmap': {
'Meta': {'object_name': 'CampaignMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
'source_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceCampaign']", 'unique': 'True'})
},
'source_data.clustersupervisor': {
'Meta': {'object_name': 'ClusterSupervisor'},
'coord_rfp_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_smwg_meetings': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_vcm_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'end_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fund_transparency': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_conducted': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_planned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_endorsed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_implementation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_socialdata': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_special_pop': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_workplan_aligned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_lgac': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ri_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisee_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_birthtracking': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.document': {
'Meta': {'object_name': 'Document'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'docfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'source_data.etljob': {
'Meta': {'object_name': 'EtlJob'},
'date_attempted': ('django.db.models.fields.DateTimeField', [], {}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'error_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'success_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
'source_data.healthcamp': {
'Meta': {'object_name': 'HealthCamp'},
'agencyname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'formhub_uuid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_photo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'megaphone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.indicatormap': {
'Meta': {'object_name': 'IndicatorMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
'source_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceIndicator']", 'unique': 'True'})
},
'source_data.knowthepeople': {
'Meta': {'object_name': 'KnowThePeople'},
'brothers': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'citiesvisited': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofpax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'prefferedcity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sisters': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state_country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.paxlistreporttraining': {
'Meta': {'object_name': 'PaxListReportTraining'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'emailaddr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofparticipant': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.phoneinventory': {
'Meta': {'object_name': 'PhoneInventory'},
'asset_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'colour_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'telephone_no': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmbirthrecord': {
'Meta': {'object_name': 'PracticeVCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsettcoordinates': {
'Meta': {'object_name': 'PracticeVCMSettCoordinates'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsummary': {
'Meta': {'object_name': 'PracticeVCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.processstatus': {
'Meta': {'object_name': 'ProcessStatus'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status_text': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'source_data.regionmap': {
'Meta': {'object_name': 'RegionMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
'source_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceRegion']", 'unique': 'True'})
},
u'source_data.sourcecampaign': {
'Meta': {'unique_together': "(('source', 'campaign_string'),)", 'object_name': 'SourceCampaign'},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.sourcedatapoint': {
'Meta': {'unique_together': "(('source', 'source_guid', 'indicator_string'),)", 'object_name': 'SourceDataPoint'},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cell_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
'error_msg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'row_number': ('django.db.models.fields.IntegerField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"})
},
u'source_data.sourceindicator': {
'Meta': {'unique_together': "(('source', 'indicator_string'),)", 'object_name': 'SourceIndicator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.sourceregion': {
'Meta': {'object_name': 'SourceRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'lon': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlement_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmbirthrecord': {
'Meta': {'object_name': 'VCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsettlement': {
'Meta': {'object_name': 'VCMSettlement'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummary': {
'Meta': {'object_name': 'VCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummarynew': {
'Meta': {'object_name': 'VCMSummaryNew'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax4': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax6': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax7': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax8': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax9': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_display_msd3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_tot_missed_check': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_12_59months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_2_11months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_census': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_missed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_newborns': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax12_59mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax2_11mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vaxnewborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vwsregister': {
'Meta': {'object_name': 'VWSRegister'},
'acceptphoneresponsibility': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'datephonecollected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'personal_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'wardcode': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['source_data']
from django.db import models
from django.db.models.fields import CharField
from django.utils.safestring import mark_safe
from markdown import markdown
from pygments import highlight
from pygments.formatters import get_formatter_by_name
from pygments.lexers import get_lexer_by_name
from wagtail.core import blocks
from wagtail.core.blocks import BlockQuoteBlock, RawHTMLBlock
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.search import index
# Custom blocks for StreamField. From https://gist.github.com/frankwiles/74a882f16704db9caa27
# See also http://docs.wagtail.io/en/v1.9/releases/1.6.html#render-and-render-basic-methods-on-streamfield-blocks-now-accept-a-context-keyword-argument
class CodeBlock(blocks.StructBlock):
    """StreamField block that renders source code with Pygments highlighting."""

    LANGUAGE_CHOICES = (
        ('python', 'Python'),
        ('bash', 'Bash/Shell'),
        ('html', 'HTML'),
        ('css', 'CSS'),
        ('scss', 'SCSS'),
    )

    language = blocks.ChoiceBlock(choices=LANGUAGE_CHOICES)
    code = blocks.TextBlock()

    class Meta:
        icon = 'code'

    def render(self, value, context=None):
        # Look up the lexer for the language the author selected, then emit
        # highlighted HTML using the 'codehilite' CSS classes.
        lexer = get_lexer_by_name(value['language'])
        html_formatter = get_formatter_by_name(
            'html',
            linenos=None,
            cssclass='codehilite',
            style='default',
            noclasses=False,
        )
        highlighted = highlight(value['code'].strip('\n'), lexer, html_formatter)
        return mark_safe(highlighted)
class MarkDownBlock(blocks.TextBlock):
    """StreamField block that renders its text content as Markdown.

    Fenced code blocks are supported and are highlighted through the
    codehilite extension.
    """

    class Meta:
        icon = 'code'

    def render_basic(self, value, context=None):
        # Pass the extension list by keyword: the positional ``extensions``
        # argument to markdown() was deprecated and removed in
        # Python-Markdown 3.0; the keyword form works on 2.x and 3.x alike.
        md = markdown(
            value,
            extensions=[
                'markdown.extensions.fenced_code',
                'codehilite',
            ],
        )
        return mark_safe(md)
# Page Models
class BlogIndexPage(Page):
    """Landing page listing every live blog post beneath it, newest first."""

    subheading = CharField(max_length=255)

    content_panels = Page.content_panels + [
        FieldPanel('subheading', classname="full"),
    ]

    @property
    def blogs(self):
        # All published posts that live under this index, most recent first.
        return (WeblogPage.objects.live()
                .descendant_of(self)
                .order_by('-date'))

    def get_context(self, request):
        context = super(BlogIndexPage, self).get_context(request)
        context['blogs'] = self.blogs
        context['title'] = self.title
        context['subheading'] = self.subheading
        return context
class WeblogPage(Page):
    """A single blog post composed from a StreamField body."""

    body = StreamField([
        ('heading', blocks.CharBlock(classname="full title")),
        ('paragraph', blocks.RichTextBlock()),
        ('image', ImageChooserBlock()),
        ('html', RawHTMLBlock()),
        ('block_quote', BlockQuoteBlock()),
        ('embed', EmbedBlock()),
        ('code', CodeBlock()),
        ('markdown', MarkDownBlock()),
    ])
    subheading = CharField(max_length=255)
    date = models.DateField("Post date")

    search_fields = Page.search_fields + [
        index.SearchField('body'),
        index.FilterField('date'),
    ]

    content_panels = Page.content_panels + [
        FieldPanel('subheading', classname="full"),
        FieldPanel('date'),
        StreamFieldPanel('body', classname="full"),
    ]

    def get_context(self, request):
        context = super(WeblogPage, self).get_context(request)
        # Expose the fields the template renders directly.
        for attr in ('title', 'subheading', 'body'):
            context[attr] = getattr(self, attr)
        return context
| gpl-3.0 |
sfam/home-assistant | homeassistant/components/sensor/torque.py | 6 | 3146 | """
homeassistant.components.sensor.torque
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Get data from the Torque OBD application.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.torque/
"""
import re
from homeassistant.const import HTTP_OK
from homeassistant.helpers.entity import Entity
DOMAIN = 'torque'
DEPENDENCIES = ['http']
SENSOR_EMAIL_FIELD = 'eml'
DEFAULT_NAME = 'vehicle'
ENTITY_NAME_FORMAT = '{0} {1}'
API_PATH = '/api/torque'
SENSOR_NAME_KEY = r'userFullName(\w+)'
SENSOR_UNIT_KEY = r'userUnit(\w+)'
SENSOR_VALUE_KEY = r'k(\w+)'
NAME_KEY = re.compile(SENSOR_NAME_KEY)
UNIT_KEY = re.compile(SENSOR_UNIT_KEY)
VALUE_KEY = re.compile(SENSOR_VALUE_KEY)
def decode(value):
    """Undo the double-encoding Torque applies to its HTTP parameters."""
    raw_bytes = value.encode('raw_unicode_escape')
    return raw_bytes.decode('utf-8')
def convert_pid(value):
    """Parse a hexadecimal PID string into its integer value."""
    return int(value, base=16)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Torque platform.

    Registers an HTTP GET endpoint that the Torque Android app uploads OBD
    data to.  Sensor entities are created lazily as new PIDs appear in the
    uploaded data.
    """
    vehicle = config.get('name', DEFAULT_NAME)
    # Optional filter: only accept uploads tagged with this email address.
    email = config.get('email', None)
    sensors = {}  # maps integer PID -> TorqueSensor entity

    def _receive_data(handler, path_match, data):
        """Handle one upload from Torque: update known sensors, add new ones."""
        handler.send_response(HTTP_OK)
        handler.end_headers()
        # Drop uploads from other users when an email filter is configured.
        if email is not None and email != data[SENSOR_EMAIL_FIELD]:
            return
        names = {}   # PID -> human-readable sensor name
        units = {}   # PID -> unit of measurement
        # Each query parameter is either a name, a unit, or a value field,
        # keyed by the regexes defined at module level.
        for key in data:
            is_name = NAME_KEY.match(key)
            is_unit = UNIT_KEY.match(key)
            is_value = VALUE_KEY.match(key)
            if is_name:
                pid = convert_pid(is_name.group(1))
                names[pid] = decode(data[key])
            elif is_unit:
                pid = convert_pid(is_unit.group(1))
                units[pid] = decode(data[key])
            elif is_value:
                pid = convert_pid(is_value.group(1))
                # Only update sensors that already exist; creation happens
                # below once a name for the PID has been seen.
                if pid in sensors:
                    sensors[pid].on_update(data[key])
        # Create entities for PIDs seen for the first time.
        for pid in names:
            if pid not in sensors:
                sensors[pid] = TorqueSensor(
                    ENTITY_NAME_FORMAT.format(vehicle, names[pid]),
                    units.get(pid, None))
                add_devices([sensors[pid]])
    hass.http.register_path('GET', API_PATH, _receive_data)
    return True
class TorqueSensor(Entity):
    """A single OBD value reported by the Torque application."""

    def __init__(self, name, unit):
        self._sensor_name = name
        self._measurement_unit = unit
        self._latest_value = None

    @property
    def name(self):
        """Name of the sensor."""
        return self._sensor_name

    @property
    def unit_of_measurement(self):
        """Unit the value is expressed in."""
        return self._measurement_unit

    @property
    def state(self):
        """Most recently received value."""
        return self._latest_value

    @property
    def icon(self):
        """Default icon for vehicle data."""
        return 'mdi:car'

    def on_update(self, value):
        """Store a freshly received value and push it to Home Assistant."""
        self._latest_value = value
        self.update_ha_state()
| mit |
2014c2g4/w16b_test | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/locals.py | 603 | 1141 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""Set of functions from PyGame that are handy to have in
the local namespace for your module"""
from pygame.constants import *
from pygame.rect import Rect
import pygame.color as color
Color = color.Color
| gpl-3.0 |
brianchoate/chef2ldif | setup.py | 1 | 1553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is not installed.
    from distutils.core import setup

# The long description shown on PyPI is the README plus the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

# Runtime dependencies of the package.
requirements = [
    # TODO: put package requirements here
]

# Dependencies needed only to run the test suite.
test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='chef2ldif',
    version='0.0.1',
    description="Small tool to read in user data_bags and generate LDIF records.",
    long_description=readme + '\n\n' + history,
    author="Brian Choate",
    author_email='brian.choate@gmail.com',
    url='https://github.com/brianchoate/chef2ldif',
    packages=[
        'chef2ldif',
    ],
    package_dir={'chef2ldif':
                 'chef2ldif'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='chef2ldif',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| bsd-3-clause |
yamahata/tacker | tacker/openstack/common/db/sqlalchemy/utils.py | 2 | 24284 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from tacker.openstack.common import context as request_context
from tacker.openstack.common.db.sqlalchemy import models
from tacker.openstack.common.gettextutils import _, _LI, _LW
from tacker.openstack.common import timeutils
LOG = logging.getLogger(__name__)
# Matches "<scheme>://<user>:<password>@<rest>" and captures the credentials.
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")


def sanitize_db_url(url):
    """Return *url* with any user:password credentials masked out."""
    credentials = _DBURL_REGEX.match(url)
    if credentials is None:
        # No credentials present (e.g. sqlite paths) - nothing to hide.
        return url
    prefix = url[:credentials.start(1)]
    suffix = url[credentials.end(2):]
    return '{0}****:****{1}'.format(prefix, suffix)
class InvalidSortKey(Exception):
    """Raised by paginate_query() when a sort key is not a model attribute."""
    message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we returns the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """
    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    # sort_dir and sort_dirs are mutually exclusive ways to specify direction.
    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            # The caller asked to sort by an attribute the model lacks.
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            # Equality on every more-significant key ...
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            # ... combined with strict inequality on key i; the comparison
            # direction follows this column's sort direction.
            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
def _read_deleted_filter(query, db_model, read_deleted):
    """Apply soft-delete visibility ('no' / 'yes' / 'only') to *query*."""
    if 'deleted' not in db_model.__table__.columns:
        raise ValueError(_("There is no `deleted` column in `%s` table. "
                           "Project doesn't use soft-deleted feature.")
                         % db_model.__name__)

    default_deleted_value = db_model.__table__.c.deleted.default.arg
    if read_deleted == 'no':
        # Keep only rows still carrying the "not deleted" marker value.
        return query.filter(db_model.deleted == default_deleted_value)
    if read_deleted == 'yes':
        # No filtering: deleted and active rows alike.
        return query
    if read_deleted == 'only':
        return query.filter(db_model.deleted != default_deleted_value)
    raise ValueError(_("Unrecognized read_deleted value '%s'")
                     % read_deleted)
def _project_filter(query, db_model, context, project_only):
    """Restrict *query* to the context's project when *project_only* is set."""
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(_("There is no `project_id` column in `%s` table.")
                         % db_model.__name__)

    if not (request_context.is_user_context(context) and project_only):
        return query

    if project_only == 'allow_none':
        # Rows owned by the project, plus rows with no owner at all.
        is_none = None
        return query.filter(or_(db_model.project_id == context.project_id,
                                db_model.project_id == is_none))
    return query.filter(db_model.project_id == context.project_id)
def model_query(context, model, session, args=None, project_only=False,
                read_deleted=None):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param model: Model to query. Must be a subclass of ModelBase.
    :type model: models.ModelBase
    :param session: The session to use.
    :type session: sqlalchemy.orm.session.Session
    :param args: Arguments to query. If None - model is used.
    :type args: tuple
    :param project_only: If present and context is user-type, then restrict
                         query to match the context's project_id. If set to
                         'allow_none', restriction includes project_id = None.
    :type project_only: bool
    :param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted: bool

    Usage:

    ..code:: python

        result = (utils.model_query(context, models.Instance, session=session)
                       .filter_by(uuid=instance_uuid)
                       .all())

        query = utils.model_query(
                    context, Node,
                    session=session,
                    args=(func.count(Node.id), func.sum(Node.ram))
                    ).filter_by(project_id=project_id)
    """
    if not read_deleted:
        if hasattr(context, 'read_deleted'):
            # NOTE(viktors): some projects use `read_deleted` attribute in
            # their contexts instead of `show_deleted`.
            read_deleted = context.read_deleted
        else:
            read_deleted = context.show_deleted

    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))

    # Select the whole model unless explicit columns/aggregates were given,
    # then layer on soft-delete visibility and tenant scoping.
    query = session.query(model) if not args else session.query(*args)
    query = _read_deleted_filter(query, model, read_deleted)
    query = _project_filter(query, model, context, project_only)
    return query
def get_table(engine, name):
    """Reflect table *name* dynamically from the database bound to *engine*.

    Migrations cannot rely on the ORM models, which may be far out of sync
    with the schema actually present in the database.
    """
    meta = MetaData()
    meta.bind = engine
    return Table(name, meta, autoload=True)
class InsertFromSelect(UpdateBase):
    """Form the base for `INSERT INTO table (SELECT ... )` statement."""
    def __init__(self, table, select):
        # Compiled into SQL by the visit_insert_from_select() hook below.
        self.table = table
        self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    """Form the `INSERT INTO table (SELECT ... )` statement."""
    # SQLAlchemy compiler hook: concatenates the compiled table clause with
    # the compiled SELECT to produce the final statement text.
    return "INSERT INTO %s %s" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.select))
class ColumnError(Exception):
    """Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
    """Look up the caller-supplied replacement Column for *column_name*.

    sqlite reflection maps some column types to NullType; callers must then
    provide a concrete sqlalchemy.Column instance for each such column via
    the col_name_col_instance mapping.

    :raises ColumnError: if no replacement was supplied, or the supplied
                         value is not a sqlalchemy.Column instance.
    """
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        # Fixed error message: dropped the stray closing parenthesis.
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite.")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        # Fixed error message: added the missing sentence break.
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be instance "
                "of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table.

    DEPRECATED: this function is deprecated and will be removed from tacker.db
    in a few releases. Please use UniqueConstraint.drop() method directly for
    sqlalchemy-migrate migration scripts.

    This method drops UC from table and works for mysql, postgresql and sqlite.
    In mysql and postgresql we are able to use "alter table" construction.
    Sqlalchemy doesn't support some sqlite column types and replaces their
    type with NullType in metadata. We process these columns and replace
    NullType with the correct column type.

    :param migrate_engine: sqlalchemy engine
    :param table_name:     name of table that contains uniq constraint.
    :param uc_name:        name of uniq constraint that will be dropped.
    :param columns:        columns that are in uniq constraint.
    :param col_name_col_instance:   contains pair column_name=column_instance.
                            column_instance is instance of Column. These params
                            are required only for columns that have unsupported
                            types by sqlite. For example BigInteger.
    """
    # Imported lazily: sqlalchemy-migrate is only needed by migration scripts.
    from migrate.changeset import UniqueConstraint

    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)

    if migrate_engine.name == "sqlite":
        # Replace any columns sqlite reflected as NullType with the concrete
        # Column instances the caller supplied, so the constraint can be
        # reconstructed correctly.
        override_cols = [
            _get_not_supported_column(col_name_col_instance, col.name)
            for col in t.columns
            if isinstance(col.type, NullType)
        ]
        for col in override_cols:
            t.columns.replace(col)

    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drop (or mark ad `deleted` if use_soft_delete is True) old
    duplicate rows form table with name `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    # SELECT max(id), <uc columns> ... GROUP BY <uc columns> HAVING count > 1
    # finds each group of duplicates and the id of its newest member.
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = sqlalchemy.sql.select(
        columns_for_select, group_by=columns_for_group_by,
        having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = sqlalchemy.sql.select(
            [table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            # Soft delete: mark the row deleted, keyed by its own id.
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
    """Return the "not deleted" marker matching the table's id column type."""
    id_type = table.c.id.type
    if isinstance(id_type, Integer):
        return 0
    if isinstance(id_type, String):
        return ""
    raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
    """Recreate indexes that reference the `deleted` column.

    After the `deleted` column is dropped and re-added with a new type, any
    index touching it must be rebuilt against the fresh column objects.
    """
    table = get_table(migrate_engine, table_name)

    insp = reflection.Inspector.from_engine(migrate_engine)
    real_indexes = insp.get_indexes(table_name)
    existing_index_names = dict(
        [(index['name'], index['column_names']) for index in real_indexes])

    # NOTE(boris-42): Restore indexes on `deleted` column
    for index in indexes:
        if 'deleted' not in index['column_names']:
            continue
        name = index['name']
        if name in existing_index_names:
            # Drop the stale index before recreating it on the new column.
            column_names = [table.c[c] for c in existing_index_names[name]]
            old_index = Index(name, *column_names, unique=index["unique"])
            old_index.drop(migrate_engine)

        column_names = [table.c[c] for c in index['column_names']]
        new_index = Index(index["name"], *column_names, unique=index["unique"])
        new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
                                          **col_name_col_instance):
    """Convert the `deleted` column from the id type back to Boolean.

    Rows soft-deleted with deleted == id become deleted == True.  sqlite
    cannot ALTER columns, so it takes a separate table-rebuild path.
    """
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_boolean_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    # Snapshot indexes first: dropping the column destroys those on `deleted`.
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    old_deleted = Column('old_deleted', Boolean, default=False)
    old_deleted.create(table, populate_default=False)

    # deleted == id is the soft-delete marker; translate it to True.
    table.update().\
        where(table.c.deleted == table.c.id).\
        values(old_deleted=True).\
        execute()

    table.c.deleted.drop()
    table.c.old_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    """sqlite path: rebuild the table with a Boolean `deleted` column.

    sqlite cannot ALTER a column's type, so a temporary table with the new
    schema is created, data is copied over, and the tables are swapped.
    """
    insp = reflection.Inspector.from_engine(migrate_engine)
    table = get_table(migrate_engine, table_name)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # sqlite reflected this column as NullType; use the concrete
                # replacement Column supplied by the caller.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)

    constraints = [constraint.copy() for constraint in table.constraints]

    meta = table.metadata
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    # Copy all data across, converting deleted == id to a truthy marker.
    c_select = []
    for c in table.c:
        if c.name != "deleted":
            c_select.append(c)
        else:
            c_select.append(table.c.deleted == table.c.id)

    ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    # Normalize the marker values to proper booleans.
    new_table.update().\
        where(new_table.c.deleted == new_table.c.id).\
        values(deleted=True).\
        execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
                                          **col_name_col_instance):
    """Convert the Boolean `deleted` column to the table's id type.

    Soft-deleted rows get deleted == id (making unique constraints that
    include `deleted` work); active rows keep the type's default marker.
    sqlite cannot ALTER columns, so it takes a separate table-rebuild path.
    """
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_id_type_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    # Snapshot indexes first: dropping the column destroys those on `deleted`.
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    new_deleted.create(table, populate_default=True)

    deleted = True  # workaround for pyflakes
    table.update().\
        where(table.c.deleted == deleted).\
        values(new_deleted=table.c.id).\
        execute()
    table.c.deleted.drop()
    table.c.new_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    """sqlite path: rebuild the table with an id-typed `deleted` column."""
    # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check
    #                 constraints in sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
    #                 1) Create new table with the same columns, constraints
    #                 and indexes. (except deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # sqlite reflected this column as NullType; use the concrete
                # replacement Column supplied by the caller.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check is CheckConstraint
        #                 associated with deleted column.
        if not isinstance(constraint, CheckConstraint):
            return False
        sqltext = str(constraint.sqltext)
        return (sqltext.endswith("deleted in (0, 1)") or
                sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))

    # Copy every constraint except the boolean checks on `deleted`.
    constraints = []
    for constraint in table.constraints:
        if not is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    # Soft-deleted rows (deleted == True) get their own id as the marker.
    deleted = True  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=new_table.c.id).\
        execute()

    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    deleted = False  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=default_deleted_value).\
        execute()
def get_connect_string(backend, database, user=None, passwd=None):
    """Build an sqlalchemy connection URL for the given backend.

    Try to get a connection with a very specific set of values, if we get
    these then we'll run the tests, otherwise they are skipped.
    """
    if backend == 'sqlite':
        # sqlite URLs carry no credentials or host.
        template = '%(backend)s:///%(database)s'
    else:
        template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
    return template % {'backend': backend,
                       'user': user,
                       'passwd': passwd,
                       'database': database}
def is_backend_avail(backend, database, user=None, passwd=None):
    """Return True if a test connection to the given backend can be opened."""
    try:
        connect_uri = get_connect_string(backend=backend,
                                         database=database,
                                         user=user,
                                         passwd=passwd)
        engine = sqlalchemy.create_engine(connect_uri)
        connection = engine.connect()
    except Exception:
        # Deliberately broad: any failure (missing driver, bad credentials,
        # unreachable server) simply means the backend is unavailable.
        return False
    connection.close()
    engine.dispose()
    return True
def get_db_connection_info(conn_pieces):
    """Split a parsed connection URL into (user, password, database, host)."""
    database = conn_pieces.path.strip('/')
    location = conn_pieces.netloc.split('@')
    host = location[1]
    credentials = location[0].split(':')
    user = credentials[0]
    # A missing ":password" section yields an empty password.
    password = credentials[1].strip() if len(credentials) > 1 else ""
    return (user, password, database, host)
| apache-2.0 |
numenta/nupic.vision | src/nupic/vision/data/OCR/characters/parseJPG.py | 3 | 7772 | #!/usr/bin/python2
'''
This script parses JPEG images of text documents to isolate and save images
of individual characters. The size of these output images in pixels is
specified by the parameters desired_height and desired_width.
The JPEG images are converted to grey scale using a parameter called
luminance_threshold to distinguish between light and dark pixels. Lines of
text are found by searching for rows that contain dark pixels, and
characters are found by searching for columns that contain dark pixels. Once
a character is found it is padded with blank rows and columns to obtain the
desired size. The images are saved using the filenames given in the XML file.
'''
# Set desired output image height and width in pixels
desired_height = 32
desired_width = 32
DEBUG = False
import matplotlib.pyplot as plot
import numpy as np
import operator
import sys
import re
import os
from PIL import Image
from xml.dom import minidom
jpg_list = [ 'characters-0.jpg', 'characters-1.jpg', 'characters-2.jpg',
'characters-3.jpg', 'characters-4.jpg', 'characters-5.jpg',
'characters-6.jpg', 'characters-7.jpg', 'characters-8.jpg',
'characters-9.jpg', 'characters-10.jpg', 'characters-11.jpg',
'characters-12.jpg', 'characters-13.jpg', 'characters-14.jpg',
'characters-15.jpg', 'characters-16.jpg', 'characters-17.jpg',
'characters-18.jpg', 'characters-19.jpg' ]
#jpg_list = [ 'debug_doc.jpg' ]
# Parse XML file for filenames to use when saving each character image
xmldoc = minidom.parse('characters.xml')
#xmldoc = minidom.parse('debug_doc.xml')
filelist = xmldoc.getElementsByTagName('image')
print len(filelist)
#for i in range(145):
#    print filelist[62*i].attributes['file'].value
# this counter gets used to select file names from an xml file
output_files_saved = 0
for jpg in jpg_list:
    # For each page image: segment into text lines, then into characters,
    # pad each character to desired_width x desired_height, and save it
    # under the filename given by the ground-truth xml (filelist).
    print jpg
    im = Image.open(jpg)
    width, length = im.size
    if DEBUG:
        print "image size: ", im.size
        print "image mode: ", im.mode
        print im.size[1],im.size[0]
    # read pixel data from image into a numpy array
    # shape is (height, width); values are 8-bit luminance (0=black, 255=white)
    if im.mode == 'L':
        pixels = np.array(list(im.getdata())).reshape(im.size[1],im.size[0])
    elif im.mode == 'RGB':
        # color input: convert to grayscale first
        pixels = np.array(list(im.convert('L').getdata())).reshape(im.size[1],
                                                                   im.size[0])
    #im.show()
    ##############################################################################
    # Removed all logic for determining the value to use to distinguish between
    # light and dark pixels because this is a non-trivial challenge of its own and
    # I want to get to generating a data set for OCR which I can do much faster by
    # choosing the threshold manually.
    ##############################################################################
    # pixels darker than this are treated as ink, lighter as background
    luminance_threshold = 100
    ##############################################################################
    # parse document for lines of text
    ##############################################################################
    row = 0
    while row < length:
        # Find the first row of pixels in next line of text by ignoring blank rows
        # of pixels which will have a non-zero product since white pixels have a
        # luminance value of 255
        #row_data = pixels[row * width : row * width + width]
        while (row < length and pixels[row,:].min() > luminance_threshold):
            row += 1
        first_row = row
        if DEBUG:
            print "the first row of pixels in the line of text is ", first_row
        # Find the last row of pixels in this line of text by counting rows with
        # dark pixels. These rows have a product of zero since the luminance value
        # of all dark pixels was set to zero
        # (the 2-row slice presumably bridges 1-row gaps inside glyphs -- confirm)
        while (row < length and pixels[row:row + 2,:].min() < luminance_threshold):
            row += 1
        last_row = row
        #if row < length:
        #last_row = row + 2 # this is a hack for Cochin font Q
        #row += 5 # this is a hack for Cochin font Q
        if DEBUG:
            print "the last row of pixels in the line of text is ", last_row
        ##############################################################################
        # parse line of text for characters
        ##############################################################################
        if first_row < last_row:
            col = 0
            while col < width:
                # find first column of pixels in the next character by ignoring blank
                # cols of pixels
                while col < width and pixels[first_row:last_row,col].min() > luminance_threshold:
                    col += 1
                first_col = col
                # find last column of pixels in the next character by counting columns
                # with dark pixels
                # (5-column lookahead bridges small intra-glyph gaps -- confirm width)
                while col < width and \
                        pixels[first_row:last_row,col:col + 5].min() < luminance_threshold:
                    col += 1
                last_col = col
                ##############################################################################
                # remove blank rows from the top and bottom of characters
                ##############################################################################
                if first_col < last_col:
                    # remove blank rows from the top of the character
                    r = first_row;
                    while pixels[r,first_col:last_col].min() > luminance_threshold:
                        r = r + 1;
                    char_first_row = r;
                    # remove blank rows from the bottom of the character
                    r = last_row;
                    while pixels[r,first_col:last_col].min() > luminance_threshold:
                        r = r - 1;
                    char_last_row = r + 1;
                    if DEBUG:
                        # isolate an image of this character
                        character = im.crop([first_col, char_first_row, last_col,
                                             char_last_row])
                        print "Character size after whitespace removal", character.size
                        print first_col, first_row, last_col, last_row
                        #character.show()
                    # pad character width out to desired_width
                    char_width = last_col - first_col
                    if char_width > desired_width:
                        print "Character is wider than ", desired_width
                    else:
                        # add the same number of blank columns to the left and right
                        first_col = first_col - (desired_width - char_width) / 2
                        last_col = last_col + (desired_width - char_width) / 2
                        # if the difference was odd we'll be short one column
                        char_width = last_col - first_col
                        if char_width < desired_width:
                            last_col = last_col + 1
                    # pad character height out to desired_height
                    char_height = char_last_row - char_first_row
                    if char_height > desired_height:
                        print "Character is taller than ", desired_height
                    else:
                        # add the same number of blank rows to the top and bottom
                        char_first_row = char_first_row - (desired_height - char_height) / 2
                        char_last_row = char_last_row + (desired_height - char_height) / 2
                        # if the difference was odd we'll be short one row
                        char_height = char_last_row - char_first_row
                        if char_height < desired_height:
                            char_last_row = char_last_row + 1
                    character = im.crop([first_col, char_first_row, last_col,
                                         char_last_row])
                    if DEBUG:
                        print "Character size after padding", character.size
                        print first_col, char_first_row, last_col, char_last_row
                        #character.show()
                        #garbage = raw_input()
                    # save image to filename specified in ground truth file
                    filename = filelist[output_files_saved].attributes['file'].value
                    directory = filename.split('/')[0]
                    if not os.path.exists(directory):
                        os.makedirs(directory)
                    character.save(filename, "JPEG", quality=80)
                    output_files_saved = output_files_saved + 1
                    print output_files_saved
| agpl-3.0 |
citrix-openstack-build/neutron-vpnaas | neutron_vpnaas/db/vpn/vpn_db.py | 1 | 31697 | # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.utils import excutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.common import constants as n_constants
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_agentschedulers_db as l3_agent_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import vpnaas
from neutron.i18n import _LW
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.plugins.common import utils
from neutron_vpnaas.db.vpn import vpn_validator
LOG = logging.getLogger(__name__)
class IPsecPeerCidr(model_base.BASEV2):
    """Internal representation of IPsec peer CIDRs."""
    # CIDR string; composite primary key with the owning connection, so the
    # same CIDR may appear on multiple connections.
    cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
    # rows are removed automatically when the parent connection is deleted
    ipsec_site_connection_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('ipsec_site_connections.id',
                      ondelete="CASCADE"),
        primary_key=True)
class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 IPsecPolicy Object."""
    __tablename__ = 'ipsecpolicies'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    # ESP/AH protocol that protects tunnelled traffic
    transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
                                           name="ipsec_transform_protocols"),
                                   nullable=False)
    auth_algorithm = sa.Column(sa.Enum("sha1",
                                       name="vpn_auth_algorithms"),
                               nullable=False)
    encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
                                             "aes-256", "aes-192",
                                             name="vpn_encrypt_algorithms"),
                                     nullable=False)
    encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
                                           name="ipsec_encapsulations"),
                                   nullable=False)
    # SA lifetime: value interpreted in the given units
    lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
                                       name="vpn_lifetime_units"),
                               nullable=False)
    lifetime_value = sa.Column(sa.Integer, nullable=False)
    # perfect forward secrecy Diffie-Hellman group
    pfs = sa.Column(sa.Enum("group2", "group5", "group14",
                            name="vpn_pfs"), nullable=False)
class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 IKEPolicy Object."""
    __tablename__ = 'ikepolicies'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    auth_algorithm = sa.Column(sa.Enum("sha1",
                                       name="vpn_auth_algorithms"),
                               nullable=False)
    encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
                                             "aes-256", "aes-192",
                                             name="vpn_encrypt_algorithms"),
                                     nullable=False)
    # only IKE phase-1 "main" mode is modelled
    phase1_negotiation_mode = sa.Column(sa.Enum("main",
                                                name="ike_phase1_mode"),
                                        nullable=False)
    # SA lifetime: value interpreted in the given units
    lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
                                       name="vpn_lifetime_units"),
                               nullable=False)
    lifetime_value = sa.Column(sa.Integer, nullable=False)
    ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
                            nullable=False)
    # perfect forward secrecy Diffie-Hellman group
    pfs = sa.Column(sa.Enum("group2", "group5", "group14",
                            name="vpn_pfs"), nullable=False)
class IPsecSiteConnection(model_base.BASEV2,
                          models_v2.HasId, models_v2.HasTenant):
    """Represents a IPsecSiteConnection Object."""
    __tablename__ = 'ipsec_site_connections'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    # remote gateway address and identifier
    peer_address = sa.Column(sa.String(255), nullable=False)
    peer_id = sa.Column(sa.String(255), nullable=False)
    route_mode = sa.Column(sa.String(8), nullable=False)
    mtu = sa.Column(sa.Integer, nullable=False)
    initiator = sa.Column(sa.Enum("bi-directional", "response-only",
                                  name="vpn_initiators"), nullable=False)
    auth_mode = sa.Column(sa.String(16), nullable=False)
    # pre-shared key (stored in clear in this table)
    psk = sa.Column(sa.String(255), nullable=False)
    # DPD = dead peer detection settings
    dpd_action = sa.Column(sa.Enum("hold", "clear",
                                   "restart", "disabled",
                                   "restart-by-peer", name="vpn_dpd_actions"),
                           nullable=False)
    dpd_interval = sa.Column(sa.Integer, nullable=False)
    dpd_timeout = sa.Column(sa.Integer, nullable=False)
    status = sa.Column(sa.String(16), nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    vpnservice_id = sa.Column(sa.String(36),
                              sa.ForeignKey('vpnservices.id'),
                              nullable=False)
    ipsecpolicy_id = sa.Column(sa.String(36),
                               sa.ForeignKey('ipsecpolicies.id'),
                               nullable=False)
    ikepolicy_id = sa.Column(sa.String(36),
                             sa.ForeignKey('ikepolicies.id'),
                             nullable=False)
    ipsecpolicy = orm.relationship(
        IPsecPolicy, backref='ipsec_site_connection')
    ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
    # peer CIDRs are eagerly loaded and deleted with the connection
    peer_cidrs = orm.relationship(IPsecPeerCidr,
                                  backref='ipsec_site_connection',
                                  lazy='joined',
                                  cascade='all, delete, delete-orphan')
class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 VPNService Object."""
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    status = sa.Column(sa.String(16), nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    # local subnet whose traffic is carried by the VPN, and the router
    # that terminates it
    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
                          nullable=False)
    router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
                          nullable=False)
    subnet = orm.relationship(models_v2.Subnet)
    router = orm.relationship(l3_db.Router)
    # connections are removed together with their service
    ipsec_site_connections = orm.relationship(
        IPsecSiteConnection,
        backref='vpnservice',
        cascade="all, delete-orphan")
class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin):
    """VPN plugin database class using SQLAlchemy models."""

    def _get_validator(self):
        """Obtain validator to use for attribute validation.

        Subclasses may override this with a different validator, as needed.
        Note: some UTs will directly create a VPNPluginDb object and then
        call its methods, instead of creating a VPNDriverPlugin, which
        will have a service driver associated that will provide a
        validator object. As a result, we use the reference validator here.
        """
        return vpn_validator.VpnReferenceValidator()

    def update_status(self, context, model, v_id, status):
        """Set the status column of the given resource row."""
        with context.session.begin(subtransactions=True):
            v_db = self._get_resource(context, model, v_id)
            v_db.update({'status': status})

    def _get_resource(self, context, model, v_id):
        """Fetch a row by id, mapping NoResultFound to the resource-specific
        NotFound exception (unknown models re-raise NoResultFound)."""
        try:
            r = self._get_by_id(context, model, v_id)
        except exc.NoResultFound:
            with excutils.save_and_reraise_exception(reraise=False) as ctx:
                if issubclass(model, IPsecSiteConnection):
                    raise vpnaas.IPsecSiteConnectionNotFound(
                        ipsec_site_conn_id=v_id
                    )
                elif issubclass(model, IKEPolicy):
                    raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)
                elif issubclass(model, IPsecPolicy):
                    raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)
                elif issubclass(model, VPNService):
                    raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)
                # unrecognized model: re-raise the original NoResultFound
                ctx.reraise = True
        return r

    def assert_update_allowed(self, obj):
        """Raise if *obj* is in a PENDING_* state and must not be updated."""
        status = getattr(obj, 'status', None)
        _id = getattr(obj, 'id', None)
        if utils.in_pending_status(status):
            raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status)

    def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
        """Convert an IPsecSiteConnection row to its API dict form."""
        res = {'id': ipsec_site_conn['id'],
               'tenant_id': ipsec_site_conn['tenant_id'],
               'name': ipsec_site_conn['name'],
               'description': ipsec_site_conn['description'],
               'peer_address': ipsec_site_conn['peer_address'],
               'peer_id': ipsec_site_conn['peer_id'],
               'route_mode': ipsec_site_conn['route_mode'],
               'mtu': ipsec_site_conn['mtu'],
               'auth_mode': ipsec_site_conn['auth_mode'],
               'psk': ipsec_site_conn['psk'],
               'initiator': ipsec_site_conn['initiator'],
               # the three dpd_* columns are folded into one nested dict
               'dpd': {
                   'action': ipsec_site_conn['dpd_action'],
                   'interval': ipsec_site_conn['dpd_interval'],
                   'timeout': ipsec_site_conn['dpd_timeout']
               },
               'admin_state_up': ipsec_site_conn['admin_state_up'],
               'status': ipsec_site_conn['status'],
               'vpnservice_id': ipsec_site_conn['vpnservice_id'],
               'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],
               'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],
               'peer_cidrs': [pcidr['cidr']
                              for pcidr in ipsec_site_conn['peer_cidrs']]
               }
        return self._fields(res, fields)

    def _get_subnet_ip_version(self, context, vpnservice_id):
        """Return the IP version (4 or 6) of the service's local subnet."""
        vpn_service_db = self._get_vpnservice(context, vpnservice_id)
        subnet = vpn_service_db.subnet['cidr']
        ip_version = netaddr.IPNetwork(subnet).version
        return ip_version

    def create_ipsec_site_connection(self, context, ipsec_site_connection):
        """Create an IPsec site connection in PENDING_CREATE state."""
        ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
        validator = self._get_validator()
        validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
        tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon)
        with context.session.begin(subtransactions=True):
            # Check permissions: referenced resources must exist and be
            # visible to the caller, otherwise *NotFound is raised.
            self._get_resource(context,
                               VPNService,
                               ipsec_sitecon['vpnservice_id'])
            self._get_resource(context,
                               IKEPolicy,
                               ipsec_sitecon['ikepolicy_id'])
            self._get_resource(context,
                               IPsecPolicy,
                               ipsec_sitecon['ipsecpolicy_id'])
            vpnservice_id = ipsec_sitecon['vpnservice_id']
            ip_version = self._get_subnet_ip_version(context, vpnservice_id)
            validator.validate_ipsec_site_connection(context,
                                                     ipsec_sitecon,
                                                     ip_version)
            ipsec_site_conn_db = IPsecSiteConnection(
                id=uuidutils.generate_uuid(),
                tenant_id=tenant_id,
                name=ipsec_sitecon['name'],
                description=ipsec_sitecon['description'],
                peer_address=ipsec_sitecon['peer_address'],
                peer_id=ipsec_sitecon['peer_id'],
                route_mode='static',
                mtu=ipsec_sitecon['mtu'],
                auth_mode='psk',
                psk=ipsec_sitecon['psk'],
                initiator=ipsec_sitecon['initiator'],
                dpd_action=ipsec_sitecon['dpd_action'],
                dpd_interval=ipsec_sitecon['dpd_interval'],
                dpd_timeout=ipsec_sitecon['dpd_timeout'],
                admin_state_up=ipsec_sitecon['admin_state_up'],
                status=constants.PENDING_CREATE,
                vpnservice_id=vpnservice_id,
                ikepolicy_id=ipsec_sitecon['ikepolicy_id'],
                ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id']
            )
            context.session.add(ipsec_site_conn_db)
            for cidr in ipsec_sitecon['peer_cidrs']:
                peer_cidr_db = IPsecPeerCidr(
                    cidr=cidr,
                    ipsec_site_connection_id=ipsec_site_conn_db['id']
                )
                context.session.add(peer_cidr_db)
        return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)

    def update_ipsec_site_connection(
            self, context,
            ipsec_site_conn_id, ipsec_site_connection):
        """Update a connection; peer CIDRs are diffed and reconciled."""
        ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
        changed_peer_cidrs = False
        validator = self._get_validator()
        with context.session.begin(subtransactions=True):
            ipsec_site_conn_db = self._get_resource(
                context,
                IPsecSiteConnection,
                ipsec_site_conn_id)
            vpnservice_id = ipsec_site_conn_db['vpnservice_id']
            ip_version = self._get_subnet_ip_version(context, vpnservice_id)
            validator.assign_sensible_ipsec_sitecon_defaults(
                ipsec_sitecon, ipsec_site_conn_db)
            validator.validate_ipsec_site_connection(
                context,
                ipsec_sitecon,
                ip_version)
            self.assert_update_allowed(ipsec_site_conn_db)
            if "peer_cidrs" in ipsec_sitecon:
                changed_peer_cidrs = True
                old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']
                old_peer_cidr_dict = dict(
                    (peer_cidr['cidr'], peer_cidr)
                    for peer_cidr in old_peer_cidr_list)
                new_peer_cidr_set = set(ipsec_sitecon["peer_cidrs"])
                old_peer_cidr_set = set(old_peer_cidr_dict)
                new_peer_cidrs = list(new_peer_cidr_set)
                # delete rows that disappeared, add rows that are new
                for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:
                    context.session.delete(old_peer_cidr_dict[peer_cidr])
                for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:
                    pcidr = IPsecPeerCidr(
                        cidr=peer_cidr,
                        ipsec_site_connection_id=ipsec_site_conn_id)
                    context.session.add(pcidr)
                # don't try to write the list onto a scalar column
                del ipsec_sitecon["peer_cidrs"]
            if ipsec_sitecon:
                ipsec_site_conn_db.update(ipsec_sitecon)
            result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
            if changed_peer_cidrs:
                result['peer_cidrs'] = new_peer_cidrs
            return result

    def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
        """Delete a connection (peer CIDRs go with it via cascade)."""
        with context.session.begin(subtransactions=True):
            ipsec_site_conn_db = self._get_resource(
                context, IPsecSiteConnection, ipsec_site_conn_id
            )
            context.session.delete(ipsec_site_conn_db)

    def _get_ipsec_site_connection(
            self, context, ipsec_site_conn_id):
        return self._get_resource(
            context, IPsecSiteConnection, ipsec_site_conn_id)

    def get_ipsec_site_connection(self, context,
                                  ipsec_site_conn_id, fields=None):
        ipsec_site_conn_db = self._get_ipsec_site_connection(
            context, ipsec_site_conn_id)
        return self._make_ipsec_site_connection_dict(
            ipsec_site_conn_db, fields)

    def get_ipsec_site_connections(self, context, filters=None, fields=None):
        return self._get_collection(context, IPsecSiteConnection,
                                    self._make_ipsec_site_connection_dict,
                                    filters=filters, fields=fields)

    def update_ipsec_site_conn_status(self, context, conn_id, new_status):
        """Unconditionally set a connection's status (agent-driven)."""
        with context.session.begin():
            self._update_connection_status(context, conn_id, new_status, True)

    def _update_connection_status(self, context, conn_id, new_status,
                                  updated_pending):
        """Update the connection status, if changed.

        If the connection is not in a pending state, unconditionally update
        the status. Likewise, if in a pending state, and have an indication
        that the status has changed, then update the database.

        Note: runs inside the caller's transaction; a vanished connection
        is silently ignored.
        """
        try:
            conn_db = self._get_ipsec_site_connection(context, conn_id)
        except vpnaas.IPsecSiteConnectionNotFound:
            return
        if not utils.in_pending_status(conn_db.status) or updated_pending:
            conn_db.status = new_status

    def _make_ikepolicy_dict(self, ikepolicy, fields=None):
        """Convert an IKEPolicy row to its API dict form."""
        res = {'id': ikepolicy['id'],
               'tenant_id': ikepolicy['tenant_id'],
               'name': ikepolicy['name'],
               'description': ikepolicy['description'],
               'auth_algorithm': ikepolicy['auth_algorithm'],
               'encryption_algorithm': ikepolicy['encryption_algorithm'],
               'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],
               'lifetime': {
                   'units': ikepolicy['lifetime_units'],
                   'value': ikepolicy['lifetime_value'],
               },
               'ike_version': ikepolicy['ike_version'],
               'pfs': ikepolicy['pfs']
               }
        return self._fields(res, fields)

    def create_ikepolicy(self, context, ikepolicy):
        """Create an IKE policy, defaulting lifetime to 3600 seconds."""
        ike = ikepolicy['ikepolicy']
        tenant_id = self._get_tenant_id_for_create(context, ike)
        # Bug fix: the previous default was a *list* ([]), which has no
        # .get() -- a request without 'lifetime' raised AttributeError
        # instead of falling back to the documented defaults.
        lifetime_info = ike.get('lifetime') or {}
        lifetime_units = lifetime_info.get('units', 'seconds')
        lifetime_value = lifetime_info.get('value', 3600)
        with context.session.begin(subtransactions=True):
            ike_db = IKEPolicy(
                id=uuidutils.generate_uuid(),
                tenant_id=tenant_id,
                name=ike['name'],
                description=ike['description'],
                auth_algorithm=ike['auth_algorithm'],
                encryption_algorithm=ike['encryption_algorithm'],
                phase1_negotiation_mode=ike['phase1_negotiation_mode'],
                lifetime_units=lifetime_units,
                lifetime_value=lifetime_value,
                ike_version=ike['ike_version'],
                pfs=ike['pfs']
            )
            context.session.add(ike_db)
        return self._make_ikepolicy_dict(ike_db)

    def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
        """Update an IKE policy; refused while any connection uses it."""
        ike = ikepolicy['ikepolicy']
        with context.session.begin(subtransactions=True):
            # renamed local (was "ikepolicy") so it no longer shadows the arg
            conn_using_policy = context.session.query(
                IPsecSiteConnection).filter_by(
                ikepolicy_id=ikepolicy_id).first()
            if conn_using_policy:
                raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
            ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
            if ike:
                # flatten the nested 'lifetime' dict onto the two columns
                lifetime_info = ike.get('lifetime')
                if lifetime_info:
                    if lifetime_info.get('units'):
                        ike['lifetime_units'] = lifetime_info['units']
                    if lifetime_info.get('value'):
                        ike['lifetime_value'] = lifetime_info['value']
                ike_db.update(ike)
        return self._make_ikepolicy_dict(ike_db)

    def delete_ikepolicy(self, context, ikepolicy_id):
        """Delete an IKE policy; refused while any connection uses it."""
        with context.session.begin(subtransactions=True):
            conn_using_policy = context.session.query(
                IPsecSiteConnection).filter_by(
                ikepolicy_id=ikepolicy_id).first()
            if conn_using_policy:
                raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
            ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
            context.session.delete(ike_db)

    def get_ikepolicy(self, context, ikepolicy_id, fields=None):
        ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
        return self._make_ikepolicy_dict(ike_db, fields)

    def get_ikepolicies(self, context, filters=None, fields=None):
        return self._get_collection(context, IKEPolicy,
                                    self._make_ikepolicy_dict,
                                    filters=filters, fields=fields)

    def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):
        """Convert an IPsecPolicy row to its API dict form."""
        res = {'id': ipsecpolicy['id'],
               'tenant_id': ipsecpolicy['tenant_id'],
               'name': ipsecpolicy['name'],
               'description': ipsecpolicy['description'],
               'transform_protocol': ipsecpolicy['transform_protocol'],
               'auth_algorithm': ipsecpolicy['auth_algorithm'],
               'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
               'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
               'lifetime': {
                   'units': ipsecpolicy['lifetime_units'],
                   'value': ipsecpolicy['lifetime_value'],
               },
               'pfs': ipsecpolicy['pfs']
               }
        return self._fields(res, fields)

    def create_ipsecpolicy(self, context, ipsecpolicy):
        """Create an IPsec policy, defaulting lifetime to 3600 seconds."""
        ipsecp = ipsecpolicy['ipsecpolicy']
        tenant_id = self._get_tenant_id_for_create(context, ipsecp)
        # Consistency/robustness: mirror create_ikepolicy -- a missing
        # 'lifetime' now yields the defaults instead of a KeyError.
        lifetime_info = ipsecp.get('lifetime') or {}
        lifetime_units = lifetime_info.get('units', 'seconds')
        lifetime_value = lifetime_info.get('value', 3600)
        with context.session.begin(subtransactions=True):
            ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(),
                                    tenant_id=tenant_id,
                                    name=ipsecp['name'],
                                    description=ipsecp['description'],
                                    transform_protocol=ipsecp['transform_'
                                                              'protocol'],
                                    auth_algorithm=ipsecp['auth_algorithm'],
                                    encryption_algorithm=ipsecp['encryption_'
                                                                'algorithm'],
                                    encapsulation_mode=ipsecp['encapsulation_'
                                                              'mode'],
                                    lifetime_units=lifetime_units,
                                    lifetime_value=lifetime_value,
                                    pfs=ipsecp['pfs'])
            context.session.add(ipsecp_db)
        return self._make_ipsecpolicy_dict(ipsecp_db)

    def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
        """Update an IPsec policy; refused while any connection uses it."""
        ipsecp = ipsecpolicy['ipsecpolicy']
        with context.session.begin(subtransactions=True):
            # renamed local (was "ipsecpolicy") so it no longer shadows the arg
            conn_using_policy = context.session.query(
                IPsecSiteConnection).filter_by(
                ipsecpolicy_id=ipsecpolicy_id).first()
            if conn_using_policy:
                raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
            ipsecp_db = self._get_resource(context,
                                           IPsecPolicy,
                                           ipsecpolicy_id)
            if ipsecp:
                # flatten the nested 'lifetime' dict onto the two columns
                lifetime_info = ipsecp.get('lifetime')
                if lifetime_info:
                    if lifetime_info.get('units'):
                        ipsecp['lifetime_units'] = lifetime_info['units']
                    if lifetime_info.get('value'):
                        ipsecp['lifetime_value'] = lifetime_info['value']
                ipsecp_db.update(ipsecp)
        return self._make_ipsecpolicy_dict(ipsecp_db)

    def delete_ipsecpolicy(self, context, ipsecpolicy_id):
        """Delete an IPsec policy; refused while any connection uses it."""
        with context.session.begin(subtransactions=True):
            conn_using_policy = context.session.query(
                IPsecSiteConnection).filter_by(
                ipsecpolicy_id=ipsecpolicy_id).first()
            if conn_using_policy:
                raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
            ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
            context.session.delete(ipsec_db)

    def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
        ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
        return self._make_ipsecpolicy_dict(ipsec_db, fields)

    def get_ipsecpolicies(self, context, filters=None, fields=None):
        return self._get_collection(context, IPsecPolicy,
                                    self._make_ipsecpolicy_dict,
                                    filters=filters, fields=fields)

    def _make_vpnservice_dict(self, vpnservice, fields=None):
        """Convert a VPNService row to its API dict form."""
        res = {'id': vpnservice['id'],
               'name': vpnservice['name'],
               'description': vpnservice['description'],
               'tenant_id': vpnservice['tenant_id'],
               'subnet_id': vpnservice['subnet_id'],
               'router_id': vpnservice['router_id'],
               'admin_state_up': vpnservice['admin_state_up'],
               'status': vpnservice['status']}
        return self._fields(res, fields)

    def create_vpnservice(self, context, vpnservice):
        """Create a VPN service in PENDING_CREATE state."""
        vpns = vpnservice['vpnservice']
        tenant_id = self._get_tenant_id_for_create(context, vpns)
        validator = self._get_validator()
        with context.session.begin(subtransactions=True):
            validator.validate_vpnservice(context, vpns)
            vpnservice_db = VPNService(id=uuidutils.generate_uuid(),
                                       tenant_id=tenant_id,
                                       name=vpns['name'],
                                       description=vpns['description'],
                                       subnet_id=vpns['subnet_id'],
                                       router_id=vpns['router_id'],
                                       admin_state_up=vpns['admin_state_up'],
                                       status=constants.PENDING_CREATE)
            context.session.add(vpnservice_db)
        return self._make_vpnservice_dict(vpnservice_db)

    def update_vpnservice(self, context, vpnservice_id, vpnservice):
        """Update a VPN service; rejected while it is in a pending state."""
        vpns = vpnservice['vpnservice']
        with context.session.begin(subtransactions=True):
            vpns_db = self._get_resource(context, VPNService, vpnservice_id)
            self.assert_update_allowed(vpns_db)
            if vpns:
                vpns_db.update(vpns)
        return self._make_vpnservice_dict(vpns_db)

    def delete_vpnservice(self, context, vpnservice_id):
        """Delete a VPN service; refused while any connection references it."""
        with context.session.begin(subtransactions=True):
            if context.session.query(IPsecSiteConnection).filter_by(
                    vpnservice_id=vpnservice_id
            ).first():
                raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
            vpns_db = self._get_resource(context, VPNService, vpnservice_id)
            context.session.delete(vpns_db)

    def _get_vpnservice(self, context, vpnservice_id):
        return self._get_resource(context, VPNService, vpnservice_id)

    def get_vpnservice(self, context, vpnservice_id, fields=None):
        vpns_db = self._get_resource(context, VPNService, vpnservice_id)
        return self._make_vpnservice_dict(vpns_db, fields)

    def get_vpnservices(self, context, filters=None, fields=None):
        return self._get_collection(context, VPNService,
                                    self._make_vpnservice_dict,
                                    filters=filters, fields=fields)

    def check_router_in_use(self, context, router_id):
        """Raise RouterInUseByVPNService if a service sits on the router."""
        vpnservices = self.get_vpnservices(
            context, filters={'router_id': [router_id]})
        if vpnservices:
            raise vpnaas.RouterInUseByVPNService(
                router_id=router_id,
                vpnservice_id=vpnservices[0]['id'])

    def check_subnet_in_use(self, context, subnet_id):
        """Raise SubnetInUseByVPNService if a service sits on the subnet."""
        with context.session.begin(subtransactions=True):
            vpnservices = context.session.query(VPNService).filter_by(
                subnet_id=subnet_id
            ).first()
            if vpnservices:
                raise vpnaas.SubnetInUseByVPNService(
                    subnet_id=subnet_id,
                    vpnservice_id=vpnservices['id'])
class VPNPluginRpcDbMixin():
    """DB helpers used by the agent-facing RPC side of the VPN plugin."""

    def _get_agent_hosting_vpn_services(self, context, host):
        # Resolve the L3 agent running on `host`; an administratively
        # disabled agent hosts no VPN services.
        plugin = manager.NeutronManager.get_plugin()
        agent = plugin._get_agent_by_type_and_host(
            context, n_constants.AGENT_TYPE_L3, host)
        if not agent.admin_state_up:
            return []
        # Join services to their connections/policies/CIDRs and restrict to
        # routers bound to this particular L3 agent.
        query = context.session.query(VPNService)
        query = query.join(IPsecSiteConnection)
        query = query.join(IKEPolicy)
        query = query.join(IPsecPolicy)
        query = query.join(IPsecPeerCidr)
        query = query.join(l3_agent_db.RouterL3AgentBinding,
                           l3_agent_db.RouterL3AgentBinding.router_id ==
                           VPNService.router_id)
        query = query.filter(
            l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)
        return query

    def update_status_by_agent(self, context, service_status_info_list):
        """Updating vpnservice and vpnconnection status.

        :param context: context variable
        :param service_status_info_list: list of status
        The structure is
        [{id: vpnservice_id,
          status: ACTIVE|DOWN|ERROR,
          updated_pending_status: True|False
          ipsec_site_connections: {
              ipsec_site_connection_id: {
                  status: ACTIVE|DOWN|ERROR,
                  updated_pending_status: True|False
              }
          }]
        The agent will set updated_pending_status as True,
        when agent update any pending status.
        """
        with context.session.begin(subtransactions=True):
            for vpnservice in service_status_info_list:
                try:
                    vpnservice_db = self._get_vpnservice(
                        context, vpnservice['id'])
                except vpnaas.VPNServiceNotFound:
                    # service was deleted while the agent report was in
                    # flight; skip it rather than failing the whole batch
                    LOG.warn(_LW('vpnservice %s in db is already deleted'),
                             vpnservice['id'])
                    continue
                # only overwrite a pending status when the agent says it
                # actually transitioned it
                if (not utils.in_pending_status(vpnservice_db.status)
                        or vpnservice['updated_pending_status']):
                    vpnservice_db.status = vpnservice['status']
                for conn_id, conn in vpnservice[
                        'ipsec_site_connections'].items():
                    self._update_connection_status(
                        context, conn_id, conn['status'],
| apache-2.0 |
dgoedkoop/QGIS | python/plugins/processing/tools/vector.py | 3 | 3781 | # -*- coding: utf-8 -*-
"""
***************************************************************************
vector.py
---------------------
Date : February 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'February 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (NULL,
QgsFeatureRequest)
def resolveFieldIndex(source, attr):
    """Resolve *attr* to a zero-based field index of *source*.

    An integer is returned unchanged; anything else is treated as a field
    name and looked up in the source's fields. A ValueError is raised when
    no field with that name exists.
    """
    if isinstance(attr, int):
        return attr

    field_index = source.fields().lookupField(attr)
    if field_index == -1:
        raise ValueError('Wrong field name')
    return field_index
def values(source, *attributes):
    """Returns the values in the attributes table of a feature source,
    for the passed fields.

    Fields can be passed as field names or as zero-based field indices.
    Returns a dict of lists, with the passed field identifiers as keys.
    It considers the existing selection.

    It assumes fields are numeric or contain values that can be parsed
    to a number; values that cannot be parsed become None.
    """
    ret = {}
    indices = []
    attr_keys = {}
    for attr in attributes:
        index = resolveFieldIndex(source, attr)
        indices.append(index)
        attr_keys[index] = attr

    # use an optimised feature request: only the needed columns, no geometry
    request = QgsFeatureRequest().setSubsetOfAttributes(indices).setFlags(QgsFeatureRequest.NoGeometry)
    for feature in source.getFeatures(request):
        attrs = feature.attributes()
        for i in indices:
            # convert attribute value to number
            try:
                v = float(attrs[i])
            except (TypeError, ValueError):
                # was a bare "except:", which also swallowed KeyboardInterrupt
                # and SystemExit; only conversion failures should yield None
                v = None
            ret.setdefault(attr_keys[i], []).append(v)
    return ret
def convert_nulls(values, replacement=None):
    """
    Return a copy of *values* with every NULL entry replaced.

    :param values: list of values
    :param replacement: value to use in place of NULL
    :return: converted list
    """
    return [replacement if value == NULL else value for value in values]
def checkMinDistance(point, index, distance, points):
    """Return True when *point* keeps at least *distance* from its nearest
    already-placed neighbour (looked up via the spatial *index* and the
    id -> point mapping *points*); a zero distance disables the check.
    """
    if distance == 0:
        return True

    nearest = index.nearestNeighbor(point, 1)
    if not nearest:
        return True

    neighbor_id = nearest[0]
    if neighbor_id in points:
        too_close = points[neighbor_id].sqrDist(point) < distance * distance
        if too_close:
            return False
    return True
| gpl-2.0 |
LiveChains/Live-Coin | contrib/spendfrom/spendfrom.py | 1 | 10087 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a livecoind or Livecoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Flat fee added per transaction, in LVC.
BASE_FEE = Decimal("0.001")

def check_json_precision():
    """Abort if the json library in use loses precision converting LVC values."""
    expected_satoshis = 2000000000000003
    round_tripped = json.loads(json.dumps(float(Decimal("20000000.00000003"))))
    if int(round_tripped * 1.0e8) != expected_satoshis:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    # NOTE(review): these are the Bitcoin Core locations, not Livecoin-specific
    # ones -- confirm this matches where livecoind actually keeps its data.
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    return os.path.expanduser("~/.bitcoin")
def read_livecoin_config(dbdir):
    """Read the livecoin.conf file from dbdir, returns dictionary of settings.

    Bug fix: the function was defined as ``read_livecoin.config`` -- a dot is
    not legal in a function name, so the module failed to even parse (this
    looks like a careless search/replace during the Bitcoin -> Livecoin
    rebranding). Callers must use the corrected name ``read_livecoin_config``.
    """
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        """File wrapper that injects a fake [all] section header so
        SafeConfigParser can parse the section-less conf file, and strips
        inline # comments from each line."""
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'

        def readline(self):
            if self.sechead:
                try:
                    return self.sechead
                finally:
                    self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() + "\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "livecoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a livecoin JSON-RPC server.

    Exits the process when the connection fails or when the server's
    testnet setting does not match the configuration.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        # NOTE(review): 113144 exceeds the maximum TCP port (65535); this
        # looks like a rebranding typo -- confirm the real Livecoin testnet
        # RPC port before relying on the default.
        config['rpcport'] = 113144 if testnet else 13144
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the livecoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # Bug fix: was a bare "except:", which also caught the SystemExit
        # raised by the testnet-mismatch branch above, printing a second,
        # misleading "Error connecting" message before exiting.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(livecoind):
    """Prompt for the wallet passphrase if needed; True when unlocked."""
    info = livecoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    if int(info['unlocked_until']) <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            livecoind.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")
    # Re-read the wallet state to report whether unlocking succeeded.
    info = livecoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(livecoind):
    """Map each receiving address to its spendable total, outputs and account."""
    address_to_account = {}
    for info in livecoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    address_summary = {}
    for output in livecoind.listunspent(0):
        # listunspent doesn't give addresses, so look them up via the raw
        # transaction that funds each unspent output.
        rawtx = livecoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # Only ordinary pay-to-address or pay-to-script-hash outputs are
        # handled right now; anything exotic is ignored.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue
        address = pk["addresses"][0]
        entry = address_summary.get(address)
        if entry is None:
            address_summary[address] = {
                "total": vout["value"],
                "outputs": [output],
                "account": address_to_account.get(address, "")
            }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick inputs until *needed* is covered.

    Returns (outputs, change): outputs is a list of {txid, vout} dicts in
    input order, change is the amount gathered beyond what was needed
    (negative when the inputs cannot cover the target).
    """
    outputs = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        outputs.append({"txid": candidate["txid"], "vout": candidate["vout"]})
        gathered += candidate["amount"]
    return (outputs, gathered - needed)
def create_tx(livecoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a transaction paying *amount*+*fee* from the given
    addresses to *toaddress*; returns the signed transaction hex blob."""
    all_coins = list_available(livecoind)

    total_available = Decimal("0.0")
    needed = amount + fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f LVC available, need %f\n" % (total_available, needed))
        sys.exit(1)

    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal
    # numbers, so amounts are cast to float before being sent to livecoind.
    outputs = {toaddress: float(amount)}
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = livecoind.createrawtransaction(inputs, outputs)
    signed_rawtx = livecoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)

    return signed_rawtx["hex"]
def compute_amount_in(livecoind, txinfo):
    """Sum the values of all inputs spent by *txinfo* (a decoded tx)."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # Each input's value lives on the transaction that funded it.
        funding_tx = livecoind.getrawtransaction(vin['txid'], 1)
        total += funding_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of *txinfo* (a decoded transaction)."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(livecoind, txdata_hex, max_fee):
    """Decode *txdata_hex* and exit(1) if its fee looks unreasonable.

    Rejects transactions whose fee exceeds *max_fee*, and no-fee
    transactions that are large or that move a tiny amount.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = livecoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(livecoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: "fee" was referenced below but never defined, which made
        # the size/tiny-amount checks raise NameError at runtime.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """CLI entry point: list balances or build and broadcast a transaction."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of livecoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    # BUG FIX: the config reader was called through the syntactically invalid
    # dotted name "read_livecoin.config"; use the correctly named helper.
    config = read_livecoin_config(options.datadir)
    if options.testnet:
        config['testnet'] = True
    livecoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just print a per-address balance summary.
        address_summary = list_available(livecoind)
        for address, info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(livecoind) == False:
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(livecoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse fees above 1% of the amount being sent.
        sanity_test_fee(livecoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = livecoind.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
| mit |
petteyg/intellij-community | python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/macholib/dylib.py | 320 | 1828 | """
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
    (?P<shortname>\w+?)
    (?:\.(?P<version>[^._]+))?
    (?:_(?P<suffix>[^._]+))?
    \.dylib$
)
""")


def dylib_info(filename):
    """
    A dylib name can take one of the following four forms:
        Location/Name.SomeVersion_Suffix.dylib
        Location/Name.SomeVersion.dylib
        Location/Name_Suffix.dylib
        Location/Name.dylib

    returns None if not found or a mapping equivalent to:
        dict(
            location='Location',
            name='Name.SomeVersion_Suffix.dylib',
            shortname='Name',
            version='SomeVersion',
            suffix='Suffix',
        )

    Note that SomeVersion and Suffix are optional and may be None
    if not present.
    """
    match = DYLIB_RE.match(filename)
    return match.groupdict() if match else None
def test_dylib_info():
    """Exercise dylib_info against each documented dylib name form."""
    def expected(location=None, name=None, shortname=None, version=None, suffix=None):
        # Build the full groupdict dylib_info is expected to return.
        return {
            'location': location,
            'name': name,
            'shortname': shortname,
            'version': version,
            'suffix': suffix,
        }

    assert dylib_info('completely/invalid') is None
    assert dylib_info('completely/invalide_debug') is None
    assert dylib_info('P/Foo.dylib') == expected('P', 'Foo.dylib', 'Foo')
    assert dylib_info('P/Foo_debug.dylib') == expected('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
    assert dylib_info('P/Foo.A.dylib') == expected('P', 'Foo.A.dylib', 'Foo', 'A')
    assert dylib_info('P/Foo_debug.A.dylib') == expected('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
    assert dylib_info('P/Foo.A_debug.dylib') == expected('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')

if __name__ == '__main__':
    test_dylib_info()
| apache-2.0 |
peak6/st2 | st2common/st2common/models/system/actionchain.py | 8 | 5983 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import string
from st2common.util import schema as util_schema
from st2common.models.api.notification import NotificationSubSchemaAPI
class Node(object):
    """A single step of an ActionChain.

    Attributes are materialized from ``schema['properties']`` with ``-``
    replaced by ``_`` (e.g. ``on-success`` becomes ``on_success``) so they
    can be referenced as plain Python attributes.
    """
    schema = {
        "title": "Node",
        "description": "Node of an ActionChain.",
        "type": "object",
        "properties": {
            "name": {
                "description": "The name of this node.",
                "type": "string",
                "required": True
            },
            "ref": {
                "type": "string",
                "description": "Ref of the action to be executed.",
                "required": True
            },
            "params": {
                "type": "object",
                "description": ("Parameter for the execution (old name, here for backward "
                                "compatibility reasons)."),
                "default": {}
            },
            "parameters": {
                "type": "object",
                "description": "Parameter for the execution.",
                "default": {}
            },
            "on-success": {
                "type": "string",
                "description": "Name of the node to invoke on successful completion of action"
                               " executed for this node.",
                "default": ""
            },
            "on-failure": {
                "type": "string",
                "description": "Name of the node to invoke on failure of action executed for this"
                               " node.",
                "default": ""
            },
            "publish": {
                "description": "The variables to publish from the result. Should be of the form"
                               " name.foo. o1: {{node_name.foo}} will result in creation of a"
                               " variable o1 which is now available for reference through"
                               " remainder of the chain as a global variable.",
                "type": "object",
                "patternProperties": {
                    r"^\w+$": {}
                }
            },
            "notify": {
                "description": "Notification settings for action.",
                "type": "object",
                "properties": {
                    "on-complete": NotificationSubSchemaAPI,
                    "on-failure": NotificationSubSchemaAPI,
                    "on-success": NotificationSubSchemaAPI
                },
                "additionalProperties": False
            }
        },
        "additionalProperties": False
    }

    def __init__(self, **kw):
        for prop in six.iterkeys(self.schema.get('properties', [])):
            value = kw.get(prop, None)
            # Having '-' in the property name leads to challenges in
            # referencing the property. In hindsight the schema property
            # should have been on_success rather than on-success.
            # BUG FIX: use str.replace() instead of string.replace(); the
            # string-module wrapper was removed in Python 3 and str.replace
            # behaves identically on Python 2.
            prop = prop.replace('-', '_')
            setattr(self, prop, value)

    def validate(self):
        """Raise ValueError if both "params" and "parameters" were supplied."""
        params = getattr(self, 'params', {})
        parameters = getattr(self, 'parameters', {})

        if params and parameters:
            msg = ('Either "params" or "parameters" attribute needs to be provided, but not '
                   'both')
            raise ValueError(msg)

        return self

    def get_parameters(self):
        """Return the execution parameters, honoring the deprecated alias."""
        # Note: "params" is old deprecated attribute which will be removed in a future release
        params = getattr(self, 'params', {})
        parameters = getattr(self, 'parameters', {})

        return parameters or params

    def __repr__(self):
        return ('<Node name=%s, ref=%s, on-success=%s, on-failure=%s>' %
                (self.name, self.ref, self.on_success, self.on_failure))
class ActionChain(object):
    """A chain of sequentially executed actions.

    Validates the raw chain definition against ``schema`` and exposes each
    schema property as an attribute; the "chain" entries are materialized
    as validated :class:`Node` objects.
    """
    schema = {
        "title": "ActionChain",
        "description": "A chain of sequentially executed actions.",
        "type": "object",
        "properties": {
            "chain": {
                "description": "The chain.",
                "type": "array",
                "items": [Node.schema],
                "required": True
            },
            "default": {
                "type": "string",
                "description": "name of the action to be executed."
            },
            "vars": {
                "description": "",
                "type": "object",
                "patternProperties": {
                    "^\w+$": {}
                }
            }
        },
        "additionalProperties": False
    }

    def __init__(self, **kw):
        util_schema.validate(instance=kw, schema=self.schema, cls=util_schema.CustomValidator,
                             use_default=False, allow_default_none=True)
        for prop in six.iterkeys(self.schema.get('properties', [])):
            value = kw.get(prop, None)
            if prop == 'chain':
                # Materialize each raw chain entry as a validated Node.
                validated_nodes = []
                for node_spec in value:
                    node = Node(**node_spec)
                    node.validate()
                    validated_nodes.append(node)
                value = validated_nodes
            setattr(self, prop, value)
| apache-2.0 |
podemos-info/odoo | openerp/tests/addons/test_exceptions/models.py | 66 | 1289 | # -*- coding: utf-8 -*-
import openerp
class m(openerp.osv.osv.Model):
    """ This model exposes a few methods that will raise the different
        exceptions that must be handled by the server (and its RPC layer)
        and the clients.
    """
    _name = 'test.exceptions.model'

    def generate_except_osv(self, cr, uid, ids, context=None):
        """Raise the legacy osv-level except_osv exception."""
        # title is ignored in the new (6.1) exceptions
        raise openerp.osv.osv.except_osv('title', 'description')

    def generate_except_orm(self, cr, uid, ids, context=None):
        """Raise the legacy orm-level except_orm exception."""
        # title is ignored in the new (6.1) exceptions
        raise openerp.osv.orm.except_orm('title', 'description')

    def generate_warning(self, cr, uid, ids, context=None):
        """Raise a user-facing Warning exception."""
        raise openerp.exceptions.Warning('description')

    def generate_access_denied(self, cr, uid, ids, context=None):
        """Raise AccessDenied (credentials-level failure, no message)."""
        raise openerp.exceptions.AccessDenied()

    def generate_access_error(self, cr, uid, ids, context=None):
        """Raise AccessError (insufficient rights on a record)."""
        raise openerp.exceptions.AccessError('description')

    def generate_exc_access_denied(self, cr, uid, ids, context=None):
        """Raise a plain Exception whose message mimics AccessDenied."""
        raise Exception('AccessDenied')

    def generate_undefined(self, cr, uid, ids, context=None):
        """Trigger an AttributeError by accessing an undefined attribute."""
        # The misspelled attribute ("sumbol") is presumably deliberate in
        # this test fixture — it guarantees the lookup fails.
        self.surely_undefined_sumbol
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lmorchard/django-allauth | allauth/socialaccount/providers/xing/provider.py | 68 | 1229 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
class XingAccount(ProviderAccount):
    """Wraps the extra data returned by the XING API for a social account."""

    def get_profile_url(self):
        return self.account.extra_data.get('permalink')

    def get_avatar_url(self):
        photo_urls = self.account.extra_data.get('photo_urls', {})
        return photo_urls.get('large')

    def to_str(self):
        # Prefer the user's full name; fall back to the default rendering.
        fallback = super(XingAccount, self).to_str()
        data = self.account.extra_data
        full_name = ' '.join([data.get('first_name', ''),
                              data.get('last_name', '')]).strip()
        return full_name or fallback
class XingProvider(OAuthProvider):
    """OAuth provider definition for xing.com."""

    id = 'xing'
    name = 'Xing'
    package = 'allauth.socialaccount.providers.xing'
    account_class = XingAccount

    def extract_uid(self, data):
        return data['id']

    def extract_common_fields(self, data):
        # Map XING profile fields onto allauth's common user fields.
        return {
            'email': data.get('active_email'),
            'username': data.get('page_name'),
            'first_name': data.get('first_name'),
            'last_name': data.get('last_name'),
        }


providers.registry.register(XingProvider)
| mit |
jhseu/tensorflow | tensorflow/python/feature_column/dense_features_v2.py | 7 | 3937 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.feature_column import dense_features
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.DenseFeatures', v1=[])
class DenseFeatures(dense_features.DenseFeatures):
  """A layer that produces a dense `Tensor` based on given `feature_columns`.
  Generally a single example in training data is described with FeatureColumns.
  At the first layer of the model, this column oriented data should be converted
  to a single `Tensor`.
  This layer can be called multiple times with different features.
  This is the V2 version of this layer that uses name_scopes to create
  variables instead of variable_scopes. But this approach currently lacks
  support for partitioned variables. In that case, use the V1 version instead.
  Example:
  ```python
  price = tf.feature_column.numeric_column('price')
  keywords_embedded = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_hash_bucket("keywords", 10K),
      dimensions=16)
  columns = [price, keywords_embedded, ...]
  feature_layer = tf.keras.layers.DenseFeatures(columns)
  features = tf.io.parse_example(
      ..., features=tf.feature_column.make_parse_example_spec(columns))
  dense_tensor = feature_layer(features)
  for units in [128, 64, 32]:
    dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor)
  prediction = tf.keras.layers.Dense(1)(dense_tensor)
  ```
  """
  def __init__(self,
               feature_columns,
               trainable=True,
               name=None,
               **kwargs):
    """Creates a DenseFeatures object.
    Args:
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes derived
        from `DenseColumn` such as `numeric_column`, `embedding_column`,
        `bucketized_column`, `indicator_column`. If you have categorical
        features, you can wrap them with an `embedding_column` or
        `indicator_column`.
      trainable: Boolean, whether the layer's variables will be updated via
        gradient descent during training.
      name: Name to give to the DenseFeatures.
      **kwargs: Keyword arguments to construct a layer.
    Raises:
      ValueError: if an item in `feature_columns` is not a `DenseColumn`.
    """
    super(DenseFeatures, self).__init__(
        feature_columns=feature_columns,
        trainable=trainable,
        name=name,
        **kwargs)
    # V2 state manager creates column state (e.g. embedding variables) via
    # name_scopes rather than variable_scopes; see class docstring.
    self._state_manager = fc._StateManagerImplV2(self, self.trainable)  # pylint: disable=protected-access
  def build(self, _):
    # Create each column's state under a name_scope so the variable names
    # are grouped per column.
    for column in self._feature_columns:
      with ops.name_scope(column.name):
        column.create_state(self._state_manager)
    # We would like to call Layer.build and not _DenseFeaturesHelper.build.
    # pylint: disable=protected-access
    super(fc._BaseFeaturesLayer, self).build(None)  # pylint: disable=bad-super-call
| apache-2.0 |
Hiestaa/RLViz | src/problems/base.py | 1 | 6050 | # -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
import gym
from parametizable import Parametizable
from consts import ParamsTypes, Spaces
class ProblemException(Exception):
    """Raised when an operation is invalid for the current problem setup."""
    pass
class BaseProblem(Parametizable):
    """
    Mostly a wrapper around gym's environment, but also provide additional
    parameters and statistics to play with.
    The class is setup for a default behaviour on any gym's environment. When
    subclassing, part of the job should already be done by setting up the
    right parameters. Additional specific behavior can be obtained by overriding
    the functions but care should be taken to call the parent's corresponding
    method using `super(<Class>, self)`
    """
    # These will be or-ed at each step to know whether the environment
    # considers the episode terminated
    EPISODE_TERMINATION_CRITERIA = [
        lambda self, **kwargs: self._done,
        lambda self, stepI, **kwargs: stepI >= self.maxSteps
    ]
    PARAMS = {
        'maxSteps': ParamsTypes.Number
    }
    PARAMS_DOMAIN = {
        'maxSteps': {
            'range': (-1, float('inf')),
            'values': [100, 500, 1000]
        },
    }
    PARAMS_DEFAULT = {
        'maxSteps': 500
    }
    PARAMS_DESCRIPTION = {
        'maxSteps': "Maximum number of steps per episode. Set to -1 to disable."
    }
    # Override to specify a Gym environment that should be loaded.
    GYM_ENVIRONMENT_NAME = None
    # Override to specify compatible algorithm
    DOMAIN = {
        'action': Spaces.Discrete,
        'state': Spaces.Discrete
    }
    # optional: override to give a specific name to each action
    # action space is assumed to be discrete and 1 dimensional.
    # first action should be in first position, second action in second,
    # and so on.
    ACTION_NAMES = []
    # optional: override to give a specific name to each dimension of
    # the state space. List should be in the same order of the dimensions
    # of the state space (dimension 1 in first position, etc...)
    STATE_DIMENSION_NAMES = []

    def __init__(self, **kwargs):
        super(BaseProblem, self).__init__(**kwargs)
        self._done = False
        self._env = None
        self.observationSpace = None
        self.actionSpace = None

    @property
    def env(self):
        """The underlying gym environment (None before setup())."""
        return self._env

    def terminate(self):
        """Force the current episode to be reported as done."""
        self._done = True

    def episodeDone(self, stepI):
        """Return True if any termination criterion is met at step `stepI`."""
        return any(
            crit(self, stepI=stepI)
            for crit in self.EPISODE_TERMINATION_CRITERIA)

    def setup(self):
        """
        Setup the environment - this shouldn't be done in the constructor to
        enable override.
        This assumes the problem uses a gym environment. Override otherwise.
        """
        logger.info("[%s] Problem setup" % self.__class__.__name__)
        if self.GYM_ENVIRONMENT_NAME is None:
            raise NotImplementedError()
        self._env = gym.make(self.GYM_ENVIRONMENT_NAME)
        self.observationSpace = self._env.observation_space
        self.actionSpace = self._env.action_space

    ###
    # Some helper function to retrieve information about the environment.
    # These are pre-implemented for any gym environment, and should
    # be overriden otherwise
    ###
    def getStatesList(self):
        """
        Returns the list of possible states.
        Override this function if you're not defining a gym environment.
        This function should only be called if the problem bears a discrete
        state space.
        """
        if self.env is None:
            raise NotImplementedError()
        if self.DOMAIN['state'] == Spaces.Discrete:
            # BUG FIX: the state list must come from the *observation* space;
            # the previous code enumerated the action space instead.
            return range(self.env.observation_space.n)
        raise ProblemException("Continuous state space")

    def getStatesDim(self):
        """
        Return the number of dimension of the state space
        """
        # NOTE(review): assumes a Box-like observation space exposing `low`;
        # a discrete space has no such attribute — confirm callers' usage.
        if self.env is None:
            raise NotImplementedError()
        return len(self.env.observation_space.low)

    def getStatesBounds(self):
        """
        Returns the max and min values each dimension can take.
        These are returned as two tuples, `low` and `high`, where both
        are a list of as many elements as there is dimension to the state space.
        """
        if self.env is None:
            raise NotImplementedError()
        return (
            self.env.observation_space.low,
            self.env.observation_space.high)

    def getActionsList(self):
        """
        Returns the list of possible actions.
        Override this function if you're not defining a gym environment.
        This function should only be called if the problem bears a discrete
        action space.
        """
        if self.env is None:
            raise NotImplementedError()
        if self.DOMAIN['action'] == Spaces.Discrete:
            return range(self.env.action_space.n)
        raise NotImplementedError()

    # Problem execution methods
    def step(self, action):
        """
        The agent take the given action and receives back the new state,
        the reward, whether the episode is terminated and optionally
        some additional debug information.
        Override this function if you're not defining a gym environment.
        """
        newObservation, reward, self._done, info = self._env.step(action)
        return newObservation, reward, self._done, info

    def reset(self):
        """
        Reset the state of the environment for a new episode.
        Override this function if you're not defining a gym environment.
        """
        self._done = False
        return self._env.reset()

    def render(self, close=False):
        """
        Render the environment (server-side)
        Override this function if you're not defining a gym environment.
        """
        return self._env.render(close=close)

    def release(self):
        """
        Release handles and memory if manual intervention is required.
        """
        pass
| mit |
djangocon/symposion-2014 | symposion/cms/views.py | 7 | 3302 | from django.conf import settings
from django.db import transaction
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views import static
from django.contrib.auth.decorators import login_required
from .models import Page, File
from .forms import PageForm, FileUploadForm
def can_edit(page, user):
    """True if *user* may edit *page*; community pages are open to anyone."""
    if page and page.is_community:
        return True
    return user.has_perm("cms.change_page")
def can_upload(user):
    """Only staff members and superusers may manage uploaded files."""
    return bool(user.is_staff or user.is_superuser)
def page(request, path):
    """Render the published page at *path*.

    Unknown paths 404 for regular visitors, but redirect users with edit
    rights straight to the edit view so they can create the page.
    """
    try:
        page = Page.published.get(path=path)
    except Page.DoesNotExist:
        page = None

    editable = can_edit(page, request.user)
    if page is None:
        if editable:
            return redirect("cms_page_edit", path=path)
        raise Http404

    return render(request, "cms/page_detail.html", {
        "page": page,
        "editable": editable,
    })
@login_required
def page_edit(request, path):
    """Display and process the edit form for the page at *path*.

    404s when the user lacks edit rights on the (possibly missing) page.
    """
    try:
        page = Page.published.get(path=path)
    except Page.DoesNotExist:
        page = None

    if not can_edit(page, request.user):
        raise Http404

    if request.method == "POST":
        form = PageForm(request.POST, instance=page)
        if form.is_valid():
            page = form.save(commit=False)
            page.path = path
            page.save()
            return redirect(page)
        # BUG FIX: a leftover Python 2 debug statement (`print form.errors`)
        # was removed here; it is a SyntaxError on Python 3 and leaked form
        # errors to stdout. The invalid form falls through and is re-rendered
        # with its errors below.
    else:
        form = PageForm(instance=page, initial={"path": path})

    return render(request, "cms/page_edit.html", {
        "path": path,
        "form": form
    })
def file_index(request):
    """List all uploaded files; restricted to users allowed to upload."""
    if not can_upload(request.user):
        raise Http404
    return render(request, "cms/file_index.html",
                  {"files": File.objects.all()})
def file_create(request):
    """Handle the file-upload form; restricted to users allowed to upload."""
    if not can_upload(request.user):
        raise Http404

    if request.method == "POST":
        form = FileUploadForm(request.POST, request.FILES)
        if form.is_valid():
            with transaction.commit_on_success():
                File.objects.create(file=form.cleaned_data["file"])
            return redirect("file_index")
    else:
        form = FileUploadForm()

    return render(request, "cms/file_create.html", {"form": form})
def file_download(request, pk, *args):
    """Serve a stored file, via X-Accel-Redirect when the setting is on."""
    file = get_object_or_404(File, pk=pk)
    if getattr(settings, "USE_X_ACCEL_REDIRECT", False):
        response = HttpResponse()
        response["X-Accel-Redirect"] = file.file.url
        # Delete content-type so the frontend server determines the filetype;
        # we definitely don't want Django's default.
        del response["content-type"]
        return response
    return static.serve(request, file.file.name,
                        document_root=settings.MEDIA_ROOT)
def file_delete(request, pk):
    """Delete a stored file on POST; restricted to users allowed to upload.

    NOTE(review): a non-POST request falls through and returns None, which
    Django treats as an error — presumably this view is only ever POSTed to.
    """
    if not can_upload(request.user):
        raise Http404

    file = get_object_or_404(File, pk=pk)
    if request.method == "POST":
        file.delete()
        # @@@ message
        return redirect("file_index")
| bsd-3-clause |
kosgroup/odoo | odoo/conf/deprecation.py | 20 | 1455 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Regroup variables for deprecated features.
To keep the OpenERP server backward compatible with older modules, some
additional code is needed throughout the core library. This module keeps
track of those specific measures by providing variables that can be unset
by the user to check if her code is future proof.
In a perfect world, all these variables are set to False, the corresponding
code removed, and thus these variables made unnecessary.
"""
# If True, the Python modules inside the openerp namespace are made available
# without the 'openerp.' prefix. E.g. openerp.osv.osv and osv.osv refer to the
# same module.
# Introduced around 2011.02.
# Change to False around 2013.02.
open_openerp_namespace = False
# If True, openerp.netsvc.LocalService() can be used to lookup reports or to
# access openerp.workflow.
# Introduced around 2013.03.
# Among the related code:
# - The openerp.netsvc.LocalService() function.
# - The openerp.report.interface.report_int._reports dictionary.
# - The register attribute in openerp.report.interface.report_int (and in its
#   inheriting classes).
# - The auto column in ir.actions.report.xml.
allow_local_service = True
# Applies for the register attribute in openerp.report.interface.report_int.
# See comments for allow_local_service above.
# Introduced around 2013.03.
allow_report_int_registration = True
| gpl-3.0 |
knehez/edx-platform | lms/djangoapps/shoppingcart/management/tests/test_retire_order.py | 105 | 2638 | """Tests for the retire_order command"""
from tempfile import NamedTemporaryFile
from django.core.management import call_command
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from shoppingcart.models import Order, CertificateItem
from student.tests.factories import UserFactory
class TestRetireOrder(ModuleStoreTestCase):
    """Test the retire_order command"""

    def setUp(self):
        super(TestRetireOrder, self).setUp()
        course = CourseFactory.create()
        self.course_key = course.id

        # set up test carts in each pre-command state
        self.cart, __ = self._create_cart()

        self.paying, __ = self._create_cart()
        self.paying.start_purchase()

        self.already_defunct_cart, __ = self._create_cart()
        self.already_defunct_cart.retire()

        self.purchased, self.purchased_item = self._create_cart()
        self.purchased.status = "purchased"
        self.purchased.save()
        self.purchased_item.status = "purchased"
        # BUG FIX: the item's status change was never persisted because the
        # *order* was saved a second time instead of the item.
        self.purchased_item.save()

    def test_retire_order(self):
        """Test the retire_order command"""
        nonexistent_id = max(order.id for order in Order.objects.all()) + 1
        order_ids = [
            self.cart.id,
            self.paying.id,
            self.already_defunct_cart.id,
            self.purchased.id,
            nonexistent_id
        ]

        self._create_tempfile_and_call_command(order_ids)

        # Open carts and paying carts are retired with distinct statuses...
        self.assertEqual(
            Order.objects.get(id=self.cart.id).status, "defunct-cart"
        )
        self.assertEqual(
            Order.objects.get(id=self.paying.id).status, "defunct-paying"
        )
        self.assertEqual(
            Order.objects.get(id=self.already_defunct_cart.id).status,
            "defunct-cart"
        )
        # ...while completed purchases must be left untouched.
        self.assertEqual(
            Order.objects.get(id=self.purchased.id).status, "purchased"
        )

    def _create_tempfile_and_call_command(self, order_ids):
        """
        Takes a list of order_ids, writes them to a tempfile, and then runs the
        "retire_order" command on the tempfile
        """
        with NamedTemporaryFile() as temp:
            temp.write("\n".join(str(order_id) for order_id in order_ids))
            temp.seek(0)
            call_command('retire_order', temp.name)

    def _create_cart(self):
        """Creates a cart and adds a CertificateItem to it"""
        cart = Order.get_cart_for_user(UserFactory.create())
        item = CertificateItem.add_to_order(
            cart, self.course_key, 10, 'honor', currency='usd'
        )
        return cart, item
| agpl-3.0 |
JNRowe/cupage | setup.py | 1 | 2052 | #! /usr/bin/env python3
"""setup.py - Setuptools tasks and config for cupage."""
# Copyright © 2009-2014 James Rowe <jnrowe@gmail.com>
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from typing import List
from setuptools import setup
from setuptools.command.test import test
class PytestTest(test):
    """Custom setuptools ``test`` command that delegates to pytest."""

    def finalize_options(self):
        test.finalize_options(self)
        # Run everything under tests/ by default.
        self.test_args = [
            'tests/',
        ]
        self.test_suite = True

    def run_tests(self):
        # Import lazily so pytest is only required when tests actually run,
        # and propagate its exit status to the shell.
        from sys import exit
        from pytest import main
        exit(main(self.test_args))
def parse_requires(file: str) -> List[str]:
    """Parse the pip requirements file ``extra/<file>``.

    Handles blank lines, ``#`` comments (whole-line and trailing), and
    nested ``-r <file>`` includes, which are parsed recursively.
    """
    deps: List[str] = []
    with open(f'extra/{file}') as req_file:
        # Strip trailing comments and whitespace up front; whole-comment
        # lines become empty strings and are skipped below, so a separate
        # leading-'#' check is unnecessary (the old one was unreachable).
        entries = [line.split('#')[0].strip() for line in req_file]
    for dep in entries:
        if not dep:
            continue
        if dep.startswith('-r '):
            deps.extend(parse_requires(dep.split()[1]))
            continue
        deps.append(dep)
    return deps
# Note: We can't use setuptools’ requirements support as it only accepts a
# list value, and doesn’t support pip’s ``-r`` file-inclusion mechanism
install_requires = parse_requires('requirements.txt')
tests_require = parse_requires('requirements-test.txt')
if __name__ == '__main__':
    # Static metadata lives in setup.cfg; only the dynamic bits are here.
    setup(
        install_requires=install_requires,
        tests_require=tests_require,
        cmdclass={'test': PytestTest},
    )
nwjs/chromium.src | chrome/test/mini_installer/chrome_helper.py | 4 | 4785 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common helper module for working with Chrome's processes and windows."""
import logging
import os
import psutil
import re
import win32gui
import win32process
def get_process_name(p):
    """Return a psutil.Process name across psutil API versions."""
    # psutil < 2.0 exposed Process.name as a property; 2.0 turned it into
    # a method, so the attribute must be called in that case.
    name = p.name
    if psutil.version_info[0] < 2:
        return name
    return name()
def get_process_exe(p):
    """Return a psutil.Process executable path across psutil API versions."""
    # psutil < 2.0 exposed Process.exe as a property; 2.0 made it a method.
    exe = p.exe
    if psutil.version_info[0] < 2:
        return exe
    return exe()
def get_process_ppid(p):
    """Return a psutil.Process parent pid across psutil API versions."""
    # psutil < 2.0 exposed Process.ppid as a property; 2.0 made it a method.
    ppid = p.ppid
    if psutil.version_info[0] < 2:
        return ppid
    return ppid()
def GetProcessIDAndPathPairs():
    """Returns a list of 2-tuples of (process id, process path)."""
    pairs = []
    for proc in psutil.process_iter():
        try:
            pairs.append((proc.pid, get_process_exe(proc)))
        except psutil.Error:
            # Some processes (system ones, or ones that just exited) cannot
            # be inspected; skip them.
            continue
    return pairs
def GetProcessIDs(process_path):
    """Returns a list of IDs of processes whose path is |process_path|.

    Args:
        process_path: The path to the process.

    Returns:
        A list of process IDs.
    """
    matching = []
    for pid, path in GetProcessIDAndPathPairs():
        if path == process_path:
            matching.append(pid)
    return matching
def WaitForChromeExit(chrome_path):
    """Waits for all |chrome_path| processes to exit.

    Repeatedly snapshots the process table and blocks on one process per
    round (preferring the top-level browser process) until no process whose
    executable is |chrome_path| remains.

    Args:
        chrome_path: The path to the chrome.exe on which to wait.
    """
    # NOTE: dict.itervalues() below is Python 2 API; this module targets
    # Python 2.
    def GetChromeProcesses(chrome_path):
        """Returns a dict of all |chrome_path| processes indexed by pid."""
        chrome_processes = dict()
        for process in psutil.process_iter():
            try:
                if get_process_exe(process) == chrome_path:
                    chrome_processes[process.pid] = process
                    logging.info('Found chrome process %s' % get_process_exe(process))
                elif get_process_name(process) == os.path.basename(chrome_path):
                    # Same binary name but a different install path: waiting
                    # here could never terminate, so fail loudly instead.
                    raise Exception(
                        'Found other chrome process %s' % get_process_exe(process))
            except psutil.Error:
                # The process disappeared or is inaccessible; ignore it.
                pass
        return chrome_processes

    def GetBrowserProcess(chrome_processes):
        """Returns a psutil.Process for the browser process in |chrome_processes|.
        """
        # Find the one whose parent isn't a chrome.exe process.
        for process in chrome_processes.itervalues():
            try:
                if get_process_ppid(process) not in chrome_processes:
                    return process
            except psutil.Error:
                pass
        return None

    chrome_processes = GetChromeProcesses(chrome_path)
    while chrome_processes:
        # Prefer waiting on the browser process: when it exits, its children
        # are expected to follow.
        process = GetBrowserProcess(chrome_processes)
        if not process:
            # Pick any process to wait on if no top-level parent was found.
            process = next(chrome_processes.itervalues())
        if process.is_running():
            logging.info(
                'Waiting on %s for %s %s processes to exit' %
                (str(process), len(chrome_processes), get_process_exe(process)))
            process.wait()
        # Check for stragglers and keep waiting until all are gone.
        chrome_processes = GetChromeProcesses(chrome_path)
def GetWindowHandles(process_ids):
    """Returns a list of handles of windows owned by processes in |process_ids|.

    Args:
        process_ids: A list of process IDs.

    Returns:
        A list of handles of visible top-level windows owned by those
        processes.
    """
    handles = []

    def _Collect(hwnd, _):
        # EnumWindows visits every top-level window; keep the visible ones
        # belonging to one of the requested processes.
        _, owner_pid = win32process.GetWindowThreadProcessId(hwnd)
        if owner_pid in process_ids and win32gui.IsWindowVisible(hwnd):
            handles.append(hwnd)

    win32gui.EnumWindows(_Collect, None)
    return handles
def WindowExists(process_ids, class_pattern):
    """Returns whether there exists a window with the specified criteria.

    This method returns whether there exists a window that is owned by a
    process in |process_ids| and has a class name matching |class_pattern|.

    Args:
        process_ids: A list of process IDs.
        class_pattern: The regular expression pattern of the window class name.

    Returns:
        A boolean indicating whether such a window exists.
    """
    return any(re.match(class_pattern, win32gui.GetClassName(hwnd))
               for hwnd in GetWindowHandles(process_ids))
| bsd-3-clause |
blabla1337/skf-flask | skf/rabbit_mq_workers/deletion-worker.py | 1 | 3009 | #!/usr/bin/env python
import pika, time, random, yaml
from os import path
from skf import settings
from kubernetes import client, config
# RabbitMQ bootstrap: connect with the worker credentials and make sure the
# work queue exists before consuming from it.
# NOTE(review): credentials are hard-coded; presumably acceptable for a
# lab/demo deployment -- confirm before any production use.
creds = pika.PlainCredentials('admin', 'admin-skf-secret')
connection = pika.BlockingConnection(pika.ConnectionParameters(host=settings.RABBIT_MQ_CONN_STRING, credentials=creds))
channel = connection.channel()
# The queue name ('deletion_qeue', sic) is part of the wire protocol shared
# with producers; the typo must not be "fixed" unilaterally.
channel.queue_declare(queue='deletion_qeue')
def delete_container(rpc_body):
    """Tear down the deployment and service named in |rpc_body|.

    |rpc_body| has the form 'deployment:user_id'.
    """
    owner = string_split_user_id(rpc_body)
    instance = string_split_deployment(rpc_body)
    delete_deployment(instance, owner)
    delete_service(instance, owner)
    # Pause before replying, presumably to let the deletion propagate
    # through the cluster.
    time.sleep(3)
    return {'message': 'If present, the container image was deleted from the cluster!'}
def delete_deployment(instance_name, user_id):
    """Delete the named Kubernetes deployment from the user's namespace.

    Args:
        instance_name: Name of the deployment to remove.
        user_id: Namespace (one per user) holding the deployment.

    Returns:
        A dict with a human-readable status message; errors are reported in
        the message rather than raised, matching the RPC reply contract.
    """
    try:
        config.load_kube_config()
        api_instance = client.AppsV1Api()
        api_response = api_instance.delete_namespaced_deployment(
            name=instance_name,
            namespace=user_id,
            body=client.V1DeleteOptions(
                propagation_policy='Foreground',
                grace_period_seconds=5))
        print("Deployment deleted. status='%s'" % str(api_response.status))
        return {'message': 'Deployment deleted.'}
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only operational errors should be turned into
        # an RPC error message.
        return {'message': 'Kubernetes configuration is either missing or done incorrectly, error deployment delete!'}
def delete_service(instance_name, user_id):
    """Delete the named Kubernetes service from the user's namespace.

    Args:
        instance_name: Name of the service to remove.
        user_id: Namespace (one per user) holding the service.

    Returns:
        A dict with a human-readable status message; errors are reported in
        the message rather than raised, matching the RPC reply contract.
    """
    try:
        config.load_kube_config()
        api_instance = client.CoreV1Api()
        api_response = api_instance.delete_namespaced_service(
            name=instance_name,
            namespace=user_id,
            body=client.V1DeleteOptions(
                propagation_policy='Foreground',
                grace_period_seconds=5))
        # Fixed copy-paste from delete_deployment(): this function deletes
        # the *service*, so report that.
        print("Service deleted. status='%s'" % str(api_response.status))
        return {'message': 'Service deleted.'}
    except Exception:
        # Narrowed from a bare ``except:`` (which also caught SystemExit
        # and KeyboardInterrupt).
        return {'message': 'Kubernetes configuration is either missing or done incorrectly, error service delete!'}
def string_split_user_id(body):
    """Extract the user id from a 'deployment:user_id' RPC payload.

    Returns the user id string, or an error-message dict when the payload
    has no ':'-separated second field (preserving the worker's dict-based
    error reporting contract).
    """
    try:
        return body.split(':')[1]
    except Exception:
        # Narrowed from a bare ``except:``; an IndexError here means the
        # payload had no user id component.
        return {'message': 'Failed to deploy, error no user_id found!'}
def string_split_deployment(body):
    """Extract the deployment name from a 'deployment:user_id' RPC payload.

    Returns the deployment string (the whole payload when there is no ':'),
    or an error-message dict when |body| is not splittable at all.
    """
    try:
        return body.split(':')[0]
    except Exception:
        # Narrowed from a bare ``except:``; only reachable when |body| is
        # not a string-like object.
        return {'message': 'Failed to delete, error no deployment found!'}
def on_request(ch, method, props, body):
    """Consume one deletion request and publish the reply to the caller."""
    # |body| arrives as bytes; the payload format is 'deployment:user_id'.
    response = delete_container(str(body, 'utf-8'))
    # RPC-over-AMQP reply: publish to the caller's reply queue, echoing its
    # correlation id; the reply expires after 30s if never collected.
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id=props.correlation_id,
                                                     expiration='30000'),
                     body=str(response))
    # Ack only after the reply has been published, so an unacked request can
    # be redelivered if this worker dies mid-way.
    ch.basic_ack(delivery_tag=method.delivery_tag)
# Take one unacked message at a time (prefetch_count=1) so slow deletions
# don't pile up on a single worker.
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='deletion_qeue', on_message_callback=on_request)
print(" [x] Awaiting RPC requests")
# Blocks forever, dispatching messages to on_request().
channel.start_consuming()
pieleric/odemis | src/odemis/acq/align/transform.py | 4 | 3925 | # -*- coding: utf-8 -*-
"""
Created on 29 Nov 2013
@author: Kimon Tsitsikas
Copyright © 2012-2013 Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License version 2 as published by the Free
Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
import numpy
import math
def CalculateTransform(optical_coordinates, electron_coordinates, skew=False):
    """
    Returns the translation, scaling and rotation for the optical and electron image coordinates.

    Solves, in the least-squares sense, for the similarity transform (or,
    with skew=True, the full affine transform) mapping optical spot
    positions onto electron spot positions.

    optical_coordinates (List of tuples): Coordinates of spots in optical image
    electron_coordinates (List of tuples): Coordinates of spots in electron image
    skew (boolean): If True, also compute scaling ratio and shear
    returns translation (Tuple of 2 floats),
            scaling (Tuple of 2 floats),
            rotation (Float): Transformation parameters
            shear (Float): only returned when skew=True (5-element result)
    raises ValueError: if the two coordinate lists differ in length
    """
    # Create numpy arrays out of the coordinate lists
    optical_array = numpy.array(optical_coordinates)
    electron_array = numpy.array(electron_coordinates)

    # Make matrix X
    list_len = len(electron_coordinates)  # We assume that both lists have the same length
    if optical_array.shape[0] != list_len:
        raise ValueError("Mismatch between the number of expected and found coordinates.")

    if skew is False:
        # Similarity model u = s*R(theta)*x + t, parameterised linearly as
        # [a, b, tx, ty] with a = s*cos(theta) and b = -s*sin(theta); the x
        # and y equations are stacked into one 2N x 4 system.
        x_array = numpy.zeros(shape=(2 * list_len, 4))
        x_array[0:list_len, 2].fill(1)
        x_array[0:list_len, 0:2] = optical_array
        x_array[list_len:2 * list_len, 3].fill(1)
        x_array[list_len:2 * list_len, 0] = optical_array[:, 1]
        x_array[list_len:2 * list_len, 1] = -optical_array[:, 0]

        # Make matrix U
        u_array = numpy.zeros(shape=(2 * list_len, 1))
        u_array[0: list_len, 0] = electron_array[:, 0]
        u_array[list_len: 2 * list_len, 0] = electron_array[:, 1]

        # Calculate matrix R, R = X\U (least-squares solve)
        r_array, resid, rank, s = numpy.linalg.lstsq(x_array, u_array)

        translation_x = -r_array[2][0]
        translation_y = -r_array[3][0]
        # |(a, b)| is the scale; the same expression is used for both axes
        # because this model has a single isotropic scale factor.
        scaling_x = 1 / math.sqrt((r_array[1][0] ** 2) + (r_array[0][0] ** 2))
        scaling_y = 1 / math.sqrt((r_array[1][0] ** 2) + (r_array[0][0] ** 2))
        rotation = math.atan2(-r_array[1][0], r_array[0][0])
        return (translation_x, translation_y), (scaling_x, scaling_y), rotation
    else:
        # Calculate including shear: fit a full 2x3 affine map, then
        # decompose it into rotation, per-axis scale and shear.
        x_array = numpy.zeros(shape=(list_len, 3))
        x_array[0:list_len, 2].fill(1)
        x_array[0:list_len, 0:2] = optical_array

        # Make matrix U
        u_array = electron_array

        # We know that X*T=U
        t_inv, resid, rank, s = numpy.linalg.lstsq(x_array, u_array)
        translation_xy = t_inv[2, :]
        theta = math.atan2(t_inv[1, 0], t_inv[1, 1])
        scaling_x = t_inv[0, 0] * math.cos(theta) - t_inv[0, 1] * math.sin(theta)
        scaling_y = math.sqrt(math.pow(t_inv[1, 0], 2) + math.pow(t_inv[1, 1], 2))
        shear = (t_inv[0, 0] * math.sin(theta) + t_inv[0, 1] * math.cos(theta)) / scaling_x
        # Convert to the convention used by the callers: negated
        # translation/rotation/shear, mean scale plus a scale-ratio term.
        translation_xy_ret = -translation_xy
        scaling_ret = (1 / scaling_x + 1 / scaling_y) / 2
        theta_ret = -theta
        scaling_xy_ret = (1 / scaling_x) / scaling_ret - 1
        shear_ret = -shear
        return (translation_xy_ret[0], translation_xy_ret[1]), (scaling_ret, scaling_ret), theta_ret, scaling_xy_ret, shear_ret
| gpl-2.0 |
google-research/social_cascades | news/graph_processing.py | 1 | 1943 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph processing script."""
import os
from absl import app
from absl import flags
from absl import logging
import networkx as nx
import pandas as pd
from utils import graph_filter_with_degree
from utils import load_graph_from_edgelist_csv
FLAGS = flags.FLAGS

# Command-line interface.  The degree bounds select which nodes survive the
# filtering step; path flags default to a development layout.
flags.DEFINE_string(
    'g_file',
    '../proj_Data/cat_data/test3/sr_timespan_post_graph-00000-of-00001.csv',
    'raw graph edgelist csv file')
flags.DEFINE_integer('low', 40, 'low degree threshold')
flags.DEFINE_integer('high', 80, 'high degree threshold')
flags.DEFINE_string('data_file', '', 'raw data path')
flags.DEFINE_string('filename', '', 'graph filename')
flags.DEFINE_string('save_path', '', 'graph save path')
def main(_):
    """Load the raw post graph, filter it by author set and degree, pickle it."""
    df = pd.read_csv(FLAGS.data_file)
    # Only authors present in the raw data are kept by the filter below.
    author_set = set(df['author'].unique())
    graph = load_graph_from_edgelist_csv(FLAGS.g_file)
    logging.info('Original Graph size: %d nodes, %d edges',
                 graph.number_of_nodes(), graph.number_of_edges())
    graph = graph_filter_with_degree(graph, FLAGS.low, FLAGS.high, author_set)
    logging.info('Filtered Graph size: %d nodes, %d edges',
                 graph.number_of_nodes(), graph.number_of_edges())
    # Output name encodes the degree bounds, e.g. '<filename>40_80.gpickle'.
    nx.write_gpickle(graph, os.path.join(
        FLAGS.save_path, FLAGS.filename + '%s_%s.gpickle' %
        (FLAGS.low, FLAGS.high)))
    logging.info('Saved graph.')
# Standard absl entry point (parses flags, then calls main).
if __name__ == '__main__':
    app.run(main)
| apache-2.0 |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Demo/tix/samples/SHList1.py | 9 | 3965 | # -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
#
# $Id$
#
# Tix Demostration Program
#
# This sample program is structured in such a way so that it can be
# executed from the Tix demo program "tixwidgets.py": it must have a
# procedure called "RunSample". It should also have the "if" statment
# at the end of this file so that it can be run as a standalone
# program using tixwish.
# This file demonstrates the use of the tixScrolledHList widget.
#
import Tix
# Argument for Tk's dooneevent(): process events of every kind.
TCL_ALL_EVENTS = 0


def RunSample (root):
    """Run the ScrolledHList demo dialog until the user closes it."""
    shlist = DemoSHList(root)
    shlist.mainloop()
    shlist.destroy()
class DemoSHList:
    """Demo dialog showing a two-level boss/employee hierarchy in a
    Tix ScrolledHList, with Ok/Cancel buttons that end the dialog."""

    def __init__(self, w):
        # |w| is the parent widget; self.exit flips to >= 0 when the user
        # closes the dialog (see quitcmd/mainloop below).
        self.root = w
        self.exit = -1

        z = w.winfo_toplevel()
        z.wm_protocol("WM_DELETE_WINDOW", lambda self=self: self.quitcmd())

        # We create the frame and the ScrolledHList widget
        # at the top of the dialog box
        #
        top = Tix.Frame( w, relief=Tix.RAISED, bd=1)

        # Put a simple hierachy into the HList (two levels). Use colors and
        # separator widgets (frames) to make the list look fancy
        #
        top.a = Tix.ScrolledHList(top)
        top.a.pack( expand=1, fill=Tix.BOTH, padx=10, pady=10, side=Tix.TOP)

        # This is our little relational database
        #
        bosses = [
            ('jeff',  'Jeff Waxman'),
            ('john',  'John Lee'),
            ('peter', 'Peter Kenson')
        ]

        employees = [
            ('alex',  'john',  'Alex Kellman'),
            ('alan',  'john',  'Alan Adams'),
            ('andy',  'peter', 'Andreas Crawford'),
            ('doug',  'jeff',  'Douglas Bloom'),
            ('jon',   'peter', 'Jon Baraki'),
            ('chris', 'jeff',  'Chris Geoffrey'),
            ('chuck', 'jeff',  'Chuck McLean')
        ]

        hlist=top.a.hlist

        # Let configure the appearance of the HList subwidget
        #
        hlist.config( separator='.', width=25, drawbranch=0, indent=10)

        count=0
        for boss,name in bosses :
            if count :
                # Thin sunken frame used as a visual separator between
                # boss groups.
                f=Tix.Frame(hlist, name='sep%d' % count, height=2, width=150,
                            bd=2, relief=Tix.SUNKEN )
                hlist.add_child( itemtype=Tix.WINDOW,
                                 window=f, state=Tix.DISABLED )
            hlist.add(boss, itemtype=Tix.TEXT, text=name)
            count = count+1

        for person,boss,name in employees :
            # '.' is the separator character we chose above
            #
            key= boss + '.' + person
            #    ^^^^   ^^^^^^
            #    parent entryPath / child's name

            hlist.add( key, text=name )

            # [Hint] Make sure the keys (e.g. 'boss.person') you choose
            # are unique names. If you cannot be sure of this (because of
            # the structure of your database, e.g.) you can use the
            # "add_child" command instead:
            #
            #  hlist.addchild( boss, text=name)
            #                  ^^^^
            #                  parent entryPath

        # Use a ButtonBox to hold the buttons.
        #
        box= Tix.ButtonBox(top, orientation=Tix.HORIZONTAL )
        box.add( 'ok', text='Ok', underline=0, width=6,
                 command = self.okcmd)
        box.add( 'cancel', text='Cancel', underline=0, width=6,
                 command = self.quitcmd)
        box.pack( side=Tix.BOTTOM, fill=Tix.X)
        top.pack( side=Tix.TOP, fill=Tix.BOTH, expand=1 )

    def okcmd (self):
        # Ok behaves like Cancel in this demo: just close the dialog.
        self.quitcmd()

    def quitcmd (self):
        # Signal the hand-rolled mainloop below to stop.
        self.exit = 0

    def mainloop(self):
        # Hand-rolled event loop so the demo can also be embedded in the
        # main demo program "tixwidgets.py".
        while self.exit < 0:
            self.root.tk.dooneevent(TCL_ALL_EVENTS)

    def destroy (self):
        self.root.destroy()
# This "if" statement makes it possible to run this script file inside or
# outside of the main demo program "tixwidgets.py" (which imports this
# module and calls RunSample itself).
#
if __name__== '__main__' :
    root=Tix.Tk()
    RunSample(root)
| gpl-3.0 |
signed/intellij-community | python/lib/Lib/pickle.py | 86 | 44800 | """Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision: 38432 $" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
import re
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
    """A common base class for the other pickling exceptions."""
class PicklingError(PickleError):
    """Raised when an unpicklable object is passed to Pickler.dump()."""
class UnpicklingError(PickleError):
    """Raised when a pickle stream cannot be decoded, e.g. on a security
    violation.

    Note that other exceptions may also be raised during unpickling,
    including (but not necessarily limited to) AttributeError, EOFError,
    ImportError, and IndexError.
    """
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
del x
# Pickling machinery
class Pickler:
def __init__(self, file, protocol=None):
"""This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.
"""
if protocol is None:
protocol = 0
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
self.write = file.write
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
if self.proto >= 2:
self.write(PROTO + chr(self.proto))
self.save(obj)
self.write(STOP)
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
memo_len = len(self.memo)
self.write(self.put(memo_len))
self.memo[id(obj)] = memo_len, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINPUT + chr(i)
else:
return LONG_BINPUT + pack("<i", i)
return PUT + repr(i) + '\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINGET + chr(i)
else:
return LONG_BINGET + pack("<i", i)
return GET + repr(i) + '\n'
    def save(self, obj):
        """Pickle obj, dispatching on its type (the heart of the pickler).

        Resolution order: persistent id, memo back-reference, the per-type
        dispatch table, class objects, copy_reg.dispatch_table, and finally
        the object's own __reduce_ex__/__reduce__ protocol.
        """
        # Check for persistent id (defined by a subclass)
        pid = self.persistent_id(obj)
        if pid:
            self.save_pers(pid)
            return

        # Check the memo: already-seen objects become back-references.
        x = self.memo.get(id(obj))
        if x:
            self.write(self.get(x[0]))
            return

        # Check the type dispatch table
        t = type(obj)
        f = self.dispatch.get(t)
        if f:
            f(self, obj) # Call unbound method with explicit self
            return

        # Check for a class with a custom metaclass; treat as regular class
        try:
            issc = issubclass(t, TypeType)
        except TypeError: # t is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            self.save_global(obj)
            return

        # Check copy_reg.dispatch_table
        reduce = dispatch_table.get(t)
        if reduce:
            rv = reduce(obj)
        else:
            # Check for a __reduce_ex__ method, fall back to __reduce__
            reduce = getattr(obj, "__reduce_ex__", None)
            if reduce:
                rv = reduce(self.proto)
            else:
                reduce = getattr(obj, "__reduce__", None)
                if reduce:
                    rv = reduce()
                else:
                    raise PicklingError("Can't pickle %r object: %r" %
                                        (t.__name__, obj))

        # Check for string returned by reduce(), meaning "save as global"
        if type(rv) is StringType:
            self.save_global(obj, rv)
            return

        # Assert that reduce() returned a tuple
        if type(rv) is not TupleType:
            raise PicklingError("%s must return string or tuple" % reduce)

        # Assert that it returned an appropriately sized tuple
        l = len(rv)
        if not (2 <= l <= 5):
            raise PicklingError("Tuple returned by %s must have "
                                "two to five elements" % reduce)

        # Save the reduce() output and finally memoize the object
        self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid)
self.write(BINPERSID)
else:
self.write(PERSID + str(pid) + '\n')
    def save_reduce(self, func, args, state=None,
                    listitems=None, dictitems=None, obj=None):
        """Emit the opcodes for one reduce() result.

        *func* and *args* are the first two items of a __reduce__ (or
        copy_reg) tuple; *state*, *listitems* and *dictitems* are the
        optional 3rd..5th items.  When *obj* is given it is memoized right
        after its creation opcode so back-references to it work.
        """
        # This API is called by some subclasses

        # Assert that args is a tuple or None
        if not isinstance(args, TupleType):
            raise PicklingError("args from reduce() should be a tuple")

        # Assert that func is callable
        if not callable(func):
            raise PicklingError("func from reduce should be callable")

        save = self.save
        write = self.write

        # Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
        if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
            # A __reduce__ implementation can direct protocol 2 to
            # use the more efficient NEWOBJ opcode, while still
            # allowing protocol 0 and 1 to work normally.  For this to
            # work, the function returned by __reduce__ should be
            # called __newobj__, and its first argument should be a
            # new-style class.  The implementation for __newobj__
            # should be as follows, although pickle has no way to
            # verify this:
            #
            # def __newobj__(cls, *args):
            #     return cls.__new__(cls, *args)
            #
            # Protocols 0 and 1 will pickle a reference to __newobj__,
            # while protocol 2 (and above) will pickle a reference to
            # cls, the remaining args tuple, and the NEWOBJ code,
            # which calls cls.__new__(cls, *args) at unpickling time
            # (see load_newobj below).  If __reduce__ returns a
            # three-tuple, the state from the third tuple item will be
            # pickled regardless of the protocol, calling __setstate__
            # at unpickling time (see load_build below).
            #
            # Note that no standard __newobj__ implementation exists;
            # you have to provide your own.  This is to enforce
            # compatibility with Python 2.2 (pickles written using
            # protocol 0 or 1 in Python 2.3 should be unpicklable by
            # Python 2.2).
            cls = args[0]
            if not hasattr(cls, "__new__"):
                raise PicklingError(
                    "args[0] from __newobj__ args has no __new__")
            if obj is not None and cls is not obj.__class__:
                raise PicklingError(
                    "args[0] from __newobj__ args has the wrong class")
            args = args[1:]
            save(cls)
            save(args)
            write(NEWOBJ)
        else:
            save(func)
            save(args)
            write(REDUCE)

        if obj is not None:
            self.memoize(obj)

        # More new special cases (that work with older protocols as
        # well): when __reduce__ returns a tuple with 4 or 5 items,
        # the 4th and 5th item should be iterators that provide list
        # items and dict items (as (key, value) tuples), or None.

        if listitems is not None:
            self._batch_appends(listitems)

        if dictitems is not None:
            self._batch_setitems(dictitems)

        if state is not None:
            save(state)
            write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[NoneType] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(obj and NEWTRUE or NEWFALSE)
else:
self.write(obj and TRUE or FALSE)
dispatch[bool] = save_bool
    def save_int(self, obj, pack=struct.pack):
        """Pickle an int, using the smallest binary encoding that fits."""
        if self.bin:
            # If the int is small enough to fit in a signed 4-byte 2's-comp
            # format, we can store it more efficiently than the general
            # case.
            # First one- and two-byte unsigned ints:
            if obj >= 0:
                if obj <= 0xff:
                    self.write(BININT1 + chr(obj))
                    return
                if obj <= 0xffff:
                    self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
                    return
            # Next check for 4-byte signed ints:
            high_bits = obj >> 31  # note that Python shift sign-extends
            if high_bits == 0 or high_bits == -1:
                # All high bits are copies of bit 2**31, so the value
                # fits in a 4-byte signed int.
                self.write(BININT + pack("<i", obj))
                return
        # Text pickle, or int too big to fit in signed 4-byte format.
        self.write(INT + repr(obj) + '\n')
    dispatch[IntType] = save_int
def save_long(self, obj, pack=struct.pack):
if self.proto >= 2:
bytes = encode_long(obj)
n = len(bytes)
if n < 256:
self.write(LONG1 + chr(n) + bytes)
else:
self.write(LONG4 + pack("<i", n) + bytes)
return
self.write(LONG + repr(obj) + '\n')
dispatch[LongType] = save_long
def save_float(self, obj, pack=struct.pack):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj) + '\n')
dispatch[FloatType] = save_float
def save_string(self, obj, pack=struct.pack):
if self.bin:
n = len(obj)
if n < 256:
self.write(SHORT_BINSTRING + chr(n) + obj)
else:
self.write(BINSTRING + pack("<i", n) + obj)
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_unicode(self, obj, pack=struct.pack):
if self.bin:
encoding = obj.encode('utf-8')
n = len(encoding)
self.write(BINUNICODE + pack("<i", n) + encoding)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
self.memoize(obj)
dispatch[UnicodeType] = save_unicode
    if StringType == UnicodeType:
        # This is true for Jython, where there is a single string type: the
        # definition below replaces both save_string and save_unicode above.
        def save_string(self, obj, pack=struct.pack):
            # isunicode() reports whether the value needs unicode
            # treatment (UTF-8/UNICODE opcodes) rather than plain bytes.
            unicode = obj.isunicode()

            if self.bin:
                if unicode:
                    obj = obj.encode("utf-8")
                l = len(obj)
                if l < 256 and not unicode:
                    self.write(SHORT_BINSTRING + chr(l) + obj)
                else:
                    s = pack("<i", l)
                    if unicode:
                        self.write(BINUNICODE + s + obj)
                    else:
                        self.write(BINSTRING + s + obj)
            else:
                if unicode:
                    # Text format is line-based: escape backslash/newline
                    # before the raw-unicode-escape encoding.
                    obj = obj.replace("\\", "\\u005c")
                    obj = obj.replace("\n", "\\u000a")
                    obj = obj.encode('raw-unicode-escape')
                    self.write(UNICODE + obj + '\n')
                else:
                    self.write(STRING + repr(obj) + '\n')
            self.memoize(obj)
        dispatch[StringType] = save_string
    def save_tuple(self, obj):
        """Pickle a tuple, handling self-referential tuples via the memo."""
        write = self.write
        proto = self.proto

        n = len(obj)
        if n == 0:
            if proto:
                write(EMPTY_TUPLE)
            else:
                write(MARK + TUPLE)
            return

        save = self.save
        memo = self.memo
        if n <= 3 and proto >= 2:
            # TUPLE1/2/3 build small tuples without a MARK opcode.
            for element in obj:
                save(element)
            # Subtle.  Same as in the big comment below.
            if id(obj) in memo:
                get = self.get(memo[id(obj)][0])
                write(POP * n + get)
            else:
                write(_tuplesize2code[n])
                self.memoize(obj)
            return

        # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
        # has more than 3 elements.
        write(MARK)
        for element in obj:
            save(element)

        if id(obj) in memo:
            # Subtle.  d was not in memo when we entered save_tuple(), so
            # the process of saving the tuple's elements must have saved
            # the tuple itself:  the tuple is recursive.  The proper action
            # now is to throw away everything we put on the stack, and
            # simply GET the tuple (it's already constructed).  This check
            # could have been done in the "for element" loop instead, but
            # recursive tuples are a rare thing.
            get = self.get(memo[id(obj)][0])
            if proto:
                write(POP_MARK + get)
            else:   # proto 0 -- POP_MARK not available
                write(POP * (n+1) + get)
            return

        # No recursion.
        self.write(TUPLE)
        self.memoize(obj)

    dispatch[TupleType] = save_tuple
# save_empty_tuple() isn't used by anything in Python 2.3. However, I
# found a Pickler subclass in Zope3 that calls it, so it's not harmless
# to remove it.
def save_empty_tuple(self, obj):
    # Kept only for backward compatibility with external subclasses.
    self.write(EMPTY_TUPLE)
def save_list(self, obj):
    # Pickle a list: emit an empty list first and memoize it, so that a
    # recursive list can refer back to itself while its items are saved.
    write = self.write

    if self.bin:
        write(EMPTY_LIST)
    else:   # proto 0 -- can't use EMPTY_LIST
        write(MARK + LIST)

    self.memoize(obj)
    self._batch_appends(iter(obj))

dispatch[ListType] = save_list
# Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
# out of synch, though.
_BATCHSIZE = 1000

def _batch_appends(self, items):
    # Helper to batch up APPENDS sequences
    save = self.save
    write = self.write

    if not self.bin:
        # Protocol 0 has no APPENDS opcode; emit one APPEND per element.
        for x in items:
            save(x)
            write(APPEND)
        return

    # Consume the iterator in chunks of up to _BATCHSIZE elements; a
    # chunk of 2+ elements becomes MARK ... APPENDS, a single element a
    # plain APPEND.  (Python 2: xrange / iterator .next()).
    r = xrange(self._BATCHSIZE)
    while items is not None:
        tmp = []
        for i in r:
            try:
                x = items.next()
                tmp.append(x)
            except StopIteration:
                items = None
                break
        n = len(tmp)
        if n > 1:
            write(MARK)
            for x in tmp:
                save(x)
            write(APPENDS)
        elif n:
            save(tmp[0])
            write(APPEND)
        # else tmp is empty, and we're done
def save_dict(self, obj):
    # Pickle a dict: emit an empty dict first and memoize it (handles
    # recursive dicts), then fill it in batched SETITEMS sequences.
    write = self.write

    if self.bin:
        write(EMPTY_DICT)
    else:   # proto 0 -- can't use EMPTY_DICT
        write(MARK + DICT)

    self.memoize(obj)
    self._batch_setitems(obj.iteritems())

dispatch[DictionaryType] = save_dict
# PyStringMap (restricted-execution dict type) pickles like a dict.
if not PyStringMap is None:
    dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
    # Helper to batch up SETITEMS sequences; proto >= 1 only
    save = self.save
    write = self.write

    if not self.bin:
        # Protocol 0 has no SETITEMS; emit one SETITEM per pair.
        for k, v in items:
            save(k)
            save(v)
            write(SETITEM)
        return

    # Consume (key, value) pairs in chunks of up to _BATCHSIZE; 2+ pairs
    # become MARK ... SETITEMS, one pair a plain SETITEM.
    r = xrange(self._BATCHSIZE)
    while items is not None:
        tmp = []
        for i in r:
            try:
                tmp.append(items.next())
            except StopIteration:
                items = None
                break
        n = len(tmp)
        if n > 1:
            write(MARK)
            for k, v in tmp:
                save(k)
                save(v)
            write(SETITEMS)
        elif n:
            k, v = tmp[0]
            save(k)
            save(v)
            write(SETITEM)
        # else tmp is empty, and we're done
def save_inst(self, obj):
    # Pickle an old-style class instance (INST opcode in text mode, OBJ in
    # binary mode), then its state via BUILD.
    cls = obj.__class__

    memo = self.memo
    write = self.write
    save = self.save

    if hasattr(obj, '__getinitargs__'):
        args = obj.__getinitargs__()
        len(args)  # XXX Assert it's a sequence
        _keep_alive(args, memo)
    else:
        args = ()

    write(MARK)

    if self.bin:
        save(cls)
        for arg in args:
            save(arg)
        write(OBJ)
    else:
        for arg in args:
            save(arg)
        write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')

    self.memoize(obj)

    # Instance state: __getstate__() result if defined, else __dict__.
    try:
        getstate = obj.__getstate__
    except AttributeError:
        stuff = obj.__dict__
    else:
        stuff = getstate()
        _keep_alive(stuff, memo)
    save(stuff)
    write(BUILD)

dispatch[InstanceType] = save_inst
def save_global(self, obj, name=None, pack=struct.pack):
    # Pickle a class/function by reference as "module\nname\n", first
    # verifying that the name actually resolves back to the same object.
    write = self.write
    memo = self.memo

    if name is None:
        name = obj.__name__

    module = getattr(obj, "__module__", None)
    if module is None:
        module = whichmodule(obj, name)

    try:
        __import__(module)
        mod = sys.modules[module]
        klass = getattr(mod, name)
    except (ImportError, KeyError, AttributeError):
        raise PicklingError(
            "Can't pickle %r: it's not found as %s.%s" %
            (obj, module, name))
    else:
        if klass is not obj:
            raise PicklingError(
                "Can't pickle %r: it's not the same object as %s.%s" %
                (obj, module, name))

    if self.proto >= 2:
        # Prefer the compact EXT1/EXT2/EXT4 opcodes when the
        # (module, name) pair is in the extension registry.
        code = _extension_registry.get((module, name))
        if code:
            assert code > 0
            if code <= 0xff:
                write(EXT1 + chr(code))
            elif code <= 0xffff:
                write("%c%c%c" % (EXT2, code&0xff, code>>8))
            else:
                write(EXT4 + pack("<i", code))
            return

    write(GLOBAL + module + '\n' + name + '\n')
    self.memoize(obj)

dispatch[ClassType] = save_global
dispatch[FunctionType] = save_global
dispatch[BuiltinFunctionType] = save_global
dispatch[TypeType] = save_global
# Pickling helpers
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
classmap = {}  # called classmap for backwards compatibility

def whichmodule(func, funcname):
    """Figure out the module in which a function occurs.

    Search sys.modules for the module.
    Cache in classmap.
    Return a module name.
    If the function cannot be found, return "__main__".
    """
    # Python functions should always get an __module__ from their globals.
    mod = getattr(func, "__module__", None)
    if mod is not None:
        return mod
    try:
        return classmap[func]
    except KeyError:
        pass
    # Scan loaded modules for an attribute named funcname that is this
    # very object; fall back to "__main__" when nothing matches.
    found = '__main__'
    for modname, module in sys.modules.items():
        if module is None:
            continue  # skip dummy package entries
        if modname != '__main__' and getattr(module, funcname, None) is func:
            found = modname
            break
    classmap[func] = found
    return found
# Unpickling machinery
class Unpickler:
    # Stack-machine interpreter for pickle streams (Python 2
    # implementation).  Each opcode byte read from the stream is looked up
    # in the class-level `dispatch` table; the bound load_* method mutates
    # self.stack, and load_stop() raises _Stop to deliver the final value.

    def __init__(self, file):
        """This takes a file-like object for reading a pickle data stream.

        The protocol version of the pickle is detected automatically, so no
        proto argument is needed.

        The file-like object must have two methods, a read() method that
        takes an integer argument, and a readline() method that requires no
        arguments.  Both methods should return a string.  Thus file-like
        object can be a file object opened for reading, a StringIO object,
        or any other custom object that meets this interface.
        """
        self.readline = file.readline
        self.read = file.read
        self.memo = {}

    def load(self):
        """Read a pickled object representation from the open file.

        Return the reconstituted object hierarchy specified in the file.
        """
        self.mark = object()  # any new unique object
        self.stack = []
        self.append = self.stack.append
        read = self.read
        dispatch = self.dispatch
        try:
            while 1:
                key = read(1)
                dispatch[key](self)
        except _Stop, stopinst:
            # Raised by load_stop() with the fully reconstructed object.
            return stopinst.value

    # Return largest index k such that self.stack[k] is self.mark.
    # If the stack doesn't contain a mark, eventually raises IndexError.
    # This could be sped by maintaining another stack, of indices at which
    # the mark appears. For that matter, the latter stack would suffice,
    # and we wouldn't need to push mark objects on self.stack at all.
    # Doing so is probably a good thing, though, since if the pickle is
    # corrupt (or hostile) we may get a clue from finding self.mark embedded
    # in unpickled objects.
    def marker(self):
        stack = self.stack
        mark = self.mark
        k = len(stack)-1
        while stack[k] is not mark: k = k-1
        return k

    # Maps opcode byte -> unbound load_* method; filled in as the methods
    # are defined below.
    dispatch = {}

    def load_eof(self):
        # read(1) returned '': the underlying file is exhausted.
        raise EOFError
    dispatch[''] = load_eof

    def load_proto(self):
        proto = ord(self.read(1))
        if not 0 <= proto <= 2:
            raise ValueError, "unsupported pickle protocol: %d" % proto
    dispatch[PROTO] = load_proto

    def load_persid(self):
        # Text-mode persistent ID: one line, resolved by persistent_load().
        pid = self.readline()[:-1]
        self.append(self.persistent_load(pid))
    dispatch[PERSID] = load_persid

    def load_binpersid(self):
        # Binary persistent ID: taken from the top of the stack.
        pid = self.stack.pop()
        self.append(self.persistent_load(pid))
    dispatch[BINPERSID] = load_binpersid

    def load_none(self):
        self.append(None)
    dispatch[NONE] = load_none

    def load_false(self):
        self.append(False)
    dispatch[NEWFALSE] = load_false

    def load_true(self):
        self.append(True)
    dispatch[NEWTRUE] = load_true

    def load_int(self):
        # Text-mode int; "00"/"01" encode the protocol-0 booleans.
        data = self.readline()
        if data == FALSE[1:]:
            val = False
        elif data == TRUE[1:]:
            val = True
        else:
            try:
                val = int(data)
            except ValueError:
                val = long(data)
        self.append(val)
    dispatch[INT] = load_int

    def load_binint(self):
        # 4-byte signed little-endian int.
        self.append(mloads('i' + self.read(4)))
    dispatch[BININT] = load_binint

    def load_binint1(self):
        # 1-byte unsigned int.
        self.append(ord(self.read(1)))
    dispatch[BININT1] = load_binint1

    def load_binint2(self):
        # 2-byte unsigned int, zero-extended to 4 bytes for mloads.
        self.append(mloads('i' + self.read(2) + '\000\000'))
    dispatch[BININT2] = load_binint2

    def load_long(self):
        # Text-mode long; base 0 accepts the trailing 'L' repr form.
        self.append(long(self.readline()[:-1], 0))
    dispatch[LONG] = load_long

    def load_long1(self):
        # Long with 1-byte length prefix, two's-complement little-endian.
        n = ord(self.read(1))
        bytes = self.read(n)
        self.append(decode_long(bytes))
    dispatch[LONG1] = load_long1

    def load_long4(self):
        # Long with 4-byte length prefix.
        n = mloads('i' + self.read(4))
        bytes = self.read(n)
        self.append(decode_long(bytes))
    dispatch[LONG4] = load_long4

    def load_float(self):
        self.append(float(self.readline()[:-1]))
    dispatch[FLOAT] = load_float

    def load_binfloat(self, unpack=struct.unpack):
        # 8-byte big-endian IEEE double.
        self.append(unpack('>d', self.read(8))[0])
    dispatch[BINFLOAT] = load_binfloat

    def load_string(self):
        # Text-mode string: a repr()-style quoted literal.  Reject input
        # that is not properly quoted rather than eval()ing it.
        rep = self.readline()[:-1]
        for q in "\"'": # double or single quote
            if rep.startswith(q):
                if not rep.endswith(q):
                    raise ValueError, "insecure string pickle"
                rep = rep[len(q):-len(q)]
                break
        else:
            raise ValueError, "insecure string pickle"
        self.append(rep.decode("string-escape"))
    dispatch[STRING] = load_string

    def load_binstring(self):
        # String with 4-byte length prefix.
        len = mloads('i' + self.read(4))
        self.append(self.read(len))
    dispatch[BINSTRING] = load_binstring

    def load_unicode(self):
        self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
    dispatch[UNICODE] = load_unicode

    def load_binunicode(self):
        # UTF-8 payload with 4-byte length prefix.
        len = mloads('i' + self.read(4))
        self.append(unicode(self.read(len),'utf-8'))
    dispatch[BINUNICODE] = load_binunicode

    def load_short_binstring(self):
        # String with 1-byte length prefix.
        len = ord(self.read(1))
        self.append(self.read(len))
    dispatch[SHORT_BINSTRING] = load_short_binstring

    def load_tuple(self):
        # Collapse everything above the topmost mark into one tuple.
        k = self.marker()
        self.stack[k:] = [tuple(self.stack[k+1:])]
    dispatch[TUPLE] = load_tuple

    def load_empty_tuple(self):
        self.stack.append(())
    dispatch[EMPTY_TUPLE] = load_empty_tuple

    def load_tuple1(self):
        self.stack[-1] = (self.stack[-1],)
    dispatch[TUPLE1] = load_tuple1

    def load_tuple2(self):
        self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
    dispatch[TUPLE2] = load_tuple2

    def load_tuple3(self):
        self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
    dispatch[TUPLE3] = load_tuple3

    def load_empty_list(self):
        self.stack.append([])
    dispatch[EMPTY_LIST] = load_empty_list

    def load_empty_dictionary(self):
        self.stack.append({})
    dispatch[EMPTY_DICT] = load_empty_dictionary

    def load_list(self):
        # Collapse everything above the topmost mark into one list.
        k = self.marker()
        self.stack[k:] = [self.stack[k+1:]]
    dispatch[LIST] = load_list

    def load_dict(self):
        # Items above the mark alternate key, value, key, value, ...
        k = self.marker()
        d = {}
        items = self.stack[k+1:]
        for i in range(0, len(items), 2):
            key = items[i]
            value = items[i+1]
            d[key] = value
        self.stack[k:] = [d]
    dispatch[DICT] = load_dict

    # INST and OBJ differ only in how they get a class object. It's not
    # only sensible to do the rest in a common routine, the two routines
    # previously diverged and grew different bugs.
    # klass is the class to instantiate, and k points to the topmost mark
    # object, following which are the arguments for klass.__init__.
    def _instantiate(self, klass, k):
        args = tuple(self.stack[k+1:])
        del self.stack[k:]
        instantiated = 0
        if (not args and
                type(klass) is ClassType and
                not hasattr(klass, "__getinitargs__")):
            # Old-style class, no init args: create an empty shell and
            # retarget its __class__ without running __init__.
            try:
                value = _EmptyClass()
                value.__class__ = klass
                instantiated = 1
            except RuntimeError:
                # In restricted execution, assignment to inst.__class__ is
                # prohibited
                pass
        if not instantiated:
            try:
                value = klass(*args)
            except TypeError, err:
                raise TypeError, "in constructor for %s: %s" % (
                    klass.__name__, str(err)), sys.exc_info()[2]
        self.append(value)

    def load_inst(self):
        # Class given by module/name lines in the stream.
        module = self.readline()[:-1]
        name = self.readline()[:-1]
        klass = self.find_class(module, name)
        self._instantiate(klass, self.marker())
    dispatch[INST] = load_inst

    def load_obj(self):
        # Stack is ... markobject classobject arg1 arg2 ...
        k = self.marker()
        klass = self.stack.pop(k+1)
        self._instantiate(klass, k)
    dispatch[OBJ] = load_obj

    def load_newobj(self):
        # Protocol 2: cls.__new__(cls, *args), bypassing __init__.
        args = self.stack.pop()
        cls = self.stack[-1]
        obj = cls.__new__(cls, *args)
        self.stack[-1] = obj
    dispatch[NEWOBJ] = load_newobj

    def load_global(self):
        module = self.readline()[:-1]
        name = self.readline()[:-1]
        klass = self.find_class(module, name)
        self.append(klass)
    dispatch[GLOBAL] = load_global

    def load_ext1(self):
        # Extension-registry code with 1-byte code.
        code = ord(self.read(1))
        self.get_extension(code)
    dispatch[EXT1] = load_ext1

    def load_ext2(self):
        # 2-byte little-endian code, zero-extended for mloads.
        code = mloads('i' + self.read(2) + '\000\000')
        self.get_extension(code)
    dispatch[EXT2] = load_ext2

    def load_ext4(self):
        # 4-byte little-endian code.
        code = mloads('i' + self.read(4))
        self.get_extension(code)
    dispatch[EXT4] = load_ext4

    def get_extension(self, code):
        # Resolve an extension code via the cache, else via the inverted
        # registry (and populate the cache).
        nil = []  # unique sentinel distinguishing "missing" from any value
        obj = _extension_cache.get(code, nil)
        if obj is not nil:
            self.append(obj)
            return
        key = _inverted_registry.get(code)
        if not key:
            raise ValueError("unregistered extension code %d" % code)
        obj = self.find_class(*key)
        _extension_cache[code] = obj
        self.append(obj)

    def find_class(self, module, name):
        # Subclasses may override this
        __import__(module)
        mod = sys.modules[module]
        klass = getattr(mod, name)
        return klass

    def load_reduce(self):
        # Apply callable (below top of stack) to its argument tuple (top).
        stack = self.stack
        args = stack.pop()
        func = stack[-1]
        value = func(*args)
        stack[-1] = value
    dispatch[REDUCE] = load_reduce

    def load_pop(self):
        del self.stack[-1]
    dispatch[POP] = load_pop

    def load_pop_mark(self):
        # Discard everything down to and including the topmost mark.
        k = self.marker()
        del self.stack[k:]
    dispatch[POP_MARK] = load_pop_mark

    def load_dup(self):
        self.append(self.stack[-1])
    dispatch[DUP] = load_dup

    def load_get(self):
        # Memo keys are the decimal string forms of the PUT indices.
        self.append(self.memo[self.readline()[:-1]])
    dispatch[GET] = load_get

    def load_binget(self):
        i = ord(self.read(1))
        self.append(self.memo[repr(i)])
    dispatch[BINGET] = load_binget

    def load_long_binget(self):
        i = mloads('i' + self.read(4))
        self.append(self.memo[repr(i)])
    dispatch[LONG_BINGET] = load_long_binget

    def load_put(self):
        self.memo[self.readline()[:-1]] = self.stack[-1]
    dispatch[PUT] = load_put

    def load_binput(self):
        i = ord(self.read(1))
        self.memo[repr(i)] = self.stack[-1]
    dispatch[BINPUT] = load_binput

    def load_long_binput(self):
        i = mloads('i' + self.read(4))
        self.memo[repr(i)] = self.stack[-1]
    dispatch[LONG_BINPUT] = load_long_binput

    def load_append(self):
        stack = self.stack
        value = stack.pop()
        list = stack[-1]
        list.append(value)
    dispatch[APPEND] = load_append

    def load_appends(self):
        # Extend the list sitting just below the topmost mark with every
        # value above the mark.
        stack = self.stack
        mark = self.marker()
        list = stack[mark - 1]
        list.extend(stack[mark + 1:])
        del stack[mark:]
    dispatch[APPENDS] = load_appends

    def load_setitem(self):
        stack = self.stack
        value = stack.pop()
        key = stack.pop()
        dict = stack[-1]
        dict[key] = value
    dispatch[SETITEM] = load_setitem

    def load_setitems(self):
        # Fill the dict just below the topmost mark from the alternating
        # key/value pairs above the mark.
        stack = self.stack
        mark = self.marker()
        dict = stack[mark - 1]
        for i in range(mark + 1, len(stack), 2):
            dict[stack[i]] = stack[i + 1]
        del stack[mark:]
    dispatch[SETITEMS] = load_setitems

    def load_build(self):
        # Restore instance state: prefer __setstate__; otherwise update
        # __dict__ (and, for protocol 2 two-tuples, set slot attributes).
        stack = self.stack
        state = stack.pop()
        inst = stack[-1]
        setstate = getattr(inst, "__setstate__", None)
        if setstate:
            setstate(state)
            return
        slotstate = None
        if isinstance(state, tuple) and len(state) == 2:
            state, slotstate = state
        if state:
            try:
                inst.__dict__.update(state)
            except RuntimeError:
                # XXX In restricted execution, the instance's __dict__
                # is not accessible. Use the old way of unpickling
                # the instance variables. This is a semantic
                # difference when unpickling in restricted
                # vs. unrestricted modes.
                # Note, however, that cPickle has never tried to do the
                # .update() business, and always uses
                # PyObject_SetItem(inst.__dict__, key, value) in a
                # loop over state.items().
                for k, v in state.items():
                    setattr(inst, k, v)
        if slotstate:
            for k, v in slotstate.items():
                setattr(inst, k, v)
    dispatch[BUILD] = load_build

    def load_mark(self):
        self.append(self.mark)
    dispatch[MARK] = load_mark

    def load_stop(self):
        # Deliver the result to Unpickler.load() via the _Stop exception.
        value = self.stack.pop()
        raise _Stop(value)
    dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
class _EmptyClass:
    # Blank old-style class used as a shell: _instantiate() creates an
    # instance without running any constructor and then reassigns its
    # __class__ to the real target class.
    pass
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
    r"""Encode a long to a two's complement little-endian binary string.
    Note that 0L is a special case, returning an empty string, to save a
    byte in the LONG1 pickling context.

    >>> encode_long(0L)
    ''
    >>> encode_long(255L)
    '\xff\x00'
    >>> encode_long(32767L)
    '\xff\x7f'
    >>> encode_long(-256L)
    '\x00\xff'
    >>> encode_long(-32768L)
    '\x00\x80'
    >>> encode_long(-128L)
    '\x80'
    >>> encode_long(127L)
    '\x7f'
    >>>
    """
    # Strategy: build a big-endian hex string of the right width (with a
    # sign byte where needed), unhexlify it, then reverse for
    # little-endian output.  Linear time via hex()/unhexlify.
    if x == 0:
        return ''
    if x > 0:
        ashex = hex(x)
        assert ashex.startswith("0x")
        njunkchars = 2 + ashex.endswith('L')  # "0x" prefix, maybe 'L' suffix
        nibbles = len(ashex) - njunkchars
        if nibbles & 1:
            # need an even # of nibbles for unhexlify
            ashex = "0x0" + ashex[2:]
        elif int(ashex[2], 16) >= 8:
            # "looks negative", so need a byte of sign bits
            ashex = "0x00" + ashex[2:]
    else:
        # Build the 256's-complement: (1L << nbytes) + x. The trick is
        # to find the number of bytes in linear time (although that should
        # really be a constant-time task).
        ashex = hex(-x)
        assert ashex.startswith("0x")
        njunkchars = 2 + ashex.endswith('L')
        nibbles = len(ashex) - njunkchars
        if nibbles & 1:
            # Extend to a full byte.
            nibbles += 1
        nbits = nibbles * 4
        x += 1L << nbits
        assert x > 0
        ashex = hex(x)
        njunkchars = 2 + ashex.endswith('L')
        newnibbles = len(ashex) - njunkchars
        if newnibbles < nibbles:
            # Leading zero nibbles were dropped by hex(); restore width.
            ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
        if int(ashex[2], 16) < 8:
            # "looks positive", so need a byte of sign bits
            ashex = "0xff" + ashex[2:]

    if ashex.endswith('L'):
        ashex = ashex[2:-1]
    else:
        ashex = ashex[2:]
    assert len(ashex) & 1 == 0, (x, ashex)
    binary = _binascii.unhexlify(ashex)
    return binary[::-1]  # big-endian -> little-endian
def decode_long(data):
    r"""Decode a long from a two's complement little-endian binary string.

    >>> decode_long('')
    0L
    >>> decode_long("\xff\x00")
    255L
    >>> decode_long("\xff\x7f")
    32767L
    >>> decode_long("\x00\xff")
    -256L
    >>> decode_long("\x00\x80")
    -32768L
    >>> decode_long("\x80")
    -128L
    >>> decode_long("\x7f")
    127L
    """
    nbytes = len(data)
    if nbytes == 0:
        return 0L
    # Reverse to big-endian, hexlify, parse as an unsigned value...
    ashex = _binascii.hexlify(data[::-1])
    n = long(ashex, 16) # quadratic time before Python 2.3; linear now
    # ...then subtract 2**(8*nbytes) if the sign bit of the top byte is set.
    if data[-1] >= '\x80':
        n -= 1L << (nbytes * 8)
    return n
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
    """Pickle *obj* to the already-open file object *file*."""
    pickler = Pickler(file, protocol)
    pickler.dump(obj)
def dumps(obj, protocol=None):
    """Return the pickled representation of *obj* as a string."""
    buf = StringIO()
    Pickler(buf, protocol).dump(obj)
    return buf.getvalue()
def load(file):
    """Read one pickled object from the open *file* and return it."""
    unpickler = Unpickler(file)
    return unpickler.load()
def loads(str):
    """Reconstitute one pickled object from the string *str*."""
    return Unpickler(StringIO(str)).load()
# Doctest
def _test():
    # Run the doctests embedded in this module (encode_long/decode_long).
    import doctest
    return doctest.testmod()

if __name__ == "__main__":
    _test()
| apache-2.0 |
joyxu/autotest | client/shared/iscsi.py | 3 | 10227 | """
Basic iscsi support for a Linux host with the help of the commands
iscsiadm and tgtadm.
This includes basic operations such as logging in and looking up a
device name by target name. It supports both access to a real iscsi
target and an emulated iscsi target exported from localhost.
"""
import re
import os
import logging
from autotest.client import os_dep
from autotest.client.shared import utils, error
def iscsi_get_sessions():
    """
    Get the iscsi sessions activated
    """
    output = utils.system_output("iscsiadm --mode session",
                                 ignore_status=True)
    sessions = []
    if "No active sessions" not in output:
        # Each session line carries "ip:port,tpgt target" in fields 2/3.
        for line in output.splitlines():
            fields = line.split()
            ip_addr = fields[2].split(',')[0]
            sessions.append((ip_addr, fields[3]))
    return sessions
def iscsi_get_nodes():
    """
    Get the iscsi nodes
    """
    output = utils.system_output("iscsiadm --mode node")
    if "No records found" in output:
        return []
    # Capture (portal address, target name) pairs from each node line.
    pattern = r"(\d+\.\d+\.\d+\.\d+|\W:{2}\d\W):\d+,\d+\s+([\w\.\-:\d]+)"
    return re.findall(pattern, output)
def iscsi_login(target_name):
    """
    Login to a target with the target name

    :param target_name: Name of the target
    """
    output = utils.system_output(
        "iscsiadm --mode node --login --targetname %s" % target_name)
    # Return the target name on success, the empty string otherwise.
    if "successful" in output:
        return target_name
    return ""
def iscsi_logout(target_name=None):
    """
    Logout from a target. If the target name is not set then logout all
    targets.

    :params target_name: Name of the target.
    """
    if target_name:
        cmd = "iscsiadm --mode node --logout -T %s" % target_name
    else:
        cmd = "iscsiadm --mode node --logout all"
    output = utils.system_output(cmd)

    target_logout = ""
    if "successful" in output:
        target_logout = target_name
    # NOTE(review): when target_name is None (logout-all) and the logout
    # succeeds, this returns None rather than a string -- callers should
    # only rely on the return value being truthy/falsy.
    return target_logout
def iscsi_discover(portal_ip):
    """
    Query from iscsi server for available targets

    :param portal_ip: Ip for iscsi server
    """
    output = utils.system_output(
        "iscsiadm -m discovery -t sendtargets -p %s" % portal_ip,
        ignore_status=True)
    # "Invalid" in the output means the portal rejected the discovery;
    # report the raw output at debug level and return nothing.
    if "Invalid" in output:
        logging.debug(output)
        return ""
    return output
class Iscsi(object):
    """
    Basic iscsi support class. Will handle the emulated iscsi export and
    access to both real iscsi and emulated iscsi device.
    """

    def __init__(self, params, root_dir="/tmp"):
        # iscsiadm is required for everything; tgtadm only when emulating.
        os_dep.command("iscsiadm")
        self.target = params.get("target")
        self.export_flag = False
        if params.get("portal_ip"):
            self.portal_ip = params.get("portal_ip")
        else:
            self.portal_ip = utils.system_output("hostname")
        if params.get("iscsi_thread_id"):
            self.id = params.get("iscsi_thread_id")
        else:
            self.id = utils.generate_random_string(4)
        self.initiator = params.get("initiator")
        if params.get("emulated_image"):
            # Emulated mode: export a local backing file through tgtd
            # instead of using a custom initiator name.
            self.initiator = None
            os_dep.command("tgtadm")
            emulated_image = params.get("emulated_image")
            self.emulated_image = os.path.join(root_dir, emulated_image)
            self.emulated_id = ""
            self.emulated_size = params.get("image_size")
            self.unit = self.emulated_size[-1].upper()  # size suffix K/M/G/T
            self.emulated_size = self.emulated_size[:-1]
            # maps K,M,G,T => (count, bs)
            emulated_size = {'K': (1, 1),
                             'M': (1, 1024),
                             'G': (1024, 1024),
                             'T': (1024, 1048576),
                             }
            # NOTE(review): dict.has_key() is Python 2 only; an unknown
            # unit silently leaves self.create_cmd unset, which would
            # raise AttributeError later in export_target() -- confirm
            # image_size is always validated upstream.
            if emulated_size.has_key(self.unit):
                block_size = emulated_size[self.unit][1]
                size = int(self.emulated_size) * emulated_size[self.unit][0]
                self.create_cmd = ("dd if=/dev/zero of=%s count=%s bs=%sK"
                                   % (self.emulated_image, size, block_size))

    def logged_in(self):
        """
        Check if the session is login or not.
        """
        sessions = iscsi_get_sessions()
        login = False
        # sessions is a list of (ip, target) tuples; compare targets only.
        if self.target in map(lambda x: x[1], sessions):
            login = True
        return login

    def portal_visible(self):
        """
        Check if the portal can be found or not.
        """
        return bool(re.findall("%s$" % self.target,
                               iscsi_discover(self.portal_ip), re.M))

    def login(self):
        """
        Login session for both real iscsi device and emulated iscsi. Include
        env check and setup.
        """
        login_flag = False
        if self.portal_visible():
            login_flag = True
        elif self.initiator:
            # Target not visible yet: swap in the requested initiator name
            # (keeping a per-instance backup) and retry discovery.
            logging.debug("Try to update iscsi initiatorname")
            cmd = "mv /etc/iscsi/initiatorname.iscsi "
            cmd += "/etc/iscsi/initiatorname.iscsi-%s" % self.id
            utils.system(cmd)
            fd = open("/etc/iscsi/initiatorname.iscsi", 'w')
            fd.write("InitiatorName=%s" % self.initiator)
            fd.close()
            utils.system("service iscsid restart")
            if self.portal_visible():
                login_flag = True
        elif self.emulated_image:
            # Emulated mode: export the backing file first, then retry.
            self.export_target()
            utils.system("service iscsid restart")
            if self.portal_visible():
                login_flag = True

        if login_flag:
            iscsi_login(self.target)

    def get_device_name(self):
        """
        Get device name from the target name.
        """
        cmd = "iscsiadm -m session -P 3"
        device_name = ""
        if self.logged_in():
            output = utils.system_output(cmd)
            # Match the disk attached to this target in the session dump.
            pattern = r"Target:\s+%s.*?disk\s(\w+)\s+\S+\srunning" % self.target
            device_name = re.findall(pattern, output, re.S)
            try:
                device_name = "/dev/%s" % device_name[0]
            except IndexError:
                logging.error("Can not find target '%s' after login.", self.target)
        else:
            logging.error("Session is not logged in yet.")
        return device_name

    def get_target_id(self):
        """
        Get target id from image name. Only works for emulated iscsi device
        """
        cmd = "tgtadm --lld iscsi --mode target --op show"
        target_info = utils.system_output(cmd)
        target_id = ""
        # Scan the tgtadm dump: remember the most recent "Target N" line
        # and stop once a backing-store line mentions our image; otherwise
        # reset and keep looking.
        for line in re.split("\n", target_info):
            if re.findall("Target\s+(\d+)", line):
                target_id = re.findall("Target\s+(\d+)", line)[0]
            if re.findall("Backing store path:\s+(/+.+)", line):
                if self.emulated_image in line:
                    break
        else:
            target_id = ""

        return target_id

    def export_target(self):
        """
        Export target in localhost for emulated iscsi
        """
        if not os.path.isfile(self.emulated_image):
            utils.system(self.create_cmd)
        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = utils.system_output(cmd)
        except error.CmdError:
            # tgtd not running yet; start it and retry.
            utils.system("service tgtd restart")
            output = utils.system_output(cmd)
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")
            # Pick the lowest unused numeric target id and create/bind it.
            output = utils.system_output(cmd)
            used_id = re.findall("Target\s+(\d+)", output)
            emulated_id = 1
            while str(emulated_id) in used_id:
                emulated_id += 1
            self.emulated_id = str(emulated_id)
            cmd = "tgtadm --mode target --op new --tid %s" % self.emulated_id
            cmd += " --lld iscsi --targetname %s" % self.target
            utils.system(cmd)
            cmd = "tgtadm --lld iscsi --op bind --mode target "
            cmd += "--tid %s -I ALL" % self.emulated_id
            utils.system(cmd)
        else:
            # Target already exported: recover its id from the listing.
            target_strs = re.findall("Target\s+(\d+):\s+%s$" %
                                     self.target, output, re.M)
            self.emulated_id = target_strs[0].split(':')[0].split()[-1]

        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = utils.system_output(cmd)
        except error.CmdError:   # In case service stopped
            utils.system("service tgtd restart")
            output = utils.system_output(cmd)

        # Create a LUN with emulated image
        if re.findall(self.emulated_image, output, re.M):
            # Exist already
            logging.debug("Exported image already exists.")
            self.export_flag = True
            return
        else:
            # New LUN number = current LUN count (LUN 0 is the controller).
            luns = len(re.findall("\s+LUN:\s(\d+)", output, re.M))
            cmd = "tgtadm --mode logicalunit --op new "
            cmd += "--tid %s --lld iscsi " % self.emulated_id
            cmd += "--lun %s " % luns
            cmd += "--backing-store %s" % self.emulated_image
            utils.system(cmd)
            self.export_flag = True

    def delete_target(self):
        """
        Delete target from host.
        """
        cmd = "tgtadm --lld iscsi --mode target --op show"
        output = utils.system_output(cmd)
        if re.findall("%s$" % self.target, output, re.M):
            if self.emulated_id:
                cmd = "tgtadm --lld iscsi --mode target --op delete "
                cmd += "--tid %s" % self.emulated_id
                utils.system(cmd)

    def logout(self):
        """
        Logout from target.
        """
        if self.logged_in():
            iscsi_logout(self.target)

    def cleanup(self):
        """
        Clean up env after iscsi used.
        """
        self.logout()
        # Restore the initiator name backed up in login(), if any.
        if os.path.isfile("/etc/iscsi/initiatorname.iscsi-%s" % self.id):
            cmd = " mv /etc/iscsi/initiatorname.iscsi-%s" % self.id
            cmd += " /etc/iscsi/initiatorname.iscsi"
            utils.system(cmd)
            cmd = "service iscsid restart"
            utils.system(cmd)
        if self.export_flag:
            self.delete_target()
| gpl-2.0 |
gunchleoc/django | tests/auth_tests/test_middleware.py | 86 | 1251 | from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
class TestAuthenticationMiddleware(TestCase):
    # Verifies that AuthenticationMiddleware keeps a session valid while
    # the user's password is unchanged, and invalidates it after a change.

    def setUp(self):
        # A logged-in user and a bare request carrying that user's session.
        self.user = User.objects.create_user('test_user', 'test@example.com', 'test_password')
        self.middleware = AuthenticationMiddleware()
        self.client.force_login(self.user)
        self.request = HttpRequest()
        self.request.session = self.client.session

    def test_no_password_change_doesnt_invalidate_session(self):
        self.request.session = self.client.session
        self.middleware.process_request(self.request)
        self.assertIsNotNone(self.request.user)
        self.assertFalse(self.request.user.is_anonymous())

    def test_changed_password_invalidates_session(self):
        # After password change, user should be anonymous
        self.user.set_password('new_password')
        self.user.save()
        self.middleware.process_request(self.request)
        self.assertIsNotNone(self.request.user)
        self.assertTrue(self.request.user.is_anonymous())
        # session should be flushed
        self.assertIsNone(self.request.session.session_key)
| bsd-3-clause |
Pablo126/SSBW | Entrega1/lib/python3.5/site-packages/setuptools/command/setopt.py | 299 | 5085 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
from setuptools.extern.six.moves import configparser
from setuptools import Command
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
    """Get the filename of the distutils, local, global, or per-user config.

    `kind` must be one of "local", "global", or "user".

    Raises ValueError for any other value of `kind`.
    """
    if kind == 'local':
        return 'setup.cfg'
    if kind == 'global':
        return os.path.join(
            os.path.dirname(distutils.__file__), 'distutils.cfg'
        )
    if kind == 'user':
        # Historically '~/.pydistutils.cfg' on POSIX, '~/pydistutils.cfg'
        # elsewhere; the conditional expression replaces the fragile
        # `cond and a or b` idiom used previously.
        dot = '.' if os.name == 'posix' else ''
        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
    raise ValueError(
        "config_file() type must be 'local', 'global', or 'user'", kind
    )
def edit_config(filename, settings, dry_run=False):
    """Edit a configuration file to include `settings`

    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
    command/section name. A ``None`` value means to delete the entire section,
    while a dictionary lists settings to be changed or deleted in that section.
    A setting of ``None`` means to delete that setting.
    """
    log.debug("Reading configuration from %s", filename)
    opts = configparser.RawConfigParser()
    opts.read([filename])
    for section, options in settings.items():
        if options is None:
            log.info("Deleting section [%s] from %s", section, filename)
            opts.remove_section(section)
        else:
            if not opts.has_section(section):
                log.debug("Adding new section [%s] to %s", section, filename)
                opts.add_section(section)
            for option, value in options.items():
                if value is None:
                    log.debug(
                        "Deleting %s.%s from %s",
                        section, option, filename
                    )
                    opts.remove_option(section, option)
                    # Dropping the last option empties the section, so the
                    # section itself is removed too.
                    if not opts.options(section):
                        log.info("Deleting empty [%s] section from %s",
                                 section, filename)
                        opts.remove_section(section)
                else:
                    log.debug(
                        "Setting %s.%s to %r in %s",
                        section, option, value, filename
                    )
                    opts.set(section, option, value)

    log.info("Writing %s", filename)
    # With dry_run the edits are computed and logged but never written.
    if not dry_run:
        with open(filename, 'w') as f:
            opts.write(f)
class option_base(Command):
    """Abstract base class for commands that mess with config files"""

    user_options = [
        ('global-config', 'g',
         "save options to the site-wide distutils.cfg file"),
        ('user-config', 'u',
         "save options to the current user's pydistutils.cfg file"),
        ('filename=', 'f',
         "configuration file to use (default=setup.cfg)"),
    ]

    boolean_options = [
        'global-config', 'user-config',
    ]

    def initialize_options(self):
        self.global_config = None
        self.user_config = None
        self.filename = None

    def finalize_options(self):
        # Exactly one destination file may be selected; default to the
        # local setup.cfg when none of the three options was given.
        filenames = []
        if self.global_config:
            filenames.append(config_file('global'))
        if self.user_config:
            filenames.append(config_file('user'))
        if self.filename is not None:
            filenames.append(self.filename)
        if not filenames:
            filenames.append(config_file('local'))
        if len(filenames) > 1:
            raise DistutilsOptionError(
                "Must specify only one configuration file option",
                filenames
            )
        self.filename, = filenames
class setopt(option_base):
    """Save command-line options to a file"""

    description = "set an option in setup.cfg or another config file"

    user_options = [
        ('command=', 'c', 'command to set an option for'),
        ('option=', 'o', 'option to set'),
        ('set-value=', 's', 'value of the option'),
        ('remove', 'r', 'remove (unset) the value'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        self.command = None
        self.option = None
        self.set_value = None
        self.remove = None

    def finalize_options(self):
        option_base.finalize_options(self)
        if self.command is None or self.option is None:
            raise DistutilsOptionError("Must specify --command *and* --option")
        if self.set_value is None and not self.remove:
            raise DistutilsOptionError("Must specify --set-value or --remove")

    def run(self):
        # Option names use '_' in config files but '-' on the command line;
        # a value of None (from --remove) deletes the setting.
        edit_config(
            self.filename, {
                self.command: {self.option.replace('-', '_'): self.set_value}
            },
            self.dry_run
        )
| gpl-3.0 |
miyakz1192/neutron | neutron/plugins/ml2/drivers/cisco/apic/apic_model.py | 39 | 7320 | # Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.db import models_v2
from neutron.plugins.ml2 import models as models_ml2
class RouterContract(model_base.BASEV2, models_v2.HasTenant):
    """Contracts created on the APIC.

    tenant_id represents the owner (APIC side) of the contract.
    router_id is the UUID of the router (Neutron side) this contract is
    referring to.
    """

    __tablename__ = 'cisco_ml2_apic_contracts'

    # One contract row per router; the row is cascade-deleted together
    # with its router.
    router_id = sa.Column(sa.String(64), sa.ForeignKey('routers.id',
                                                       ondelete='CASCADE'),
                          primary_key=True)
class HostLink(model_base.BASEV2):
    """Connectivity of host links."""
    __tablename__ = 'cisco_ml2_apic_host_links'
    # A link is identified by (host, interface name); ifmac is optional.
    host = sa.Column(sa.String(255), nullable=False, primary_key=True)
    ifname = sa.Column(sa.String(64), nullable=False, primary_key=True)
    ifmac = sa.Column(sa.String(32), nullable=True)
    # Switch-side endpoint of the link: switch id, module and port.
    swid = sa.Column(sa.String(32), nullable=False)
    module = sa.Column(sa.String(32), nullable=False)
    port = sa.Column(sa.String(32), nullable=False)
class ApicName(model_base.BASEV2):
    """Mapping of names created on the APIC."""
    __tablename__ = 'cisco_ml2_apic_names'
    # Maps a Neutron object (id + type) to the name used on the APIC side.
    neutron_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
    neutron_type = sa.Column(sa.String(32), nullable=False, primary_key=True)
    apic_name = sa.Column(sa.String(255), nullable=False)
class ApicDbModel(object):
    """DB Model to manage all APIC DB interactions."""
    def __init__(self):
        # One session, created up front and reused by every method below.
        self.session = db_api.get_session()
    def get_contract_for_router(self, router_id):
        """Returns the specified router's contract."""
        return self.session.query(RouterContract).filter_by(
            router_id=router_id).first()
    def write_contract_for_router(self, tenant_id, router_id):
        """Stores a new contract for the given tenant."""
        contract = RouterContract(tenant_id=tenant_id,
                                  router_id=router_id)
        with self.session.begin(subtransactions=True):
            self.session.add(contract)
        return contract
    def update_contract_for_router(self, tenant_id, router_id):
        """Point the router's contract at tenant_id, creating it if absent."""
        with self.session.begin(subtransactions=True):
            # Row-level lock so concurrent updates serialize on the contract.
            contract = self.session.query(RouterContract).filter_by(
                router_id=router_id).with_lockmode('update').first()
            if contract:
                contract.tenant_id = tenant_id
                self.session.merge(contract)
            else:
                self.write_contract_for_router(tenant_id, router_id)
    def delete_contract_for_router(self, router_id):
        """Delete the router's contract; silently succeeds if none exists."""
        with self.session.begin(subtransactions=True):
            try:
                self.session.query(RouterContract).filter_by(
                    router_id=router_id).delete()
            except orm.exc.NoResultFound:
                return
    def add_hostlink(self, host, ifname, ifmac, swid, module, port):
        """Insert or update the link record for (host, ifname)."""
        link = HostLink(host=host, ifname=ifname, ifmac=ifmac,
                        swid=swid, module=module, port=port)
        with self.session.begin(subtransactions=True):
            # merge() makes this an upsert keyed on the primary key.
            self.session.merge(link)
    def get_hostlinks(self):
        """Return all known host links."""
        return self.session.query(HostLink).all()
    def get_hostlink(self, host, ifname):
        """Return the single link for (host, ifname), or None."""
        return self.session.query(HostLink).filter_by(
            host=host, ifname=ifname).first()
    def get_hostlinks_for_host(self, host):
        """Return every link originating from the given host."""
        return self.session.query(HostLink).filter_by(
            host=host).all()
    def get_hostlinks_for_host_switchport(self, host, swid, module, port):
        """Return links between a host and a specific switch port."""
        return self.session.query(HostLink).filter_by(
            host=host, swid=swid, module=module, port=port).all()
    def get_hostlinks_for_switchport(self, swid, module, port):
        """Return every link terminating on the given switch port."""
        return self.session.query(HostLink).filter_by(
            swid=swid, module=module, port=port).all()
    def delete_hostlink(self, host, ifname):
        """Delete the link for (host, ifname); no-op if it does not exist."""
        with self.session.begin(subtransactions=True):
            try:
                self.session.query(HostLink).filter_by(host=host,
                                                       ifname=ifname).delete()
            except orm.exc.NoResultFound:
                return
    def get_switches(self):
        """Return the distinct switch ids referenced by host links."""
        return self.session.query(HostLink.swid).distinct()
    def get_modules_for_switch(self, swid):
        """Return the distinct modules in use on the given switch."""
        return self.session.query(
            HostLink.module).filter_by(swid=swid).distinct()
    def get_ports_for_switch_module(self, swid, module):
        """Return the distinct ports in use on a switch module."""
        return self.session.query(
            HostLink.port).filter_by(swid=swid, module=module).distinct()
    def get_switch_and_port_for_host(self, host):
        """Return distinct (swid, module, port) tuples for a host's links."""
        return self.session.query(
            HostLink.swid, HostLink.module, HostLink.port).filter_by(
                host=host).distinct()
    def get_tenant_network_vlan_for_host(self, host):
        """Return distinct (tenant, network, vlan) tuples bound to a host."""
        pb = models_ml2.PortBinding
        po = models_v2.Port
        ns = models_ml2.NetworkSegment
        # Join port -> binding (to filter by host) -> segment (for the VLAN).
        return self.session.query(
            po.tenant_id, ns.network_id, ns.segmentation_id).filter(
                po.id == pb.port_id).filter(pb.host == host).filter(
                    po.network_id == ns.network_id).distinct()
    def add_apic_name(self, neutron_id, neutron_type, apic_name):
        """Record the APIC-side name for a Neutron object."""
        name = ApicName(neutron_id=neutron_id,
                        neutron_type=neutron_type,
                        apic_name=apic_name)
        with self.session.begin(subtransactions=True):
            self.session.add(name)
    def update_apic_name(self, neutron_id, neutron_type, apic_name):
        """Update the stored APIC name, creating the mapping if missing."""
        with self.session.begin(subtransactions=True):
            # Lock the row so concurrent renames do not interleave.
            name = self.session.query(ApicName).filter_by(
                neutron_id=neutron_id,
                neutron_type=neutron_type).with_lockmode('update').first()
            if name:
                name.apic_name = apic_name
                self.session.merge(name)
            else:
                self.add_apic_name(neutron_id, neutron_type, apic_name)
    def get_apic_names(self):
        """Return every stored Neutron-to-APIC name mapping."""
        return self.session.query(ApicName).all()
    def get_apic_name(self, neutron_id, neutron_type):
        """Return the APIC name row for a Neutron object, or None."""
        return self.session.query(ApicName.apic_name).filter_by(
            neutron_id=neutron_id, neutron_type=neutron_type).first()
    def delete_apic_name(self, neutron_id):
        """Delete all name mappings for neutron_id; no-op if none exist."""
        with self.session.begin(subtransactions=True):
            try:
                self.session.query(ApicName).filter_by(
                    neutron_id=neutron_id).delete()
            except orm.exc.NoResultFound:
                return
| apache-2.0 |
Plexxi/st2 | st2client/st2client/shell.py | 3 | 17406 | #!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line interface to StackStorm.
"""
from __future__ import print_function
from __future__ import absolute_import
# Ignore CryptographyDeprecationWarning warnings which appear on older versions of Python 2.7
import warnings
from cryptography.utils import CryptographyDeprecationWarning
warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)
import os
import sys
import argcomplete
import argparse
import logging
import locale
import six
from six.moves.configparser import ConfigParser
from st2client import __version__
from st2client import models
from st2client.base import BaseCLIApp
from st2client.commands import auth
from st2client.commands import action
from st2client.commands import action_alias
from st2client.commands import keyvalue
from st2client.commands import inquiry
from st2client.commands import pack
from st2client.commands import policy
from st2client.commands import resource
from st2client.commands import sensor
from st2client.commands import trace
from st2client.commands import trigger
from st2client.commands import triggerinstance
from st2client.commands import timer
from st2client.commands import webhook
from st2client.commands import rule
from st2client.commands import rule_enforcement
from st2client.commands import rbac
from st2client.commands import workflow
from st2client.commands import service_registry
from st2client.config import set_config
from st2client.exceptions.operations import OperationFailureException
from st2client.utils.logging import LogLevelFilter, set_log_level_for_all_loggers
from st2client.utils.misc import reencode_list_with_surrogate_escape_sequences
from st2client.commands.auth import TokenCreateCommand
from st2client.commands.auth import LoginCommand
__all__ = ["Shell"]
LOGGER = logging.getLogger(__name__)
CLI_DESCRIPTION = (
"CLI for StackStorm event-driven automation platform. https://stackstorm.com"
)
USAGE_STRING = """
Usage: %(prog)s [options] <command> <sub command> [options]
For example:
%(prog)s action list --pack=st2
%(prog)s run core.local cmd=date
%(prog)s --debug run core.local cmd=date
""".strip()
NON_UTF8_LOCALE = (
"""
Locale %s with encoding %s which is not UTF-8 is used. This means that some functionality which
relies on outputting unicode characters won't work.
You are encouraged to use UTF-8 locale by setting LC_ALL environment variable to en_US.UTF-8 or
similar.
""".strip()
.replace("\n", " ")
.replace(" ", " ")
)
PACKAGE_METADATA_FILE_PATH = "/opt/stackstorm/st2/package.meta"
"""
Here we sanitize the provided args and ensure they contain valid unicode values.
By default, sys.argv will contain a unicode string where the actual item values which contain
unicode sequences are escaped using unicode surrogates.
For example, if "examples.test_rule_utf8_náme" value is specified as a CLI argument, sys.argv
and as such also url, would contain "examples.test_rule_utf8_n%ED%B3%83%ED%B2%A1me" which is not
what we want.
Complete sys.argv example:
1. Default - ['shell.py', '--debug', 'rule', 'get', 'examples.test_rule_utf8_n\udcc3\udca1me']
2. What we want - ['shell.py', '--debug', 'rule', 'get', 'examples.test_rule_utf8_náme']
This won't work correctly when sending requests to the API. As such, we correctly escape the
value to the unicode string here and then let the http layer (requests) correctly url encode
this value.
Technically, we could also just try to re-encode it in the HTTPClient and I tried that first, but
it turns out more code in the client results in exceptions if it's not re-encoded as early as
possible.
"""
# Opt-out switch for the sys.argv re-encoding described above.
# NOTE(review): the variable name is spelled "RENCODE" (not "REENCODE") in the
# environment lookup — presumably intentional for compatibility; confirm
# before "fixing" it, since users may already set ST2_CLI_RENCODE_ARGV.
REENCODE_ARGV = os.environ.get("ST2_CLI_RENCODE_ARGV", "true").lower() in [
    "true",
    "1",
    "yes",
]
if REENCODE_ARGV:
    try:
        sys.argv = reencode_list_with_surrogate_escape_sequences(sys.argv)
    except Exception as e:
        # Best-effort: a failure here must not prevent the CLI from starting.
        print("Failed to re-encode sys.argv: %s" % (str(e)))
def get_stackstorm_version():
    """
    Return the StackStorm version string.

    For dev releases the git commit revision is appended when a package
    metadata file containing it exists and can be parsed; any failure to
    read it falls back to the plain version string.

    :rtype: ``str``
    """
    version = __version__
    # Non-dev releases never carry a git revision suffix.
    if "dev" not in version:
        return version
    if not os.path.isfile(PACKAGE_METADATA_FILE_PATH):
        return version
    parser = ConfigParser()
    try:
        parser.read(PACKAGE_METADATA_FILE_PATH)
        git_sha = parser.get("server", "git_sha")
    except Exception:
        # Unreadable or incomplete metadata — fall back to the bare version.
        return version
    return "%s (%s)" % (version, git_sha)
class Shell(BaseCLIApp):
    """Top-level CLI application: builds the argparse tree of st2 commands
    and dispatches a parsed command line to the selected command branch.
    """
    LOG = LOGGER
    # Commands which must work without an auth token (they obtain one).
    SKIP_AUTH_CLASSES = [
        TokenCreateCommand.__name__,
        LoginCommand.__name__,
    ]
    def __init__(self):
        """Construct the argument parser and register all command branches."""
        # Set up of endpoints is delayed until program is run.
        self.client = None
        # Set up the main parser.
        self.parser = argparse.ArgumentParser(description=CLI_DESCRIPTION)
        # Set up general program options.
        self.parser.add_argument(
            "--version",
            action="version",
            version="%(prog)s {version}, on Python {python_major}.{python_minor}.{python_patch}".format(
                version=get_stackstorm_version(),
                python_major=sys.version_info.major,
                python_minor=sys.version_info.minor,
                python_patch=sys.version_info.micro,
            ),
        )
        self.parser.add_argument(
            "--url",
            action="store",
            dest="base_url",
            default=None,
            help="Base URL for the API servers. Assumes all servers use the "
            "same base URL and default ports are used. Get ST2_BASE_URL "
            "from the environment variables by default.",
        )
        self.parser.add_argument(
            "--auth-url",
            action="store",
            dest="auth_url",
            default=None,
            help="URL for the authentication service. Get ST2_AUTH_URL "
            "from the environment variables by default.",
        )
        self.parser.add_argument(
            "--api-url",
            action="store",
            dest="api_url",
            default=None,
            help="URL for the API server. Get ST2_API_URL "
            "from the environment variables by default.",
        )
        self.parser.add_argument(
            "--stream-url",
            action="store",
            dest="stream_url",
            default=None,
            help="URL for the stream endpoint. Get ST2_STREAM_URL"
            "from the environment variables by default.",
        )
        self.parser.add_argument(
            "--api-version",
            action="store",
            dest="api_version",
            default=None,
            help="API version to use. Get ST2_API_VERSION "
            "from the environment variables by default.",
        )
        self.parser.add_argument(
            "--cacert",
            action="store",
            dest="cacert",
            default=None,
            help="Path to the CA cert bundle for the SSL endpoints. "
            "Get ST2_CACERT from the environment variables by default. "
            "If this is not provided, then SSL cert will not be verified.",
        )
        self.parser.add_argument(
            "--config-file",
            action="store",
            dest="config_file",
            default=None,
            help="Path to the CLI config file",
        )
        self.parser.add_argument(
            "--print-config",
            action="store_true",
            dest="print_config",
            default=False,
            help="Parse the config file and print the values",
        )
        self.parser.add_argument(
            "--skip-config",
            action="store_true",
            dest="skip_config",
            default=False,
            help="Don't parse and use the CLI config file",
        )
        self.parser.add_argument(
            "--debug",
            action="store_true",
            dest="debug",
            default=False,
            help="Enable debug mode",
        )
        # Set up list of commands and subcommands.
        self.subparsers = self.parser.add_subparsers(dest="parser")
        self.subparsers.required = True
        # Registry of top-level command name -> command branch object.
        self.commands = {}
        self.commands["run"] = action.ActionRunCommand(
            models.Action, self, self.subparsers, name="run", add_help=False
        )
        self.commands["action"] = action.ActionBranch(
            "An activity that happens as a response to the external event.",
            self,
            self.subparsers,
        )
        self.commands["action-alias"] = action_alias.ActionAliasBranch(
            "Action aliases.", self, self.subparsers
        )
        self.commands["auth"] = auth.TokenCreateCommand(
            models.Token, self, self.subparsers, name="auth"
        )
        self.commands["login"] = auth.LoginCommand(
            models.Token, self, self.subparsers, name="login"
        )
        self.commands["whoami"] = auth.WhoamiCommand(
            models.Token, self, self.subparsers, name="whoami"
        )
        self.commands["api-key"] = auth.ApiKeyBranch("API Keys.", self, self.subparsers)
        self.commands["execution"] = action.ActionExecutionBranch(
            "An invocation of an action.", self, self.subparsers
        )
        self.commands["inquiry"] = inquiry.InquiryBranch(
            "Inquiries provide an opportunity to ask a question "
            "and wait for a response in a workflow.",
            self,
            self.subparsers,
        )
        self.commands["key"] = keyvalue.KeyValuePairBranch(
            "Key value pair is used to store commonly used configuration "
            "for reuse in sensors, actions, and rules.",
            self,
            self.subparsers,
        )
        self.commands["pack"] = pack.PackBranch(
            "A group of related integration resources: " "actions, rules, and sensors.",
            self,
            self.subparsers,
        )
        self.commands["policy"] = policy.PolicyBranch(
            "Policy that is enforced on a resource.", self, self.subparsers
        )
        self.commands["policy-type"] = policy.PolicyTypeBranch(
            "Type of policy that can be applied to resources.", self, self.subparsers
        )
        self.commands["rule"] = rule.RuleBranch(
            'A specification to invoke an "action" on a "trigger" selectively '
            "based on some criteria.",
            self,
            self.subparsers,
        )
        self.commands["webhook"] = webhook.WebhookBranch(
            "Webhooks.", self, self.subparsers
        )
        self.commands["timer"] = timer.TimerBranch("Timers.", self, self.subparsers)
        self.commands["runner"] = resource.ResourceBranch(
            models.RunnerType,
            "Runner is a type of handler for a specific class of actions.",
            self,
            self.subparsers,
            read_only=True,
            has_disable=True,
        )
        self.commands["sensor"] = sensor.SensorBranch(
            "An adapter which allows you to integrate StackStorm with external system.",
            self,
            self.subparsers,
        )
        self.commands["trace"] = trace.TraceBranch(
            "A group of executions, rules and triggerinstances that are related.",
            self,
            self.subparsers,
        )
        self.commands["trigger"] = trigger.TriggerTypeBranch(
            "An external event that is mapped to a st2 input. It is the "
            "st2 invocation point.",
            self,
            self.subparsers,
        )
        self.commands["trigger-instance"] = triggerinstance.TriggerInstanceBranch(
            "Actual instances of triggers received by st2.", self, self.subparsers
        )
        self.commands["rule-enforcement"] = rule_enforcement.RuleEnforcementBranch(
            "Models that represent enforcement of rules.", self, self.subparsers
        )
        self.commands["workflow"] = workflow.WorkflowBranch(
            "Commands for workflow authoring related operations. "
            "Only orquesta workflows are supported.",
            self,
            self.subparsers,
        )
        # Service Registry
        self.commands["service-registry"] = service_registry.ServiceRegistryBranch(
            "Service registry group and membership related commands.",
            self,
            self.subparsers,
        )
        # RBAC
        self.commands["role"] = rbac.RoleBranch("RBAC roles.", self, self.subparsers)
        self.commands["role-assignment"] = rbac.RoleAssignmentBranch(
            "RBAC role assignments.", self, self.subparsers
        )
    def run(self, argv):
        """Parse ``argv``, set up the API client and run the selected command.

        Returns a process exit code: 0 on success, 2 on usage/operation
        failure, 3 after ``--print-config``, or an exception-defined code.
        """
        debug = False
        parser = self.parser
        if len(argv) == 0:
            # Print a more user-friendly help string if no arguments are provided
            # Note: We only set usage variable for the main parser. If we passed "usage" argument
            # to the main ArgumentParser class above, this would also set a custom usage string for
            # sub-parsers which we don't want.
            parser.usage = USAGE_STRING
            sys.stderr.write(parser.format_help())
            return 2
        # Provide autocomplete for shell
        argcomplete.autocomplete(self.parser)
        if "--print-config" in argv:
            # Hack because --print-config requires no command to be specified
            argv = argv + ["action", "list"]
        # Parse command line arguments.
        args = self.parser.parse_args(args=argv)
        print_config = args.print_config
        if print_config:
            self._print_config(args=args)
            return 3
        # Parse config and store it in the config module
        config = self._parse_config_file(args=args, validate_config_permissions=False)
        set_config(config=config)
        self._check_locale_and_print_warning()
        # Setup client and run the command
        try:
            debug = getattr(args, "debug", False)
            if debug:
                set_log_level_for_all_loggers(level=logging.DEBUG)
            # Set up client.
            self.client = self.get_client(args=args, debug=debug)
            # TODO: This is not so nice work-around for Python 3 because of a breaking change in
            # Python 3 - https://bugs.python.org/issue16308
            try:
                func = getattr(args, "func")
            except AttributeError:
                parser.print_help()
                sys.exit(2)
            # Execute command.
            func(args)
            return 0
        except OperationFailureException:
            if debug:
                self._print_debug_info(args=args)
            return 2
        except Exception as e:
            # We allow exception to define custom exit codes
            exit_code = getattr(e, "exit_code", 1)
            print("ERROR: %s\n" % e)
            if debug:
                self._print_debug_info(args=args)
            return exit_code
    def _print_config(self, args):
        """Print the parsed CLI config file in INI-like form to stdout."""
        config = self._parse_config_file(args=args)
        for section, options in six.iteritems(config):
            print("[%s]" % (section))
            for name, value in six.iteritems(options):
                print("%s = %s" % (name, value))
    def _check_locale_and_print_warning(self):
        """
        Method which checks that unicode locale is used and prints a warning if it's not.
        """
        try:
            default_locale = locale.getdefaultlocale()[0]
            preferred_encoding = locale.getpreferredencoding()
        except ValueError:
            # Ignore unknown locale errors for now
            default_locale = "unknown"
            preferred_encoding = "unknown"
        if preferred_encoding and preferred_encoding.lower() != "utf-8":
            msg = NON_UTF8_LOCALE % (default_locale or "unknown", preferred_encoding)
            # NOTE(review): LOGGER.warn is a deprecated alias of warning().
            LOGGER.warn(msg)
def setup_logging(argv):
    """Attach a WARNING-level stderr handler to the CLI logger.

    Unless ``--debug`` appears in *argv*, records below ERROR are filtered
    out so regular runs stay quiet.
    """
    stream_handler = logging.StreamHandler(sys.stderr)
    stream_handler.setLevel(logging.WARNING)
    stream_handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s - %(message)s")
    )
    if "--debug" not in argv:
        # Non-debug runs only surface errors.
        stream_handler.addFilter(LogLevelFilter(log_levels=[logging.ERROR]))
    LOGGER.setLevel(logging.WARNING)
    LOGGER.addHandler(stream_handler)
def main(argv=None):
    """CLI entry point: configure logging and run the shell.

    :param argv: Command line arguments (without the program name). Defaults
                 to the current ``sys.argv[1:]``. The previous default of
                 ``argv=sys.argv[1:]`` was evaluated once at import time, so
                 later changes to ``sys.argv`` were silently ignored; using a
                 ``None`` sentinel reads it at call time instead.
    :return: Process exit code produced by :meth:`Shell.run`.
    """
    if argv is None:
        argv = sys.argv[1:]
    setup_logging(argv)
    return Shell().run(argv)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| apache-2.0 |
ryfeus/lambda-packs | Tensorflow/source/numpy/__init__.py | 49 | 6274 | """
NumPy
=====
Provides
1. An array object of arbitrary homogeneous items
2. Fast mathematical operations over arrays
3. Linear Algebra, Fourier Transforms, Random Number Generation
How to use the documentation
----------------------------
Documentation is available in two forms: docstrings provided
with the code, and a loose standing reference guide, available from
`the NumPy homepage <http://www.scipy.org>`_.
We recommend exploring the docstrings using
`IPython <http://ipython.scipy.org>`_, an advanced Python shell with
TAB-completion and introspection capabilities. See below for further
instructions.
The docstring examples assume that `numpy` has been imported as `np`::
>>> import numpy as np
Code snippets are indicated by three greater-than signs::
>>> x = 42
>>> x = x + 1
Use the built-in ``help`` function to view a function's docstring::
>>> help(np.sort)
... # doctest: +SKIP
For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
of the help() page. Ufuncs are implemented in C, not Python, for speed.
The native Python help() does not know how to view their help, but our
np.info() function does.
To search for documents containing a keyword, do::
>>> np.lookfor('keyword')
... # doctest: +SKIP
General-purpose documents like a glossary and help on the basic concepts
of numpy are available under the ``doc`` sub-module::
>>> from numpy import doc
>>> help(doc)
... # doctest: +SKIP
Available subpackages
---------------------
doc
Topical documentation on broadcasting, indexing, etc.
lib
Basic functions used by several sub-packages.
random
Core Random Tools
linalg
Core Linear Algebra Tools
fft
Core FFT routines
polynomial
Polynomial tools
testing
NumPy testing tools
f2py
Fortran to Python Interface Generator.
distutils
Enhancements to distutils with support for
Fortran compilers support and more.
Utilities
---------
test
Run numpy unittests
show_config
Show numpy build configuration
dual
Overwrite certain functions with high-performance Scipy tools
matlib
Make everything matrices.
__version__
NumPy version string
Viewing documentation using IPython
-----------------------------------
Start IPython with the NumPy profile (``ipython -p numpy``), which will
import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
paste examples into the shell. To see which functions are available in
`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
down the list. To view the docstring for a function, use
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
the source code).
Copies vs. in-place operation
-----------------------------
Most of the functions in `numpy` return a copy of the array argument
(e.g., `np.sort`). In-place versions of these functions are often
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
from __future__ import division, absolute_import, print_function
import sys
import warnings
from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning
from ._globals import _NoValue
# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
try:
    __NUMPY_SETUP__
except NameError:
    # Normal import: the setup flag is absent, so default it to False.
    __NUMPY_SETUP__ = False
if __NUMPY_SETUP__:
    sys.stderr.write('Running from numpy source directory.\n')
else:
    # __config__ only exists in a built/installed numpy; its absence means we
    # are being imported from an unbuilt source checkout.
    try:
        from numpy.__config__ import show as show_config
    except ImportError:
        msg = """Error importing numpy: you should not try to import numpy from
        its source directory; please exit the numpy source tree, and relaunch
        your python interpreter from there."""
        raise ImportError(msg)
    from .version import git_revision as __git_revision__
    from .version import version as __version__
    from ._import_tools import PackageLoader
    def pkgload(*packages, **options):
        # Thin wrapper; real docstring is copied from PackageLoader below.
        loader = PackageLoader(infunc=True)
        return loader(*packages, **options)
    from . import add_newdocs
    __all__ = ['add_newdocs',
               'ModuleDeprecationWarning',
               'VisibleDeprecationWarning']
    pkgload.__doc__ = PackageLoader.__call__.__doc__
    # We don't actually use this ourselves anymore, but I'm not 100% sure that
    # no-one else in the world is using it (though I hope not)
    from .testing import Tester
    # NOTE: the submodule import above also binds the name "testing" in this
    # package namespace, which the next two lines rely on.
    test = testing.nosetester._numpy_tester().test
    bench = testing.nosetester._numpy_tester().bench
    # Allow distributors to run custom init code
    from . import _distributor_init
    from . import core
    from .core import *
    from . import compat
    from . import lib
    from .lib import *
    from . import linalg
    from . import fft
    from . import polynomial
    from . import random
    from . import ctypeslib
    from . import ma
    from . import matrixlib as _mat
    from .matrixlib import *
    from .compat import long
    # Make these accessible from numpy name-space
    # but not imported in from numpy import *
    if sys.version_info[0] >= 3:
        from builtins import bool, int, float, complex, object, str
        unicode = str
    else:
        from __builtin__ import bool, int, float, complex, object, unicode, str
    from .core import round, abs, max, min
    __all__.extend(['__version__', 'pkgload', 'PackageLoader',
                    'show_config'])
    __all__.extend(core.__all__)
    __all__.extend(_mat.__all__)
    __all__.extend(lib.__all__)
    __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
    # Filter annoying Cython warnings that serve no good purpose.
    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
    warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
    # oldnumeric and numarray were removed in 1.9. In case some packages import
    # but do not use them, we define them here for backward compatibility.
    oldnumeric = 'removed'
    numarray = 'removed'
| mit |
kevinmel2000/sl4a | python-build/python-libs/gdata/samples/finance/test_finance.py | 128 | 10785 | #!/usr/bin/python
#
# Copyright (C) 2009 Tan Swee Heng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'thesweeheng@gmail.com'
from gdata.finance.service import \
FinanceService, PortfolioQuery, PositionQuery
from gdata.finance import \
PortfolioEntry, PortfolioData, TransactionEntry, TransactionData, \
Price, Commission, Money
import datetime
import sys
def PrintReturns(pfx, d):
    """Print the 1w/4w/3m/YTD and 1y/3y/5y/overall returns of *d*.

    *pfx* labels the first line; the second line is indented to match.
    """
    print pfx, '%1.5f(1w) %1.5f(4w) %1.5f(3m) %1.5f(YTD)' % tuple(
        float(i) for i in (d.return1w, d.return4w, d.return3m, d.returnYTD))
    # Align the continuation line under the label.
    pfx = ' ' * len(pfx)
    print pfx, '%1.5f(1y) %1.5f(3y) %1.5f(5y) %1.5f(overall)' % tuple(
        float(i) for i in (d.return1y, d.return3y, d.return5y, d.return_overall))
# Short alias used throughout this module.
PrRtn = PrintReturns
def PrintTransactions(transactions):
    """Print a fixed-width table of transaction entries, with any notes."""
    print "  Transactions:"
    fmt = '  %4s  %-23s %-10s %6s  %-11s %-11s'
    print fmt % ('ID','Date','Type','Shares','Price','Commission')
    for txn in transactions:
        d = txn.transaction_data
        # '----' marks transactions without a recorded date.
        print fmt % (txn.transaction_id, d.date or '----', d.type,
            d.shares, d.price.money[0], d.commission.money[0])
        if d.notes:
            print "    Notes:", d.notes
    print
def PrintPosition(pos, with_returns=False):
    """Print a single position entry; include gain/returns if requested.

    Transactions inlined in the feed entry (if any) are printed as well.
    """
    print '  Position     :', pos.position_title
    print '    Ticker ID  :', pos.ticker_id
    print '    Symbol     :', pos.symbol
    print '  Last updated :', pos.updated.text
    d = pos.position_data
    print '  Shares       :', d.shares
    if with_returns:
        # Returns are only populated when the query asked for them.
        print '  Gain %%      :', d.gain_percentage
        PrRtn('  Returns      :', d)
        print '  Cost basis   :', d.cost_basis
        print '  Days gain    :', d.days_gain
        print '  Gain         :', d.gain
        print '  Market value :', d.market_value
    print
    if pos.transactions:
        print "  <inlined transactions>\n"
        PrintTransactions(pos.transactions)
        print "  </inlined transactions>\n"
def PrintPositions(positions, with_returns=False):
    """Print each position entry in *positions*."""
    for pos in positions:
        PrintPosition(pos, with_returns)
def PrintPortfolio(pfl, with_returns=False):
    """Print a single portfolio entry; include gain/returns if requested.

    Positions inlined in the feed entry (if any) are printed as well.
    """
    print 'Portfolio Title:', pfl.portfolio_title
    print 'Portfolio ID   :', pfl.portfolio_id
    print '  Last updated :', pfl.updated.text
    d = pfl.portfolio_data
    print '  Currency     :', d.currency_code
    if with_returns:
        # Returns are only populated when the query asked for them.
        print '  Gain %%      :', d.gain_percentage
        PrRtn('  Returns      :', d)
        print '  Cost basis   :', d.cost_basis
        print '  Days gain    :', d.days_gain
        print '  Gain         :', d.gain
        print '  Market value :', d.market_value
    print
    if pfl.positions:
        print "  <inlined positions>\n"
        PrintPositions(pfl.positions, with_returns)
        print "  </inlined positions>\n"
def PrintPortfolios(portfolios, with_returns=False):
    """Print each portfolio entry in *portfolios*."""
    for pfl in portfolios:
        PrintPortfolio(pfl, with_returns)
def ShowCallDetails(meth):
def wrap(*args, **kwargs):
print '@', meth.__name__, args[1:], kwargs
meth(*args, **kwargs)
return wrap
class FinanceTester(object):
def __init__(self, email, password):
self.client = FinanceService(source='gdata-finance-test')
self.client.ClientLogin(email, password)
def GetPortfolios(self, with_returns=False, inline_positions=False):
query = PortfolioQuery()
query.returns = with_returns
query.positions = inline_positions
return self.client.GetPortfolioFeed(query=query).entry
def GetPositions(self, portfolio, with_returns=False, inline_transactions=False):
query = PositionQuery()
query.returns = with_returns
query.transactions = inline_transactions
return self.client.GetPositionFeed(portfolio, query=query).entry
def GetTransactions(self, position=None, portfolio=None, ticker=None):
if position:
feed = self.client.GetTransactionFeed(position)
elif portfolio and ticker:
feed = self.client.GetTransactionFeed(
portfolio_id=portfolio.portfolio_id, ticker_id=ticker)
return feed.entry
@ShowCallDetails
def TestShowDetails(self, with_returns=False, inline_positions=False,
inline_transactions=False):
portfolios = self.GetPortfolios(with_returns, inline_positions)
for pfl in portfolios:
PrintPortfolio(pfl, with_returns)
positions = self.GetPositions(pfl, with_returns, inline_transactions)
for pos in positions:
PrintPosition(pos, with_returns)
PrintTransactions(self.GetTransactions(pos))
def DeletePortfoliosByName(self, portfolio_titles):
for pfl in self.GetPortfolios():
if pfl.portfolio_title in portfolio_titles:
self.client.DeletePortfolio(pfl)
def AddPortfolio(self, portfolio_title, currency_code):
pfl = PortfolioEntry(portfolio_data=PortfolioData(
currency_code=currency_code))
pfl.portfolio_title = portfolio_title
return self.client.AddPortfolio(pfl)
def UpdatePortfolio(self, portfolio,
portfolio_title=None, currency_code=None):
if portfolio_title:
portfolio.portfolio_title = portfolio_title
if currency_code:
portfolio.portfolio_data.currency_code = currency_code
return self.client.UpdatePortfolio(portfolio)
def DeletePortfolio(self, portfolio):
self.client.DeletePortfolio(portfolio)
@ShowCallDetails
def TestManagePortfolios(self):
pfl_one = 'Portfolio Test: Emerging Markets 12345'
pfl_two = 'Portfolio Test: Renewable Energy 31415'
print '---- Deleting portfolios ----'
self.DeletePortfoliosByName([pfl_one, pfl_two])
PrintPortfolios(self.GetPortfolios())
print '---- Adding new portfolio ----'
pfl = self.AddPortfolio(pfl_one, 'SGD')
PrintPortfolios(self.GetPortfolios())
print '---- Changing portfolio title and currency code ----'
pfl = self.UpdatePortfolio(pfl, pfl_two, 'USD')
PrintPortfolios(self.GetPortfolios())
print '---- Deleting portfolio ----'
self.DeletePortfolio(pfl)
PrintPortfolios(self.GetPortfolios())
def Transact(self, type, portfolio, ticker, date=None, shares=None,
notes=None, price=None, commission=None, currency_code=None):
if price is not None:
price = Price(money=[Money(amount=str(price),
currency_code=currency_code or
portfolio.portfolio_data.currency_code)])
if commission is not None:
commission = Commission(money=[Money(amount=str(comission),
currency_code=currency_code or
portfolio.portfolio_data.currency_code)])
if date is not None and isinstance(date, datetime.datetime):
date = date.isoformat()
if shares is not None:
shares = str(shares)
txn = TransactionEntry(transaction_data=TransactionData(type=type,
date=date, shares=shares, notes=notes, price=price,
commission=commission))
return self.client.AddTransaction(txn,
portfolio_id=portfolio.portfolio_id, ticker_id=ticker)
def Buy(self, portfolio, ticker, **kwargs):
return self.Transact('Buy', portfolio, ticker, **kwargs)
def Sell(self, portfolio, ticker, **kwargs):
return self.Transact('Sell', portfolio, ticker, **kwargs)
def GetPosition(self, portfolio, ticker, with_returns=False, inline_transactions=False):
query = PositionQuery()
query.returns = with_returns
query.transactions = inline_transactions
return self.client.GetPosition(
portfolio_id=portfolio.portfolio_id, ticker_id=ticker, query=query)
    def DeletePosition(self, position):
        # Thin wrapper: remove the given position entry via the client.
        self.client.DeletePosition(position_entry=position)
    def UpdateTransaction(self, transaction):
        # Thin wrapper: push a locally modified transaction entry back to
        # the service.
        self.client.UpdateTransaction(transaction)
    def DeleteTransaction(self, transaction):
        # Thin wrapper: delete the given transaction entry via the client.
        self.client.DeleteTransaction(transaction)
@ShowCallDetails
def TestManageTransactions(self):
pfl_title = 'Transaction Test: Technology 27182'
self.DeletePortfoliosByName([pfl_title])
print '---- Adding new portfolio ----'
pfl = self.AddPortfolio(pfl_title, 'USD')
PrintPortfolios(self.GetPortfolios())
print '---- Adding buy transactions ----'
tkr1 = 'NASDAQ:GOOG'
date = datetime.datetime(2009,04,01)
days = datetime.timedelta(1)
txn1 = self.Buy(pfl, tkr1, shares=500, price=321.00, date=date)
txn2 = self.Buy(pfl, tkr1, shares=150, price=312.00, date=date+15*days)
pos = self.GetPosition(portfolio=pfl, ticker=tkr1, with_returns=True)
PrintPosition(pos, with_returns=True)
PrintTransactions(self.GetTransactions(pos))
print '---- Adding sell transactions ----'
txn3 = self.Sell(pfl, tkr1, shares=400, price=322.00, date=date+30*days)
txn4 = self.Sell(pfl, tkr1, shares=200, price=330.00, date=date+45*days)
pos = self.GetPosition(portfolio=pfl, ticker=tkr1, with_returns=True)
PrintPosition(pos, with_returns=True)
PrintTransactions(self.GetTransactions(pos))
print "---- Modifying first and deleting third ----"
txn1.transaction_data.shares = '400.0'
self.UpdateTransaction(txn1)
self.DeleteTransaction(txn3)
pos = self.GetPosition(portfolio=pfl, ticker=tkr1, with_returns=True)
PrintPosition(pos, with_returns=True)
PrintTransactions(self.GetTransactions(pos))
print "---- Deleting position ----"
print "Number of positions (before):", len(self.GetPositions(pfl))
self.DeletePosition(pos)
print "Number of positions (after) :", len(self.GetPositions(pfl))
print '---- Deleting portfolio ----'
self.DeletePortfolio(pfl)
PrintPortfolios(self.GetPortfolios())
if __name__ == '__main__':
    # Command line: test_finance <email> <password> [test indices...]
    try:
        email = sys.argv[1]
        password = sys.argv[2]
        cases = sys.argv[3:]
    except IndexError:
        print "Usage: test_finance account@google.com password [0 1 2...]"
        sys.exit(1)
    tester = FinanceTester(email, password)
    # Test case table; indices given on the command line select entries.
    tests = [
        tester.TestShowDetails,
        lambda: tester.TestShowDetails(with_returns=True),
        tester.TestManagePortfolios,
        tester.TestManageTransactions,
        lambda: tester.TestShowDetails(with_returns=True, inline_positions=True),
        lambda: tester.TestShowDetails(with_returns=True, inline_positions=True,
                                       inline_transactions=True),]
    # No indices given: run every test case in order.
    if not cases:
        cases = range(len(tests))
    for i in cases:
        print "===== TEST CASE", i, "="*50
        tests[int(i)]()
| apache-2.0 |
euroscipy/www.euroscipy.org | papercall_grabbing.py | 1 | 4306 | """
Functions to grab info from papercall.io
"""
import os
import time
import requests
# Papercall.io API token; must be filled in before running any grabber.
token = 'your_papercall_token' # <-- fill this in
# Absolute directory of this script; used to locate the content/ tree.
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
# Pelican page template for one talk-description page (metadata header
# followed by the talk's markdown description).
des_template = """
Title: {title}
URL: 2017/descriptions/{id}.html
save_as: 2017/descriptions/{id}.html
{description}
""".lstrip()
def get_submission_ids():
    """Return the ids of all papercall submissions across every state."""
    url_tpl = 'https://www.papercall.io/api/v1/submissions?_token=%s&per_page=999&state=%s'
    ids = []
    # One API query per submission state; each returns a JSON list.
    for state in ('submitted', 'accepted', 'rejected', 'waitlist'):
        submissions = requests.get(url_tpl % (token, state)).json()
        ids += [entry['id'] for entry in submissions]
    return ids
def get_reviewer_list():
    """ Print out the names of all people who did reviews.
    """
    url_tpl = 'https://www.papercall.io/api/v1/submissions/%s/ratings?_token=%s'
    # De-duplicate reviewer names across all submissions' ratings.
    names = set()
    for submission_id in get_submission_ids():
        for rating in requests.get(url_tpl % (submission_id, token)).json():
            names.add(rating['user']['name'])
    # Print alphabetically, one name per line.
    for name in sorted(names):
        print(name)
def get_talk_descriptions():
    """ Get talk descriptions and store each in a markdown file.
    """
    # Collect submission ids
    all_ids = get_submission_ids()
    # Collect descriptions; index maps id -> title for the index page.
    index = {}
    for id in all_ids:
        url = 'https://www.papercall.io/api/v1/submissions/%s?_token=%s'
        submission = requests.get(url % (id, token)).json()
        id = str(submission['id'])
        title = submission['talk']['title']
        page = des_template.format(description=submission['talk']['description'],
                                   title=title, id=id)
        fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', id + '.md')
        # Write bytes explicitly to keep UTF-8 regardless of locale.
        with open(fname, 'wb') as f:
            f.write(page.encode())
        index[id] = title
        # Small pause between API calls, presumably to be polite to the
        # papercall API -- TODO confirm any actual rate limit.
        time.sleep(0.1)
    # Emit an "id - title" index file, sorted by id (string order).
    fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', 'index.md')
    with open(fname, 'wb') as f:
        for id in sorted(index):
            line = id + ' - ' + index[id] + '\n'
            f.write(line.encode())
def make_links_in_program():
    """ Make the talk titles in the program link to description pages,
    as far as we can, anyway. The rest should be done by hand by making use of
    the descriptions.index.md.
    Beware, this is ugly, and makes all kinds of assumptions about how the program
    table is formatted, and it needs manual corrections, and it does not work after
    it has applied the changes. We should probably just throw it away.
    """
    # Build reverse index: lowercase title -> relative description URL.
    rindex = {}
    fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', 'index.md')
    with open(fname, 'rb') as f:
        for line in f.read().decode().splitlines():
            if line.strip():
                id, _, title = line.partition('-')
                rindex[title.strip().lower()] = 'descriptions/' + id.strip() + '.html'
    # Fallback target for titles that are not found in the index.
    default_link = 'descriptions/oops.html'
    # Add links by rewriting matching <td> lines of the program table.
    fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'program.md')
    text = open(fname, 'rb').read().decode()
    lines = text.splitlines()
    for i in range(len(lines)-1):
        line = lines[i]
        # Case 1: title on its own <td> line, next line NOT further indented
        # (assumes a following speaker/details line at lower indent).
        if line.lstrip().startswith("<td>") and not line.rstrip().endswith(">"):
            if '    ' not in lines[i+1]:
                title = line.lstrip()[4:]
                id = rindex.get(title.strip().lower(), default_link)
                lines[i] = "    <td><a href='%s'>%s</a>" % (id, title)
        # Case 2: single-line cell "<td>Title<br>rest</td>".
        if line.lstrip().startswith("<td>") and line.rstrip().endswith("</td>"):
            if '<br>' in line and '    ' not in line:
                title, _, rest = line.lstrip()[4:].partition('<br>')
                id = rindex.get(title.strip().lower(), default_link)
                lines[i] = "    <td><a href='%s'>%s</a><br>%s" % (id, title, rest)
    with open(fname, 'wb') as f:
        text = '\n'.join(lines)
        f.write(text.encode())
if __name__ == '__main__':
    # Intentionally a no-op: uncomment exactly the grabber(s) to run.
    pass
    # get_reviewer_list()
    # get_talk_descriptions()
    # make_links_in_program()
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-batchai/azure/mgmt/batchai/operations/file_servers_operations.py | 1 | 22642 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class FileServersOperations(object):
    """FileServersOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Specifies the version of API used for this request. Constant value: "2018-03-01".
    """
    # NOTE: this class is generated by AutoRest -- hand edits will be lost
    # when the client is regenerated from the service specification.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2018-03-01"
        self.config = config
    def _create_initial(
            self, resource_group_name, file_server_name, parameters, custom_headers=None, raw=False, **operation_config):
        # Issues the initial PUT of the long-running create operation and
        # returns the raw first response; polling is handled by create().
        # Construct URL
        url = self.create.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'FileServerCreateParameters')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 200 carries the finished resource; 202 means the LRO is pending.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('FileServer', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create(
            self, resource_group_name, file_server_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates a file server.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param file_server_name: The name of the file server within the
         specified resource group. File server names can only contain a
         combination of alphanumeric characters along with dash (-) and
         underscore (_). The name must be from 1 through 64 characters long.
        :type file_server_name: str
        :param parameters: The parameters to provide for file server creation.
        :type parameters:
         ~azure.mgmt.batchai.models.FileServerCreateParameters
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns FileServer or
         ClientRawResponse<FileServer> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batchai.models.FileServer]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.batchai.models.FileServer]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_initial(
            resource_group_name=resource_group_name,
            file_server_name=file_server_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            deserialized = self._deserialize('FileServer', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'}
    def _delete_initial(
            self, resource_group_name, file_server_name, custom_headers=None, raw=False, **operation_config):
        # Issues the initial DELETE of the long-running delete operation;
        # polling is handled by delete().
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, file_server_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Delete a file Server.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param file_server_name: The name of the file server within the
         specified resource group. File server names can only contain a
         combination of alphanumeric characters along with dash (-) and
         underscore (_). The name must be from 1 through 64 characters long.
        :type file_server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            file_server_name=file_server_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'}
    def get(
            self, resource_group_name, file_server_name, custom_headers=None, raw=False, **operation_config):
        """Gets information about the specified Cluster.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param file_server_name: The name of the file server within the
         specified resource group. File server names can only contain a
         combination of alphanumeric characters along with dash (-) and
         underscore (_). The name must be from 1 through 64 characters long.
        :type file_server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: FileServer or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.batchai.models.FileServer or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('FileServer', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'}
    def list(
            self, file_servers_list_options=None, custom_headers=None, raw=False, **operation_config):
        """To list all the file servers available under the given subscription
        (and across all resource groups within that subscription).
        :param file_servers_list_options: Additional parameters for the
         operation
        :type file_servers_list_options:
         ~azure.mgmt.batchai.models.FileServersListOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of FileServer
        :rtype:
         ~azure.mgmt.batchai.models.FileServerPaged[~azure.mgmt.batchai.models.FileServer]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        filter = None
        if file_servers_list_options is not None:
            filter = file_servers_list_options.filter
        select = None
        if file_servers_list_options is not None:
            select = file_servers_list_options.select
        max_results = None
        if file_servers_list_options is not None:
            max_results = file_servers_list_options.max_results
        def internal_paging(next_link=None, raw=False):
            # Fetches one page: the first page builds the full URL/query,
            # subsequent pages follow the service-provided next_link.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if max_results is not None:
                    query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.FileServerPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.FileServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.BatchAI/fileServers'}
    def list_by_resource_group(
            self, resource_group_name, file_servers_list_by_resource_group_options=None, custom_headers=None, raw=False, **operation_config):
        """Gets a formatted list of file servers and their properties associated
        within the specified resource group.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param file_servers_list_by_resource_group_options: Additional
         parameters for the operation
        :type file_servers_list_by_resource_group_options:
         ~azure.mgmt.batchai.models.FileServersListByResourceGroupOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of FileServer
        :rtype:
         ~azure.mgmt.batchai.models.FileServerPaged[~azure.mgmt.batchai.models.FileServer]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        filter = None
        if file_servers_list_by_resource_group_options is not None:
            filter = file_servers_list_by_resource_group_options.filter
        select = None
        if file_servers_list_by_resource_group_options is not None:
            select = file_servers_list_by_resource_group_options.select
        max_results = None
        if file_servers_list_by_resource_group_options is not None:
            max_results = file_servers_list_by_resource_group_options.max_results
        def internal_paging(next_link=None, raw=False):
            # Fetches one page: the first page builds the full URL/query,
            # subsequent pages follow the service-provided next_link.
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if max_results is not None:
                    query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.FileServerPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.FileServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers'}
| mit |
tlakshman26/cinder-new-branch | cinder/api/contrib/qos_specs_manage.py | 18 | 19517 | # Copyright (c) 2013 eBay Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The QoS specs extension"""
from oslo_log import log as logging
from oslo_utils import strutils
import six
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import qos_specs as view_qos_specs
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _, _LI
from cinder import rpc
from cinder import utils
from cinder.volume import qos_specs
# Module-level logger for this API extension.
LOG = logging.getLogger(__name__)
# Policy-check helper scoped to the qos_specs_manage volume extension.
authorize = extensions.extension_authorizer('volume', 'qos_specs_manage')
def make_qos_specs(elem):
    """Declare the serialized attributes of one qos_spec XML element and
    attach the nested specs sub-template."""
    for attribute in ('id', 'name', 'consumer'):
        elem.set(attribute)
    elem.append(SpecsTemplate())
def make_associations(elem):
    """Declare the serialized attributes of one association XML element."""
    for attribute in ('association_type', 'name', 'id'):
        elem.set(attribute)
class SpecsTemplate(xmlutil.TemplateBuilder):
    # XML template for the free-form key/value 'specs' dict of a qos_spec.
    def construct(self):
        return xmlutil.MasterTemplate(xmlutil.make_flat_dict('specs'), 1)
class QoSSpecsTemplate(xmlutil.TemplateBuilder):
    # XML template for a list response: <qos_specs> wrapping one
    # <qos_spec> element per entry.
    def construct(self):
        root = xmlutil.TemplateElement('qos_specs')
        elem = xmlutil.SubTemplateElement(root, 'qos_spec',
                                          selector='qos_specs')
        make_qos_specs(elem)
        return xmlutil.MasterTemplate(root, 1)
class QoSSpecsKeyDeserializer(wsgi.XMLDeserializer):
    """Deserializer for delete_keys XML bodies of the form
    <keys><key1/><key2/>...</keys>."""
    def _extract_keys(self, key_node):
        # Each child element's tag name is one key to delete.
        keys = []
        for key in key_node.childNodes:
            key_name = key.tagName
            keys.append(key_name)
        return keys
    def default(self, string):
        dom = utils.safe_minidom_parse_string(string)
        key_node = self.find_first_child_named(dom, 'keys')
        # A missing <keys> root means the request body is malformed.
        if not key_node:
            LOG.info(_LI("Unable to parse XML input."))
            msg = _("Unable to parse XML request. "
                    "Please provide XML in correct format.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return {'body': {'keys': self._extract_keys(key_node)}}
class AssociationsTemplate(xmlutil.TemplateBuilder):
    # XML template for the qos_associations list response.
    def construct(self):
        root = xmlutil.TemplateElement('qos_associations')
        elem = xmlutil.SubTemplateElement(root, 'associations',
                                          selector='qos_associations')
        make_associations(elem)
        return xmlutil.MasterTemplate(root, 1)
def _check_specs(context, specs_id):
    # Verify the qos specs exists, translating the domain NotFound error
    # into an HTTP 404 for the API layer.
    try:
        qos_specs.get_qos_specs(context, specs_id)
    except exception.QoSSpecsNotFound as ex:
        raise webob.exc.HTTPNotFound(explanation=six.text_type(ex))
class QoSSpecsController(wsgi.Controller):
    """The QoS specs API controller for the OpenStack API."""
    # View builder used to shape qos-specs responses.
    _view_builder_class = view_qos_specs.ViewBuilder
    @staticmethod
    def _notify_qos_specs_error(context, method, payload):
        # Emit an error-level notification on the 'QoSSpecs' notifier;
        # used by every handler's failure paths below.
        rpc.get_notifier('QoSSpecs').error(context,
                                           method,
                                           payload)
@wsgi.serializers(xml=QoSSpecsTemplate)
def index(self, req):
"""Returns the list of qos_specs."""
context = req.environ['cinder.context']
authorize(context)
specs = qos_specs.get_all_specs(context)
return self._view_builder.summary_list(req, specs)
    @wsgi.serializers(xml=QoSSpecsTemplate)
    def create(self, req, body=None):
        """Create a new qos specs entity from the request body.

        Maps domain errors to HTTP: invalid specs -> 400, duplicate
        name -> 409, backend create failure -> 500.
        """
        context = req.environ['cinder.context']
        authorize(context)
        self.assert_valid_body(body, 'qos_specs')
        specs = body['qos_specs']
        name = specs.get('name', None)
        if name is None:
            msg = _("Please specify a name for QoS specs.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        self.validate_string_length(name, 'name', min_length=1,
                                    max_length=255, remove_whitespaces=True)
        name = name.strip()
        try:
            qos_specs.create(context, name, specs)
            # Re-read the created entity so the response reflects stored state.
            spec = qos_specs.get_qos_specs_by_name(context, name)
            notifier_info = dict(name=name, specs=specs)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.create',
                                              notifier_info)
        except exception.InvalidQoSSpecs as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
        except exception.QoSSpecsExists as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPConflict(explanation=six.text_type(err))
        except exception.QoSSpecsCreateFailed as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(err))
        return self._view_builder.detail(req, spec)
    @wsgi.serializers(xml=QoSSpecsTemplate)
    def update(self, req, id, body=None):
        """Update the key/value pairs of an existing qos specs entity.

        Maps domain errors to HTTP: not found -> 404, invalid specs -> 400,
        backend update failure -> 500.
        """
        context = req.environ['cinder.context']
        authorize(context)
        self.assert_valid_body(body, 'qos_specs')
        specs = body['qos_specs']
        try:
            qos_specs.update(context, id, specs)
            notifier_info = dict(id=id, specs=specs)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.update',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.update',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.InvalidQoSSpecs as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.update',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
        except exception.QoSSpecsUpdateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.update',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(err))
        # On success the request body is echoed back as the response.
        return body
@wsgi.serializers(xml=QoSSpecsTemplate)
def show(self, req, id):
"""Return a single qos spec item."""
context = req.environ['cinder.context']
authorize(context)
try:
spec = qos_specs.get_qos_specs(context, id)
except exception.QoSSpecsNotFound as err:
raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
return self._view_builder.detail(req, spec)
    def delete(self, req, id):
        """Deletes an existing qos specs.

        Supports a ``force`` query parameter: when true, in-use specs are
        forcibly disassociated; a failure then yields 500 instead of 400.
        """
        context = req.environ['cinder.context']
        authorize(context)
        force = req.params.get('force', None)
        # Convert string to bool type in strict manner
        force = strutils.bool_from_string(force)
        LOG.debug("Delete qos_spec: %(id)s, force: %(force)s",
                  {'id': id, 'force': force})
        try:
            qos_specs.delete(context, id, force)
            notifier_info = dict(id=id)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.delete',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.QoSSpecsInUse as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete',
                                         notifier_err)
            # With force=True an in-use error means disassociation failed
            # server-side; without force it is a client error.
            if force:
                msg = _('Failed to disassociate qos specs.')
                raise webob.exc.HTTPInternalServerError(explanation=msg)
            msg = _('Qos specs still in use.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return webob.Response(status_int=202)
@wsgi.deserializers(xml=QoSSpecsKeyDeserializer)
def delete_keys(self, req, id, body):
"""Deletes specified keys in qos specs."""
context = req.environ['cinder.context']
authorize(context)
if not (body and 'keys' in body
and isinstance(body.get('keys'), list)):
raise webob.exc.HTTPBadRequest()
keys = body['keys']
LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s",
{'id': id, 'keys': keys})
try:
qos_specs.delete_keys(context, id, keys)
notifier_info = dict(id=id)
rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.delete_keys',
notifier_info)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.delete_keys',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
except exception.QoSSpecsKeyNotFound as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.delete_keys',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
return webob.Response(status_int=202)
@wsgi.serializers(xml=AssociationsTemplate)
def associations(self, req, id):
"""List all associations of given qos specs."""
context = req.environ['cinder.context']
authorize(context)
LOG.debug("Get associations for qos_spec id: %s", id)
try:
associates = qos_specs.get_associations(context, id)
notifier_info = dict(id=id)
rpc.get_notifier('QoSSpecs').info(context,
'qos_specs.associations',
notifier_info)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associations',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
except exception.CinderException as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associations',
notifier_err)
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(err))
return self._view_builder.associations(req, associates)
def associate(self, req, id):
"""Associate a qos specs with a volume type."""
context = req.environ['cinder.context']
authorize(context)
type_id = req.params.get('vol_type_id', None)
if not type_id:
msg = _('Volume Type id must not be None.')
notifier_err = dict(id=id, error_message=msg)
self._notify_qos_specs_error(context,
'qos_specs.delete',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=msg)
LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s",
{'id': id, 'type_id': type_id})
try:
qos_specs.associate_qos_with_type(context, id, type_id)
notifier_info = dict(id=id, type_id=type_id)
rpc.get_notifier('QoSSpecs').info(context,
'qos_specs.associate',
notifier_info)
except exception.VolumeTypeNotFound as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
except exception.InvalidVolumeType as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
except exception.QoSSpecsAssociateFailed as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(err))
return webob.Response(status_int=202)
def disassociate(self, req, id):
"""Disassociate a qos specs from a volume type."""
context = req.environ['cinder.context']
authorize(context)
type_id = req.params.get('vol_type_id', None)
if not type_id:
msg = _('Volume Type id must not be None.')
notifier_err = dict(id=id, error_message=msg)
self._notify_qos_specs_error(context,
'qos_specs.delete',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=msg)
LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s",
{'id': id, 'type_id': type_id})
try:
qos_specs.disassociate_qos_specs(context, id, type_id)
notifier_info = dict(id=id, type_id=type_id)
rpc.get_notifier('QoSSpecs').info(context,
'qos_specs.disassociate',
notifier_info)
except exception.VolumeTypeNotFound as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
except exception.QoSSpecsDisassociateFailed as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate',
notifier_err)
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(err))
return webob.Response(status_int=202)
def disassociate_all(self, req, id):
"""Disassociate a qos specs from all volume types."""
context = req.environ['cinder.context']
authorize(context)
LOG.debug("Disassociate qos_spec: %s from all.", id)
try:
qos_specs.disassociate_all(context, id)
notifier_info = dict(id=id)
rpc.get_notifier('QoSSpecs').info(context,
'qos_specs.disassociate_all',
notifier_info)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate_all',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
except exception.QoSSpecsDisassociateFailed as err:
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate_all',
notifier_err)
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(err))
return webob.Response(status_int=202)
class Qos_specs_manage(extensions.ExtensionDescriptor):
    """QoS specs support."""

    name = "Qos_specs_manage"
    alias = "qos-specs"
    namespace = "http://docs.openstack.org/volume/ext/qos-specs/api/v1"
    updated = "2013-08-02T00:00:00+00:00"

    def get_resources(self):
        """Register the qos-specs resource and its member actions."""
        member_actions = {
            "associations": "GET",
            "associate": "GET",
            "disassociate": "GET",
            "disassociate_all": "GET",
            "delete_keys": "PUT",
        }
        resource = extensions.ResourceExtension(
            Qos_specs_manage.alias,
            QoSSpecsController(),
            member_actions=member_actions)
        return [resource]
| apache-2.0 |
GoogleChrome/chromium-dashboard | internals/processes_test.py | 1 | 9385 | from __future__ import division
from __future__ import print_function
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import testing_config # Must be imported before the module under test.
import mock
from internals import approval_defs
from internals import models
from internals import processes
# Test fixture: an ApprovalFieldDef instance used by the serialization
# tests below.
BakeApproval = approval_defs.ApprovalFieldDef(
    'Approval for baking',
    'The head chef must approve of you using the oven',
    9, approval_defs.ONE_LGTM, ['chef@example.com'])

# The expected dict form of BakeApproval after serialization by
# processes.process_to_dict().
BAKE_APPROVAL_DEF_DICT = collections.OrderedDict([
    ('name', 'Approval for baking'),
    ('description', 'The head chef must approve of you using the oven'),
    ('field_id', 9),
    ('rule', approval_defs.ONE_LGTM),
    ('approvers', ['chef@example.com']),
    ])
class HelperFunctionsTest(testing_config.CustomTestCase):
  # Tests for the module-level helper functions in internals.processes.

  def test_process_to_dict(self):
    # process_to_dict() should serialize a Process, its stages, and any
    # stage approvals into plain dicts/lists.
    process = processes.Process(
        'Baking',
        'This is how you make bread',
        'Make it before you are hungry',
        [processes.ProcessStage(
            'Make dough',
            'Mix it and kneed',
            ['Cold dough'],
            [('Share kneeding video', 'https://example.com')],
            [],
            0, 1),
         processes.ProcessStage(
            'Bake it',
            'Heat at 375 for 40 minutes',
            ['A loaf', 'A dirty pan'],
            [],
            [BakeApproval],
            1, 2),
         ])
    expected = {
        'name': 'Baking',
        'description': 'This is how you make bread',
        'applicability': 'Make it before you are hungry',
        'stages': [
            {'name': 'Make dough',
             'description': 'Mix it and kneed',
             'progress_items': ['Cold dough'],
             'actions': [('Share kneeding video', 'https://example.com')],
             'approvals': [],
             'incoming_stage': 0,
             'outgoing_stage': 1},
            {'name': 'Bake it',
             'description': 'Heat at 375 for 40 minutes',
             'progress_items': ['A loaf', 'A dirty pan'],
             'actions': [],
             'approvals': [BAKE_APPROVAL_DEF_DICT],
             'incoming_stage': 1,
             'outgoing_stage': 2},
            ]
        }
    actual = processes.process_to_dict(process)
    # Compare the nested approvals first for a focused failure message,
    # then the whole structure.
    self.assertEqual(expected['stages'][1]['approvals'],
                     actual['stages'][1]['approvals'])
    self.assertEqual(expected, actual)

  def test_review_is_done(self):
    """A review step is done if the review has completed or was N/a."""
    self.assertFalse(processes.review_is_done(None))
    self.assertFalse(processes.review_is_done(0))
    self.assertFalse(processes.review_is_done(models.REVIEW_PENDING))
    self.assertFalse(processes.review_is_done(models.REVIEW_ISSUES_OPEN))
    self.assertTrue(processes.review_is_done(models.REVIEW_ISSUES_ADDRESSED))
    self.assertTrue(processes.review_is_done(models.REVIEW_NA))
class ProgressDetectorsTest(testing_config.CustomTestCase):
  # Exercises each named detector in processes.PROGRESS_DETECTORS against
  # a minimal Feature entity: every detector must be False on the bare
  # feature and True once the relevant field is set.

  def setUp(self):
    self.feature_1 = models.Feature(
        name='feature one', summary='sum', category=1, visibility=1,
        standardization=1, web_dev_views=models.DEV_NO_SIGNALS,
        impl_status_chrome=1,
        intent_stage=models.INTENT_IMPLEMENT)
    self.feature_1.put()

  def tearDown(self):
    self.feature_1.key.delete()

  def test_initial_public_proposal_url(self):
    detector = processes.PROGRESS_DETECTORS['Initial public proposal']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.initial_public_proposal_url = 'http://example.com'
    self.assertTrue(detector(self.feature_1))

  def test_explainer(self):
    detector = processes.PROGRESS_DETECTORS['Explainer']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.explainer_links = ['http://example.com']
    self.assertTrue(detector(self.feature_1))

  def test_security_review_completed(self):
    detector = processes.PROGRESS_DETECTORS['Security review issues addressed']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.security_review_status = models.REVIEW_ISSUES_ADDRESSED
    self.assertTrue(detector(self.feature_1))

  def test_privacy_review_completed(self):
    detector = processes.PROGRESS_DETECTORS['Privacy review issues addressed']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.privacy_review_status = models.REVIEW_ISSUES_ADDRESSED
    self.assertTrue(detector(self.feature_1))

  def test_intent_to_prototype_email(self):
    detector = processes.PROGRESS_DETECTORS['Intent to Prototype email']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.intent_to_implement_url = 'http://example.com'
    self.assertTrue(detector(self.feature_1))

  def test_intent_to_ship_email(self):
    detector = processes.PROGRESS_DETECTORS['Intent to Ship email']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.intent_to_ship_url = 'http://example.com'
    self.assertTrue(detector(self.feature_1))

  def test_ready_for_trial_email(self):
    detector = processes.PROGRESS_DETECTORS['Ready for Trial email']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.ready_for_trial_url = 'http://example.com'
    self.assertTrue(detector(self.feature_1))

  def test_intent_to_experiment_email(self):
    detector = processes.PROGRESS_DETECTORS['Intent to Experiment email']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.intent_to_experiment_url = 'http://example.com'
    self.assertTrue(detector(self.feature_1))

  def test_one_i2e_lgtm(self):
    detector = processes.PROGRESS_DETECTORS['One LGTM on Intent to Experiment']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.i2e_lgtms = ['api_owner@chromium.org']
    self.assertTrue(detector(self.feature_1))

  def test_one_rdt_lgtm(self):
    # Renamed from a second ``test_one_i2e_lgtm`` definition, which
    # silently shadowed the test above so it never ran.
    detector = processes.PROGRESS_DETECTORS[
        'One LGTM on Request for Deprecation Trial']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.i2e_lgtms = ['api_owner@chromium.org']
    self.assertTrue(detector(self.feature_1))

  def test_three_i2s_lgtm(self):
    detector = processes.PROGRESS_DETECTORS['Three LGTMs on Intent to Ship']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.i2s_lgtms = [
        'one@chromium.org',
        'two@chromium.org',
        'three@chromium.org']
    self.assertTrue(detector(self.feature_1))

  def test_samples(self):
    detector = processes.PROGRESS_DETECTORS['Samples']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.sample_links = ['http://example.com']
    self.assertTrue(detector(self.feature_1))

  def test_doc_links(self):
    detector = processes.PROGRESS_DETECTORS['Doc links']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.doc_links = ['http://example.com']
    self.assertTrue(detector(self.feature_1))

  def test_tag_review_requested(self):
    detector = processes.PROGRESS_DETECTORS['TAG review requested']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.tag_review = 'http://example.com'
    self.assertTrue(detector(self.feature_1))

  def test_tag_review_completed(self):
    detector = processes.PROGRESS_DETECTORS['TAG review issues addressed']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.tag_review_status = models.REVIEW_ISSUES_ADDRESSED
    self.assertTrue(detector(self.feature_1))

  def test_web_dev_signals(self):
    # Renamed from ``test_web_dav_signals`` (typo).
    detector = processes.PROGRESS_DETECTORS['Web developer signals']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.web_dev_views = models.PUBLIC_SUPPORT
    self.assertTrue(detector(self.feature_1))

  def test_vendor_signals(self):
    detector = processes.PROGRESS_DETECTORS['Vendor signals']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.ff_views = models.PUBLIC_SUPPORT
    self.assertTrue(detector(self.feature_1))

  def test_estimated_target_milestone(self):
    detector = processes.PROGRESS_DETECTORS['Estimated target milestone']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.shipped_milestone = 99
    self.assertTrue(detector(self.feature_1))

  def test_code_in_chromium(self):
    detector = processes.PROGRESS_DETECTORS['Code in Chromium']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.impl_status_chrome = models.ENABLED_BY_DEFAULT
    self.assertTrue(detector(self.feature_1))

  def test_motivation(self):
    detector = processes.PROGRESS_DETECTORS['Motivation']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.motivation = 'test motivation'
    self.assertTrue(detector(self.feature_1))

  def test_code_removed(self):
    detector = processes.PROGRESS_DETECTORS['Code removed']
    self.assertFalse(detector(self.feature_1))
    self.feature_1.impl_status_chrome = models.REMOVED
    self.assertTrue(detector(self.feature_1))
fusionbox/mezzanine | mezzanine/blog/migrations/south/0005_auto__del_comment__add_field_blogpost_comments_count__chg_field_blogpo.py | 8 | 10953 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Resolve the concrete user model in a Django-version-portable way:
# django >= 1.5 exposes get_user_model() (custom user models), while
# older releases only provide django.contrib.auth.models.User.
try:
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()

# 'app_label.ObjectName' / 'app_label.model_name' labels used below by
# South's frozen ORM to reference whichever user model is active.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
    # South schema migration: drops the app-local ``Comment`` model (in
    # favor of Mezzanine's generic threaded comments), adds a denormalized
    # ``comments_count`` field to ``BlogPost`` and relaxes
    # ``BlogPost.description`` from an HTML field to a plain TextField.

    def forwards(self, orm):
        # Deleting model 'Comment'
        db.delete_table('blog_comment')
        # Adding field 'BlogPost.comments_count'
        db.add_column('blog_blogpost', 'comments_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Changing field 'BlogPost.description'
        db.alter_column('blog_blogpost', 'description', self.gf('django.db.models.fields.TextField')(blank=True))

    def backwards(self, orm):
        # Adding model 'Comment'
        db.create_table('blog_comment', (
            ('blog_post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['blog.BlogPost'])),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('by_author', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('email_hash', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('time_created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('approved', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
            ('website', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('replied_to', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', null=True, to=orm['blog.Comment'], blank=True)),
            ('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('blog', ['Comment'])
        # Deleting field 'BlogPost.comments_count'
        db.delete_column('blog_blogpost', 'comments_count')
        # Changing field 'BlogPost.description'
        db.alter_column('blog_blogpost', 'description', self.gf('mezzanine.core.fields.HtmlField')(blank=True))

    # Frozen ORM snapshot used by South to materialize ``orm`` above; the
    # user-model entries are parameterized so the migration also works
    # with custom user models (see user_orm_label/user_model_label).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        user_model_label: {
            'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blog.blogcategory': {
            'Meta': {'object_name': 'BlogCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'blog.blogpost': {
            'Meta': {'object_name': 'BlogPost'},
            '_keywords': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'blogposts'", 'blank': 'True', 'to': "orm['blog.BlogCategory']"}),
            #'comments': ('mezzanine.generic.fields.CommentsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.ThreadedComment']"}),
            'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'content': ('mezzanine.core.fields.HtmlField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Keyword']", 'symmetrical': 'False', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blogposts'", 'to': "orm['%s']" % user_orm_label})
        },
        'comments.comment': {
            'Meta': {'object_name': 'Comment', 'db_table': "'django_comments'"},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'object_pk': ('django.db.models.fields.TextField', [], {}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['%s']" % user_orm_label}),
            'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.keyword': {
            'Meta': {'object_name': 'Keyword'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.threadedcomment': {
            'Meta': {'object_name': 'ThreadedComment', '_ormbases': ['comments.Comment']},
            'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
            'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': "orm['generic.ThreadedComment']"})
        },
        'sites.site': {
            'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    # Limit this migration to the 'blog' app.
    complete_apps = ['blog']
| bsd-2-clause |
anryko/ansible | lib/ansible/modules/cloud/amazon/aws_waf_condition.py | 9 | 29857 | #!/usr/bin/python
# Copyright (c) 2017 Will Thames
# Copyright (c) 2015 Mike Mochan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_waf_condition
short_description: Create and delete WAF Conditions
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/)
version_added: "2.5"
author:
- Will Thames (@willthames)
- Mike Mochan (@mmochan)
extends_documentation_fragment:
- aws
- ec2
options:
name:
description: Name of the Web Application Firewall condition to manage.
required: true
type: str
type:
description: The type of matching to perform.
choices:
- byte
- geo
- ip
- regex
- size
- sql
- xss
type: str
required: true
filters:
description:
- A list of the filters against which to match.
- For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string).
- For I(type=geo), the only valid key is I(country).
- For I(type=ip), the only valid key is I(ip_address).
- For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern).
- For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size).
- For I(type=sql), valid keys are I(field_to_match) and I(transformation).
- For I(type=xss), valid keys are I(field_to_match) and I(transformation).
- Required when I(state=present).
type: list
elements: dict
suboptions:
field_to_match:
description:
- The field upon which to perform the match.
- Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
type: str
choices: ['uri', 'query_string', 'header', 'method', 'body']
position:
description:
- Where in the field the match needs to occur.
- Only valid when I(type=byte).
type: str
choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word']
header:
description:
- Which specific header should be matched.
- Required when I(field_to_match=header).
- Valid when I(type=byte).
type: str
transformation:
description:
- A transform to apply on the field prior to performing the match.
- Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
type: str
choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode']
country:
description:
- Value of geo constraint (typically a two letter country code).
- The only valid key when I(type=geo).
type: str
ip_address:
description:
- An IP Address or CIDR to match.
- The only valid key when I(type=ip).
type: str
regex_pattern:
description:
- A dict describing the regular expressions used to perform the match.
- Only valid when I(type=regex).
type: dict
suboptions:
name:
description: A name to describe the set of patterns.
type: str
regex_strings:
description: A list of regular expressions to match.
type: list
elements: str
comparison:
description:
- What type of comparison to perform.
- Only valid key when I(type=size).
type: str
choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']
size:
description:
- The size of the field (in bytes).
- Only valid key when I(type=size).
type: int
target_string:
description:
- The string to search for.
- May be up to 50 bytes.
- Valid when I(type=byte).
type: str
purge_filters:
description:
- Whether to remove existing filters from a condition if not passed in I(filters).
default: false
type: bool
waf_regional:
description: Whether to use waf-regional module.
default: false
required: no
type: bool
version_added: 2.9
state:
description: Whether the condition should be C(present) or C(absent).
choices:
- present
- absent
default: present
type: str
'''
EXAMPLES = '''
- name: create WAF byte condition
aws_waf_condition:
name: my_byte_condition
filters:
- field_to_match: header
position: STARTS_WITH
target_string: Hello
header: Content-type
type: byte
- name: create WAF geo condition
aws_waf_condition:
name: my_geo_condition
filters:
- country: US
- country: AU
- country: AT
type: geo
- name: create IP address condition
aws_waf_condition:
name: "{{ resource_prefix }}_ip_condition"
filters:
- ip_address: "10.0.0.0/8"
- ip_address: "192.168.0.0/24"
type: ip
- name: create WAF regex condition
aws_waf_condition:
name: my_regex_condition
filters:
- field_to_match: query_string
regex_pattern:
name: greetings
regex_strings:
- '[hH]ello'
- '^Hi there'
- '.*Good Day to You'
type: regex
- name: create WAF size condition
aws_waf_condition:
name: my_size_condition
filters:
- field_to_match: query_string
size: 300
comparison: GT
type: size
- name: create WAF sql injection condition
aws_waf_condition:
name: my_sql_condition
filters:
- field_to_match: query_string
transformation: url_decode
type: sql
- name: create WAF xss condition
aws_waf_condition:
name: my_xss_condition
filters:
- field_to_match: query_string
transformation: url_decode
type: xss
'''
RETURN = '''
condition:
description: Condition returned by operation.
returned: always
type: complex
contains:
condition_id:
description: Type-agnostic ID for the condition.
returned: when state is present
type: str
sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
byte_match_set_id:
description: ID for byte match set.
returned: always
type: str
sample: c4882c96-837b-44a2-a762-4ea87dbf812b
byte_match_tuples:
description: List of byte match tuples.
returned: always
type: complex
contains:
field_to_match:
description: Field to match.
returned: always
type: complex
contains:
data:
description: Which specific header (if type is header).
type: str
sample: content-type
type:
description: Type of field
type: str
sample: HEADER
positional_constraint:
description: Position in the field to match.
type: str
sample: STARTS_WITH
target_string:
description: String to look for.
type: str
sample: Hello
text_transformation:
description: Transformation to apply to the field before matching.
type: str
sample: NONE
geo_match_constraints:
description: List of geographical constraints.
returned: when type is geo and state is present
type: complex
contains:
type:
description: Type of geo constraint.
type: str
sample: Country
value:
description: Value of geo constraint (typically a country code).
type: str
sample: AT
geo_match_set_id:
description: ID of the geo match set.
returned: when type is geo and state is present
type: str
sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
ip_set_descriptors:
        description: List of IP address filters.
returned: when type is ip and state is present
type: complex
contains:
type:
description: Type of IP address (IPV4 or IPV6).
returned: always
type: str
sample: IPV4
value:
description: IP address.
returned: always
type: str
sample: 10.0.0.0/8
ip_set_id:
description: ID of condition.
returned: when type is ip and state is present
type: str
sample: 78ad334a-3535-4036-85e6-8e11e745217b
name:
description: Name of condition.
returned: when state is present
type: str
sample: my_waf_condition
regex_match_set_id:
description: ID of the regex match set.
returned: when type is regex and state is present
type: str
sample: 5ea3f6a8-3cd3-488b-b637-17b79ce7089c
regex_match_tuples:
description: List of regex matches.
returned: when type is regex and state is present
type: complex
contains:
field_to_match:
description: Field on which the regex match is applied.
type: complex
contains:
type:
description: The field name.
returned: when type is regex and state is present
type: str
sample: QUERY_STRING
regex_pattern_set_id:
description: ID of the regex pattern.
type: str
sample: 6fdf7f2d-9091-445c-aef2-98f3c051ac9e
text_transformation:
                description: Transformation applied to the text before matching.
type: str
sample: NONE
size_constraint_set_id:
description: ID of the size constraint set.
returned: when type is size and state is present
type: str
sample: de84b4b3-578b-447e-a9a0-0db35c995656
size_constraints:
description: List of size constraints to apply.
returned: when type is size and state is present
type: complex
contains:
comparison_operator:
description: Comparison operator to apply.
type: str
sample: GT
field_to_match:
description: Field on which the size constraint is applied.
type: complex
contains:
type:
description: Field name.
type: str
sample: QUERY_STRING
size:
description: Size to compare against the field.
type: int
sample: 300
text_transformation:
description: Transformation applied to the text before matching.
type: str
sample: NONE
sql_injection_match_set_id:
description: ID of the SQL injection match set.
returned: when type is sql and state is present
type: str
sample: de84b4b3-578b-447e-a9a0-0db35c995656
sql_injection_match_tuples:
description: List of SQL injection match sets.
returned: when type is sql and state is present
type: complex
contains:
field_to_match:
description: Field on which the SQL injection match is applied.
type: complex
contains:
type:
description: Field name.
type: str
sample: QUERY_STRING
text_transformation:
description: Transformation applied to the text before matching.
type: str
sample: URL_DECODE
xss_match_set_id:
description: ID of the XSS match set.
returned: when type is xss and state is present
type: str
sample: de84b4b3-578b-447e-a9a0-0db35c995656
xss_match_tuples:
description: List of XSS match sets.
returned: when type is xss and state is present
type: complex
contains:
field_to_match:
description: Field on which the XSS match is applied.
type: complex
contains:
type:
description: Field name
type: str
sample: QUERY_STRING
text_transformation:
description: transformation applied to the text before matching.
type: str
sample: URL_DECODE
'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies
from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, MATCH_LOOKUP
from ansible.module_utils.aws.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff
class Condition(object):
    """Manage a single AWS WAF (Classic) match condition.

    The condition type (ip, geo, byte, size, sql, xss or regex) is taken
    from the module's ``type`` parameter.  All type-specific boto3 method
    names and request/response key names are resolved once from
    MATCH_LOOKUP in __init__, so every other method can use one generic
    code path regardless of condition type.
    """
    def __init__(self, client, module):
        self.client = client
        self.module = module
        self.type = module.params['type']
        # Resolve per-type API names once, e.g. for type 'byte':
        # method_suffix='byte_match_set', conditionset='ByteMatchSet',
        # conditionsetid='ByteMatchSetId', conditiontuple(s)='ByteMatchTuple(s)'.
        self.method_suffix = MATCH_LOOKUP[self.type]['method']
        self.conditionset = MATCH_LOOKUP[self.type]['conditionset']
        self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's'
        self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id'
        self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple']
        self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's'
        self.conditiontype = MATCH_LOOKUP[self.type]['type']
    def format_for_update(self, condition_set_id):
        """Build kwargs for an update_* call INSERTing every module filter.

        Returns a dict with an 'Updates' list (one INSERT action per filter)
        plus the type-specific condition-set-id key.
        """
        # Prep kwargs
        kwargs = dict()
        kwargs['Updates'] = list()
        for filtr in self.module.params.get('filters'):
            # Only for ip_set
            if self.type == 'ip':
                # there might be a better way of detecting an IPv6 address
                if ':' in filtr.get('ip_address'):
                    ip_type = 'IPV6'
                else:
                    ip_type = 'IPV4'
                condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')}
            # Specific for geo_match_set
            if self.type == 'geo':
                condition_insert = dict(Type='Country', Value=filtr.get('country'))
            # Common For everything but ip_set and geo_match_set
            if self.type not in ('ip', 'geo'):
                condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()),
                                        TextTransformation=filtr.get('transformation', 'none').upper())
                # HEADER matches additionally require the header name in 'Data'.
                if filtr.get('field_to_match').upper() == "HEADER":
                    if filtr.get('header'):
                        condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower()
                    else:
                        self.module.fail_json(msg=str("DATA required when HEADER requested"))
            # Specific for byte_match_set
            if self.type == 'byte':
                condition_insert['TargetString'] = filtr.get('target_string')
                condition_insert['PositionalConstraint'] = filtr.get('position')
            # Specific for size_constraint_set
            if self.type == 'size':
                condition_insert['ComparisonOperator'] = filtr.get('comparison')
                condition_insert['Size'] = filtr.get('size')
            # Specific for regex_match_set
            if self.type == 'regex':
                # Regex conditions reference a separate regex pattern set,
                # created on demand if it does not exist yet.
                condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId']
            kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert})
        kwargs[self.conditionsetid] = condition_set_id
        return kwargs
    def format_for_deletion(self, condition):
        """Build kwargs for an update_* call DELETEing every tuple of *condition*."""
        return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple}
                            for current_condition_tuple in condition[self.conditiontuples]],
                self.conditionsetid: condition[self.conditionsetid]}
    @AWSRetry.exponential_backoff()
    def list_regex_patterns_with_backoff(self, **params):
        """Call list_regex_pattern_sets with exponential-backoff retries."""
        return self.client.list_regex_pattern_sets(**params)
    @AWSRetry.exponential_backoff()
    def get_regex_pattern_set_with_backoff(self, regex_pattern_set_id):
        """Call get_regex_pattern_set with exponential-backoff retries."""
        return self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)
    def list_regex_patterns(self):
        """Return all regex pattern sets, following NextMarker pagination manually."""
        # at time of writing(2017-11-20) no regex pattern paginator exists
        regex_patterns = []
        params = {}
        while True:
            try:
                response = self.list_regex_patterns_with_backoff(**params)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not list regex patterns')
            regex_patterns.extend(response['RegexPatternSets'])
            if 'NextMarker' in response:
                params['NextMarker'] = response['NextMarker']
            else:
                break
        return regex_patterns
    def get_regex_pattern_by_name(self, name):
        """Return the full regex pattern set with the given Name, or None."""
        existing_regex_patterns = self.list_regex_patterns()
        regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns)
        if name in regex_lookup:
            return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet']
        else:
            return None
    def ensure_regex_pattern_present(self, regex_pattern):
        """Create/update a regex pattern set so its strings match *regex_pattern*.

        Returns the up-to-date RegexPatternSet dict.
        """
        name = regex_pattern['name']
        pattern_set = self.get_regex_pattern_by_name(name)
        if not pattern_set:
            pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name},
                                                             self.client.create_regex_pattern_set)['RegexPatternSet']
        # Reconcile desired vs. actual pattern strings.
        missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings'])
        extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings'])
        if not missing and not extra:
            return pattern_set
        updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing]
        updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra])
        run_func_with_change_token_backoff(self.client, self.module,
                                           {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates},
                                           self.client.update_regex_pattern_set, wait=True)
        return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet']
    def delete_unused_regex_pattern(self, regex_pattern_set_id):
        """Empty and delete a regex pattern set; ignore it if already gone."""
        try:
            regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet']
            updates = list()
            for regex_pattern_string in regex_pattern_set['RegexPatternStrings']:
                updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string})
            # A pattern set must be emptied before it can be deleted.
            run_func_with_change_token_backoff(self.client, self.module,
                                               {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates},
                                               self.client.update_regex_pattern_set)
            run_func_with_change_token_backoff(self.client, self.module,
                                               {'RegexPatternSetId': regex_pattern_set_id},
                                               self.client.delete_regex_pattern_set, wait=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            if e.response['Error']['Code'] == 'WAFNonexistentItemException':
                return
            self.module.fail_json_aws(e, msg='Could not delete regex pattern')
    def get_condition_by_name(self, name):
        """Return the condition-set id for *name*, or None if absent."""
        all_conditions = [d for d in self.list_conditions() if d['Name'] == name]
        if all_conditions:
            return all_conditions[0][self.conditionsetid]
    @AWSRetry.exponential_backoff()
    def get_condition_by_id_with_backoff(self, condition_set_id):
        """Fetch one condition set by id with exponential-backoff retries."""
        params = dict()
        params[self.conditionsetid] = condition_set_id
        func = getattr(self.client, 'get_' + self.method_suffix)
        return func(**params)[self.conditionset]
    def get_condition_by_id(self, condition_set_id):
        """Fetch one condition set by id, failing the module on AWS errors."""
        try:
            return self.get_condition_by_id_with_backoff(condition_set_id)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg='Could not get condition')
    def list_conditions(self):
        """Return all condition sets of this type, paginating when supported."""
        method = 'list_' + self.method_suffix + 's'
        try:
            paginator = self.client.get_paginator(method)
            func = paginator.paginate().build_full_result
        except botocore.exceptions.OperationNotPageableError:
            # list_geo_match_sets and list_regex_match_sets do not have a paginator
            func = getattr(self.client, method)
        try:
            return func()[self.conditionsets]
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type)
    def tidy_up_regex_patterns(self, regex_match_set):
        """Delete regex pattern sets used only by the (now removed) match set."""
        all_regex_match_sets = self.list_conditions()
        all_match_set_patterns = list()
        # Collect pattern ids still referenced by any remaining match set.
        for rms in all_regex_match_sets:
            all_match_set_patterns.extend(conditiontuple['RegexPatternSetId']
                                          for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples])
        for filtr in regex_match_set[self.conditiontuples]:
            if filtr['RegexPatternSetId'] not in all_match_set_patterns:
                self.delete_unused_regex_pattern(filtr['RegexPatternSetId'])
    def find_condition_in_rules(self, condition_set_id):
        """Return the names of WAF rules whose predicates reference this condition."""
        rules_in_use = []
        try:
            # NOTE(review): if the client class is neither WAF nor WAFRegional,
            # all_rules is never assigned — presumably unreachable; confirm.
            if self.client.__class__.__name__ == 'WAF':
                all_rules = list_rules_with_backoff(self.client)
            elif self.client.__class__.__name__ == 'WAFRegional':
                all_rules = list_regional_rules_with_backoff(self.client)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg='Could not list rules')
        for rule in all_rules:
            try:
                rule_details = get_rule_with_backoff(self.client, rule['RuleId'])
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not get rule details')
            if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]:
                rules_in_use.append(rule_details['Name'])
        return rules_in_use
    def find_and_delete_condition(self, condition_set_id):
        """Delete a condition set (refusing if any rule still uses it).

        Returns (True, {}) on success.
        """
        current_condition = self.get_condition_by_id(condition_set_id)
        in_use_rules = self.find_condition_in_rules(condition_set_id)
        if in_use_rules:
            rulenames = ', '.join(in_use_rules)
            self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames))
        if current_condition[self.conditiontuples]:
            # Filters are deleted using update with the DELETE action
            func = getattr(self.client, 'update_' + self.method_suffix)
            params = self.format_for_deletion(current_condition)
            try:
                # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call
                run_func_with_change_token_backoff(self.client, self.module, params, func)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not delete filters from condition')
        func = getattr(self.client, 'delete_' + self.method_suffix)
        params = dict()
        params[self.conditionsetid] = condition_set_id
        try:
            run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg='Could not delete condition')
        # tidy up regex patterns
        if self.type == 'regex':
            self.tidy_up_regex_patterns(current_condition)
        return True, {}
    def find_missing(self, update, current_condition):
        """Return the INSERT updates whose tuples are not already on the condition."""
        missing = []
        for desired in update['Updates']:
            found = False
            desired_condition = desired[self.conditiontuple]
            current_conditions = current_condition[self.conditiontuples]
            for condition in current_conditions:
                # compare_policies returns a falsy value when the dicts match.
                if not compare_policies(condition, desired_condition):
                    found = True
            if not found:
                missing.append(desired)
        return missing
    def find_and_update_condition(self, condition_set_id):
        """Reconcile the condition's tuples with the module filters.

        Returns (changed, refreshed condition dict).
        """
        current_condition = self.get_condition_by_id(condition_set_id)
        update = self.format_for_update(condition_set_id)
        missing = self.find_missing(update, current_condition)
        if self.module.params.get('purge_filters'):
            # DELETE any existing tuple not present in the desired set.
            extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple}
                     for current_tuple in current_condition[self.conditiontuples]
                     if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]]
        else:
            extra = []
        changed = bool(missing or extra)
        if changed:
            update['Updates'] = missing + extra
            func = getattr(self.client, 'update_' + self.method_suffix)
            try:
                # NOTE(review): 'result' is assigned but never used.
                result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not update condition')
        return changed, self.get_condition_by_id(condition_set_id)
    def ensure_condition_present(self):
        """Create the condition if needed, then reconcile its filters."""
        name = self.module.params['name']
        condition_set_id = self.get_condition_by_name(name)
        if condition_set_id:
            return self.find_and_update_condition(condition_set_id)
        else:
            params = dict()
            params['Name'] = name
            func = getattr(self.client, 'create_' + self.method_suffix)
            try:
                condition = run_func_with_change_token_backoff(self.client, self.module, params, func)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self.module.fail_json_aws(e, msg='Could not create condition')
            return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid])
    def ensure_condition_absent(self):
        """Delete the named condition if present; returns (changed, {})."""
        condition_set_id = self.get_condition_by_name(self.module.params['name'])
        if condition_set_id:
            return self.find_and_delete_condition(condition_set_id)
        return False, {}
def main():
    """Entry point for the aws_waf_condition module.

    Builds the argument spec, creates a WAF or WAF-Regional client and
    delegates to Condition.ensure_condition_present() /
    ensure_condition_absent() according to the requested state.
    """
    # Documented sub-options accepted inside each entry of 'filters'.
    filters_subspec = {
        'country': {},
        'field_to_match': {'choices': ['uri', 'query_string', 'header', 'method', 'body']},
        'header': {},
        'transformation': {'choices': ['none', 'compress_white_space',
                                       'html_entity_decode', 'lowercase',
                                       'cmd_line', 'url_decode']},
        'position': {'choices': ['exactly', 'starts_with', 'ends_with',
                                 'contains', 'contains_word']},
        'comparison': {'choices': ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']},
        'target_string': {},  # Bytes
        'size': {'type': 'int'},
        'ip_address': {},
        'regex_pattern': {},
    }
    argument_spec = {
        'name': {'required': True},
        'type': {'required': True, 'choices': ['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']},
        'filters': {'type': 'list'},
        'purge_filters': {'type': 'bool', 'default': False},
        'waf_regional': {'type': 'bool', 'default': False},
        'state': {'default': 'present', 'choices': ['present', 'absent']},
    }
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['state', 'present', ['filters']]])
    state = module.params.get('state')
    # Pick the regional or global WAF API endpoint.
    resource = 'waf-regional' if module.params['waf_regional'] else 'waf'
    client = module.client(resource)
    condition = Condition(client, module)
    if state == 'present':
        changed, results = condition.ensure_condition_present()
        # return a condition agnostic ID for use by aws_waf_rule
        results['ConditionId'] = results[condition.conditionsetid]
    else:
        changed, results = condition.ensure_condition_absent()
    module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()
| gpl-3.0 |
vivianli32/TravelConnect | flask/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord

# Number of most-frequent characters tracked per model.
SAMPLE_SIZE = 64
# Minimum number of observed character sequences before the shortcut
# thresholds below may be applied.
SB_ENOUGH_REL_THRESHOLD = 1024
# Confidence above which the prober declares an early win.
POSITIVE_SHORTCUT_THRESHOLD = 0.95
# Confidence below which the prober gives up early.
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
# Character orders at or above this value are treated as symbols.
SYMBOL_CAT_ORDER = 250
# Number of sequence-likelihood categories in the precedence matrix.
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Charset prober for a single single-byte encoding model.

    Feeds characters through the model's character-order map and scores
    consecutive character pairs against the model's precedence matrix to
    compute a confidence that the input uses this charset.
    """
    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()
    def reset(self):
        """Reset all per-stream counters to their initial state."""
        CharSetProber.reset(self)
        # char order of last character
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0
    def get_charset_name(self):
        """Return the detected charset name, deferring to the name prober if set."""
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']
    def feed(self, aBuf):
        """Consume a chunk of bytes and update sequence statistics.

        Returns the prober state (possibly shortcut to eFoundIt/eNotMe).
        """
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Only count a sequence when both characters are frequent.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order
        if self.get_state() == constants.eDetecting:
            # With enough sequences observed, allow an early decision.
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe
        return self.get_state()
    def get_confidence(self):
        """Return a confidence in (0, 0.99] based on positive-sequence ratio."""
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
| mit |
mojofunk/ardour | tools/bug_tool/ClientCookie/__init__.py | 11 | 1394 | # Import names so that they can be imported directly from the package, like
# this:
#from ClientCookie import <whatever>
try: True
except NameError:
True = 1
False = 0
import sys
# don't edit these here: do eg.
# import ClientCookie; ClientCookie.HTTP_DEBUG = 1
DEBUG_STREAM = sys.stderr
CLIENTCOOKIE_DEBUG = False
REDIRECT_DEBUG = False
HTTP_DEBUG = False
from _ClientCookie import VERSION, __doc__, \
CookieJar, Cookie, \
CookiePolicy, DefaultCookiePolicy, \
lwp_cookie_str
from _MozillaCookieJar import MozillaCookieJar
from _MSIECookieJar import MSIECookieJar
try:
from urllib2 import AbstractHTTPHandler
except ImportError:
pass
else:
from ClientCookie._urllib2_support import \
HTTPHandler, build_opener, install_opener, urlopen, \
HTTPRedirectHandler
from ClientCookie._urllib2_support import \
OpenerDirector, BaseProcessor, \
HTTPRequestUpgradeProcessor, \
HTTPEquivProcessor, SeekableProcessor, HTTPCookieProcessor, \
HTTPRefererProcessor, HTTPStandardHeadersProcessor, \
HTTPRefreshProcessor, HTTPErrorProcessor, \
HTTPResponseDebugProcessor
import httplib
if hasattr(httplib, 'HTTPS'):
from ClientCookie._urllib2_support import HTTPSHandler
del AbstractHTTPHandler, httplib
from _Util import http2time
str2time = http2time
del http2time
del sys
| gpl-2.0 |
Kilhog/odoo | addons/document/__openerp__.py | 260 | 2096 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Document Management System',
'version': '2.1',
'category': 'Knowledge Management',
'description': """
This is a complete document management system.
==============================================
* User Authentication
* Document Indexation:- .pptx and .docx files are not supported in Windows platform.
* Dashboard for Document that includes:
* New Files (list)
* Files by Resource Type (graph)
* Files by Partner (graph)
* Files Size by Month (graph)
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['knowledge', 'mail'],
'data': [
'security/document_security.xml',
'document_view.xml',
'document_data.xml',
'wizard/document_configuration_view.xml',
'security/ir.model.access.csv',
'report/document_report_view.xml',
'views/document.xml',
],
'demo': [ 'document_demo.xml' ],
'test': ['test/document_test2.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
roopeshsivam/certify | certificates/CreateCertView.py | 1 | 5826 | from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import logout as django_logout
from django.shortcuts import redirect, render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.views import generic
from django.utils.decorators import method_decorator
from django.views.generic.edit import FormView
from django.forms import BaseModelFormSet
from django.views.generic.edit import CreateView
from django.views.decorators.http import condition
from django.views.generic.edit import FormMixin
from django.views.generic.edit import UpdateView
from .ContextData import *
@method_decorator(login_required(login_url="/in/login/"), name='dispatch')
class CreateCertificateView(CreateView):
    """Generic create view for any certificate type.

    The certificate type is selected by the 'cert_id' URL kwarg, which is
    used to look up the form class, model and template in ContextData.
    The target ship is passed as the 'shipid' query parameter.
    """
    def get_form_class(self, **kwargs):
        """
        Returns an instance of the form to be used in this view.
        kwarg from database
        """
        return ContextData[self.kwargs['cert_id']]['FormName']
    def get_template_names(self, **kwargs):
        """Return the template for this certificate type.

        NOTE(review): the active-certificate check below is commented out,
        so the generic base form template is always used.
        """
        ShipID = self.request.GET.get('shipid')
        ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
        # if str(ModelObject.objects.filter(CertState='c', ShipMainData__pk=ShipID)) == '<QuerySet []>':
        #     return 'pages/create-'+ContextData[self.kwargs['cert_id']]['TemplateName']
        # else:
        #     return 'pages/active-certificate-error.html'
        return 'pages/certificate-base-form.html'
    def get_form(self, form_class=None):
        """Return the form instance (no customisation beyond the default)."""
        form = super(CreateCertificateView, self).get_form()
        return form
    def form_valid(self, form, **kwargs):
        """Attach author, ship and draft state ('d') before saving."""
        ShipID = self.request.GET.get('shipid')
        form.instance.DocAuthor = self.request.user
        form.instance.ShipMainData = ShipMainData.objects.get(id=ShipID)
        form.instance.CertState = 'd'
        return super(CreateCertificateView, self).form_valid(form)
    def get_context_data(self, **kwargs):
        """Add certificate-type metadata and the target ship to the context."""
        context = super(CreateCertificateView, self).get_context_data(**kwargs)
        ShipID = self.request.GET.get('shipid')
        context['CertName'] = ContextData[self.kwargs['cert_id']]['CertName']
        context['TemplateName'] = 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
        context['State'] = "Create New Certificate"
        context['ButtonState'] = "Add"
        context['ShipName'] = ShipMainData.objects.get(id=ShipID)
        return context
@method_decorator(login_required(login_url="/in/login/"), name='dispatch')
class UpdateCertificateView(UpdateView):
    """Generic update view for any certificate type.

    Handles three POST actions: 'save' (edit a draft), 'confirm'
    (promote a draft to confirmed, unless another certificate for the
    same ship is already confirmed) and 'deactivate'.
    """
    queryset = None
    def get_form_class(self, **kwargs):
        """
        Returns the form class to use in this view
        """
        return ContextData[self.kwargs['cert_id']]['FormName']
    def get_queryset(self, **kwargs):
        """
        Return the `QuerySet` that will be used to look up the object.
        Note that this method is called by the default implementation of
        `get_object` and may not be called if `get_object` is overridden.
        """
        ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
        return ModelObject.objects.all()
    def get_template_names(self, **kwargs):
        """
        Returns a list of template names to be used for the request. Must return
        a list. May not be called if render_to_response is overridden.
        """
        ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
        ModelObject = ModelObject.objects.get(pk=self.kwargs['pk'])
        # Only drafts ('d') may be edited; anything else shows an error page.
        if ModelObject.CertState=='d':
            return 'pages/certificate-base-form.html'
        # return 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
        else:
            return 'pages/form-error-update.html'
    def form_valid(self, form):
        # NOTE(review): form.save() here plus the super() call appears to
        # save the object twice — confirm whether the explicit save is needed.
        form = self.get_form()
        form.save()
        return super(UpdateCertificateView, self).form_valid(form)
    def get_success_url(self):
        """Redirect one level up after a successful update."""
        return "../"
    def post(self, request, *args, **kwargs):
        """Dispatch the 'save'/'confirm'/'deactivate' POST actions.

        NOTE(review): if none of the three buttons is present in the POST
        data this method falls through and returns None — verify that all
        forms always submit one of them.
        """
        request.POST = (request.POST.copy())
        ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
        Certificate = ModelObject.objects.get(pk=self.kwargs['pk'])
        CertFilter = ModelObject.objects.filter(ShipMainData_id=Certificate.ShipMainData.id)
        State = 'c'
        for Certificates in CertFilter: #Check simultaneous confirmation of multiple certificates
            if Certificates.CertState == "c":
                State = 'd'
        if 'save' in request.POST: #Check before editing or saving confirmed certificates
            form = self.get_form()
            if Certificate.CertState != "c":
                return super(UpdateCertificateView, self).post(request, *args, **kwargs)
            else:
                return HttpResponseRedirect('../') # change to redirect
        if 'confirm' in request.POST:
            ModelObject.objects.filter(pk=self.kwargs['pk']).update(CertState=State)
            return HttpResponseRedirect('../') # change to redirect
        if 'deactivate' in request.POST:
            ModelObject.objects.filter(pk=self.kwargs['pk']).update(CertState='x')
            return HttpResponseRedirect('../') #change to redirect
    def get_context_data(self, **kwargs):
        """Add certificate-type metadata and the owning ship to the context."""
        context = super(UpdateCertificateView, self).get_context_data(**kwargs)
        CertData = ContextData[self.kwargs['cert_id']]['ModelName']
        Certificate= CertData.objects.get(pk=self.kwargs['pk'])
        context['CertName'] = ContextData[self.kwargs['cert_id']]['CertName']
        context['TemplateName'] = 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
        context['State'] = "Edit Certificate"
        context['ButtonState'] = "Update"
        context['ShipName'] = Certificate.ShipMainData
        return context
rajeefmk/Barcamp-Bangalore-Android-App | gcm_flask/flask/config.py | 50 | 6150 | # -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import imp
import os
import errno
from werkzeug.utils import import_string
class ConfigAttribute(object):
    """Descriptor that proxies attribute access to the owner's ``config``
    mapping under a fixed key, optionally converting values on read."""

    def __init__(self, name, get_converter=None):
        self.__name__ = name
        self.get_converter = get_converter

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        value = obj.config[self.__name__]
        if self.get_converter is None:
            return value
        return self.get_converter(value)

    def __set__(self, obj, value):
        obj.config[self.__name__] = value
class Config(dict):
    """Works exactly like a dict but provides ways to fill it from files
    or special dictionaries.  There are two common patterns to populate the
    config.

    Either you can fill the config from a config file::

        app.config.from_pyfile('yourconfig.cfg')

    Or alternatively you can define the configuration options in the
    module that calls :meth:`from_object` or provide an import path to
    a module that should be loaded.  It is also possible to tell it to
    use the same module and with that provide the configuration values
    just before the call::

        DEBUG = True
        SECRET_KEY = 'development key'
        app.config.from_object(__name__)

    In both cases (loading from any Python file or loading from modules),
    only uppercase keys are added to the config.  This makes it possible to use
    lowercase values in the config file for temporary values that are not added
    to the config or to define the config keys in the same file that implements
    the application.

    Probably the most interesting way to load configurations is from an
    environment variable pointing to a file::

        app.config.from_envvar('YOURAPPLICATION_SETTINGS')

    In this case before launching the application you have to set this
    environment variable to the file you want to use.  On Linux and OS X
    use the export statement::

        export YOURAPPLICATION_SETTINGS='/path/to/config/file'

    On windows use `set` instead.

    :param root_path: path to which files are read relative from.  When the
                      config object is created by the application, this is
                      the application's :attr:`~flask.Flask.root_path`.
    :param defaults: an optional dictionary of default values
    """
    def __init__(self, root_path, defaults=None):
        dict.__init__(self, defaults or {})
        self.root_path = root_path
    def from_envvar(self, variable_name, silent=False):
        """Loads a configuration from an environment variable pointing to
        a configuration file.  This is basically just a shortcut with nicer
        error messages for this line of code::

            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])

        :param variable_name: name of the environment variable
        :param silent: set to `True` if you want silent failure for missing
                       files.
        :return: bool. `True` if able to load config, `False` otherwise.
        """
        rv = os.environ.get(variable_name)
        if not rv:
            if silent:
                return False
            raise RuntimeError('The environment variable %r is not set '
                               'and as such configuration could not be '
                               'loaded.  Set this variable and make it '
                               'point to a configuration file' %
                               variable_name)
        return self.from_pyfile(rv, silent=silent)
    def from_pyfile(self, filename, silent=False):
        """Updates the values in the config from a Python file.  This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.

        :param filename: the filename of the config.  This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to `True` if you want silent failure for missing
                       files.

        .. versionadded:: 0.7
           `silent` parameter.
        """
        filename = os.path.join(self.root_path, filename)
        # Execute the file inside a throwaway module namespace so that
        # from_object can harvest its uppercase attributes.
        d = imp.new_module('config')
        d.__file__ = filename
        try:
            execfile(filename, d.__dict__)
        except IOError, e:
            # Missing file / directory is only an error when not silent.
            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
        return True
    def from_object(self, obj):
        """Updates the values from the given object.  An object can be of one
        of the following two types:

        -   a string: in this case the object with that name will be imported
        -   an actual object reference: that object is used directly

        Objects are usually either modules or classes.

        Just the uppercase variables in that object are stored in the config.
        Example usage::

            app.config.from_object('yourapplication.default_config')
            from yourapplication import default_config
            app.config.from_object(default_config)

        You should not use this function to load the actual configuration but
        rather configuration defaults.  The actual config should be loaded
        with :meth:`from_pyfile` and ideally from a location not within the
        package because the package might be installed system wide.

        :param obj: an import name or object
        """
        if isinstance(obj, basestring):
            obj = import_string(obj)
        # Only uppercase attributes become configuration values.
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/nltk/app/wordnet_app.py | 5 | 34430 | # Natural Language Toolkit: WordNet Browser Application
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
# Paul Bone <pbone@students.csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A WordNet Browser application which launches the default browser
(if it is not already running) and opens a new tab with a connection
to http://localhost:port/ . It also starts an HTTP server on the
specified port and begins serving browser requests. The default
port is 8000. (For command-line help, run "python wordnet -h")
This application requires that the user's web browser supports
Javascript.
BrowServer is a server for browsing the NLTK Wordnet database It first
launches a browser client to be used for browsing and then starts
serving the requests of that and maybe other clients
Usage::
browserver.py -h
browserver.py [-s] [-p <port>]
Options::
-h or --help
Display this help message.
-l <file> or --log-file <file>
Logs messages to the given file, If this option is not specified
messages are silently dropped.
-p <port> or --port <port>
Run the web server on this TCP port, defaults to 8000.
-s or --server-mode
Do not start a web browser, and do not allow a user to
        shut down the server through the web interface.
"""
# TODO: throughout this package variable names and docstrings need
# modifying to be compliant with NLTK's coding standards. Tests also
# need to be develop to ensure this continues to work in the face of
# changes to other NLTK packages.
from __future__ import print_function
# Allow this program to run inside the NLTK source tree.
from sys import path
import os
import sys
from sys import argv
from collections import defaultdict
import webbrowser
import datetime
import re
import threading
import time
import getopt
import base64
import pickle
import copy
from nltk import compat
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Synset, Lemma
if compat.PY3:
from http.server import HTTPServer, BaseHTTPRequestHandler
else:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# now included in local file
# from util import html_header, html_trailer, \
# get_static_index_page, get_static_page_by_path, \
# page_from_word, page_from_href
# Whether the next HTTP client is the very first one; the first client is
# served the index-page variant that carries a Shutdown button.
firstClient = True
# True if we're not also running a web browser. The value of server_mode
# gets set by wnb().
server_mode = None
# If set, this is a file object for writing log messages.
logfile = None
class MyServerHandler(BaseHTTPRequestHandler):
    """Serve requests from the browser client.

    The stripped path (without the leading '/') doubles as the command:
    '' -> index page, '*.html' -> static pages, 'search?...' -> form search,
    'lookup_<ref>' -> follow an encoded Reference link, 'start_page' ->
    initial content frame, 'SHUTDOWN THE SERVER' -> terminate the process
    (refused when running in server_mode).
    """
    def do_HEAD(self):
        # HEAD is GET minus the body; send_head() emits status + headers.
        self.send_head()
    def do_GET(self):
        global firstClient
        # Path without the leading slash; acts as the request command.
        sp = self.path[1:]
        if compat.unquote_plus(sp) == 'SHUTDOWN THE SERVER':
            if server_mode:
                # Remote shutdown is disabled for headless servers.
                page = "Server must be killed with SIGTERM."
                type = "text/plain"
            else:
                print('Server shutting down!')
                os._exit(0)
        elif sp == '': # First request.
            type = 'text/html'
            if not server_mode and firstClient:
                # Only the very first client gets the Shutdown button.
                firstClient = False
                page = get_static_index_page(True)
            else:
                page = get_static_index_page(False)
            word = 'green'
        elif sp.endswith('.html'): # Trying to fetch a HTML file TODO:
            type = 'text/html'
            usp = compat.unquote_plus(sp)
            if usp == 'NLTK Wordnet Browser Database Info.html':
                word = '* Database Info *'
                if os.path.isfile(usp):
                    with open(usp, 'r') as infile:
                        page = infile.read()
                else:
                    # The statistics page is generated offline; tell the
                    # user how to produce it.
                    page = (html_header % word) + \
                        '<p>The database info file:'\
                        '<p><b>' + usp + '</b>' + \
                        '<p>was not found. Run this:' + \
                        '<p><b>python dbinfo_html.py</b>' + \
                        '<p>to produce it.' + html_trailer
            else:
                # Handle files here.
                word = sp
                page = get_static_page_by_path(usp)
        elif sp.startswith("search"):
            # This doesn't seem to work with MWEs.
            type = 'text/html'
            # Extract the 'nextWord' form-field value from the query string.
            parts = (sp.split("?")[1]).split("&")
            word = [p.split("=")[1].replace("+", " ")
                    for p in parts if p.startswith("nextWord")][0]
            page, word = page_from_word(word)
        elif sp.startswith("lookup_"):
            # TODO add a variation of this that takes a non encoded word or MWE.
            type = 'text/html'
            # Remainder after the route prefix is a base64-encoded Reference.
            sp = sp[len("lookup_"):]
            page, word = page_from_href(sp)
        elif sp == "start_page":
            # if this is the first request we should display help
            # information, and possibly set a default word.
            type = 'text/html'
            page, word = page_from_word("wordnet")
        else:
            type = 'text/plain'
            page = "Could not parse request: '%s'" % sp
        # Send result.
        self.send_head(type)
        self.wfile.write(page.encode('utf8'))
    def send_head(self, type=None):
        # Always 200: the app reports errors inside the page body itself.
        self.send_response(200)
        self.send_header('Content-type', type)
        self.end_headers()
    def log_message(self, format, *args):
        global logfile
        # Redirect BaseHTTPRequestHandler's stderr logging into the optional
        # logfile; messages are dropped when logging is disabled.
        if logfile:
            logfile.write(
                "%s - - [%s] %s\n" %
                (self.address_string(),
                 self.log_date_time_string(),
                 format%args))
def get_unique_counter_from_url(sp):
    """
    Return the unique counter encoded at the end of the URL (after a
    percent-encoded '#', i.e. '%23'), or None when no counter is present.
    """
    marker = sp.rfind('%23')
    if marker == -1:
        return None
    # Skip the three characters of the '%23' escape itself.
    return int(sp[marker + 3:])
def wnb(port=8000, runBrowser=True, logfilename=None):
    """
    Run NLTK Wordnet Browser Server.

    :param port: The port number for the server to listen on, defaults to
                 8000
    :type  port: int

    :param runBrowser: True to start a web browser and point it at the web
                       server.
    :type  runBrowser: bool

    :param logfilename: If given, append request log messages to this file;
                        otherwise messages are silently dropped.
    :type  logfilename: str
    """
    # The webbrowser module is unpredictable, typically it blocks if it uses
    # a console web browser, and doesn't block if it uses a GUI webbrowser,
    # so we need to force it to have a clear correct behaviour.
    #
    # Normally the server should run for as long as the user wants. they
    # should ideally be able to control this from the UI by closing the
    # window or tab.  Second best would be clicking a button to say
    # 'Shutdown' that first shuts down the server and closes the window or
    # tab, or exits the text-mode browser.  Both of these are unfeasible.
    #
    # The next best alternative is to start the server, have it close when
    # it receives SIGTERM (default), and run the browser as well.  The user
    # may have to shutdown both programs.
    #
    # Since webbrowser may block, and the webserver will block, we must run
    # them in separate threads.
    #
    global server_mode, logfile
    server_mode = not runBrowser
    # Setup logging.
    if logfilename:
        try:
            logfile = open(logfilename, "a", 1) # 1 means 'line buffering'
        except IOError as e:
            # BUG FIX: file.write() takes a single string argument; the old
            # code passed printf-style arguments and raised a TypeError
            # instead of reporting the real problem.
            sys.stderr.write("Couldn't open %s for writing: %s\n" %
                             (logfilename, e))
            sys.exit(1)
    else:
        logfile = None
    # Compute URL and start web browser
    url = 'http://localhost:' + str(port)
    server_ready = None
    browser_thread = None
    if runBrowser:
        server_ready = threading.Event()
        browser_thread = startBrowser(url, server_ready)
    # Start the server.
    server = HTTPServer(('', port), MyServerHandler)
    if logfile:
        logfile.write(
            'NLTK Wordnet browser server running serving: %s\n' % url)
    if runBrowser:
        # Unblock the browser thread now that the socket is bound.
        server_ready.set()
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    if runBrowser:
        browser_thread.join()
    if logfile:
        logfile.close()
def startBrowser(url, server_ready):
    # Open *url* in the system default browser on a worker thread, but only
    # after the *server_ready* threading.Event is set by wnb(); the started
    # Thread is returned so the caller can join() it on shutdown.
    def run():
        server_ready.wait()
        time.sleep(1) # Wait a little bit more, there's still the chance of
                      # a race condition.
        webbrowser.open(url, new = 2, autoraise = 1)
    t = threading.Thread(target=run)
    t.start()
    return t
#####################################################################
# Utilities
#####################################################################
"""
WordNet Browser Utilities.
This provides a backend to both wxbrowse and browserver.py.
"""
################################################################################
#
# Main logic for wordnet browser.
#
# This is wrapped inside a function since wn is only available if the
# WordNet corpus is installed.
def _pos_tuples():
    """Return (wordnet-pos, db-letter, display-name) triples for the four
    parts of speech the browser understands."""
    pos_tags = (wn.NOUN, wn.VERB, wn.ADJ, wn.ADV)
    letters = ('N', 'V', 'J', 'R')
    names = ('noun', 'verb', 'adj', 'adv')
    return list(zip(pos_tags, letters, names))
def _pos_match(pos_tuple):
    """
    Return the complete pos triple from _pos_tuples() matching the partial
    triple given: the first non-null component is used as the match key.
    Satellite adjectives ('s') are folded into plain adjectives ('a').
    """
    if pos_tuple[0] == 's':
        pos_tuple = ('a', pos_tuple[1], pos_tuple[2])
    # Index of the first non-null component; falls back to the last slot
    # when every component is None (mirrors the original loop behaviour,
    # in which case no candidate can match and None is returned).
    key = len(pos_tuple) - 1
    for position, component in enumerate(pos_tuple):
        if component is not None:
            key = position
            break
    for candidate in _pos_tuples():
        if candidate[key] == pos_tuple[key]:
            return candidate
    return None
# Numeric identifiers for synset/lemma relation types.  They are used as
# members of the sets stored in Reference.synset_relations to record which
# relation listings are currently expanded in the UI.
HYPONYM = 0
HYPERNYM = 1
CLASS_REGIONAL = 2
PART_HOLONYM = 3
PART_MERONYM = 4
ATTRIBUTE = 5
SUBSTANCE_HOLONYM = 6
SUBSTANCE_MERONYM = 7
MEMBER_HOLONYM = 8
MEMBER_MERONYM = 9
VERB_GROUP = 10
INSTANCE_HYPONYM = 12
INSTANCE_HYPERNYM = 13
CAUSE = 14
ALSO_SEE = 15
SIMILAR = 16
ENTAILMENT = 17
ANTONYM = 18
FRAMES = 19
PERTAINYM = 20
# NOTE(review): the next five assignments re-bind names defined above —
# CLASS_REGIONAL ends up 23 (not 2), CLASS_USAGE 24 (not 22) and
# CLASS_CATEGORY 11 (not 21).  The later value wins, so ids 2, 21 and 22
# are effectively unused; confirm whether the earlier assignments were
# meant to be removed.
CLASS_CATEGORY = 21
CLASS_USAGE = 22
CLASS_REGIONAL = 23
CLASS_USAGE = 24
CLASS_CATEGORY = 11
DERIVATIONALLY_RELATED_FORM = 25
INDIRECT_HYPERNYMS = 26
def lemma_property(word, synset, func):
    """
    Apply *func* to every lemma of *synset* whose name equals *word* and
    return the concatenation of the resulting lists.

    :param word: lemma name to select (already lower-cased by the caller)
    :type word: str
    :param synset: the synset whose lemmas are inspected
    :param func: callable mapping a lemma to a list of related objects
    :return: flat list of everything *func* produced for matching lemmas
    :rtype: list
    """
    def _flatten(chunks):
        # Recursively concatenate a list of lists into a single flat list.
        if chunks == []:
            return []
        return chunks[0] + _flatten(chunks[1:])
    # BUG FIX: this used the old attribute-style accessors
    # (``synset.lemmas`` / ``l.name``) while the rest of this module calls
    # the method-style API (``synset.lemmas()`` / ``l.name()``); the
    # attribute form fails against current NLTK WordNet objects.
    return _flatten([func(l) for l in synset.lemmas() if l.name() == word])
def rebuild_tree(orig_tree):
    """Convert a tree in nested-list form ``[node, child1, child2, ...]``
    into ``(node, [children])`` tuple form, recursively."""
    root, subtrees = orig_tree[0], orig_tree[1:]
    return (root, [rebuild_tree(subtree) for subtree in subtrees])
def get_relations_data(word, synset):
    """
    Get synset relations data for a synset.  Note that this doesn't
    yet support things such as full hyponym vs direct hyponym.

    :param word: the word whose lemma-level relations (antonyms,
        pertainyms, derivationally related forms) should be included
    :type word: str
    :param synset: the synset to collect relation data for
    :return: tuple of (relation-id, display-name, related-items) triples
    :raises TypeError: if the synset's part of speech is not handled
    """
    if synset.pos() == wn.NOUN:
        return ((HYPONYM, 'Hyponyms',
                   synset.hyponyms()),
                (INSTANCE_HYPONYM , 'Instance hyponyms',
                   synset.instance_hyponyms()),
                (HYPERNYM, 'Direct hypernyms',
                   synset.hypernyms()),
                (INDIRECT_HYPERNYMS, 'Indirect hypernyms',
                   rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1]),
#  hypernyms', 'Sister terms',
                (INSTANCE_HYPERNYM , 'Instance hypernyms',
                   synset.instance_hypernyms()),
#            (CLASS_REGIONAL, ['domain term region'], ),
                (PART_HOLONYM, 'Part holonyms',
                   synset.part_holonyms()),
                (PART_MERONYM, 'Part meronyms',
                   synset.part_meronyms()),
                (SUBSTANCE_HOLONYM, 'Substance holonyms',
                   synset.substance_holonyms()),
                (SUBSTANCE_MERONYM, 'Substance meronyms',
                   synset.substance_meronyms()),
                (MEMBER_HOLONYM, 'Member holonyms',
                   synset.member_holonyms()),
                (MEMBER_MERONYM, 'Member meronyms',
                   synset.member_meronyms()),
                (ATTRIBUTE, 'Attributes',
                   synset.attributes()),
                (ANTONYM, "Antonyms",
                   lemma_property(word, synset, lambda l: l.antonyms())),
                (DERIVATIONALLY_RELATED_FORM, "Derivationally related form",
                   lemma_property(word, synset, lambda l: l.derivationally_related_forms())))
    elif synset.pos() == wn.VERB:
        return ((ANTONYM, 'Antonym',
                   lemma_property(word, synset, lambda l: l.antonyms())),
                (HYPONYM, 'Hyponym',
                   synset.hyponyms()),
                (HYPERNYM, 'Direct hypernyms',
                   synset.hypernyms()),
                (INDIRECT_HYPERNYMS, 'Indirect hypernyms',
                   rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1]),
                (ENTAILMENT, 'Entailments',
                   synset.entailments()),
                (CAUSE, 'Causes',
                   synset.causes()),
                (ALSO_SEE, 'Also see',
                   synset.also_sees()),
                (VERB_GROUP, 'Verb Groups',
                   synset.verb_groups()),
                (DERIVATIONALLY_RELATED_FORM, "Derivationally related form",
                   lemma_property(word, synset, lambda l: l.derivationally_related_forms())))
    # BUG FIX: the second comparison used to read ``synset.pos ==
    # wn.ADJ_SAT`` which compared the bound method object itself and was
    # therefore always False, sending satellite-adjective synsets into the
    # TypeError branch below.
    elif synset.pos() == wn.ADJ or synset.pos() == wn.ADJ_SAT:
        return ((ANTONYM, 'Antonym',
                   lemma_property(word, synset, lambda l: l.antonyms())),
                (SIMILAR, 'Similar to',
                   synset.similar_tos()),
                # Participle of verb - not supported by corpus
                (PERTAINYM, 'Pertainyms',
                   lemma_property(word, synset, lambda l: l.pertainyms())),
                (ATTRIBUTE, 'Attributes',
                   synset.attributes()),
                (ALSO_SEE, 'Also see',
                   synset.also_sees()))
    elif synset.pos() == wn.ADV:
        # This is weird. adverbs such as 'quick' and 'fast' don't seem
        # to have antonyms returned by the corpus.a
        return ((ANTONYM, 'Antonym',
                   lemma_property(word, synset, lambda l: l.antonyms())),)
        # Derived from adjective - not supported by corpus
    else:
        raise TypeError("Unhandled synset POS type: " + str(synset.pos()))
# Skeleton wrapped around every page the browser serves; the single '%s'
# receives the current word for the <title> element.
html_header = '''
<!DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
'http://www.w3.org/TR/html4/strict.dtd'>
<html>
<head>
<meta name='generator' content=
'HTML Tidy for Windows (vers 14 February 2006), see www.w3.org'>
<meta http-equiv='Content-Type' content=
'text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: %s</title></head>
<body bgcolor='#F5F5F5' text='#000000'>
'''
# Closing tags matching html_header.
html_trailer = '''
</body>
</html>
'''
# Inline search-help text shown above the example output on the start page.
explanation  = '''
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.
</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
<hr width='100%'>
'''
# HTML oriented functions
def _bold(txt): return '<b>%s</b>' % txt
def _center(txt): return '<center>%s</center>' % txt
def _hlev(n,txt): return '<h%d>%s</h%d>' % (n,txt,n)
def _italic(txt): return '<i>%s</i>' % txt
def _li(txt): return '<li>%s</li>' % txt
def pg(word, body):
    '''
    Return a HTML page of NLTK Browser format constructed from the
    word and body

    :param word: The word that the body corresponds to
    :type word: str
    :param body: The HTML body corresponding to the word
    :type body: str
    :return: a HTML page for the word-body combination
    :rtype: str
    '''
    header = html_header % word
    return header + body + html_trailer
def _ul(txt):
    """Wrap *txt* in an unordered-list element."""
    return '<ul>' + txt + '</ul>'
def _abbc(txt):
    """
    abbc = asterisks, breaks, bold, center
    """
    decorated = '<br>' * 10 + '*' * 10 + ' ' + txt + ' ' + '*' * 10
    return _center(_bold(decorated))
# Canned list item displayed when a hyponym listing has been truncated.
full_hyponym_cont_text = \
    _ul(_li(_italic('(has full hyponym continuation)'))) + '\n'
def _get_synset(synset_key):
    """
    Look up a synset by its unique key.

    The synset key is the unique name of the synset; this can be
    retrieved via synset.name()
    """
    return wn.synset(synset_key)
def _collect_one_synset(word, synset, synset_relations):
    '''
    Returns the HTML string for one synset or word

    :param word: the current word
    :type word: str
    :param synset: a synset
    :type synset: synset
    :param synset_relations: information about which synset relations
    to display.
    :type synset_relations: dict(synset_key, set(relation_id))
    :return: The HTML string built for this synset
    :rtype: str
    '''
    if isinstance(synset, tuple): # It's a word
        raise NotImplementedError("word not supported by _collect_one_synset")
    typ = 'S'
    pos_tuple = _pos_match((synset.pos(), None, None))
    assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos()
    descr = pos_tuple[2]
    # Deep-copy before toggling so the caller's synset_relations mapping is
    # not mutated; the toggled copy becomes the "S:" expand/collapse link.
    ref = copy.deepcopy(Reference(word, synset_relations))
    ref.toggle_synset(synset)
    synset_label = typ + ";"
    if synset.name() in synset_relations:
        # Highlight the S: link when this synset is currently expanded.
        synset_label = _bold(synset_label)
    s = '<li>%s (%s) ' % (make_lookup_link(ref, synset_label), descr)
    def format_lemma(w):
        # The searched word itself is bolded; every other lemma becomes a
        # hyperlink that triggers a new search for that lemma.
        w = w.replace('_', ' ')
        if w.lower() == word:
            return _bold(w)
        else:
            ref = Reference(w)
            return make_lookup_link(ref, w)
    s += ', '.join(format_lemma(l.name()) for l in synset.lemmas())
    # Gloss: the definition plus quoted usage examples, followed by any
    # currently-expanded relation listings.
    gl = " (%s) <i>%s</i> " % \
        (synset.definition(),
         "; ".join("\"%s\"" % e for e in synset.examples()))
    return s + gl + _synset_relations(word, synset, synset_relations) + '</li>\n'
def _collect_all_synsets(word, pos, synset_relations=None):
    """
    Return a HTML unordered list of synsets for the given word and
    part of speech.

    :param word: the word to look up
    :type word: str
    :param pos: wordnet part-of-speech tag restricting the lookup
    :param synset_relations: mapping of synset name -> set of relation ids
        that should be rendered expanded; defaults to no expansions.
    """
    # BUG FIX: the default used to be the literal ``dict()``, a single
    # mutable object shared by every call that omitted the argument.
    if synset_relations is None:
        synset_relations = {}
    return '<ul>%s\n</ul>\n' % \
        ''.join((_collect_one_synset(word, synset, synset_relations)
                 for synset
                 in wn.synsets(word, pos)))
def _synset_relations(word, synset, synset_relations):
    '''
    Builds the HTML string for the relations of a synset

    :param word: The current word
    :type word: str
    :param synset: The synset for which we're building the relations.
    :type synset: Synset
    :param synset_relations: synset keys and relation types for which to display relations.
    :type synset_relations: dict(synset_key, set(relation_type))
    :return: The HTML for a synset's relations
    :rtype: str
    '''
    if not synset.name() in synset_relations:
        return ""
    ref = Reference(word, synset_relations)
    def relation_html(r):
        # Render one related object: synsets become lookup links, lemmas
        # delegate to their synset, and (synset, children) tuples render as
        # a nested list (used for the indirect-hypernym tree).
        if isinstance(r, Synset):
            return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0])
        elif isinstance(r, Lemma):
            return relation_html(r.synset())
        elif isinstance(r, tuple):
            # It's probably a tuple containing a Synset and a list of
            # similar tuples.  This forms a tree of synsets.
            return "%s\n<ul>%s</ul>\n" % \
                (relation_html(r[0]),
                 ''.join('<li>%s</li>\n' % relation_html(sr) for sr in r[1]))
        else:
            raise TypeError("r must be a synset, lemma or list, it was: type(r) = %s, r = %s" % (type(r), r))
    def make_synset_html(db_name, disp_name, rels):
        # The relation name is itself a toggle link; BUG FIX: the Reference
        # used to be passed pre-``.encode()``d, so make_lookup_link then
        # called ``.encode()`` on a str, which on Python 3 yields bytes and
        # produced malformed "lookup_b'...'" hrefs.  Passing the Reference
        # object lets make_lookup_link encode it exactly once.
        synset_html = '<i>%s</i>\n' % \
            make_lookup_link(
                copy.deepcopy(ref).toggle_synset_relation(synset, db_name),
                disp_name)
        if db_name in ref.synset_relations[synset.name()]:
            # This relation is currently expanded: render its members too.
            synset_html += '<ul>%s</ul>\n' % \
                ''.join("<li>%s</li>\n" % relation_html(r) for r in rels)
        return synset_html
    html = '<ul>' + \
        '\n'.join(("<li>%s</li>" % make_synset_html(*rel_data) for rel_data
                   in get_relations_data(word, synset)
                   if rel_data[2] != [])) + \
        '</ul>'
    return html
class Reference(object):
    """
    A reference to a page that may be generated by page_word
    """
    def __init__(self, word, synset_relations=None):
        """
        Build a reference to a new page.

        word is the word or words (separated by commas) for which to
        search for synsets of

        synset_relations is a dictionary of synset keys to sets of
        synset relation identifiers to unfold a list of synset
        relations for.  Defaults to a fresh empty mapping.
        """
        self.word = word
        # BUG FIX: the default used to be the literal ``dict()`` evaluated
        # once at function-definition time, so every Reference built
        # without an explicit mapping shared — and, via the toggle_*
        # methods, mutated — one and the same dictionary.
        self.synset_relations = {} if synset_relations is None else synset_relations
    def encode(self):
        """
        Encode this reference into a string to be used in a URL.
        """
        # This uses a tuple rather than an object since the python
        # pickle representation is much smaller and there is no need
        # to represent the complete object.
        string = pickle.dumps((self.word, self.synset_relations), -1)
        return base64.urlsafe_b64encode(string).decode()
    @staticmethod
    def decode(string):
        """
        Decode a reference encoded with Reference.encode
        """
        string = base64.urlsafe_b64decode(string.encode())
        word, synset_relations = pickle.loads(string)
        return Reference(word, synset_relations)
    def toggle_synset_relation(self, synset, relation):
        """
        Toggle the display of the relations for the given synset and
        relation type.

        This function will throw a KeyError if the synset is currently
        not being displayed.
        """
        if relation in self.synset_relations[synset.name()]:
            self.synset_relations[synset.name()].remove(relation)
        else:
            self.synset_relations[synset.name()].add(relation)
        return self
    def toggle_synset(self, synset):
        """
        Toggle displaying of the relation types for the given synset
        """
        if synset.name() in self.synset_relations:
            del self.synset_relations[synset.name()]
        else:
            self.synset_relations[synset.name()] = set()
        return self
def make_lookup_link(ref, label):
    """Return an <a> element whose href carries *ref* (a Reference)
    encoded for the 'lookup_' route, displaying *label*."""
    return '<a href="lookup_{0}">{1}</a>'.format(ref.encode(), label)
def page_from_word(word):
    """
    Return a HTML page for the given word.

    :param word: The currently active word
    :type word: str
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    """
    ref = Reference(word)
    return page_from_reference(ref)
def page_from_href(href):
    '''
    Returns a tuple of the HTML page built and the new current word

    :param href: The hypertext reference to be solved
    :type href: str
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    '''
    ref = Reference.decode(href)
    return page_from_reference(ref)
def page_from_reference(href):
    '''
    Returns a tuple of the HTML page built and the new current word

    :param href: the Reference to build the page for (despite the name
        this is a Reference object, not a URL fragment)
    :type href: Reference
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    '''
    word = href.word
    pos_forms = defaultdict(list)
    # Normalise the comma-separated search terms: lower-case, underscores
    # for spaces (wordnet's MWE convention), empty entries dropped.
    words = word.split(',')
    words = [w for w in [w.strip().lower().replace(' ', '_')
                         for w in words]
             if w != ""]
    if len(words) == 0:
        # No words were found.
        # BUG FIX: the message used to be returned in the *word* slot with
        # an empty page, so the browser displayed a blank page; return it
        # as the page body instead.
        return "Please specify a word to search for.", ""
    # This looks up multiple words at once.  This is probably not
    # necessary and may lead to problems.
    for w in words:
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)
    body = ''
    for pos,pos_str,name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + '\n'
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        # BUG FIX: corrected "where ... dictonary" typos in this
        # user-facing message.
        body = "The word or words '%s' were not found in the dictionary." % word
    return body, word
#####################################################################
# Static pages
#####################################################################
def get_static_page_by_path(path):
    """
    Return a static HTML page from the path given.

    Unknown paths yield an internal-error message rather than raising.
    """
    # Dispatch table from path to a zero-argument page producer; lambdas
    # defer name resolution until the page is actually requested.
    producers = {
        "index_2.html": lambda: get_static_index_page(False),
        "index.html": lambda: get_static_index_page(True),
        "NLTK Wordnet Browser Database Info.html":
            lambda: "Display of Wordnet Database Statistics is not supported",
        "upper_2.html": lambda: get_static_upper_page(False),
        "upper.html": lambda: get_static_upper_page(True),
        "web_help.html": lambda: get_static_web_help_page(),
        "wx_help.html": lambda: get_static_wx_help_page(),
    }
    try:
        producer = producers[path]
    except KeyError:
        return "Internal error: Path for static page '%s' is unknown" % path
    return producer()
def get_static_web_help_page():
    """
    Return the static web help page.
    """
    # One canned HTML document; the "M..." hrefs inside are example
    # word-lookup links understood by the Javascript client.
    return \
"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2017 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv='Content-Type' content='text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: * Help *</title>
</head>
<body bgcolor='#F5F5F5' text='#000000'>
<h2>NLTK Wordnet Browser Help</h2>
<p>The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database.
<p><b>You are using the Javascript client part of the NLTK Wordnet BrowseServer.</b> We assume your browser is in tab sheets enabled mode.</p>
<p>For background information on Wordnet, see the Wordnet project home page: <a href="http://wordnet.princeton.edu/"><b> http://wordnet.princeton.edu/</b></a>. For more information on the NLTK project, see the project home:
<a href="http://nltk.sourceforge.net/"><b>http://nltk.sourceforge.net/</b></a>. To get an idea of what the Wordnet version used by this browser includes choose <b>Show Database Info</b> from the <b>View</b> submenu.</p>
<h3>Word search</h3>
<p>The word to be searched is typed into the <b>New Word</b> field and the search started with Enter or by clicking the <b>Search</b> button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.</p>
<p>In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing <b>fLIeS</b> as an obscure example gives one <a href="MfLIeS">this</a>. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination.</p>
<p>The result of a search is a display of one or more
<b>synsets</b> for every part of speech in which a form of the
search word was found to occur. A synset is a set of words
having the same sense or meaning. Each word in a synset that is
underlined is a hyperlink which can be clicked to trigger an
automatic search for that word.</p>
<p>Every synset has a hyperlink <b>S:</b> at the start of its
display line. Clicking that symbol shows you the name of every
<b>relation</b> that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.</p>
<p>It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this <a href="Mcheer up,clear up">cheer up,clear up</a>, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination. As you could see the search result includes the synsets found in the same order than the forms were given in the search field.</p>
<p>
There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink <b>W:</b> at their beginning. Clicking this link shows more info on the word in question.</p>
<h3>The Buttons</h3>
<p>The <b>Search</b> and <b>Help</b> buttons need no more explanation. </p>
<p>The <b>Show Database Info</b> button shows a collection of Wordnet database statistics.</p>
<p>The <b>Shutdown the Server</b> button is shown for the first client of the BrowServer program i.e. for the client that is automatically launched when the BrowServer is started but not for the succeeding clients in order to protect the server from accidental shutdowns.
</p></body>
</html>
"""
def get_static_welcome_message():
    """
    Get the static welcome page.
    """
    # Search-help fragment shown in the body frame before the first search;
    # mirrors the module-level ``explanation`` text.
    return \
"""
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Next Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
"""
def get_static_index_page(with_shutdown):
    """
    Build the static frameset index page.

    :param with_shutdown: when True, the upper frame variant carrying the
        server-shutdown link is referenced.
    :type with_shutdown: bool
    :return: a complete HTML frameset document
    :rtype: str
    """
    template = \
"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<HTML>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2017 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<HEAD>
<TITLE>NLTK Wordnet Browser</TITLE>
</HEAD>
<frameset rows="7%%,93%%">
<frame src="%s" name="header">
<frame src="start_page" name="body">
</frameset>
</HTML>
"""
    upper_link = "upper.html" if with_shutdown else "upper_2.html"
    return template % upper_link
def get_static_upper_page(with_shutdown):
    """
    Return the upper frame page,

    If with_shutdown is True then a 'shutdown' button is also provided
    to shutdown the server; otherwise the link slot is left empty.
    """
    template = \
"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2017 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>Untitled Document</title>
</head>
<body>
<form method="GET" action="search" target="body">
Current Word:&nbsp;<input type="text" id="currentWord" size="10" disabled>
Next Word:&nbsp;<input type="text" id="nextWord" name="nextWord" size="10">
<input name="searchButton" type="submit" value="Search">
</form>
<a target="body" href="web_help.html">Help</a>
%s
</body>
</html>
"""
    shutdown_link = ("<a href=\"SHUTDOWN THE SERVER\">Shutdown</a>"
                     if with_shutdown else "")
    return template % shutdown_link
def usage():
    """
    Display the command line help message.
    """
    # The module docstring doubles as the command-line help text.
    print(__doc__)
def app():
    """Parse command-line options and launch the WordNet browser server."""
    opts, _ = getopt.getopt(argv[1:], "l:p:sh",
                            ["logfile=", "port=", "server-mode", "help"])
    # Defaults mirror the documented behaviour: port 8000, browser mode.
    port = 8000
    server_mode = False
    help_mode = False
    logfilename = None
    for opt, value in opts:
        if opt in ("-l", "--logfile"):
            logfilename = str(value)
        elif opt in ("-p", "--port"):
            port = int(value)
        elif opt in ("-s", "--server-mode"):
            server_mode = True
        elif opt in ("-h", "--help"):
            help_mode = True
    if help_mode:
        usage()
    else:
        wnb(port, not server_mode, logfilename)
# Script entry point: run the browser application when invoked directly.
if __name__ == '__main__':
    app()
# Public API of this module.
__all__ = ['app']
| mit |
yongshengwang/hue | desktop/core/ext-py/markdown/regression-tests.py | 44 | 8693 | #!/usr/bin/python
"""
Python-Markdown Regression Tests
================================
Tests of the various APIs with the python markdown lib.
"""
import unittest
from doctest import DocTestSuite
import os
import markdown
class TestMarkdown(unittest.TestCase):
    """ Tests basics of the Markdown class. """
    def setUp(self):
        """ Create instance of Markdown. """
        self.md = markdown.Markdown()
    def testBlankInput(self):
        """ Test blank input. """
        # Empty input must produce empty output, not an empty paragraph.
        self.assertEqual(self.md.convert(''), '')
    def testWhitespaceOnly(self):
        """ Test input of only whitespace. """
        self.assertEqual(self.md.convert(' '), '')
    def testSimpleInput(self):
        """ Test simple input. """
        # Bare text becomes a single paragraph element.
        self.assertEqual(self.md.convert('foo'), '<p>foo</p>')
class TestBlockParser(unittest.TestCase):
    """ Tests of the BlockParser class. """
    def setUp(self):
        """ Create instance of BlockParser. """
        # The parser under test hangs off a throwaway Markdown instance.
        self.parser = markdown.Markdown().parser
    def testParseChunk(self):
        """ Test BlockParser.parseChunk. """
        root = markdown.etree.Element("div")
        text = 'foo'
        self.parser.parseChunk(root, text)
        self.assertEqual(markdown.etree.tostring(root), "<div><p>foo</p></div>")
    def testParseDocument(self):
        """ Test BlockParser.parseDocument. """
        # Exercises a heading, a paragraph and an indented code block.
        lines = ['#foo', '', 'bar', '', ' baz']
        tree = self.parser.parseDocument(lines)
        self.assert_(isinstance(tree, markdown.etree.ElementTree))
        self.assert_(markdown.etree.iselement(tree.getroot()))
        self.assertEqual(markdown.etree.tostring(tree.getroot()),
            "<div><h1>foo</h1><p>bar</p><pre><code>baz\n</code></pre></div>")
class TestBlockParserState(unittest.TestCase):
    """ Tests of the State class for BlockParser. """
    def setUp(self):
        # A State is a list used as a stack of parser-state names.
        self.state = markdown.blockparser.State()
    def testBlankState(self):
        """ Test State when empty. """
        self.assertEqual(self.state, [])
    def testSetState(self):
        """ Test State.set(). """
        # BUG FIX: renamed from the misspelled ``testSetSate``; unittest
        # still discovers it through the ``test`` name prefix.
        self.state.set('a_state')
        self.assertEqual(self.state, ['a_state'])
        self.state.set('state2')
        self.assertEqual(self.state, ['a_state', 'state2'])
    def testIsState(self):
        """ Test State.isstate(). """
        # BUG FIX: renamed from the misspelled ``testIsSate``.
        self.assertEqual(self.state.isstate('anything'), False)
        self.state.set('a_state')
        self.assertEqual(self.state.isstate('a_state'), True)
        self.state.set('state2')
        # Only the top of the stack counts as the current state.
        self.assertEqual(self.state.isstate('state2'), True)
        self.assertEqual(self.state.isstate('a_state'), False)
        self.assertEqual(self.state.isstate('missing'), False)
    def testReset(self):
        """ Test State.reset(). """
        self.state.set('a_state')
        self.state.reset()
        self.assertEqual(self.state, [])
        self.state.set('state1')
        self.state.set('state2')
        # reset() pops a single state, it does not clear the stack.
        self.state.reset()
        self.assertEqual(self.state, ['state1'])
class TestHtmlStash(unittest.TestCase):
    """ Test Markdown's HtmlStash. """
    def setUp(self):
        self.stash = markdown.preprocessors.HtmlStash()
        # Pre-store one block so every test starts with counter == 1.
        self.placeholder = self.stash.store('foo')
    def testSimpleStore(self):
        """ Test HtmlStash.store. """
        self.assertEqual(self.placeholder,
                         markdown.preprocessors.HTML_PLACEHOLDER % 0)
        self.assertEqual(self.stash.html_counter, 1)
        # Blocks are stored as (html, safe) pairs; default is unsafe.
        self.assertEqual(self.stash.rawHtmlBlocks, [('foo', False)])
    def testStoreMore(self):
        """ Test HtmlStash.store with additional blocks. """
        placeholder = self.stash.store('bar')
        self.assertEqual(placeholder,
                         markdown.preprocessors.HTML_PLACEHOLDER % 1)
        self.assertEqual(self.stash.html_counter, 2)
        self.assertEqual(self.stash.rawHtmlBlocks,
                         [('foo', False), ('bar', False)])
    def testSafeStore(self):
        """ Test HtmlStash.store with 'safe' html. """
        self.stash.store('bar', True)
        self.assertEqual(self.stash.rawHtmlBlocks,
                         [('foo', False), ('bar', True)])
    def testReset(self):
        """ Test HtmlStash.reset. """
        # reset() must clear both the counter and the stored blocks.
        self.stash.reset()
        self.assertEqual(self.stash.html_counter, 0)
        self.assertEqual(self.stash.rawHtmlBlocks, [])
class TestOrderedDict(unittest.TestCase):
    """ Test OrderedDict storage class. """

    def setUp(self):
        # 'second' is deliberately missing; several tests insert it to
        # exercise the '<key'/'>key' relative-location syntax of add().
        self.odict = markdown.odict.OrderedDict()
        self.odict['first'] = 'This'
        self.odict['third'] = 'a'
        self.odict['fourth'] = 'self'
        self.odict['fifth'] = 'test'

    def testValues(self):
        """ Test output of OrderedDict.values(). """
        self.assertEqual(self.odict.values(), ['This', 'a', 'self', 'test'])

    def testKeys(self):
        """ Test output of OrderedDict.keys(). """
        self.assertEqual(self.odict.keys(),
                         ['first', 'third', 'fourth', 'fifth'])

    def testItems(self):
        """ Test output of OrderedDict.items(). """
        self.assertEqual(self.odict.items(),
                         [('first', 'This'), ('third', 'a'),
                          ('fourth', 'self'), ('fifth', 'test')])

    def testAddBefore(self):
        """ Test adding an OrderedDict item before a given key. """
        self.odict.add('second', 'is', '<third')
        self.assertEqual(self.odict.items(),
                         [('first', 'This'), ('second', 'is'), ('third', 'a'),
                          ('fourth', 'self'), ('fifth', 'test')])

    def testAddAfter(self):
        """ Test adding an OrderedDict item after a given key. """
        self.odict.add('second', 'is', '>first')
        self.assertEqual(self.odict.items(),
                         [('first', 'This'), ('second', 'is'), ('third', 'a'),
                          ('fourth', 'self'), ('fifth', 'test')])

    def testAddAfterEnd(self):
        """ Test adding an OrderedDict item after the last key. """
        self.odict.add('sixth', '.', '>fifth')
        self.assertEqual(self.odict.items(),
                         [('first', 'This'), ('third', 'a'),
                          ('fourth', 'self'), ('fifth', 'test'), ('sixth', '.')])

    def testAdd_begin(self):
        """ Test adding an OrderedDict item using "_begin". """
        self.odict.add('zero', 'CRAZY', '_begin')
        self.assertEqual(self.odict.items(),
                         [('zero', 'CRAZY'), ('first', 'This'), ('third', 'a'),
                          ('fourth', 'self'), ('fifth', 'test')])

    def testAdd_end(self):
        """ Test adding an OrderedDict item using "_end". """
        self.odict.add('sixth', '.', '_end')
        self.assertEqual(self.odict.items(),
                         [('first', 'This'), ('third', 'a'),
                          ('fourth', 'self'), ('fifth', 'test'), ('sixth', '.')])

    def testAddBadLocation(self):
        """ Test Error on bad location in OrderedDict.add(). """
        # '<seventh' names a key that does not exist; 'third' lacks the
        # required '<'/'>' prefix. Both must raise ValueError.
        self.assertRaises(ValueError, self.odict.add, 'sixth', '.', '<seventh')
        self.assertRaises(ValueError, self.odict.add, 'second', 'is', 'third')

    def testDeleteItem(self):
        """ Test deletion of an OrderedDict item. """
        del self.odict['fourth']
        self.assertEqual(self.odict.items(),
                         [('first', 'This'), ('third', 'a'), ('fifth', 'test')])

    def testChangeValue(self):
        """ Test OrderedDict change value. """
        # Re-assigning an existing key changes the value but keeps position.
        self.odict['fourth'] = 'CRAZY'
        self.assertEqual(self.odict.items(),
                         [('first', 'This'), ('third', 'a'),
                          ('fourth', 'CRAZY'), ('fifth', 'test')])

    def testChangeOrder(self):
        """ Test OrderedDict change order. """
        self.odict.link('fourth', '<third')
        self.assertEqual(self.odict.items(),
                         [('first', 'This'), ('fourth', 'self'),
                          ('third', 'a'), ('fifth', 'test')])
def suite():
    """ Build a test suite of the above tests and extension doctests. """
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestMarkdown))
    suite.addTest(unittest.makeSuite(TestBlockParser))
    suite.addTest(unittest.makeSuite(TestBlockParserState))
    suite.addTest(unittest.makeSuite(TestHtmlStash))
    suite.addTest(unittest.makeSuite(TestOrderedDict))
    for filename in os.listdir('markdown/extensions'):
        if filename.endswith('.py'):
            module = 'markdown.extensions.%s' % filename[:-3]
            try:
                suite.addTest(DocTestSuite(module))
            except ValueError:
                # DocTestSuite raises ValueError when the module contains
                # no doctests; that is expected for most extensions.
                # (The original `except: ValueError` was a bare except whose
                # body was a no-op expression: it silently swallowed *every*
                # error, including import failures in extensions.)
                pass
    return suite


if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
| apache-2.0 |
nordri/check_domains | lib/python2.7/site-packages/django/db/backends/mysql/base.py | 5 | 23626 | """
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions, Thing2Literal
from MySQLdb.constants import FIELD_TYPE, CLIENT
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.db import utils
from django.db.backends import (utils as backend_utils, BaseDatabaseFeatures,
BaseDatabaseOperations, BaseDatabaseWrapper)
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str, force_text
from django.db.backends.mysql.schema import DatabaseSchemaEditor
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
# Raise exceptions for database warnings if DEBUG is on
if settings.DEBUG:
    warnings.filterwarnings("error", category=Database.Warning)

# Re-export the driver's exception classes under the names Django's database
# layer expects to find on every backend module.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError

# It's impossible to import datetime_or_None directly from MySQLdb.times,
# so grab the converter the driver registered for DATETIME columns instead.
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
    """Decode a MySQL DATETIME value, attaching UTC when USE_TZ is on."""
    parsed = parse_datetime(value)
    if parsed is None:
        return None
    # MySQLdb hands back naive datetimes; Django stores datetimes as UTC
    # when time-zone support is enabled, so label the value accordingly.
    # Confirm that it is naive before overwriting its tzinfo.
    if settings.USE_TZ and timezone.is_naive(parsed):
        parsed = parsed.replace(tzinfo=timezone.utc)
    return parsed
def adapt_datetime_with_timezone_support(value, conv):
    """Encode a datetime for MySQL; converts aware values to naive UTC.

    Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
    """
    if settings.USE_TZ:
        if timezone.is_naive(value):
            # Raw SQL callers should pass aware datetimes when USE_TZ is on;
            # warn, then interpret the value in the default time zone.
            warnings.warn("MySQL received a naive datetime (%s)"
                          " while time zone support is active." % value,
                          RuntimeWarning)
            value = timezone.make_aware(value, timezone.get_default_timezone())
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
    FIELD_TYPE.TIME: backend_utils.typecast_time,
    FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
    FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
    # Read path: decode DATETIME columns with time-zone awareness.
    FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
    # Write path (raw SQL only): encode datetimes as naive UTC strings.
    datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    A thin wrapper around MySQLdb's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.

    Implemented as a wrapper, rather than a subclass, so that we aren't stuck
    to the particular underlying representation returned by Connection.cursor().
    """
    # MySQL error codes reported by the driver as OperationalError that are
    # really integrity violations (1048: "Column cannot be null").
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        # The wrapped MySQLdb cursor; everything not overridden below is
        # delegated to it via __getattr__.
        self.cursor = cursor

    def execute(self, query, args=None):
        try:
            # args is None means no string interpolation
            return self.cursor.execute(query, args)
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                # six.reraise preserves the original traceback on both
                # Python 2 and 3.
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise

    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails; fall through to
        # the underlying cursor for everything not defined on the wrapper.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Ticket #17671 - Close instead of passing thru to avoid backend
        # specific behavior.
        self.close()
class DatabaseFeatures(BaseDatabaseFeatures):
    """Capability flags describing what the MySQL backend supports."""
    empty_fetchmany_value = ()
    update_can_self_select = False
    allows_group_by_pk = True
    related_fields_match_type = True
    allow_sliced_subqueries = False
    has_bulk_insert = True
    has_select_for_update = True
    has_select_for_update_nowait = False
    supports_forward_references = False
    supports_long_model_names = False
    # XXX MySQL DB-API drivers currently fail on binary data on Python 3.
    supports_binary_field = six.PY2
    supports_microsecond_precision = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    can_introspect_binary_field = False
    can_introspect_boolean_field = False
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    allows_auto_pk_0 = False
    uses_savepoints = True
    atomic_transactions = False
    supports_check_constraints = False

    def __init__(self, connection):
        # No MySQL-specific initialization; kept for parity with other
        # backends' feature classes.
        super(DatabaseFeatures, self).__init__(connection)

    @cached_property
    def _mysql_storage_engine(self):
        "Internal method used in Django tests. Don't rely on this from your code"
        # Creates (and drops) a throwaway table to discover the server's
        # default storage engine; requires an open connection.
        with self.connection.cursor() as cursor:
            cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
            # This command is MySQL specific; the second column
            # will tell you the default table type of the created
            # table. Since all Django's test tables will have the same
            # table type, that's enough to evaluate the feature.
            cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'")
            result = cursor.fetchone()
            cursor.execute('DROP TABLE INTROSPECT_TEST')
        return result[1]

    @cached_property
    def can_introspect_foreign_keys(self):
        "Confirm support for introspected foreign keys"
        # MyISAM tables silently drop FK constraints, so there is nothing
        # to introspect on that engine.
        return self._mysql_storage_engine != 'MyISAM'

    @cached_property
    def has_zoneinfo_database(self):
        # MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects
        # abbreviations (eg. EAT). When pytz isn't installed and the current
        # time zone is LocalTimezone (the only sensible value in this
        # context), the current time zone name will be an abbreviation. As a
        # consequence, MySQL cannot perform time zone conversions reliably.
        if pytz is None:
            return False

        # Test if the time zone definitions are installed.
        with self.connection.cursor() as cursor:
            cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
            return cursor.fetchone() is not None
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific SQL generation and value adaptation."""
    compiler_module = "django.db.backends.mysql.compiler"

    # MySQL stores positive fields as UNSIGNED ints. The bounds must match
    # the column types the creation backend emits:
    #   PositiveSmallIntegerField -> smallint UNSIGNED (0 .. 2**16 - 1)
    #   PositiveIntegerField      -> integer UNSIGNED  (0 .. 2**32 - 1)
    # The previous bounds (4294967295 / 18446744073709551615) were one column
    # size too wide, so validation accepted values the database rejects.
    integer_field_ranges = dict(BaseDatabaseOperations.integer_field_ranges,
        PositiveSmallIntegerField=(0, 65535),
        PositiveIntegerField=(0, 4294967295),
    )

    def date_extract_sql(self, lookup_type, field_name):
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        """Truncate a date column to `lookup_type` precision via DATE_FORMAT."""
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: pass the column through untruncated.
            sql = field_name
        else:
            # Keep the real value up to the requested field; pad the rest
            # with the epoch-style defaults.
            format_str = ''.join(format[:i] + format_def[i:])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        if settings.USE_TZ:
            # Stored values are UTC; convert to the active time zone first.
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            sql = "DAYOFWEEK(%s)" % field_name
        else:
            sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
        return sql, params

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            sql = field_name
        else:
            format_str = ''.join(format[:i] + format_def[i:])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql, params

    def date_interval_sql(self, sql, connector, timedelta):
        return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (
            sql, connector,
            timedelta.days, timedelta.seconds, timedelta.microseconds)

    def drop_foreignkey_sql(self):
        return "DROP FOREIGN KEY"

    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return ["NULL"]

    def fulltext_search_sql(self, field_name):
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name

    def last_executed_query(self, cursor, sql, params):
        # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
        # attribute where the exact query sent to the database is saved.
        # See MySQLdb/cursors.py in the source distribution.
        return force_text(getattr(cursor, '_last_executed', None), errors='replace')

    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615

    def quote_name(self, name):
        if name.startswith("`") and name.endswith("`"):
            return name  # Quoting once is enough.
        return "`%s`" % name

    def random_function_sql(self):
        return 'RAND()'

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            # Disable FK checks so tables can be truncated in any order.
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def sequence_reset_by_name_sql(self, style, sequences):
        # Truncate already resets the AUTO_INCREMENT field from
        # MySQL version 5.0.13 onwards. Refs #16961.
        if self.connection.mysql_version < (5, 0, 13):
            return [
                "%s %s %s %s %s;" % (
                    style.SQL_KEYWORD('ALTER'),
                    style.SQL_KEYWORD('TABLE'),
                    style.SQL_TABLE(self.quote_name(sequence['table'])),
                    style.SQL_KEYWORD('AUTO_INCREMENT'),
                    style.SQL_FIELD('= 1'),
                ) for sequence in sequences
            ]
        else:
            return []

    def validate_autopk_value(self, value):
        # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
        if value == 0:
            raise ValueError('The database backend does not accept 0 as a '
                             'value for AutoField.')
        return value

    def value_to_db_datetime(self, value):
        if value is None:
            return None

        # MySQL doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = value.astimezone(timezone.utc).replace(tzinfo=None)
            else:
                raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")

        # MySQL doesn't support microseconds
        return six.text_type(value.replace(microsecond=0))

    def value_to_db_time(self, value):
        if value is None:
            return None

        # MySQL doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")

        # MySQL doesn't support microseconds
        return six.text_type(value.replace(microsecond=0))

    def year_lookup_bounds_for_datetime_field(self, value):
        # Again, no microseconds
        first, second = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
        return [first.replace(microsecond=0), second.replace(microsecond=0)]

    def max_name_length(self):
        # MySQL limits identifiers to 64 characters.
        return 64

    def bulk_insert_sql(self, fields, num_values):
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)

    def combine_expression(self, connector, sub_expressions):
        """
        MySQL requires special cases for ^ operators in query expressions
        """
        # '^' is bitwise XOR in MySQL, so exponentiation must use POW().
        if connector == '^':
            return 'POW(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for MySQL, built on the MySQLdb driver."""
    vendor = 'mysql'
    # SQL fragments for each Django lookup type. BINARY forces
    # case-sensitive matching on MySQL's case-insensitive collations.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    # Expose the DB-API module on the wrapper (standard backend attribute).
    Database = Database

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)

        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation(self)

    def get_connection_params(self):
        # Translate Django's settings dict into MySQLdb.connect() kwargs.
        kwargs = {
            'conv': django_conversions,
            'charset': 'utf8',
        }
        if six.PY2:
            kwargs['use_unicode'] = True
        settings_dict = self.settings_dict
        if settings_dict['USER']:
            kwargs['user'] = settings_dict['USER']
        if settings_dict['NAME']:
            kwargs['db'] = settings_dict['NAME']
        if settings_dict['PASSWORD']:
            kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
        if settings_dict['HOST'].startswith('/'):
            # A leading slash means a local UNIX socket path, not a hostname.
            kwargs['unix_socket'] = settings_dict['HOST']
        elif settings_dict['HOST']:
            kwargs['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            kwargs['port'] = int(settings_dict['PORT'])
        # We need the number of potentially affected rows after an
        # "UPDATE", not the number of changed rows.
        kwargs['client_flag'] = CLIENT.FOUND_ROWS
        # OPTIONS may override anything set above.
        kwargs.update(settings_dict['OPTIONS'])
        return kwargs

    def get_new_connection(self, conn_params):
        conn = Database.connect(**conn_params)
        # Encode Safe* strings like their plain counterparts; MySQLdb's type
        # checking is too strict to handle them otherwise (ticket #6052).
        conn.encoders[SafeText] = conn.encoders[six.text_type]
        conn.encoders[SafeBytes] = conn.encoders[bytes]
        return conn

    def init_connection_state(self):
        with self.cursor() as cursor:
            # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
            # on a recently-inserted row will return when the field is tested for
            # NULL. Disabling this value brings this aspect of MySQL in line with
            # SQL standards.
            cursor.execute('SET SQL_AUTO_IS_NULL = 0')

    def create_cursor(self):
        cursor = self.connection.cursor()
        # Wrap to remap driver OperationalErrors to IntegrityError.
        return CursorWrapper(cursor)

    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # Rolling back without a transaction is a no-op on MySQL.
            pass

    def _set_autocommit(self, autocommit):
        with self.wrap_database_errors:
            self.connection.autocommit(autocommit)

    def disable_constraint_checking(self):
        """
        Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
        to indicate constraint checks need to be re-enabled.
        """
        self.cursor().execute('SET foreign_key_checks=0')
        return True

    def enable_constraint_checking(self):
        """
        Re-enable foreign key checks after they have been disabled.
        """
        # Override needs_rollback in case constraint_checks_disabled is
        # nested inside transaction.atomic.
        self.needs_rollback, needs_rollback = False, self.needs_rollback
        try:
            self.cursor().execute('SET foreign_key_checks=1')
        finally:
            self.needs_rollback = needs_rollback

    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign key references. This method is
        intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint checks were off.

        Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
        detailed information about the invalid reference in the error message.

        Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
        ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                # No primary key: nothing to report a bad row by; skip.
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # LEFT JOIN finds referring rows whose target row is missing.
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                       column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0],
                           table_name, column_name, bad_row[1],
                           referenced_table_name, referenced_column_name))

    def schema_editor(self, *args, **kwargs):
        "Returns a new instance of this backend's SchemaEditor"
        return DatabaseSchemaEditor(self, *args, **kwargs)

    def is_usable(self):
        # ping() raises if the server connection has gone away.
        try:
            self.connection.ping()
        except Database.Error:
            return False
        else:
            return True

    @cached_property
    def mysql_version(self):
        # Cached (numeric_major, minor, patch) tuple of the server version.
        with self.temporary_connection():
            server_info = self.connection.get_server_info()
        match = server_version_re.match(server_info)
        if not match:
            raise Exception('Unable to determine MySQL version from version string %r' % server_info)
        return tuple(int(x) for x in match.groups())
| gpl-3.0 |
sweetycode/omaha | installers/generate_resource_script.py | 65 | 2015 | #!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import getopt
import os.path
import re
import sys
def GenerateResourceScript(input_filename, output_filename, payload_filename,
                           manifest_filename, resource_filename):
  """Creates a resource script from a template.

  Reads input_filename line by line, replaces the __PAYLOAD_FILENAME__,
  __MANIFEST_FILENAME__, and __RESOURCE_FILENAME__ placeholders with the
  supplied file names, and writes the result to output_filename.
  """
  # 'with' guarantees both handles are closed (and output flushed) even if
  # a read/write fails; the original leaked both file objects.
  with open(input_filename, 'r') as f_in:
    with open(output_filename, 'w') as f_out:
      for line in f_in:
        # Same substitution order as the original nested re.sub calls:
        # payload first, then manifest, then resource.
        line = re.sub(r'__PAYLOAD_FILENAME__', payload_filename, line)
        line = re.sub(r'__MANIFEST_FILENAME__', manifest_filename, line)
        line = re.sub(r'__RESOURCE_FILENAME__', resource_filename, line)
        f_out.write(line)
# Command-line entry point. Flags:
#   -i template input, -o output, -p payload file, -m manifest file,
#   -r resource file. Missing flags default to empty strings.
(opts, args) = getopt.getopt(sys.argv[1:], 'i:o:p:m:r:')
input_filename = ''
output_filename = ''
payload_filename = ''
manifest_filename = ''
resource_filename = ''
for (o, v) in opts:
  if o == '-i':
    input_filename = v
  if o == '-o':
    output_filename = v
  if o == '-p':
    payload_filename = v
  if o == '-m':
    manifest_filename = v
  if o == '-r':
    resource_filename = v

# The forward slashes prevent the RC compiler from trying to interpret
# backslashes in the quoted path.
GenerateResourceScript(input_filename, output_filename,
                       re.sub(r'\\', r'/', payload_filename),
                       re.sub(r'\\', r'/', manifest_filename),
                       re.sub(r'\\', r'/', resource_filename))
| apache-2.0 |
exocad/exotrac | trac/tests/contentgen.py | 5 | 2360 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import random
import uuid
# Load the system word list when available; otherwise fall back to a tiny
# built-in vocabulary so the generators still work everywhere.
try:
    all_words = [entry.strip()
                 for entry in open('/usr/share/dict/words').readlines()
                 if entry.strip().isalpha()]
except IOError:
    all_words = [
        'one',
        'two',
        'three',
        'four',
        'five',
        'six',
        'seven',
        'eight',
        'nine',
        'ten',
    ]


def random_word(min_length=1):
    """Return a random word with at least `min_length` characters."""
    # Keep drawing until a long-enough word comes up.
    while True:
        word = random.choice(all_words)
        if len(word) >= min_length:
            break
    # Do not return CamelCase words: keep only the leading capital.
    if word[0].isupper():
        word = word.lower().capitalize()
    return word
# Camel-case pairs handed out so far; used to guarantee uniqueness across
# calls within this process.
_random_unique_camels = []


def random_unique_camel():
    """Returns a unique camelcase word pair"""
    # Keep drawing until a pair we have not handed out before appears.
    # ('not in' is the idiomatic membership test; the original used
    # 'not camel in ...'.)
    while True:
        camel = random_word(2).title() + random_word(2).title()
        if camel not in _random_unique_camels:
            break
    _random_unique_camels.append(camel)
    return camel
def random_sentence(word_count=None):
    """Generates a random sentence. The first word consists of the first 8
    characters of a uuid to ensure uniqueness.

    :param word_count: number of words in the sentence
    """
    if word_count is None:
        word_count = random.randint(1, 20)
    # Draw the body words first, then prepend the unique uuid-based prefix.
    body = [random_word() for _ in range(word_count - 1)]
    prefix = str(uuid.uuid1()).split('-')[0]
    return ' '.join([prefix] + body) + '.'
def random_paragraph(sentence_count=None):
    """Return `sentence_count` random sentences joined by single spaces."""
    if sentence_count is None:
        sentence_count = random.randint(1, 10)
    return ' '.join(random_sentence(random.randint(2, 15))
                    for _ in range(sentence_count))
def random_page(paragraph_count=None):
    """Return `paragraph_count` random paragraphs separated by CRLF blank lines."""
    if paragraph_count is None:
        paragraph_count = random.randint(1, 10)
    return '\r\n\r\n'.join(random_paragraph(random.randint(1, 5))
                           for _ in range(paragraph_count))
| bsd-3-clause |
waseem18/oh-mainline | vendor/packages/scrapy/docs/conf.py | 17 | 6109 | # -*- coding: utf-8 -*-
#
# Scrapy documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 24 12:02:52 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from os import path
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(path.join(path.dirname(__file__), "_ext"))
sys.path.append(path.join(path.dirname(path.dirname(__file__)), "scrapy"))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['scrapydocs']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Scrapy'
copyright = u'2008-2011, Insophia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Take version/release from the installed scrapy package when importable;
# empty strings keep the docs buildable on a checkout without scrapy on the
# path (Sphinx tolerates empty version metadata).
try:
    import scrapy
    version = '.'.join(map(str, scrapy.version_info[:2]))
    release = scrapy.__version__
except ImportError:
    version = ''
    release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'scrapydoc.css'

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'Scrapydoc'

# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# BUG FIX: the original used Python-2-only ``ur'...'`` literals, a SyntaxError
# on Python 3.  These strings contain no escapes, so plain ``u'...'`` is
# byte-identical on Python 2 and also valid on Python 3.
latex_documents = [
    ('index', 'Scrapy.tex', u'Scrapy Documentation',
     u'Insophia', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
| agpl-3.0 |
beiko-lab/gengis | bin/Lib/lib-tk/test/test_ttk/test_extensions.py | 7 | 9460 | import sys
import unittest
import Tkinter
import ttk
from test.test_support import requires, run_unittest
import support
requires('gui')
class LabeledScaleTest(unittest.TestCase):
    # Tests for ttk.LabeledScale: Tcl variable lifecycle, construction
    # options, label/scale geometry after range or value changes, and
    # behaviour under window resizing.  Requires a real display ('gui').

    def setUp(self):
        # The shared Tk root is kept withdrawn between tests; show it.
        support.root_deiconify()

    def tearDown(self):
        support.root_withdraw()

    def test_widget_destroy(self):
        # automatically created variable
        x = ttk.LabeledScale()
        var = x._variable._name
        x.destroy()
        # The auto-created Tcl variable must be deleted with the widget.
        self.assertRaises(Tkinter.TclError, x.tk.globalgetvar, var)
        # manually created variable
        myvar = Tkinter.DoubleVar()
        name = myvar._name
        x = ttk.LabeledScale(variable=myvar)
        x.destroy()
        # A caller-supplied variable must survive widget destruction ...
        self.assertEqual(x.tk.globalgetvar(name), myvar.get())
        del myvar
        # ... but goes away once the Python-side Variable is dropped.
        self.assertRaises(Tkinter.TclError, x.tk.globalgetvar, name)
        # checking that the tracing callback is properly removed
        myvar = Tkinter.IntVar()
        # LabeledScale will start tracing myvar
        x = ttk.LabeledScale(variable=myvar)
        x.destroy()
        # Unless the tracing callback was removed, creating a new
        # LabeledScale with the same var will cause an error now. This
        # happens because the variable will be set to (possibly) a new
        # value which causes the tracing callback to be called and then
        # it tries calling instance attributes not yet defined.
        ttk.LabeledScale(variable=myvar)
        if hasattr(sys, 'last_type'):
            self.assertFalse(sys.last_type == Tkinter.TclError)

    def test_initialization(self):
        # master passing
        x = ttk.LabeledScale()
        self.assertEqual(x.master, Tkinter._default_root)
        x.destroy()
        master = Tkinter.Frame()
        x = ttk.LabeledScale(master)
        self.assertEqual(x.master, master)
        x.destroy()
        # variable initialization/passing
        # Each pair is (value passed as from_, integer value expected back).
        passed_expected = ((2.5, 2), ('0', 0), (0, 0), (10, 10),
                           (-1, -1), (sys.maxint + 1, sys.maxint + 1))
        for pair in passed_expected:
            x = ttk.LabeledScale(from_=pair[0])
            self.assertEqual(x.value, pair[1])
            x.destroy()
        # Non-integer-convertible from_ values leave the IntVar unreadable.
        x = ttk.LabeledScale(from_='2.5')
        self.assertRaises(ValueError, x._variable.get)
        x.destroy()
        x = ttk.LabeledScale(from_=None)
        self.assertRaises(ValueError, x._variable.get)
        x.destroy()
        # variable should have its default value set to the from_ value
        myvar = Tkinter.DoubleVar(value=20)
        x = ttk.LabeledScale(variable=myvar)
        self.assertEqual(x.value, 0)
        x.destroy()
        # check that it is really using a DoubleVar
        x = ttk.LabeledScale(variable=myvar, from_=0.5)
        self.assertEqual(x.value, 0.5)
        self.assertEqual(x._variable._name, myvar._name)
        x.destroy()
        # widget positionment
        def check_positions(scale, scale_pos, label, label_pos):
            self.assertEqual(scale.pack_info()['side'], scale_pos)
            self.assertEqual(label.place_info()['anchor'], label_pos)
        x = ttk.LabeledScale(compound='top')
        check_positions(x.scale, 'bottom', x.label, 'n')
        x.destroy()
        x = ttk.LabeledScale(compound='bottom')
        check_positions(x.scale, 'top', x.label, 's')
        x.destroy()
        x = ttk.LabeledScale(compound='unknown')  # invert default positions
        check_positions(x.scale, 'top', x.label, 's')
        x.destroy()
        x = ttk.LabeledScale()  # take default positions
        check_positions(x.scale, 'bottom', x.label, 'n')
        x.destroy()
        # extra, and invalid, kwargs
        self.assertRaises(Tkinter.TclError, ttk.LabeledScale, a='b')

    def test_horizontal_range(self):
        # Changing the scale's from_/to range must move both the scale's
        # slider x-coordinate and the label placed above it.
        lscale = ttk.LabeledScale(from_=0, to=10)
        lscale.pack()
        lscale.wait_visibility()
        lscale.update()
        linfo_1 = lscale.label.place_info()
        prev_xcoord = lscale.scale.coords()[0]
        self.assertEqual(prev_xcoord, int(linfo_1['x']))
        # change range to: from -5 to 5. This should change the x coord of
        # the scale widget, since 0 is at the middle of the new
        # range.
        lscale.scale.configure(from_=-5, to=5)
        # The following update is needed since the test doesn't use mainloop,
        # at the same time this shouldn't affect test outcome
        lscale.update()
        curr_xcoord = lscale.scale.coords()[0]
        self.assertTrue(prev_xcoord != curr_xcoord)
        # the label widget should have been repositioned too
        linfo_2 = lscale.label.place_info()
        self.assertEqual(lscale.label['text'], 0)
        self.assertEqual(curr_xcoord, int(linfo_2['x']))
        # change the range back
        lscale.scale.configure(from_=0, to=10)
        self.assertTrue(prev_xcoord != curr_xcoord)
        self.assertEqual(prev_xcoord, int(linfo_1['x']))
        lscale.destroy()

    def test_variable_change(self):
        # Assigning to .value moves the slider and keeps the label glued
        # to the slider's x position; out-of-range values are ignored.
        x = ttk.LabeledScale()
        x.pack()
        x.wait_visibility()
        x.update()
        curr_xcoord = x.scale.coords()[0]
        newval = x.value + 1
        x.value = newval
        # The following update is needed since the test doesn't use mainloop,
        # at the same time this shouldn't affect test outcome
        x.update()
        self.assertEqual(x.label['text'], newval)
        self.assertTrue(x.scale.coords()[0] > curr_xcoord)
        self.assertEqual(x.scale.coords()[0],
                         int(x.label.place_info()['x']))
        # value outside range
        x.value = x.scale['to'] + 1  # no changes shouldn't happen
        x.update()
        self.assertEqual(x.label['text'], newval)
        self.assertEqual(x.scale.coords()[0],
                         int(x.label.place_info()['x']))
        x.destroy()

    def test_resize(self):
        # After the toplevel is resized, the label must still track the
        # slider's x coordinate.
        x = ttk.LabeledScale()
        x.pack(expand=True, fill='both')
        x.wait_visibility()
        x.update()
        width, height = x.master.winfo_width(), x.master.winfo_height()
        width_new, height_new = width * 2, height * 2
        x.value = 3
        x.update()
        x.master.wm_geometry("%dx%d" % (width_new, height_new))
        self.assertEqual(int(x.label.place_info()['x']),
                         x.scale.coords()[0])
        # Reset geometry
        x.master.wm_geometry("%dx%d" % (width, height))
        x.destroy()
class OptionMenuTest(unittest.TestCase):
    # Tests for ttk.OptionMenu: variable lifecycle, constructor options,
    # menu population, and the optional selection callback.

    def setUp(self):
        support.root_deiconify()
        self.textvar = Tkinter.StringVar()

    def tearDown(self):
        del self.textvar
        support.root_withdraw()

    def test_widget_destroy(self):
        var = Tkinter.StringVar()
        optmenu = ttk.OptionMenu(None, var)
        name = var._name
        optmenu.update_idletasks()
        optmenu.destroy()
        # The caller-supplied variable outlives the widget ...
        self.assertEqual(optmenu.tk.globalgetvar(name), var.get())
        del var
        # ... and is deleted only when the Python Variable goes away.
        self.assertRaises(Tkinter.TclError, optmenu.tk.globalgetvar, name)

    def test_initialization(self):
        # Unknown keyword options are rejected by Tk.
        self.assertRaises(Tkinter.TclError,
                          ttk.OptionMenu, None, self.textvar, invalid='thing')
        # The third positional argument is the initially selected value.
        optmenu = ttk.OptionMenu(None, self.textvar, 'b', 'a', 'b')
        self.assertEqual(optmenu._variable.get(), 'b')
        self.assertTrue(optmenu['menu'])
        self.assertTrue(optmenu['textvariable'])
        optmenu.destroy()

    def test_menu(self):
        # All values show up as menu entries, including the default.
        items = ('a', 'b', 'c')
        default = 'a'
        optmenu = ttk.OptionMenu(None, self.textvar, default, *items)
        found_default = False
        for i in range(len(items)):
            value = optmenu['menu'].entrycget(i, 'value')
            self.assertEqual(value, items[i])
            if value == default:
                found_default = True
        self.assertTrue(found_default)
        optmenu.destroy()
        # default shouldn't be in menu if it is not part of values
        default = 'd'
        optmenu = ttk.OptionMenu(None, self.textvar, default, *items)
        curr = None
        i = 0
        while True:
            # entryconfigure on a past-the-end index keeps returning the
            # same result, which is how the end of the menu is detected.
            last, curr = curr, optmenu['menu'].entryconfigure(i, 'value')
            if last == curr:
                # no more menu entries
                break
            self.assertFalse(curr == default)
            i += 1
        self.assertEqual(i, len(items))
        # check that variable is updated correctly
        optmenu.pack()
        optmenu.wait_visibility()
        optmenu['menu'].invoke(0)
        self.assertEqual(optmenu._variable.get(), items[0])
        # changing to an invalid index shouldn't change the variable
        self.assertRaises(Tkinter.TclError, optmenu['menu'].invoke, -1)
        self.assertEqual(optmenu._variable.get(), items[0])
        optmenu.destroy()
        # specifying a callback
        success = []
        def cb_test(item):
            self.assertEqual(item, items[1])
            success.append(True)
        optmenu = ttk.OptionMenu(None, self.textvar, 'a', command=cb_test,
                                 *items)
        optmenu['menu'].invoke(1)
        if not success:
            self.fail("Menu callback not invoked")
        optmenu.destroy()
# Test cases exported for the test.test_support GUI regression driver.
tests_gui = (LabeledScaleTest, OptionMenuTest)

if __name__ == "__main__":
    run_unittest(*tests_gui)
| gpl-3.0 |
from __future__ import print_function

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

import settings
def send_email(sender, recipient, subject, text, html):
    """Send a multipart/alternative email with plain-text and HTML bodies.

    When ``settings.DJANGO_SERVER`` is true (development mode), the message
    is printed to stdout instead of being emailed.

    :param sender: "From" address.
    :param recipient: "To" address.
    :param subject: subject line.
    :param text: plain-text body.
    :param html: HTML body.
    """
    if settings.DJANGO_SERVER:
        # Development mode: dump the message instead of sending it.
        # (print() relies on the module-level ``from __future__ import
        # print_function`` so output is identical on Python 2 and 3.)
        print("=" * 60)
        print("FROM:", sender)
        print("TO:", recipient)
        print("SUBJECT:", subject)
        print("========= TEXT MESSAGE =========\n", text)
        print("\n\n========= HTML MESSAGE ==========\n", html)
        return
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = recipient
    # Record the MIME types of both parts - text/plain and text/html.
    part1 = MIMEText(text, 'plain')
    part2 = MIMEText(html, 'html')
    # Attach parts into message container.
    # According to RFC 2046, the last part of a multipart message, in this case
    # the HTML message, is best and preferred.
    msg.attach(part1)
    msg.attach(part2)
    # Send the message via local SMTP server.  BUG FIX: always close the
    # connection, even when sendmail() raises (it used to leak on failure).
    s = smtplib.SMTP('localhost')
    try:
        # sendmail takes 3 arguments: sender's address, recipient's address
        # and the message to send, serialized as one string.
        s.sendmail(sender, recipient, msg.as_string())
    finally:
        s.quit()
| agpl-3.0 |
mgsergio/omim | tools/python/ResponseProvider.py | 3 | 7085 | from __future__ import print_function
import jsons
import logging
import os
# Size driver for the synthetic "47kb" download endpoint (see
# ResponseProvider.test_47_kb / message_for_47kb_file).
BIG_FILE_SIZE = 47684
class Payload:
    """An HTTP response: status code, body, and extra headers.

    Note: ``headers()`` deliberately does not include Content-Length; the
    server must send that separately, using ``length()``.
    """

    def __init__(self, message, response_code=200, headers=None):
        """
        :param message: response body sent to the client.
        :param response_code: HTTP status code (default 200).
        :param headers: optional dict of extra headers.  BUG FIX: the old
            mutable default ``headers={}`` was shared across every instance
            created without headers, so mutating one payload's headers
            leaked into all the others.  A fresh dict is now created per
            instance.
        """
        self.__response_code = response_code
        self.__message = message
        self.__headers = {} if headers is None else headers

    def response_code(self):
        """Response code to send to the client."""
        return self.__response_code

    def message(self):
        """The message to send to the client."""
        return self.__message

    def length(self):
        """The length of the response body."""
        return len(self.message())

    def headers(self):
        """The headers to be sent to the client. Please, note, that these do not
        include the Content-Length header, which you need to send separately.
        """
        return self.__headers

    def __repr__(self):
        return "{}: {}: {}".format(self.response_code(), self.length(), self.message())
class ResponseProviderMixin:
    """Interface a test web-server implements to cooperate with a
    ResponseProvider.

    The server's only job is to receive a request (url + headers), ask its
    ResponseProvider for ``response_for_url_and_headers(url, headers)``, and
    send back the resulting Payload.  The provider calls back into the
    server through the three hooks below.
    """

    def dispatch_response(self, payload):
        """Send *payload* (a Payload) to the client.

        Must be overridden by the concrete server.
        """
        raise NotImplementedError()

    def got_pinged(self):
        """Called when a ping request arrives (the provider answers "pong").

        New server instances ping existing ones to see who is serving, so
        this typically increments the count of active users.
        """
        raise NotImplementedError()

    def kill(self):
        """Called when a kill request arrives: someone no longer needs this
        server.

        Typically decrements the user count and stops the server when it
        drops to zero.
        """
        raise NotImplementedError()
class ResponseProvider:
    """Decides how to answer each request URL for the test HTTP server.

    The server (a ResponseProviderMixin implementation) passes in the raw
    url and headers; this class returns a Payload.  Byte-range ("range")
    request headers are honoured for the endpoints that serve file-like
    content.
    """

    def __init__(self, delegate):
        # ``delegate`` is the server (a ResponseProviderMixin); it is
        # notified of /ping and /kill requests.
        self.headers = list()
        self.delegate = delegate
        self.byterange = None        # (first, last) from a Range header, if any
        self.is_chunked = False      # True once a Range header was seen
        self.response_code = 200     # 200, or 206 for partial content

    def pong(self):
        """Answer /ping and tell the server it has one more user."""
        self.delegate.got_pinged()
        return Payload("pong")

    def my_id(self):
        """Answer /id with this server's process id."""
        return Payload(str(os.getpid()))

    def strip_query(self, url):
        """Drop the query string ("?..."), if any, from *url*."""
        query_start = url.find("?")
        if query_start > 0:
            return url[:query_start]
        return url

    def response_for_url_and_headers(self, url, headers):
        """Route *url* to its handler and return the resulting Payload."""
        self.headers = headers
        self.chunk_requested()
        url = self.strip_query(url)
        routes = {
            "/unit_tests/1.txt": self.test1,
            "/unit_tests/notexisting_unittest": self.test_404,
            "/unit_tests/permanent": self.test_301,
            "/unit_tests/47kb.file": self.test_47_kb,
            # Following two URIs are used to test downloading failures on different platforms.
            "/unit_tests/mac/1234/Uruguay.mwm": self.test_404,
            "/unit_tests/linux/1234/Uruguay.mwm": self.test_404,
            "/ping": self.pong,
            "/kill": self.kill,
            "/id": self.my_id,
            "/partners/time": self.partners_time,
            "/partners/price": self.partners_price,
            "/booking/min_price": self.partners_minprice,
            "/booking/min_price.getHotelAvailability": self.partners_minprice,
            "/booking/min_price/hotelAvailability": self.partners_hotel_availability,
            "/partners/taxi_info": self.partners_yandex_taxi_info,
            "/partners/get-offers-in-bbox/": self.partners_rent_nearby,
        }
        # BUG FIX: the original bare ``except:`` also swallowed exceptions
        # raised *inside* a handler and masked them as 404s.  Only an
        # unknown URL is a 404; handler bugs now propagate and stay visible.
        try:
            handler = routes[url]
        except KeyError:
            return self.test_404()
        return handler()

    def chunk_requested(self):
        """Parse a "range: bytes=a-b" header into self.byterange, if present."""
        if "range" in self.headers:
            self.is_chunked = True
            self.response_code = 206
            # Strip the leading "bytes=" prefix (6 characters).
            meaningful_string = self.headers["range"][6:]
            first, last = meaningful_string.split("-")
            self.byterange = (int(first), int(last))

    def trim_message(self, message):
        """Return only the requested byte range of *message* (or all of it)."""
        if not self.is_chunked:
            return message
        return message[self.byterange[0]: self.byterange[1] + 1]

    def test1(self):
        # Small fixed body, honouring byte-range requests.
        init_message = "Test1"
        message = self.trim_message(init_message)
        size = len(init_message)
        self.check_byterange(size)
        headers = self.chunked_response_header(size)
        return Payload(message, self.response_code, headers)

    def test_404(self):
        return Payload("", response_code=404)

    def test_301(self):
        return Payload("", 301, {"Location": "google.com"})

    def check_byterange(self, size):
        """Default the byte range to the whole body when none was requested."""
        if self.byterange is None:
            self.byterange = (0, size)

    def chunked_response_header(self, size):
        """Build the Content-Range header for a (possibly partial) response."""
        return {
            "Content-Range": "bytes {start}-{end}/{out_of}".format(
                start=self.byterange[0], end=self.byterange[1], out_of=size)
        }

    def test_47_kb(self):
        # Large synthetic body, honouring byte-range requests.
        self.check_byterange(BIG_FILE_SIZE)
        headers = self.chunked_response_header(BIG_FILE_SIZE)
        message = self.trim_message(self.message_for_47kb_file())
        return Payload(message, self.response_code, headers)

    def message_for_47kb_file(self):
        # Two characters per counter value: the high byte, then the low byte.
        message = []
        for i in range(0, BIG_FILE_SIZE + 1):
            # BUG FIX: ``i / 256`` becomes a float under true division
            # (Python 3), which chr() rejects; floor division is identical
            # on Python 2 and correct on Python 3.
            message.append(chr(i // 256))
            message.append(chr(i % 256))
        return "".join(message)

    # Partners_api_tests
    def partners_time(self):
        return Payload(jsons.PARTNERS_TIME)

    def partners_price(self):
        return Payload(jsons.PARTNERS_PRICE)

    def partners_minprice(self):
        return Payload(jsons.PARTNERS_MINPRICE)

    def partners_hotel_availability(self):
        return Payload(jsons.HOTEL_AVAILABILITY)

    def partners_yandex_taxi_info(self):
        return Payload(jsons.PARTNERS_TAXI_INFO)

    def partners_rent_nearby(self):
        return Payload(jsons.PARTNERS_RENT_NEARBY)

    def kill(self):
        """Answer /kill and tell the server it has one user fewer."""
        logging.debug("Kill called in ResponseProvider")
        self.delegate.kill()
        return Payload("Bye...")
| apache-2.0 |
JING-TIME/ustc-course | tests/resize_avatar.py | 1 | 1146 | #!/usr/bin/env python3
import sys
sys.path.append('..') # fix import directory
from app import app
from app.models import User
from PIL import Image
from app.utils import rand_str
# Push an application context so the model layer (User.query / u.save())
# works outside of a request.
ctx = app.test_request_context()
ctx.push()

users = User.query.all()
for u in users:
    # Skip users that have no uploaded avatar.
    if not u._avatar:
        continue
    with Image.open('../uploads/images/' + u._avatar) as img:
        image_width, image_height = img.size
        thumbnail_width = 192
        thumbnail_height = 192
        # Already small enough -- nothing to do for this user.
        if image_width <= thumbnail_width and image_height <= thumbnail_height:
            continue
        # generate thumbnail if the avatar is too large
        old_avatar = u._avatar
        new_filename = rand_str() + '.png'
        try:
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10
            # (Image.LANCZOS is the modern spelling) -- confirm the pinned
            # Pillow version before changing it.
            img.thumbnail((thumbnail_width, thumbnail_height), Image.ANTIALIAS)
            img.save('../uploads/images/' + new_filename, "PNG")
        except IOError:
            print("Failed to create thumbnail from '" + u._avatar + "' to '" + new_filename + "'")
            # BUG FIX: previously the code fell through and still pointed
            # the user record at a thumbnail file that was never written.
            continue
        u._avatar = new_filename
        u.save()
        # BUG FIX: report the *original* filename (u._avatar has already
        # been overwritten with the thumbnail name at this point).
        print('User ID ' + str(u.id) + ' original ' + old_avatar + ' thumbnail ' + new_filename)
| agpl-3.0 |
equialgo/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
    # An unknown ``method`` name must be rejected with ValueError.
    assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
    # Run the generic check_* helpers below once per sampling strategy.
    methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
    for m in methods:
        def sample_without_replacement_method(n_population, n_samples,
                                              random_state=None):
            # Closure over the loop variable ``m``; safe because the checks
            # are invoked within the same loop iteration.
            return sample_without_replacement(n_population, n_samples,
                                              method=m,
                                              random_state=random_state)
        check_edge_case_of_sample_int(sample_without_replacement_method)
        check_sample_int(sample_without_replacement_method)
        check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # Degenerate and invalid (n_population, n_samples) combinations.

    # n_population < n_sample
    assert_raises(ValueError, sample_without_replacement, 0, 1)
    assert_raises(ValueError, sample_without_replacement, 1, 2)
    # n_population == n_samples
    assert_equal(sample_without_replacement(0, 0).shape, (0, ))
    assert_equal(sample_without_replacement(1, 1).shape, (1, ))
    # n_population >= n_samples
    assert_equal(sample_without_replacement(5, 0).shape, (0, ))
    assert_equal(sample_without_replacement(5, 1).shape, (1, ))
    # n_population < 0 or n_samples < 0
    assert_raises(ValueError, sample_without_replacement, -1, 5)
    assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # the sample is of the correct length and contains only unique items
    n_population = 100
    for n_samples in range(n_population + 1):
        s = sample_without_replacement(n_population, n_samples)
        assert_equal(len(s), n_samples)
        # Uniqueness: no value may be drawn twice, and every value must
        # lie in [0, n_population).
        unique = np.unique(s)
        assert_equal(np.size(unique), n_samples)
        assert_true(np.all(unique < n_population))
    # test edge case n_population == n_samples == 0
    assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # sample generates all possible permutations
    n_population = 10
    # a large number of trials prevents false negatives without slowing normal
    # case
    n_trials = 10000
    for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # the number of permutations. However, it works with sampling algorithm
        # that does not provide a random permutation of the subset of integer.
        # NOTE(review): ``combinations`` is scipy.misc.comb, which was removed
        # in SciPy 1.0 (scipy.special.comb is the modern location).
        n_expected = combinations(n_population, n_samples, exact=True)
        output = {}
        for i in range(n_trials):
            # Use frozenset so order within a sample does not matter.
            output[frozenset(sample_without_replacement(n_population,
                                                        n_samples))] = None
            if len(output) == n_expected:
                break
        else:
            # for/else: only reached when the loop exhausted n_trials
            # without observing every possible combination.
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)" %
                (len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
    # Explicit class probabilities
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))
    for k in range(len(classes)):
        # Empirical frequencies should approximate the requested ones
        # (to 1 decimal place, given 10000 draws).
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Implicit class probabilities (uniform over the given classes)
    classes = [[0, 1], [1, 2]]  # test for array-like support
    class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))
    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))
    for k in range(len(classes)):
        # minlength keeps the bincount comparable even when some classes
        # are never drawn.
        p = np.bincount(got.getcol(k).toarray().ravel(),
                        minlength=len(class_probabilites[k])) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # One class target data
    classes = [[1], [0]]  # test for array-like support
    class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))
    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    # the length of an array in classes and class_probabilites is mismatched
    classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)
    # the class dtype is not supported (strings)
    classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)
    # the class dtype is not supported (floats)
    classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)
    # Given probabilities don't sum to 1
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)
| bsd-3-clause |
srcLurker/home-assistant | homeassistant/components/switch/flux.py | 2 | 8700 | """
Flux for Home-Assistant.
The idea was taken from https://github.com/KpaBap/hue-flux/
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.flux/
"""
from datetime import time
import logging
import voluptuous as vol
from homeassistant.components.light import is_on, turn_on
from homeassistant.components.sun import next_setting, next_rising
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util.color import (
color_temperature_to_rgb, color_RGB_to_xy,
color_temperature_kelvin_to_mired, HASS_COLOR_MIN, HASS_COLOR_MAX)
from homeassistant.util.dt import now as dt_now
from homeassistant.util.dt import as_local
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['sun', 'light']
# Entity id of the sun component, used for sunrise/sunset lookups.
SUN = "sun.sun"
_LOGGER = logging.getLogger(__name__)

# Configuration keys accepted by this platform.
CONF_LIGHTS = 'lights'
CONF_START_TIME = 'start_time'
CONF_STOP_TIME = 'stop_time'
CONF_START_CT = 'start_colortemp'
CONF_SUNSET_CT = 'sunset_colortemp'
CONF_STOP_CT = 'stop_colortemp'
CONF_BRIGHTNESS = 'brightness'
CONF_MODE = 'mode'

# Lights can be driven via xy colour or via mired colour temperature.
MODE_XY = 'xy'
MODE_MIRED = 'mired'
DEFAULT_MODE = MODE_XY

# Colour temperatures are validated to 1000-40000 Kelvin; brightness,
# when given, to 0-255.
PLATFORM_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): 'flux',
    vol.Required(CONF_LIGHTS): cv.entity_ids,
    vol.Optional(CONF_NAME, default="Flux"): cv.string,
    vol.Optional(CONF_START_TIME): cv.time,
    vol.Optional(CONF_STOP_TIME, default=time(22, 0)): cv.time,
    vol.Optional(CONF_START_CT, default=4000):
        vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
    vol.Optional(CONF_SUNSET_CT, default=3000):
        vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
    vol.Optional(CONF_STOP_CT, default=1900):
        vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
    vol.Optional(CONF_BRIGHTNESS):
        vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
    vol.Optional(CONF_MODE, default=DEFAULT_MODE):
        vol.Any(MODE_XY, MODE_MIRED)
})
def set_lights_xy(hass, lights, x_val, y_val, brightness):
    """Apply an xy colour and brightness to every light that is currently on."""
    for entity_id in lights:
        if not is_on(hass, entity_id):
            continue
        turn_on(
            hass,
            entity_id,
            xy_color=[x_val, y_val],
            brightness=brightness,
            transition=30,
        )
def set_lights_temp(hass, lights, mired, brightness):
    """Apply a mired colour temperature and brightness to each light that is on."""
    for entity_id in lights:
        if not is_on(hass, entity_id):
            continue
        turn_on(
            hass,
            entity_id,
            color_temp=int(mired),
            brightness=brightness,
            transition=30,
        )
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Flux switches.

    Creates a single FluxSwitch entity from the validated platform
    configuration and registers a ``<name>_update`` service that forces an
    immediate light update.
    """
    name = config.get(CONF_NAME)
    lights = config.get(CONF_LIGHTS)
    start_time = config.get(CONF_START_TIME)
    stop_time = config.get(CONF_STOP_TIME)
    start_colortemp = config.get(CONF_START_CT)
    sunset_colortemp = config.get(CONF_SUNSET_CT)
    stop_colortemp = config.get(CONF_STOP_CT)
    brightness = config.get(CONF_BRIGHTNESS)
    mode = config.get(CONF_MODE)
    # The switch starts in the "off" state (third argument).
    flux = FluxSwitch(name, hass, False, lights, start_time, stop_time,
                      start_colortemp, sunset_colortemp, stop_colortemp,
                      brightness, mode)
    add_devices([flux])

    def update(call=None):
        """Update lights."""
        flux.flux_update()
    hass.services.register(DOMAIN, name + '_update', update)
class FluxSwitch(SwitchDevice):
    """Representation of a Flux switch.

    While on, it periodically recomputes a colour temperature between the
    configured start/sunset/stop values and applies it to the configured
    lights (see flux_update).
    """

    def __init__(self, name, hass, state, lights, start_time, stop_time,
                 start_colortemp, sunset_colortemp, stop_colortemp,
                 brightness, mode):
        """Initialize the Flux switch."""
        self._name = name
        self.hass = hass
        self._state = state
        self._lights = lights
        self._start_time = start_time
        self._stop_time = stop_time
        self._start_colortemp = start_colortemp
        self._sunset_colortemp = sunset_colortemp
        self._stop_colortemp = stop_colortemp
        self._brightness = brightness
        self._mode = mode
        # Unsubscribe callback for the periodic time tracker; None while off.
        self.unsub_tracker = None

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn on flux: start updating lights twice a minute."""
        self._state = True
        self.unsub_tracker = track_utc_time_change(self.hass, self.flux_update,
                                                   second=[0, 30])
        self.update_ha_state()

    def turn_off(self, **kwargs):
        """Turn off flux and cancel the periodic updates."""
        if self.unsub_tracker is not None:
            self.unsub_tracker()
            self.unsub_tracker = None
        self._state = False
        self.update_ha_state()

    def flux_update(self, now=None):
        """Update all the lights using flux.

        Interpolates the colour temperature linearly between
        start_colortemp and sunset_colortemp during the day, and between
        sunset_colortemp and stop_colortemp at night.
        """
        if now is None:
            now = dt_now()
        # Project today's date onto the next-sunset timestamp.
        sunset = next_setting(self.hass, SUN).replace(day=now.day,
                                                      month=now.month,
                                                      year=now.year)
        start_time = self.find_start_time(now)
        # NOTE(review): stop_time is always placed on *today*; a stop time
        # past midnight would land before sunset -- confirm intended range.
        stop_time = now.replace(hour=self._stop_time.hour,
                                minute=self._stop_time.minute,
                                second=0)
        if start_time < now < sunset:
            # Daytime
            time_state = 'day'
            temp_range = abs(self._start_colortemp - self._sunset_colortemp)
            day_length = int(sunset.timestamp() - start_time.timestamp())
            seconds_from_start = int(now.timestamp() - start_time.timestamp())
            percentage_complete = seconds_from_start / day_length
            temp_offset = temp_range * percentage_complete
            if self._start_colortemp > self._sunset_colortemp:
                temp = self._start_colortemp - temp_offset
            else:
                temp = self._start_colortemp + temp_offset
        else:
            # Nightime
            time_state = 'night'
            # Clamp progress at stop_time so the colour holds steady once
            # the configured stop time has passed.
            if now < stop_time and now > start_time:
                now_time = now
            else:
                now_time = stop_time
            temp_range = abs(self._sunset_colortemp - self._stop_colortemp)
            # NOTE(review): if stop_time coincides with sunset this is zero
            # and the division below raises ZeroDivisionError -- confirm
            # configuration constraints rule that out.
            night_length = int(stop_time.timestamp() - sunset.timestamp())
            seconds_from_sunset = int(now_time.timestamp() -
                                      sunset.timestamp())
            percentage_complete = seconds_from_sunset / night_length
            temp_offset = temp_range * percentage_complete
            if self._sunset_colortemp > self._stop_colortemp:
                temp = self._sunset_colortemp - temp_offset
            else:
                temp = self._sunset_colortemp + temp_offset
        x_val, y_val, b_val = color_RGB_to_xy(*color_temperature_to_rgb(temp))
        # An explicitly configured brightness wins over the computed one.
        brightness = self._brightness if self._brightness else b_val
        if self._mode == MODE_XY:
            set_lights_xy(self.hass, self._lights, x_val,
                          y_val, brightness)
            _LOGGER.info("Lights updated to x:%s y:%s brightness:%s, %s%%"
                         " of %s cycle complete at %s", x_val, y_val,
                         brightness, round(
                             percentage_complete * 100), time_state,
                         as_local(now))
        else:
            # Convert to mired and clamp to allowed values
            mired = color_temperature_kelvin_to_mired(temp)
            mired = max(HASS_COLOR_MIN, min(mired, HASS_COLOR_MAX))
            set_lights_temp(self.hass, self._lights, mired, brightness)
            _LOGGER.info("Lights updated to mired:%s brightness:%s, %s%%"
                         " of %s cycle complete at %s", mired, brightness,
                         round(percentage_complete * 100),
                         time_state, as_local(now))

    def find_start_time(self, now):
        """Return sunrise or start_time if given."""
        if self._start_time:
            sunrise = now.replace(hour=self._start_time.hour,
                                  minute=self._start_time.minute,
                                  second=0)
        else:
            sunrise = next_rising(self.hass, SUN).replace(day=now.day,
                                                          month=now.month,
                                                          year=now.year)
        return sunrise
| mit |
prechelt/pyth | pyth/__init__.py | 1 | 1207 | """
Pyth -- Python text markup and conversion
"""
from __future__ import absolute_import
import os.path
__version__ = '0.5.6'
# Maps a file extension to the dotted path of the writer class handling it.
writerMap = {
    '.rtf': 'pyth.plugins.rtf15.writer.Rtf15Writer',
    '.html': 'pyth.plugins.xhtml.writer.XHTMLWriter',
    '.xhtml': 'pyth.plugins.xhtml.writer.XHTMLWriter',
    '.txt': 'pyth.plugins.plaintext.writer.PlaintextWriter',
    '.pdf': 'pyth.plugins.pdf.writer.PDFWriter',
}

# Maps a file extension to the MIME type returned by write().
mimeMap = {
    '.rtf': 'application/rtf',
    '.html': 'text/html',
    '.xhtml': 'application/xhtml+xml',
    '.txt': 'text/plain',
    # BUG FIX: writerMap registers a '.pdf' writer but mimeMap had no
    # '.pdf' entry, so write(doc, "x.pdf") raised KeyError at its return.
    '.pdf': 'application/pdf',
}
def write(doc, filename):
    """Serialize *doc* with the writer registered for *filename*'s extension.

    Returns a ``(buffer, mime_type)`` tuple; the buffer is rewound to the
    start so it can be read immediately.
    """
    _, extension = os.path.splitext(filename)
    writer_cls = namedObject(writerMap[extension])
    stream = writer_cls.write(doc)
    stream.seek(0)
    return (stream, mimeMap[extension])
# Stolen from twisted.python.reflect
def namedModule(name):
    """Import and return the (possibly dotted) module named *name*."""
    # __import__ returns the top-level package; walk down to the leaf.
    module = __import__(name)
    for part in name.split(".")[1:]:
        module = getattr(module, part)
    return module
def namedObject(name):
    """Resolve a fully-qualified dotted *name* to a module-global object."""
    parts = name.split('.')
    # Everything but the last segment is the module path; the last segment
    # is the attribute to fetch from it.
    module = namedModule('.'.join(parts[:-1]))
    return getattr(module, parts[-1])
| mit |
chauhanhardik/populo_2 | openedx/core/djangoapps/credit/models.py | 8 | 24732 | # -*- coding: utf-8 -*-
"""
Models for Credit Eligibility for courses.
Credit courses allow students to receive university credit for
successful completion of a course on EdX
"""
import datetime
from collections import defaultdict
import logging
import pytz
from django.conf import settings
from django.core.cache import cache
from django.dispatch import receiver
from django.db import models, transaction, IntegrityError
from django.core.validators import RegexValidator
from simple_history.models import HistoricalRecords
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from xmodule_django.models import CourseKeyField
from django.utils.translation import ugettext_lazy
log = logging.getLogger(__name__)
class CreditProvider(TimeStampedModel):
"""
This model represents an institution that can grant credit for a course.
Each provider is identified by unique ID (e.g., 'ASU'). CreditProvider also
includes a `url` where the student will be sent when he/she will try to
get credit for course. Eligibility duration will be use to set duration
for which credit eligible message appears on dashboard.
"""
provider_id = models.CharField(
max_length=255,
unique=True,
validators=[
RegexValidator(
regex=r"^[a-z,A-Z,0-9,\-]+$",
message="Only alphanumeric characters and hyphens (-) are allowed",
code="invalid_provider_id",
)
],
help_text=ugettext_lazy(
"Unique identifier for this credit provider. "
"Only alphanumeric characters and hyphens (-) are allowed. "
"The identifier is case-sensitive."
)
)
active = models.BooleanField(
default=True,
help_text=ugettext_lazy("Whether the credit provider is currently enabled.")
)
display_name = models.CharField(
max_length=255,
help_text=ugettext_lazy("Name of the credit provider displayed to users")
)
enable_integration = models.BooleanField(
default=False,
help_text=ugettext_lazy(
"When true, automatically notify the credit provider "
"when a user requests credit. "
"In order for this to work, a shared secret key MUST be configured "
"for the credit provider in secure auth settings."
)
)
provider_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL of the credit provider. If automatic integration is "
"enabled, this will the the end-point that we POST to "
"to notify the provider of a credit request. Otherwise, the "
"user will be shown a link to this URL, so the user can "
"request credit from the provider directly."
)
)
provider_status_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL from the credit provider where the user can check the status "
"of his or her request for credit. This is displayed to students "
"*after* they have requested credit."
)
)
provider_description = models.TextField(
default="",
help_text=ugettext_lazy(
"Description for the credit provider displayed to users."
)
)
fulfillment_instructions = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy(
"Plain text or html content for displaying further steps on "
"receipt page *after* paying for the credit to get credit for a "
"credit course against a credit provider."
)
)
eligibility_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit eligibility email content which is sent when user has met "
"all credit eligibility requirements."
)
)
receipt_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit receipt email content which is sent *after* paying to get "
"credit for a credit course."
)
)
thumbnail_url = models.URLField(
default="",
max_length=255,
help_text=ugettext_lazy(
"Thumbnail image url of the credit provider."
)
)
CREDIT_PROVIDERS_CACHE_KEY = "credit.providers.list"
@classmethod
def get_credit_providers(cls, providers_list=None):
"""
Retrieve a list of all credit providers or filter on providers_list, represented
as dictionaries.
Arguments:
provider_list (list of strings or None): contains list of ids if required results
to be filtered, None for all providers.
Returns:
list of providers represented as dictionaries.
"""
# Attempt to retrieve the credit provider list from the cache if provider_list is None
# The cache key is invalidated when the provider list is updated
# (a post-save signal handler on the CreditProvider model)
# This doesn't happen very often, so we would expect a *very* high
# cache hit rate.
credit_providers = cache.get(cls.CREDIT_PROVIDERS_CACHE_KEY)
if credit_providers is None:
# Cache miss: construct the provider list and save it in the cache
credit_providers = CreditProvider.objects.filter(active=True)
credit_providers = [
{
"id": provider.provider_id,
"display_name": provider.display_name,
"url": provider.provider_url,
"status_url": provider.provider_status_url,
"description": provider.provider_description,
"enable_integration": provider.enable_integration,
"fulfillment_instructions": provider.fulfillment_instructions,
"thumbnail_url": provider.thumbnail_url,
}
for provider in credit_providers
]
cache.set(cls.CREDIT_PROVIDERS_CACHE_KEY, credit_providers)
if providers_list:
credit_providers = [provider for provider in credit_providers if provider['id'] in providers_list]
return credit_providers
@classmethod
def get_credit_provider(cls, provider_id):
"""
Retrieve a credit provider with provided 'provider_id'.
"""
try:
return CreditProvider.objects.get(active=True, provider_id=provider_id)
except cls.DoesNotExist:
return None
def __unicode__(self):
"""Unicode representation of the credit provider. """
return self.provider_id
# Keep the cached provider list coherent with the database: any create,
# update, or delete of a CreditProvider drops the cache entry so the next
# call to CreditProvider.get_credit_providers() rebuilds it.
@receiver(models.signals.post_save, sender=CreditProvider)
@receiver(models.signals.post_delete, sender=CreditProvider)
def invalidate_provider_cache(sender, **kwargs):  # pylint: disable=unused-argument
    """Invalidate the cache of credit providers. """
    cache.delete(CreditProvider.CREDIT_PROVIDERS_CACHE_KEY)
class CreditCourse(models.Model):
    """
    Model for tracking a credit course.
    """
    # Course identifier; unique, so each course maps to at most one record.
    course_key = CourseKeyField(max_length=255, db_index=True, unique=True)
    # Whether credit is currently enabled for this course.
    enabled = models.BooleanField(default=False)

    # Cache key under which the set of enabled credit course key strings lives.
    CREDIT_COURSES_CACHE_KEY = "credit.courses.set"

    @classmethod
    def is_credit_course(cls, course_key):
        """
        Check whether the course has been configured for credit.
        Args:
            course_key (CourseKey): Identifier of the course.
        Returns:
            bool: True iff this is a credit course.
        """
        credit_courses = cache.get(cls.CREDIT_COURSES_CACHE_KEY)
        if credit_courses is None:
            # Cache miss: rebuild the set from the DB. Keys are stored as
            # unicode strings so the membership test below can compare
            # against unicode(course_key).
            credit_courses = set(
                unicode(course.course_key)
                for course in cls.objects.filter(enabled=True)
            )
            cache.set(cls.CREDIT_COURSES_CACHE_KEY, credit_courses)
        return unicode(course_key) in credit_courses

    @classmethod
    def get_credit_course(cls, course_key):
        """
        Get the credit course if exists for the given 'course_key'.
        Args:
            course_key(CourseKey): The course identifier
        Raises:
            DoesNotExist if no CreditCourse exists for the given course key.
        Returns:
            CreditCourse if one exists for the given course key.
        """
        return cls.objects.get(course_key=course_key, enabled=True)

    def __unicode__(self):
        """Unicode representation of the credit course. """
        return unicode(self.course_key)
# Mirror of invalidate_provider_cache for courses: saving or deleting a
# CreditCourse drops the cached key set so is_credit_course() re-reads it.
@receiver(models.signals.post_save, sender=CreditCourse)
@receiver(models.signals.post_delete, sender=CreditCourse)
def invalidate_credit_courses_cache(sender, **kwargs):  # pylint: disable=unused-argument
    """Invalidate the cache of credit courses. """
    cache.delete(CreditCourse.CREDIT_COURSES_CACHE_KEY)
class CreditRequirement(TimeStampedModel):
"""
This model represents a credit requirement.
Each requirement is uniquely identified by its 'namespace' and
'name' fields.
The 'name' field stores the unique name or location (in case of XBlock)
for a requirement, which serves as the unique identifier for that
requirement.
The 'display_name' field stores the display name of the requirement.
The 'criteria' field dictionary provides additional information, clients
may need to determine whether a user has satisfied the requirement.
"""
course = models.ForeignKey(CreditCourse, related_name="credit_requirements")
namespace = models.CharField(max_length=255)
name = models.CharField(max_length=255)
display_name = models.CharField(max_length=255, default="")
order = models.PositiveIntegerField(default=0)
criteria = JSONField()
active = models.BooleanField(default=True)
class Meta(object):
"""
Model metadata.
"""
unique_together = ('namespace', 'name', 'course')
ordering = ["order"]
@classmethod
def add_or_update_course_requirement(cls, credit_course, requirement, order):
"""
Add requirement to a given course.
Args:
credit_course(CreditCourse): The identifier for credit course
requirement(dict): Requirement dict to be added
Returns:
(CreditRequirement, created) tuple
"""
credit_requirement, created = cls.objects.get_or_create(
course=credit_course,
namespace=requirement["namespace"],
name=requirement["name"],
defaults={
"display_name": requirement["display_name"],
"criteria": requirement["criteria"],
"order": order,
"active": True
}
)
if not created:
credit_requirement.criteria = requirement["criteria"]
credit_requirement.active = True
credit_requirement.order = order
credit_requirement.display_name = requirement["display_name"]
credit_requirement.save()
return credit_requirement, created
@classmethod
def get_course_requirements(cls, course_key, namespace=None, name=None):
"""
Get credit requirements of a given course.
Args:
course_key (CourseKey): The identifier for a course
Keyword Arguments
namespace (str): Optionally filter credit requirements by namespace.
name (str): Optionally filter credit requirements by name.
Returns:
QuerySet of CreditRequirement model
"""
# order credit requirements according to their appearance in courseware
requirements = CreditRequirement.objects.filter(course__course_key=course_key, active=True)
if namespace is not None:
requirements = requirements.filter(namespace=namespace)
if name is not None:
requirements = requirements.filter(name=name)
return requirements
@classmethod
def disable_credit_requirements(cls, requirement_ids):
"""
Mark the given requirements inactive.
Args:
requirement_ids(list): List of ids
Returns:
None
"""
cls.objects.filter(id__in=requirement_ids).update(active=False)
@classmethod
def get_course_requirement(cls, course_key, namespace, name):
"""
Get credit requirement of a given course.
Args:
course_key(CourseKey): The identifier for a course
namespace(str): Namespace of credit course requirements
name(str): Name of credit course requirement
Returns:
CreditRequirement object if exists
"""
try:
return cls.objects.get(
course__course_key=course_key, active=True, namespace=namespace, name=name
)
except cls.DoesNotExist:
return None
class CreditRequirementStatus(TimeStampedModel):
    """
    This model represents the status of each requirement.
    For a particular credit requirement, a user can either:
        1) Have satisfied the requirement (example: approved in-course reverification)
        2) Have failed the requirement (example: denied in-course reverification)
        3) Neither satisfied nor failed (example: the user hasn't yet attempted in-course reverification).
    Cases (1) and (2) are represented by having a CreditRequirementStatus with
    the status set to "satisfied" or "failed", respectively.
    In case (3), no CreditRequirementStatus record will exist for the requirement and user.
    """
    REQUIREMENT_STATUS_CHOICES = (
        ("satisfied", "satisfied"),
        ("failed", "failed"),
    )

    username = models.CharField(max_length=255, db_index=True)
    requirement = models.ForeignKey(CreditRequirement, related_name="statuses")
    status = models.CharField(max_length=32, choices=REQUIREMENT_STATUS_CHOICES)

    # Include additional information about why the user satisfied or failed
    # the requirement. This is specific to the type of requirement.
    # For example, the minimum grade requirement might record the user's
    # final grade when the user completes the course. This allows us to display
    # the grade to users later and to send the information to credit providers.
    # Use the `dict` callable rather than a literal `{}` default: a literal
    # dict would be a single shared mutable object, so mutating one unsaved
    # instance's reason could leak into every other new instance.
    reason = JSONField(default=dict)

    # Maintain a history of requirement status updates for auditing purposes
    history = HistoricalRecords()

    class Meta(object):  # pylint: disable=missing-docstring
        unique_together = ('username', 'requirement')

    @classmethod
    def get_statuses(cls, requirements, username):
        """
        Get credit requirement statuses of given requirement and username
        Args:
            requirement(CreditRequirement): The identifier for a requirement
            username(str): username of the user
        Returns:
            Queryset 'CreditRequirementStatus' objects
        """
        return cls.objects.filter(requirement__in=requirements, username=username)

    @classmethod
    @transaction.commit_on_success
    def add_or_update_requirement_status(cls, username, requirement, status="satisfied", reason=None):
        """
        Add or update the credit requirement status for the given username.
        Args:
            username(str): Username of the user
            requirement(CreditRequirement): 'CreditRequirement' object
            status(str): Status of the requirement
            reason(dict): Reason of the status; None is stored as {}
        """
        requirement_status, created = cls.objects.get_or_create(
            username=username,
            requirement=requirement,
            # Normalize a missing reason to {} on creation as well; previously
            # only the update branch below applied this normalization, so a
            # freshly created row could store None where others store {}.
            defaults={"reason": reason if reason else {}, "status": status}
        )
        if not created:
            requirement_status.status = status
            requirement_status.reason = reason if reason else {}
            requirement_status.save()

    @classmethod
    @transaction.commit_on_success
    def remove_requirement_status(cls, username, requirement):
        """
        Remove credit requirement status for given username.
        Args:
            username(str): Username of the user
            requirement(CreditRequirement): 'CreditRequirement' object
        """
        try:
            requirement_status = cls.objects.get(username=username, requirement=requirement)
            requirement_status.delete()
        except cls.DoesNotExist:
            # Best-effort removal: a missing row is logged, not raised, so
            # callers can request removal idempotently.
            log.exception(u'The requirement status does not exist against the username %s.', username)
            return
class CreditEligibility(TimeStampedModel):
"""
A record of a user's eligibility for credit from a specific credit
provider for a specific course.
"""
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="eligibilities")
# Deadline for when credit eligibility will expire.
# Once eligibility expires, users will no longer be able to purchase
# or request credit.
# We save the deadline as a database field just in case
# we need to override the deadline for particular students.
deadline = models.DateTimeField(
default=lambda: (
datetime.datetime.now(pytz.UTC) + datetime.timedelta(
days=getattr(settings, "CREDIT_ELIGIBILITY_EXPIRATION_DAYS", 365)
)
),
help_text=ugettext_lazy("Deadline for purchasing and requesting credit.")
)
class Meta(object): # pylint: disable=missing-docstring
unique_together = ('username', 'course')
verbose_name_plural = "Credit eligibilities"
@classmethod
def update_eligibility(cls, requirements, username, course_key):
"""
Update the user's credit eligibility for a course.
A user is eligible for credit when the user has satisfied
all requirements for credit in the course.
Arguments:
requirements (Queryset): Queryset of `CreditRequirement`s to check.
username (str): Identifier of the user being updated.
course_key (CourseKey): Identifier of the course.
Returns: tuple
"""
# Check all requirements for the course to determine if the user
# is eligible. We need to check all the *requirements*
# (not just the *statuses*) in case the user doesn't yet have
# a status for a particular requirement.
status_by_req = defaultdict(lambda: False)
for status in CreditRequirementStatus.get_statuses(requirements, username):
status_by_req[status.requirement.id] = status.status
is_eligible = all(status_by_req[req.id] == "satisfied" for req in requirements)
# If we're eligible, then mark the user as being eligible for credit.
if is_eligible:
try:
CreditEligibility.objects.create(
username=username,
course=CreditCourse.objects.get(course_key=course_key),
)
return is_eligible, True
except IntegrityError:
return is_eligible, False
else:
return is_eligible, False
@classmethod
def get_user_eligibilities(cls, username):
"""
Returns the eligibilities of given user.
Args:
username(str): Username of the user
Returns:
CreditEligibility queryset for the user
"""
return cls.objects.filter(
username=username,
course__enabled=True,
deadline__gt=datetime.datetime.now(pytz.UTC)
).select_related('course')
@classmethod
def is_user_eligible_for_credit(cls, course_key, username):
"""
Check if the given user is eligible for the provided credit course
Args:
course_key(CourseKey): The course identifier
username(str): The username of the user
Returns:
Bool True if the user eligible for credit course else False
"""
return cls.objects.filter(
course__course_key=course_key,
course__enabled=True,
username=username,
deadline__gt=datetime.datetime.now(pytz.UTC),
).exists()
def __unicode__(self):
"""Unicode representation of the credit eligibility. """
return u"{user}, {course}".format(
user=self.username,
course=self.course.course_key,
)
class CreditRequest(TimeStampedModel):
"""
A request for credit from a particular credit provider.
When a user initiates a request for credit, a CreditRequest record will be created.
Each CreditRequest is assigned a unique identifier so we can find it when the request
is approved by the provider. The CreditRequest record stores the parameters to be sent
at the time the request is made. If the user re-issues the request
(perhaps because the user did not finish filling in forms on the credit provider's site),
the request record will be updated, but the UUID will remain the same.
"""
uuid = models.CharField(max_length=32, unique=True, db_index=True)
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="credit_requests")
provider = models.ForeignKey(CreditProvider, related_name="credit_requests")
parameters = JSONField()
REQUEST_STATUS_PENDING = "pending"
REQUEST_STATUS_APPROVED = "approved"
REQUEST_STATUS_REJECTED = "rejected"
REQUEST_STATUS_CHOICES = (
(REQUEST_STATUS_PENDING, "Pending"),
(REQUEST_STATUS_APPROVED, "Approved"),
(REQUEST_STATUS_REJECTED, "Rejected"),
)
status = models.CharField(
max_length=255,
choices=REQUEST_STATUS_CHOICES,
default=REQUEST_STATUS_PENDING
)
history = HistoricalRecords()
class Meta(object): # pylint: disable=missing-docstring
# Enforce the constraint that each user can have exactly one outstanding
# request to a given provider. Multiple requests use the same UUID.
unique_together = ('username', 'course', 'provider')
get_latest_by = 'created'
@classmethod
def credit_requests_for_user(cls, username):
"""
Retrieve all credit requests for a user.
Arguments:
username (unicode): The username of the user.
Returns: list
Example Usage:
>>> CreditRequest.credit_requests_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return [
{
"uuid": request.uuid,
"timestamp": request.parameters.get("timestamp"),
"course_key": request.course.course_key,
"provider": {
"id": request.provider.provider_id,
"display_name": request.provider.display_name
},
"status": request.status
}
for request in cls.objects.select_related('course', 'provider').filter(username=username)
]
@classmethod
def get_user_request_status(cls, username, course_key):
"""
Returns the latest credit request of user against the given course.
Args:
username(str): The username of requesting user
course_key(CourseKey): The course identifier
Returns:
CreditRequest if any otherwise None
"""
try:
return cls.objects.filter(
username=username, course__course_key=course_key
).select_related('course', 'provider').latest()
except cls.DoesNotExist:
return None
def __unicode__(self):
"""Unicode representation of a credit request."""
return u"{course}, {provider}, {status}".format(
course=self.course.course_key,
provider=self.provider.provider_id, # pylint: disable=no-member
status=self.status,
)
| agpl-3.0 |
CyanogenMod/android_kernel_samsung_t1 | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
    """Print command line usage information to stdout."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Print *str* unless running in quiet (-q) mode."""
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare status value *val* against test opcode *top* with argument *arg*.

    top is a [field, relation, fixed-arg] triple from test_opcodes; relation
    is one of "eq"/"lt"/"gt". Returns 1 if the comparison holds, else 0.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex field: the status packs one decimal digit per lock; select
        # the digit indexed by 'arg' and compare it to the fixed value.
        # NOTE(review): '/' is integer division under Python 2, which this
        # script targets (print statements); under Python 3 it would yield a
        # float and break the digit extraction -- use // if ever porting.
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: allow symbolic command names as well as raw numbers.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
devclone/enigma2-9f38fd6 | lib/python/Components/Sensors.py | 104 | 1928 | from Components.FanControl import fancontrol
class Sensors:
    """Registry of hardware sensors (temperatures and fan RPM).

    Each entry in ``sensors_list`` is a tuple
    ``(type, name, unit, sensor_specific)`` where ``sensor_specific`` is the
    /proc directory path for temperature sensors and the fan id for RPM
    sensors. Sensor ids handed out by this class are simply list indices.
    """
    # (type, name, unit, directory)
    TYPE_TEMPERATURE = 0
    # (type, name, unit, fanid)
    TYPE_FAN_RPM = 1

    def __init__(self):
        # (type, name, unit, sensor_specific_dict/list)
        self.sensors_list = []
        self.addSensors()

    def getSensorsCount(self, type = None):
        """Return the number of sensors, optionally restricted to *type*."""
        if type is None:
            return len(self.sensors_list)
        return sum(1 for sensor in self.sensors_list if sensor[0] == type)

    # returns a list of sensorids of type "type"
    def getSensorsList(self, type = None):
        if type is None:
            return range(len(self.sensors_list))
        return [
            sensorid for sensorid in range(len(self.sensors_list))
            if self.sensors_list[sensorid][0] == type
        ]

    def getSensorType(self, sensorid):
        return self.sensors_list[sensorid][0]

    def getSensorName(self, sensorid):
        return self.sensors_list[sensorid][1]

    def getSensorValue(self, sensorid):
        """Read the current value of sensor *sensorid* (-1 if unknown type)."""
        value = -1
        sensor = self.sensors_list[sensorid]
        if sensor[0] == self.TYPE_TEMPERATURE:
            # Use a context manager so the /proc file handle is closed
            # promptly instead of leaking until garbage collection.
            with open("%s/value" % sensor[3], "r") as f:
                value = int(f.readline().strip())
        elif sensor[0] == self.TYPE_FAN_RPM:
            value = fancontrol.getFanSpeed(sensor[3])
        return value

    def getSensorUnit(self, sensorid):
        return self.sensors_list[sensorid][2]

    def addSensors(self):
        """Discover temperature sensors under /proc and RPM-capable fans."""
        import os
        if os.path.exists("/proc/stb/sensors"):
            for dirname in os.listdir("/proc/stb/sensors"):
                # Only 'temp*' entries describe temperature sensors.
                if dirname.startswith("temp"):
                    # Close each /proc file after the single read (the
                    # previous open(...).readline() calls leaked handles).
                    with open("/proc/stb/sensors/%s/name" % dirname, "r") as f:
                        name = f.readline().strip()
                    with open("/proc/stb/sensors/%s/unit" % dirname, "r") as f:
                        unit = f.readline().strip()
                    self.sensors_list.append(
                        (self.TYPE_TEMPERATURE, name, unit, "/proc/stb/sensors/%s" % dirname))
        for fanid in range(fancontrol.getFanCount()):
            if fancontrol.hasRPMSensor(fanid):
                self.sensors_list.append((self.TYPE_FAN_RPM, _("Fan %d") % (fanid + 1), "rpm", fanid))
sensors = Sensors()
| gpl-2.0 |
allanino/nupic | nupic/regions/ImageSensorFilters/Resize.py | 17 | 7903 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from PIL import Image
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class Resize(BaseFilter):
"""
Create resized versions of the original image, using various methods of
padding and stretching.
"""
def __init__(self,
size=None,
sizes=None,
method='fit',
simultaneous=False,
highQuality=False):
"""
size -- Target size. Either a tuple (width, height), specifying an
absolute size in pixels, or a single value, specifying a scale factor
to apply to the current size.
sizes -- List of target sizes, for creating multiple output images
for each input image. Each entry in the list must satisfy the
requirements for the 'size' parameter, above. 'size' and 'sizes'
may not be used together.
method -- Method to use for generating new images, one of:
'fit' : scale and pad to match new size, preserving aspect ratio
'crop' : scale and crop the image to fill the new size
'stretch' : stretch the image to fill new size, ignoring aspect ratio
'center' : center the original image in the new size without scaling
simultaneous -- Whether the images should be sent out of the sensor
simultaneously.
highQuality -- Whether to use high-quality sampling for resizing
instead of nearest neighbor. If highQuality is True, antialiasing is
used for downsampling and bicubic interpolation is used for
upsampling.
Example usage:
Resize the incoming image to fit within (320, 240) by scaling so that
the image fits exactly within (320, 240) but the aspect ratio is
maintained, and padding with the sensor's background color:
Resize(size=(320, 240))
Scale the image to three different sizes: 100% of the original size,
50% of the original size, and 25% of the original size, and send the
three images out of the sensor simultaneously as multiple scales:
Resize(sizes=(1.0, 0.5, 0.25), simultaneous=True)
Pad the image to fit in a larger image of size (640, 480), centering it
in the new image:
Resize(size=(640, 480), method='center')
"""
BaseFilter.__init__(self)
if (not size and not sizes) or (size and sizes):
raise RuntimeError("Must specify either 'size' or 'sizes'")
if size:
sizes = [size]
if type(sizes) not in (list, tuple):
raise ValueError("Sizes must be a list or tuple")
if type(sizes) is tuple:
sizes = list(sizes)
for i, size in enumerate(sizes):
if type(size) in (list, tuple):
if len(size) > 2:
raise ValueError("Size is too long (must be a scalar or 2-tuple)")
elif type(size) in (int, float):
if size <= 0:
raise ValueError("Sizes must be positive numbers")
sizes[i] = [size]
else:
raise TypeError("Sizes must be positive numbers")
if method not in ('fit', 'crop', 'stretch', 'center'):
raise ValueError("Unknown method "
"(options are 'fit', 'crop', 'stretch', and 'center')")
self.sizes = sizes
self.method = method
self.simultaneous = simultaneous
self.highQuality = highQuality
def process(self, image):
"""
@param image -- The image to process.
Returns a single image, or a list containing one or more images.
"""
BaseFilter.process(self, image)
sizes = []
for i, size in enumerate(self.sizes):
if len(size) == 1:
# Convert scalar sizes to absolute sizes in pixels
sizes.append((int(round(image.size[0]*float(size[0]))),
int(round(image.size[1]*float(size[0])))))
else:
sizes.append((int(size[0]), int(size[1])))
newImages = []
for size in sizes:
if image.size == size:
newImage = image
elif self.method == 'fit':
# Resize the image to fit in the target size, preserving aspect ratio
targetRatio = size[0] / float(size[1])
imageRatio = image.size[0] / float(image.size[1])
if imageRatio > targetRatio:
xSize = size[0]
scale = size[0] / float(image.size[0])
ySize = int(scale * image.size[1])
else:
ySize = size[1]
scale = size[1] / float(image.size[1])
xSize = int(scale * image.size[0])
newImage = self._resize(image, (xSize, ySize))
# Pad with the background color if necessary
if newImage.size != size:
paddedImage = Image.new('LA', size, self.background)
paddedImage.paste(newImage,
((size[0] - newImage.size[0])/2,
(size[1] - newImage.size[1])/2))
newImage = paddedImage
elif self.method == 'crop':
# Resize the image to fill the new size
targetRatio = size[0] / float(size[1])
imageRatio = image.size[0] / float(image.size[1])
if imageRatio > targetRatio:
# Original image is too wide
scale = size[1] / float(image.size[1])
newSize = (int(scale * image.size[0]), size[1])
cropStart = ((newSize[0] - size[0]) / 2, 0)
else:
# Original image is too tall
scale = size[0] / float(image.size[0])
newSize = (size[0], int(scale * image.size[1]))
cropStart = (0, (newSize[1] - size[1]) / 2)
newImage = self._resize(image, newSize)
# Crop if necessary
if newSize != size:
newImage = newImage.crop((cropStart[0], cropStart[1],
cropStart[0] + size[0], cropStart[1] + size[1]))
elif self.method == 'stretch':
# Resize the image to each target size, ignoring aspect ratio
newImage = self._resize(image, size)
elif self.method == 'center':
# Center the original image in the new image without rescaling it
newImage = Image.new('LA', size, self.background)
x = (size[0] - image.size[0]) / 2
y = (size[1] - image.size[1]) / 2
newImage.paste(image, (x, y))
newImages.append(newImage)
if not self.simultaneous:
if len(newImages) == 1:
return newImages[0]
else:
return newImages
else:
return [newImages]
def getOutputCount(self):
"""
Return the number of images returned by each call to process().
If the filter creates multiple simultaneous outputs, return a tuple:
(outputCount, simultaneousOutputCount).
"""
if not self.simultaneous:
return len(self.sizes)
else:
return 1, len(self.sizes)
    def _resize(self, image, size):
        """
        Resize the image with the appropriate sampling method.

        In high-quality mode, ANTIALIAS is used for downscaling and BICUBIC
        for upscaling; otherwise PIL's default (fast) filter is used.
        """
        if self.highQuality:
            # NOTE(review): `size < image.size` compares the (width, height)
            # tuples lexicographically (width first), not per-axis or by
            # area -- presumably intended as "is this a downscale?"; an image
            # narrower but taller than `size` takes the BICUBIC branch.
            # TODO confirm this is the intended heuristic.
            if size < image.size:
                return image.resize(size, Image.ANTIALIAS)
            else:
                return image.resize(size, Image.BICUBIC)
        else:
            return image.resize(size)
| agpl-3.0 |
gfyoung/pandas | pandas/io/formats/printing.py | 3 | 17290 | """
Printing tools.
"""
import sys
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Sized,
Tuple,
TypeVar,
Union,
)
from pandas._config import get_option
from pandas.core.dtypes.inference import is_sequence
EscapeChars = Union[Mapping[str, str], Iterable[str]]
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
def adjoin(space: int, *lists: List[str], **kwargs) -> str:
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
----------
space : int
number of spaces for padding
lists : str
list of str which being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop("strlen", len)
justfunc = kwargs.pop("justfunc", justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode="left")
nl.extend([" " * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append("".join(lines))
return "\n".join(out_lines)
def justify(texts: Iterable[str], max_len: int, mode: str = "right") -> List[str]:
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == "left":
return [x.ljust(max_len) for x in texts]
elif mode == "center":
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts]
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
# It will always return unicode text which can handled by other
# parts of the package without breakage.
#
# 2) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
def _pprint_seq(
    seq: Sequence, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds
) -> str:
    """
    internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.
    bounds length of printed sequence, depending on options
    """
    # Pick delimiters by container kind: {} for sets, [] for mutable
    # sequences, () otherwise.
    if isinstance(seq, set):
        fmt = "{{{body}}}"
    elif hasattr(seq, "__setitem__"):
        fmt = "[{body}]"
    else:
        fmt = "({body})"
    if max_seq_items is False:
        nitems = len(seq)
    else:
        nitems = max_seq_items or get_option("max_seq_items") or len(seq)
    # Iterate (rather than slice) so sets work too.
    it = iter(seq)
    shown = []
    for _ in range(min(nitems, len(seq))):
        shown.append(
            pprint_thing(next(it), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
        )
    body = ", ".join(shown)
    if len(seq) > nitems:
        body += ", ..."
    elif isinstance(seq, tuple) and len(seq) == 1:
        # Single-element tuples keep their trailing comma.
        body += ","
    return fmt.format(body=body)
def _pprint_dict(
    seq: Mapping, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds
) -> str:
    """
    internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.
    """
    if max_seq_items is False:
        nitems = len(seq)
    else:
        nitems = max_seq_items or get_option("max_seq_items") or len(seq)
    render = lambda obj: pprint_thing(
        obj, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds
    )
    shown = [
        "{key}: {val}".format(key=render(key), val=render(val))
        for key, val in list(seq.items())[:nitems]
    ]
    body = ", ".join(shown)
    if nitems < len(seq):
        body += ", ..."
    return "{{{things}}}".format(things=body)
def pprint_thing(
    thing: Any,
    _nest_lvl: int = 0,
    escape_chars: Optional[EscapeChars] = None,
    default_escapes: bool = False,
    quote_strings: bool = False,
    max_seq_items: Optional[int] = None,
) -> str:
    """
    This function is the sanctioned way of converting objects
    to a string representation and properly handles nested sequences.
    Parameters
    ----------
    thing : anything to be formatted
    _nest_lvl : internal use only. pprint_thing() is mutually-recursive
        with pprint_sequence, this argument is used to keep track of the
        current nesting level, and limit it.
    escape_chars : list or dict, optional
        Characters to escape. If a dict is passed the values are the
        replacements
    default_escapes : bool, default False
        Whether the input escape characters replaces or adds to the defaults
    quote_strings : bool, default False
        Whether to wrap string values in single quotes.
    max_seq_items : int or None, default None
        Pass through to other pretty printers to limit sequence printing
    Returns
    -------
    str
    """
    def as_escaped_string(
        thing: Any, escape_chars: Optional[EscapeChars] = escape_chars
    ) -> str:
        # Built-in replacements for tab/newline/CR.  A dict argument either
        # replaces this table outright or (with default_escapes=True)
        # extends it; a list/tuple argument selects which of the defaults
        # to apply.
        translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"}
        if isinstance(escape_chars, dict):
            if default_escapes:
                translate.update(escape_chars)
            else:
                translate = escape_chars
            escape_chars = list(escape_chars.keys())
        else:
            escape_chars = escape_chars or ()
        result = str(thing)
        for c in escape_chars:
            result = result.replace(c, translate[c])
        return result
    if hasattr(thing, "__next__"):
        # Never consume an iterator just to print it -- show its repr.
        return str(thing)
    elif isinstance(thing, dict) and _nest_lvl < get_option(
        "display.pprint_nest_depth"
    ):
        result = _pprint_dict(
            thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items
        )
    elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"):
        # Recurse into sequences until the configured nesting depth is hit;
        # beyond that, fall through to the plain string branch below.
        result = _pprint_seq(
            thing,
            _nest_lvl,
            escape_chars=escape_chars,
            quote_strings=quote_strings,
            max_seq_items=max_seq_items,
        )
    elif isinstance(thing, str) and quote_strings:
        result = f"'{as_escaped_string(thing)}'"
    else:
        result = as_escaped_string(thing)
    return result
def pprint_thing_encoded(
    object, encoding: str = "utf-8", errors: str = "replace"
) -> bytes:
    """Return pprint_thing()'s text representation encoded to bytes."""
    text = pprint_thing(object)  # unicode representation of object
    return text.encode(encoding, errors)
def enable_data_resource_formatter(enable: bool) -> None:
    """Register or disable the tableschema mimetype formatter in IPython.

    No-op when not running under IPython.
    """
    if "IPython" not in sys.modules:
        # definitely not in IPython
        return
    from IPython import get_ipython

    ip = get_ipython()
    if ip is None:
        # still not in IPython
        return
    formatters = ip.display_formatter.formatters
    mimetype = "application/vnd.dataresource+json"
    if not enable:
        # unregister tableschema mime-type
        if mimetype in formatters:
            formatters[mimetype].enabled = False
        return
    if mimetype not in formatters:
        # define tableschema formatter
        from IPython.core.formatters import BaseFormatter

        class TableSchemaFormatter(BaseFormatter):
            print_method = "_repr_data_resource_"
            _return_type = (dict,)

        # register it:
        formatters[mimetype] = TableSchemaFormatter()
    # enable it if it's been disabled:
    formatters[mimetype].enabled = True
def default_pprint(thing: Any, max_seq_items: Optional[int] = None) -> str:
    """Pretty-print *thing* with quoted strings and whitespace escaped."""
    return pprint_thing(
        thing,
        quote_strings=True,
        escape_chars=("\t", "\r", "\n"),
        max_seq_items=max_seq_items,
    )
def format_object_summary(
    obj,
    formatter: Callable,
    is_justify: bool = True,
    name: Optional[str] = None,
    indent_for_name: bool = True,
    line_break_each_value: bool = False,
) -> str:
    """
    Return the formatted obj as a unicode string
    Parameters
    ----------
    obj : object
        must be iterable and support __getitem__
    formatter : callable
        string formatter for an element
    is_justify : boolean
        should justify the display
    name : name, optional
        defaults to the class name of the obj
    indent_for_name : bool, default True
        Whether subsequent lines should be indented to
        align with the name.
    line_break_each_value : bool, default False
        If True, inserts a line break for each value of ``obj``.
        If False, only break lines when the a line of values gets wider
        than the display width.
        .. versionadded:: 0.25.0
    Returns
    -------
    summary string
    """
    # Imported locally to avoid import cycles with other io.formats modules.
    from pandas.io.formats.console import get_console_size
    from pandas.io.formats.format import get_adjustment
    display_width, _ = get_console_size()
    if display_width is None:
        display_width = get_option("display.width") or 80
    if name is None:
        name = type(obj).__name__
    if indent_for_name:
        # Continuation lines align under "Name([" (space1) or one column
        # further in (space2, inside the bracket).
        name_len = len(name)
        space1 = f'\n{(" " * (name_len + 1))}'
        space2 = f'\n{(" " * (name_len + 2))}'
    else:
        space1 = "\n"
        space2 = "\n " # space for the opening '['
    n = len(obj)
    if line_break_each_value:
        # If we want to vertically align on each value of obj, we need to
        # separate values by a line break and indent the values
        sep = ",\n " + " " * len(name)
    else:
        sep = ","
    max_seq_items = get_option("display.max_seq_items") or n
    # are we a truncated display
    is_truncated = n > max_seq_items
    # adj can optionally handle unicode eastern asian width
    adj = get_adjustment()
    def _extend_line(
        s: str, line: str, value: str, display_width: int, next_line_prefix: str
    ) -> Tuple[str, str]:
        # Append `value` to the current `line`, first flushing `line` into
        # the accumulator `s` if the addition would overflow display_width.
        if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width:
            s += line.rstrip()
            line = next_line_prefix
        line += value
        return s, line
    def best_len(values: List[str]) -> int:
        # Rendered width of the widest value; 0 for an empty list.
        if values:
            return max(adj.len(x) for x in values)
        else:
            return 0
    close = ", "
    if n == 0:
        summary = f"[]{close}"
    elif n == 1 and not line_break_each_value:
        first = formatter(obj[0])
        summary = f"[{first}]{close}"
    elif n == 2 and not line_break_each_value:
        first = formatter(obj[0])
        last = formatter(obj[-1])
        summary = f"[{first}, {last}]{close}"
    else:
        # General case: format a head and a tail slice (all values when the
        # display is not truncated), then lay them out line by line.
        if max_seq_items == 1:
            # If max_seq_items=1 show only last element
            head = []
            tail = [formatter(x) for x in obj[-1:]]
        elif n > max_seq_items:
            n = min(max_seq_items // 2, 10)
            head = [formatter(x) for x in obj[:n]]
            tail = [formatter(x) for x in obj[-n:]]
        else:
            head = []
            tail = [formatter(x) for x in obj]
        # adjust all values to max length if needed
        if is_justify:
            if line_break_each_value:
                # Justify each string in the values of head and tail, so the
                # strings will right align when head and tail are stacked
                # vertically.
                head, tail = _justify(head, tail)
            elif is_truncated or not (
                len(", ".join(head)) < display_width
                and len(", ".join(tail)) < display_width
            ):
                # Each string in head and tail should align with each other
                max_length = max(best_len(head), best_len(tail))
                head = [x.rjust(max_length) for x in head]
                tail = [x.rjust(max_length) for x in tail]
            # If we are not truncated and we are only a single
            # line, then don't justify
        if line_break_each_value:
            # Now head and tail are of type List[Tuple[str]]. Below we
            # convert them into List[str], so there will be one string per
            # value. Also truncate items horizontally if wider than
            # max_space
            max_space = display_width - len(space2)
            value = tail[0]
            for max_items in reversed(range(1, len(value) + 1)):
                pprinted_seq = _pprint_seq(value, max_seq_items=max_items)
                if len(pprinted_seq) < max_space:
                    break
            head = [_pprint_seq(x, max_seq_items=max_items) for x in head]
            tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail]
        summary = ""
        line = space2
        for max_items in range(len(head)):
            word = head[max_items] + sep + " "
            summary, line = _extend_line(summary, line, word, display_width, space2)
        if is_truncated:
            # remove trailing space of last line
            summary += line.rstrip() + space2 + "..."
            line = space2
        for max_items in range(len(tail) - 1):
            word = tail[max_items] + sep + " "
            summary, line = _extend_line(summary, line, word, display_width, space2)
        # last value: no sep added + 1 space of width used for trailing ','
        summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2)
        summary += line
        # right now close is either '' or ', '
        # Now we want to include the ']', but not the maybe space.
        close = "]" + close.rstrip(" ")
        summary += close
        if len(summary) > (display_width) or line_break_each_value:
            summary += space1
        else:  # one row
            summary += " "
        # remove initial space
        summary = "[" + summary[len(space2) :]
    return summary
def _justify(
head: List[Sequence[str]], tail: List[Sequence[str]]
) -> Tuple[List[Tuple[str, ...]], List[Tuple[str, ...]]]:
"""
Justify items in head and tail, so they are right-aligned when stacked.
Parameters
----------
head : list-like of list-likes of strings
tail : list-like of list-likes of strings
Returns
-------
tuple of list of tuples of strings
Same as head and tail, but items are right aligned when stacked
vertically.
Examples
--------
>>> _justify([['a', 'b']], [['abc', 'abcd']])
([(' a', ' b')], [('abc', 'abcd')])
"""
combined = head + tail
# For each position for the sequences in ``combined``,
# find the length of the largest string.
max_length = [0] * len(combined[0])
for inner_seq in combined:
length = [len(item) for item in inner_seq]
max_length = [max(x, y) for x, y in zip(max_length, length)]
# justify each item in each list-like in head and tail using max_length
head = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head
]
tail = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail
]
# https://github.com/python/mypy/issues/4975
# error: Incompatible return value type (got "Tuple[List[Sequence[str]],
# List[Sequence[str]]]", expected "Tuple[List[Tuple[str, ...]],
# List[Tuple[str, ...]]]")
return head, tail # type: ignore[return-value]
def format_object_attrs(
    obj: Sized, include_dtype: bool = True
) -> List[Tuple[str, Union[str, int]]]:
    """
    Return a list of tuples of the (attr, formatted_value)
    for common attrs, including dtype, name, length
    Parameters
    ----------
    obj : object
        Must be sized.
    include_dtype : bool
        If False, dtype won't be in the returned list
    Returns
    -------
    list of 2-tuple
    """
    attrs: List[Tuple[str, Union[str, int]]] = []
    if include_dtype and hasattr(obj, "dtype"):
        attrs.append(("dtype", f"'{obj.dtype}'"))
    name = getattr(obj, "name", None)
    names = getattr(obj, "names", None)
    if name is not None:
        attrs.append(("name", default_pprint(name)))
    elif names is not None and any(names):
        attrs.append(("names", default_pprint(names)))
    max_seq_items = get_option("display.max_seq_items") or len(obj)
    # Only report the length when the repr itself is truncated.
    if len(obj) > max_seq_items:
        attrs.append(("length", len(obj)))
    return attrs
class PrettyDict(Dict[_KT, _VT]):
    """Dict extension to support abbreviated __repr__"""
    def __repr__(self) -> str:
        # Delegate to pprint_thing so long dicts are elided according to the
        # "display.max_seq_items" option, like other pandas reprs.
        return pprint_thing(self)
| bsd-3-clause |
iuliat/nova | nova/tests/functional/v3/test_deferred_delete.py | 24 | 2132 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
    """API sample tests for the os-deferred-delete server actions."""
    extension_name = "os-deferred-delete"
    extra_extensions_to_load = ["os-access-ips"]
    _api_version = 'v2'
    def _get_flags(self):
        """Extend the inherited flags with the deferred-delete extension."""
        flags = super(DeferredDeleteSampleJsonTests, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.deferred_delete.'
            'Deferred_delete')
        return flags
    def setUp(self):
        super(DeferredDeleteSampleJsonTests, self).setUp()
        # Short reclaim interval so soft-deleted servers can be acted on.
        self.flags(reclaim_instance_interval=1)
    def _soft_delete_then_post(self, action_req_name):
        """Create a server, soft-delete it, then POST the given action."""
        uuid = self._post_server()
        self._do_delete('servers/%s' % uuid)
        return self._do_post(
            'servers/%s/action' % uuid, action_req_name, {})
    def test_restore(self):
        resp = self._soft_delete_then_post('restore-post-req')
        self.assertEqual(resp.status_code, 202)
        self.assertEqual(resp.content, '')
    def test_force_delete(self):
        resp = self._soft_delete_then_post('force-delete-post-req')
        self.assertEqual(resp.status_code, 202)
        self.assertEqual(resp.content, '')
| apache-2.0 |
civisanalytics/ansible-modules-core | windows/win_reboot.py | 61 | 2471 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION='''
---
module: win_reboot
short_description: Reboot a windows machine
description:
- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands.
version_added: "2.1"
options:
pre_reboot_delay_sec:
description:
- Seconds for shutdown to wait before requesting reboot
default: 2
shutdown_timeout_sec:
description:
- Maximum seconds to wait for shutdown to occur
- Increase this timeout for very slow hardware, large update applications, etc
default: 600
reboot_timeout_sec:
description:
- Maximum seconds to wait for machine to re-appear on the network and respond to a test command
- This timeout is evaluated separately for both network appearance and test command success (so maximum clock time is actually twice this value)
default: 600
connect_timeout_sec:
description:
- Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again
default: 5
test_command:
description:
- Command to expect success for to determine the machine is ready for management
default: whoami
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES='''
# unconditionally reboot the machine with all defaults
- win_reboot:
# apply updates and reboot if necessary
- win_updates:
register: update_result
- win_reboot:
when: update_result.reboot_required
# reboot a slow machine that might have lots of updates to apply
- win_reboot:
shutdown_timeout_sec: 3600
reboot_timeout_sec: 3600
'''
RETURNS='''
rebooted:
description: true if the machine was rebooted
returned: always
type: boolean
sample: true
'''
| gpl-3.0 |
qtumproject/qtum | test/functional/feature_versionbits_warning.py | 17 | 5111 | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test version bits warning system.
Generate chains with block versions that appear to be signalling unknown
soft-forks, and test that warning alerts are generated.
"""
import os
import re
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import msg_block
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
VB_PERIOD = 144 # versionbits period length for regtest
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000 # top bits of nVersion; combined with the bit below
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT) # signals the unknown bit
# Substring expected in the node's get*info() "warnings" fields.
WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
# Pattern expected in the -alertnotify output file.
VB_PATTERN = re.compile("Warning: unknown new rules activated.*versionbit")
class VersionBitsWarningTest(BitcoinTestFramework):
    """Check the node's warnings when blocks signal an unknown versionbit."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    def setup_network(self):
        # -alertnotify appends each alert message to this file.
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        # Open and close to create zero-length file
        with open(self.alert_filename, 'w', encoding='utf8'):
            pass
        self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
        self.setup_nodes()
    def send_blocks_with_version(self, peer, numblocks, version):
        """Send numblocks blocks to peer with version set"""
        tip = self.nodes[0].getbestblockhash()
        height = self.nodes[0].getblockcount()
        block_time = self.nodes[0].getblockheader(tip)["time"] + 1
        tip = int(tip, 16)
        for _ in range(numblocks):
            block = create_block(tip, create_coinbase(height + 1), block_time)
            block.nVersion = version
            block.solve()
            peer.send_message(msg_block(block))
            block_time += 1
            height += 1
            tip = block.sha256
        peer.sync_with_ping()
    def versionbits_in_alert_file(self):
        """Test that the versionbits warning has been written to the alert file."""
        alert_text = open(self.alert_filename, 'r', encoding='utf8').read()
        return VB_PATTERN.search(alert_text) is not None
    def run_test(self):
        node = self.nodes[0]
        node.add_p2p_connection(P2PInterface())
        node_deterministic_address = node.get_deterministic_priv_key().address
        # Mine one period worth of blocks
        node.generatetoaddress(VB_PERIOD, node_deterministic_address)
        self.log.info("Check that there is no warning if previous VB_BLOCKS have <VB_THRESHOLD blocks with unknown versionbits version.")
        # Build one period of blocks with < VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(node.p2p, VB_THRESHOLD - 1, VB_UNKNOWN_VERSION)
        node.generatetoaddress(VB_PERIOD - VB_THRESHOLD + 1, node_deterministic_address)
        # Check that we're not getting any versionbit-related errors in get*info()
        assert not VB_PATTERN.match(node.getmininginfo()["warnings"])
        assert not VB_PATTERN.match(node.getnetworkinfo()["warnings"])
        # Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(node.p2p, VB_THRESHOLD, VB_UNKNOWN_VERSION)
        node.generatetoaddress(VB_PERIOD - VB_THRESHOLD, node_deterministic_address)
        self.log.info("Check that there is a warning if previous VB_BLOCKS have >=VB_THRESHOLD blocks with unknown versionbits version.")
        # Mine a period worth of expected blocks so the generic block-version warning
        # is cleared. This will move the versionbit state to ACTIVE.
        node.generatetoaddress(VB_PERIOD, node_deterministic_address)
        # Stop-start the node. This is required because bitcoind will only warn once about unknown versions or unknown rules activating.
        self.restart_node(0)
        # Generating one block guarantees that we'll get out of IBD
        node.generatetoaddress(1, node_deterministic_address)
        wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=mininode_lock)
        # Generating one more block will be enough to generate an error.
        node.generatetoaddress(1, node_deterministic_address)
        # Check that get*info() shows the versionbits unknown rules warning
        assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
        assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
        # Check that the alert file shows the versionbits unknown rules warning
        wait_until(lambda: self.versionbits_in_alert_file(), timeout=60)
| mit |
jonnyhtw/cylc | lib/cylc/batch_sys_manager.py | 1 | 33030 | #!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2018 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage submission, poll and kill of a job to the batch systems.
Export the BatchSysManager class.
Batch system handler (a.k.a. job submission method) modules should be placed
under the "cylc.batch_sys_handlers" package. Each module should export the
symbol "BATCH_SYS_HANDLER" for the singleton instance that implements the job
system handler logic.
Each batch system handler class should instantiate with no argument, and may
have the following constants and methods:
batch_sys.filter_poll_output(out, job_id) => boolean
* If this method is available, it will be called after the batch system's
poll command is called and returns zero. The method should read the
output to see if job_id is still alive in the batch system, and return
True if so.
batch_sys.filter_poll_many_output(out) => job_ids
* Called after the batch system's poll many command. The method should read
the output and return a list of job IDs that are still in the batch
system.
batch_sys.filter_submit_output(out, err) => new_out, new_err
* Filter the standard output and standard error of the job submission
command. This is useful if the job submission command returns information
that should just be ignored. See also "batch_sys.SUBMIT_CMD_TMPL".
batch_sys.format_directives(job_conf) => lines
* If relevant, this method formats the job directives for a job file, if
job file directives are relevant for the batch system. The argument
"job_conf" is a dict containing the job configuration.
batch_sys.get_fail_signals(job_conf) => list of strings
* Return a list of names of signals to trap for reporting errors. Default
is ["EXIT", "ERR", "TERM", "XCPU"]. ERR and EXIT are always recommended.
EXIT is used to report premature stopping of the job script, and its trap
is unset at the end of the script.
batch_sys.get_poll_many_cmd(job-id-list) => list
* Return a list containing the shell command to poll the jobs in the
argument list.
batch_sys.get_vacation_signal(job_conf) => str
* If relevant, return a string containing the name of the signal that
indicates the job has been vacated by the batch system.
batch_sys.submit(job_file_path) => ret_code, out, err
* Submit a job and return an instance of the Popen object for the
submission. This method is useful if the job submission requires logic
beyond just running a system or shell command. See also
"batch_sys.SUBMIT_CMD".
batch_sys.SHOULD_KILL_PROC_GROUP
* A boolean to indicate whether it is necessary to kill a job by sending
a signal to its Unix process group.
batch_sys.SHOULD_POLL_PROC_GROUP
* A boolean to indicate whether it is necessary to poll a job by its PID
as well as the job ID.
batch_sys.KILL_CMD_TMPL
* A Python string template for getting the batch system command to remove
and terminate a job ID. The command is formed using the logic:
batch_sys.KILL_CMD_TMPL % {"job_id": job_id}
batch_sys.REC_ID_FROM_SUBMIT_ERR
batch_sys.REC_ID_FROM_SUBMIT_OUT
* A regular expression (compiled) to extract the job "id" from the standard
output or standard error of the job submission command.
batch_sys.SUBMIT_CMD_ENV
* A Python dict (or an iterable that can be used to update a dict)
containing extra environment variables for getting the batch system
command to submit a job file.
batch_sys.SUBMIT_CMD_TMPL
* A Python string template for getting the batch system command to submit a
job file. The command is formed using the logic:
batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path}
See also "batch_sys._job_submit_impl".
"""
import os
import shlex
from shutil import rmtree
from signal import SIGKILL
import stat
from subprocess import Popen, PIPE
import sys
import traceback
from cylc.mkdir_p import mkdir_p
from cylc.task_message import (
CYLC_JOB_PID, CYLC_JOB_INIT_TIME, CYLC_JOB_EXIT_TIME, CYLC_JOB_EXIT,
CYLC_MESSAGE)
from cylc.task_outputs import TASK_OUTPUT_SUCCEEDED
from cylc.task_job_logs import (
JOB_LOG_JOB, JOB_LOG_OUT, JOB_LOG_ERR, JOB_LOG_STATUS)
from cylc.wallclock import get_current_time_string
class JobPollContext(object):
    """Context object for a job poll.
    0 ctx.job_log_dir -- cycle/task/submit_num
    1 ctx.batch_sys_name -- batch system name
    2 ctx.batch_sys_job_id -- job ID in batch system
    3 ctx.batch_sys_exit_polled -- 0 for false, 1 for true
    4 ctx.run_status -- 0 for success, 1 for failure
    5 ctx.run_signal -- signal received on run failure
    6 ctx.time_submit_exit -- submit (exit) time
    7 ctx.time_run -- run start time
    8 ctx.time_run_exit -- run exit time
    """

    # Attribute names serialised, in order, by get_summary_str().
    _SUMMARY_ATTRS = (
        'job_log_dir', 'batch_sys_name', 'batch_sys_job_id',
        'batch_sys_exit_polled', 'run_status', 'run_signal',
        'time_submit_exit', 'time_run', 'time_run_exit')

    def __init__(self, job_log_dir):
        self.job_log_dir = job_log_dir
        self.batch_sys_name = None
        self.batch_sys_job_id = None
        self.batch_sys_exit_polled = None
        self.pid = None
        self.run_status = None
        self.run_signal = None
        self.time_submit_exit = None
        self.time_run = None
        self.time_run_exit = None
        self.messages = []

    def get_summary_str(self):
        """Return the poll context as a summary string delimited by "|"."""
        return "|".join(
            "" if value is None else str(value)
            for value in (
                getattr(self, name) for name in self._SUMMARY_ATTRS))
class BatchSysManager(object):
"""Job submission, poll and kill.
Manage the importing of job submission method modules.
"""
CYLC_BATCH_SYS_NAME = "CYLC_BATCH_SYS_NAME"
CYLC_BATCH_SYS_JOB_ID = "CYLC_BATCH_SYS_JOB_ID"
CYLC_BATCH_SYS_JOB_SUBMIT_TIME = "CYLC_BATCH_SYS_JOB_SUBMIT_TIME"
CYLC_BATCH_SYS_EXIT_POLLED = "CYLC_BATCH_SYS_EXIT_POLLED"
LINE_PREFIX_CYLC_DIR = "export CYLC_DIR="
LINE_PREFIX_BATCH_SYS_NAME = "# Job submit method: "
LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL = "# Job submit command template: "
LINE_PREFIX_EXECUTION_TIME_LIMIT = "# Execution time limit: "
LINE_PREFIX_EOF = "#EOF: "
LINE_PREFIX_JOB_LOG_DIR = "# Job log directory: "
LINE_UPDATE_CYLC_DIR = (
"# N.B. CYLC_DIR has been updated on the remote host\n")
OUT_PREFIX_COMMAND = "[TASK JOB COMMAND]"
OUT_PREFIX_MESSAGE = "[TASK JOB MESSAGE]"
OUT_PREFIX_SUMMARY = "[TASK JOB SUMMARY]"
OUT_PREFIX_CMD_ERR = "[TASK JOB ERROR]"
_INSTANCES = {}
@classmethod
def configure_suite_run_dir(cls, suite_run_dir):
"""Add local python module paths if not already done."""
for sub_dir in ["python", os.path.join("lib", "python")]:
# TODO - eventually drop the deprecated "python" sub-dir.
suite_py = os.path.join(suite_run_dir, sub_dir)
if os.path.isdir(suite_py) and suite_py not in sys.path:
sys.path.append(suite_py)
def _get_sys(self, batch_sys_name):
"""Return an instance of the class for "batch_sys_name"."""
if batch_sys_name in self._INSTANCES:
return self._INSTANCES[batch_sys_name]
for key in [
"cylc.batch_sys_handlers." + batch_sys_name,
batch_sys_name]:
try:
mod_of_name = __import__(key, fromlist=[key])
self._INSTANCES[batch_sys_name] = getattr(
mod_of_name, "BATCH_SYS_HANDLER")
return self._INSTANCES[batch_sys_name]
except ImportError:
if key == batch_sys_name:
raise
def format_directives(self, job_conf):
"""Format the job directives for a job file, if relevant."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "format_directives"):
return batch_sys.format_directives(job_conf)
def get_fail_signals(self, job_conf):
"""Return a list of failure signal names to trap in the job file."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "get_fail_signals"):
return batch_sys.get_fail_signals(job_conf)
return ["EXIT", "ERR", "TERM", "XCPU"]
def get_vacation_signal(self, job_conf):
"""Return the vacation signal name for a job file."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "get_vacation_signal"):
return batch_sys.get_vacation_signal(job_conf)
def jobs_kill(self, job_log_root, job_log_dirs):
    """Kill multiple jobs.

    job_log_root -- The log/job/ sub-directory of the suite.
    job_log_dirs -- A list containing point/name/submit_num for task jobs.

    Writes one OUT_PREFIX_SUMMARY line per job to STDOUT; any STDERR
    text from the kill is echoed to STDOUT with OUT_PREFIX_CMD_ERR.
    """
    # Note: The more efficient way to do this is to group the jobs by their
    # batch systems, and call the kill command for each batch system once.
    # However, this will make it more difficult to determine if the kill
    # command for a particular job is successful or not.
    if "$" in job_log_root:
        job_log_root = os.path.expandvars(job_log_root)
    self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
    now = get_current_time_string()
    for job_log_dir in job_log_dirs:
        ret_code, err = self.job_kill(
            os.path.join(job_log_root, job_log_dir, JOB_LOG_STATUS))
        sys.stdout.write("%s%s|%s|%d\n" % (
            self.OUT_PREFIX_SUMMARY, now, job_log_dir, ret_code))
        # Note: Print STDERR to STDOUT may look a bit strange, but it
        # requires less logic for the suite to parse the output.
        if err.strip():
            for line in err.splitlines(True):
                # Guarantee every echoed error line is newline-terminated.
                if not line.endswith("\n"):
                    line += "\n"
                sys.stdout.write("%s%s|%s|%s" % (
                    self.OUT_PREFIX_CMD_ERR, now, job_log_dir, line))
def jobs_poll(self, job_log_root, job_log_dirs):
    """Poll multiple jobs.

    job_log_root -- The log/job/ sub-directory of the suite.
    job_log_dirs -- A list containing point/name/submit_num for task jobs.

    Jobs are grouped by batch system so each poll command runs only once
    per batch system; results are written to STDOUT with the
    OUT_PREFIX_MESSAGE / OUT_PREFIX_SUMMARY markers.
    """
    if "$" in job_log_root:
        job_log_root = os.path.expandvars(job_log_root)
    self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])

    ctx_list = []  # Contexts for all relevant jobs
    ctx_list_by_batch_sys = {}  # {batch_sys_name1: [ctx1, ...], ...}
    for job_log_dir in job_log_dirs:
        ctx = self._jobs_poll_status_files(job_log_root, job_log_dir)
        if ctx is None:
            # Unreadable status file; already reported on STDERR.
            continue
        ctx_list.append(ctx)

        if not ctx.batch_sys_name or not ctx.batch_sys_job_id:
            # Lost batch system information for some reason.
            # Mark the job as if it is no longer in the batch system.
            ctx.batch_sys_exit_polled = 1
            sys.stderr.write(
                "%s/%s: incomplete batch system info\n" % (
                    ctx.job_log_dir, JOB_LOG_STATUS))

        # We can trust:
        # * Jobs previously polled to have exited the batch system.
        # * Jobs succeeded or failed with ERR/EXIT.
        if (ctx.batch_sys_exit_polled or ctx.run_status == 0 or
                ctx.run_signal in ["ERR", "EXIT"]):
            continue

        if ctx.batch_sys_name not in ctx_list_by_batch_sys:
            ctx_list_by_batch_sys[ctx.batch_sys_name] = []
        ctx_list_by_batch_sys[ctx.batch_sys_name].append(ctx)

    for batch_sys_name, my_ctx_list in ctx_list_by_batch_sys.items():
        self._jobs_poll_batch_sys(
            job_log_root, batch_sys_name, my_ctx_list)

    cur_time_str = get_current_time_string()
    for ctx in ctx_list:
        for message in ctx.messages:
            sys.stdout.write("%s%s|%s|%s\n" % (
                self.OUT_PREFIX_MESSAGE,
                cur_time_str,
                ctx.job_log_dir,
                message))
        sys.stdout.write("%s%s|%s\n" % (
            self.OUT_PREFIX_SUMMARY,
            cur_time_str,
            ctx.get_summary_str()))
def jobs_submit(self, job_log_root, job_log_dirs, remote_mode=False,
                utc_mode=False):
    """Submit multiple jobs.

    job_log_root -- The log/job/ sub-directory of the suite.
    job_log_dirs -- A list containing point/name/submit_num for task jobs.
    remote_mode -- am I running on the remote job host?
    utc_mode -- is the suite running in UTC mode?

    Writes one OUT_PREFIX_SUMMARY line per job to STDOUT, followed by
    the submit command's STDOUT/STDERR tagged with OUT_PREFIX_COMMAND.
    """
    if "$" in job_log_root:
        job_log_root = os.path.expandvars(job_log_root)
    self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])

    if remote_mode:
        items = self._jobs_submit_prep_by_stdin(job_log_root, job_log_dirs)
    else:
        items = self._jobs_submit_prep_by_args(job_log_root, job_log_dirs)
    now = get_current_time_string(override_use_utc=utc_mode)
    for job_log_dir, batch_sys_name, submit_opts in items:
        job_file_path = os.path.join(
            job_log_root, job_log_dir, JOB_LOG_JOB)
        if not batch_sys_name:
            # Preparation could not determine a batch system: report a
            # submission failure (ret code 1, empty job ID).
            sys.stdout.write("%s%s|%s|1|\n" % (
                self.OUT_PREFIX_SUMMARY, now, job_log_dir))
            continue
        ret_code, out, err, job_id = self._job_submit_impl(
            job_file_path, batch_sys_name, submit_opts)
        sys.stdout.write("%s%s|%s|%d|%s\n" % (
            self.OUT_PREFIX_SUMMARY, now, job_log_dir, ret_code, job_id))
        for key, value in [("STDERR", err), ("STDOUT", out)]:
            if value is None or not value.strip():
                continue
            for line in value.splitlines(True):
                # BUG FIX: this previously tested and extended "value"
                # instead of "line", so a final output line without a
                # trailing newline was written unterminated (compare the
                # correct handling in jobs_kill()).
                if not line.endswith("\n"):
                    line += "\n"
                sys.stdout.write("%s%s|%s|[%s] %s" % (
                    self.OUT_PREFIX_COMMAND, now, job_log_dir, key, line))
def job_kill(self, st_file_path):
    """Ask batch system to terminate the job specified in "st_file_path".

    Return a (ret_code, err_string) tuple: ret_code is 0 on success,
    non-zero on failure.
    """
    # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
    self.configure_suite_run_dir(st_file_path.rsplit(os.sep, 6)[0])
    try:
        st_file = open(st_file_path)
        # First pass: determine which batch system submitted the job.
        for line in st_file:
            if line.startswith(self.CYLC_BATCH_SYS_NAME + "="):
                batch_sys = self._get_sys(line.strip().split("=", 1)[1])
                break
        else:
            return (1,
                    "Cannot determine batch system from %s file" % (
                        JOB_LOG_STATUS))
        st_file.seek(0, 0)  # rewind
        if getattr(batch_sys, "SHOULD_KILL_PROC_GROUP", False):
            # Kill the job's whole process group directly via its PID.
            for line in st_file:
                if line.startswith(CYLC_JOB_PID + "="):
                    pid = line.strip().split("=", 1)[1]
                    try:
                        os.killpg(os.getpgid(int(pid)), SIGKILL)
                    except (OSError, ValueError) as exc:
                        traceback.print_exc()
                        return (1, str(exc))
                    else:
                        return (0, "")
        st_file.seek(0, 0)  # rewind
        if hasattr(batch_sys, "KILL_CMD_TMPL"):
            # Kill via the batch system's own kill command, using the
            # recorded batch job ID.
            for line in st_file:
                if not line.startswith(self.CYLC_BATCH_SYS_JOB_ID + "="):
                    continue
                job_id = line.strip().split("=", 1)[1]
                command = shlex.split(
                    batch_sys.KILL_CMD_TMPL % {"job_id": job_id})
                try:
                    proc = Popen(
                        command, stdin=open(os.devnull), stderr=PIPE)
                except OSError as exc:
                    # subprocess.Popen has a bad habit of not setting the
                    # filename of the executable when it raises an OSError.
                    if not exc.filename:
                        exc.filename = command[0]
                    traceback.print_exc()
                    return (1, str(exc))
                else:
                    return (proc.wait(), proc.communicate()[1])
        return (1, "Cannot determine batch job ID from %s file" % (
            JOB_LOG_STATUS))
    except IOError as exc:
        # Status file missing/unreadable.
        return (1, str(exc))
@classmethod
def _create_nn(cls, job_file_path):
    """Create NN symbolic link, if necessary.

    If NN => 01, remove numbered directories with submit numbers greater
    than 01.

    Helper for "self._job_submit_impl".
    """
    job_file_dir = os.path.dirname(job_file_path)
    source = os.path.basename(job_file_dir)  # submit number, e.g. "01"
    task_log_dir = os.path.dirname(job_file_dir)
    nn_path = os.path.join(task_log_dir, "NN")
    try:
        old_source = os.readlink(nn_path)
    except OSError:
        # NN does not exist yet (or is not a symlink).
        old_source = None
    if old_source is not None and old_source != source:
        # Repoint NN at the current submit number.
        os.unlink(nn_path)
        old_source = None
    if old_source is None:
        os.symlink(source, nn_path)
    # On submit 1, remove any left over digit directories from prev runs
    if source == "01":
        for name in os.listdir(task_log_dir):
            if name != source and name.isdigit():
                # Ignore errors, not disastrous if rmtree fails
                rmtree(
                    os.path.join(task_log_dir, name), ignore_errors=True)
def _filter_submit_output(self, st_file_path, batch_sys, out, err):
    """Filter submit command output, if relevant.

    Extract the batch job ID from the submit command's output (via the
    handler's REC_ID_FROM_SUBMIT_ERR/OUT regular expression, if any),
    record it in the job status file at "st_file_path", and let the
    handler post-process out/err. Return (out, err, job_id); job_id is
    None when no ID could be extracted.
    """
    job_id = None
    # BUG FIX: "text" and "rec_id" were previously left unbound
    # (NameError at "if rec_id:") when the handler defines neither
    # REC_ID_FROM_SUBMIT_ERR nor REC_ID_FROM_SUBMIT_OUT.
    text = None
    rec_id = None
    if hasattr(batch_sys, "REC_ID_FROM_SUBMIT_ERR"):
        text = err
        rec_id = batch_sys.REC_ID_FROM_SUBMIT_ERR
    elif hasattr(batch_sys, "REC_ID_FROM_SUBMIT_OUT"):
        text = out
        rec_id = batch_sys.REC_ID_FROM_SUBMIT_OUT
    if rec_id:
        for line in str(text).splitlines():
            match = rec_id.match(line)
            if match:
                job_id = match.group("id")
                # Append the ID and submit time to the job status file.
                job_status_file = open(st_file_path, "a")
                job_status_file.write("%s=%s\n" % (
                    self.CYLC_BATCH_SYS_JOB_ID, job_id))
                job_status_file.write("%s=%s\n" % (
                    self.CYLC_BATCH_SYS_JOB_SUBMIT_TIME,
                    get_current_time_string()))
                job_status_file.close()
                break
    if hasattr(batch_sys, "filter_submit_output"):
        out, err = batch_sys.filter_submit_output(out, err)
    return out, err, job_id
def _jobs_poll_status_files(self, job_log_root, job_log_dir):
    """Helper 1 for self.jobs_poll(job_log_root, job_log_dirs).

    Parse the job's "job.status" file into a JobPollContext. Return the
    context, or None (after reporting on STDERR) if the file cannot be
    read.
    """
    ctx = JobPollContext(job_log_dir)
    try:
        handle = open(os.path.join(
            job_log_root, ctx.job_log_dir, JOB_LOG_STATUS))
    except IOError as exc:
        sys.stderr.write(str(exc) + "\n")
        return
    for line in handle:
        if "=" not in line:
            continue
        key, value = line.strip().split("=", 1)
        if key == self.CYLC_BATCH_SYS_NAME:
            ctx.batch_sys_name = value
        elif key == self.CYLC_BATCH_SYS_JOB_ID:
            ctx.batch_sys_job_id = value
        elif key == self.CYLC_BATCH_SYS_EXIT_POLLED:
            ctx.batch_sys_exit_polled = 1
        elif key == CYLC_JOB_PID:
            ctx.pid = value
        elif key == self.CYLC_BATCH_SYS_JOB_SUBMIT_TIME:
            ctx.time_submit_exit = value
        elif key == CYLC_JOB_INIT_TIME:
            ctx.time_run = value
        elif key == CYLC_JOB_EXIT_TIME:
            ctx.time_run_exit = value
        elif key == CYLC_JOB_EXIT:
            # CYLC_JOB_EXIT records SUCCEEDED or a trapped signal name.
            if value == TASK_OUTPUT_SUCCEEDED.upper():
                ctx.run_status = 0
            else:
                ctx.run_status = 1
                ctx.run_signal = value
        elif key == CYLC_MESSAGE:
            ctx.messages.append(value)
    handle.close()
    return ctx
def _jobs_poll_batch_sys(self, job_log_root, batch_sys_name, my_ctx_list):
    """Helper 2 for self.jobs_poll(job_log_root, job_log_dirs).

    Run the batch system's poll command once for all jobs in
    "my_ctx_list", update each context's batch_sys_exit_polled flag, and
    append a timestamped marker to "job.status" for jobs that have left
    the batch system.
    """
    exp_job_ids = [ctx.batch_sys_job_id for ctx in my_ctx_list]
    bad_job_ids = list(exp_job_ids)  # IDs not seen in the poll output
    exp_pids = []
    bad_pids = []
    items = [[self._get_sys(batch_sys_name), exp_job_ids, bad_job_ids]]
    if getattr(items[0][0], "SHOULD_POLL_PROC_GROUP", False):
        # Also poll the jobs' process IDs via the "background" handler.
        exp_pids = [ctx.pid for ctx in my_ctx_list if ctx.pid is not None]
        bad_pids.extend(exp_pids)
        items.append([self._get_sys("background"), exp_pids, bad_pids])
    for batch_sys, exp_ids, bad_ids in items:
        if hasattr(batch_sys, "get_poll_many_cmd"):
            # Some poll commands may not be as simple
            cmd = batch_sys.get_poll_many_cmd(exp_ids)
        else:  # if hasattr(batch_sys, "POLL_CMD"):
            # Simple poll command that takes a list of job IDs
            cmd = [batch_sys.POLL_CMD] + exp_ids
        try:
            proc = Popen(
                cmd, stdin=open(os.devnull), stderr=PIPE, stdout=PIPE)
        except OSError as exc:
            # subprocess.Popen has a bad habit of not setting the
            # filename of the executable when it raises an OSError.
            if not exc.filename:
                exc.filename = cmd[0]
            sys.stderr.write(str(exc) + "\n")
            return
        # NOTE(review): wait() before communicate() with PIPE can
        # deadlock if the poll command writes more than a pipe buffer;
        # presumably poll output is small here — confirm.
        proc.wait()
        out, err = proc.communicate()
        sys.stderr.write(err)
        if hasattr(batch_sys, "filter_poll_many_output"):
            # Allow custom filter
            for id_ in batch_sys.filter_poll_many_output(out):
                try:
                    bad_ids.remove(id_)
                except ValueError:
                    pass
        else:
            # Just about all poll commands return a table, with column 1
            # being the job ID. The logic here should be sufficient to
            # ensure that any table header is ignored.
            for line in out.splitlines():
                try:
                    head = line.split(None, 1)[0]
                except IndexError:
                    continue
                if head in exp_ids:
                    try:
                        bad_ids.remove(head)
                    except ValueError:
                        pass

    for ctx in my_ctx_list:
        ctx.batch_sys_exit_polled = int(
            ctx.batch_sys_job_id in bad_job_ids)
        # Exited batch system, but process still running
        # This can happen to jobs in some "at" implementation
        if (ctx.batch_sys_exit_polled and
                ctx.pid in exp_pids and ctx.pid not in bad_pids):
            ctx.batch_sys_exit_polled = 0
        # Add information to "job.status"
        if ctx.batch_sys_exit_polled:
            try:
                handle = open(os.path.join(
                    job_log_root, ctx.job_log_dir, JOB_LOG_STATUS), "a")
                handle.write("%s=%s\n" % (
                    self.CYLC_BATCH_SYS_EXIT_POLLED,
                    get_current_time_string()))
                handle.close()
            except IOError as exc:
                sys.stderr.write(str(exc) + "\n")
def _job_submit_impl(
        self, job_file_path, batch_sys_name, submit_opts):
    """Helper for self.jobs_submit() and self.job_submit().

    Submit the job file at "job_file_path" via the named batch system.
    Return a (ret_code, out, err, job_id) tuple.
    """
    # Create NN symbolic link, if necessary
    self._create_nn(job_file_path)
    for name in JOB_LOG_ERR, JOB_LOG_OUT:
        try:
            os.unlink(os.path.join(job_file_path, name))
        except OSError:
            # Previous job.out/job.err may not exist; nothing to remove.
            pass

    # Start new status file
    job_status_file = open(job_file_path + ".status", "w")
    job_status_file.write(
        "%s=%s\n" % (self.CYLC_BATCH_SYS_NAME, batch_sys_name))
    job_status_file.close()

    # Submit job
    batch_sys = self._get_sys(batch_sys_name)
    proc_stdin_arg = None
    proc_stdin_value = open(os.devnull)
    if hasattr(batch_sys, "get_submit_stdin"):
        proc_stdin_arg, proc_stdin_value = batch_sys.get_submit_stdin(
            job_file_path, submit_opts)
    if hasattr(batch_sys, "submit"):
        # batch_sys.submit should handle OSError, if relevant.
        ret_code, out, err = batch_sys.submit(job_file_path, submit_opts)
    else:
        env = None
        if hasattr(batch_sys, "SUBMIT_CMD_ENV"):
            # Extend the environment for the submit command.
            env = dict(os.environ)
            env.update(batch_sys.SUBMIT_CMD_ENV)
        batch_submit_cmd_tmpl = submit_opts.get("batch_submit_cmd_tmpl")
        if batch_submit_cmd_tmpl:
            # No need to catch OSError when using shell. It is unlikely
            # that we do not have a shell, and still manage to get as far
            # as here.
            batch_sys_cmd = batch_submit_cmd_tmpl % {"job": job_file_path}
            proc = Popen(
                batch_sys_cmd,
                stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE,
                shell=True, env=env)
        else:
            command = shlex.split(
                batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path})
            try:
                proc = Popen(
                    command,
                    stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE,
                    env=env)
            except OSError as exc:
                # subprocess.Popen has a bad habit of not setting the
                # filename of the executable when it raises an OSError.
                if not exc.filename:
                    exc.filename = command[0]
                return 1, "", str(exc), ""
        # NOTE(review): on the default path "proc_stdin_value" is a file
        # object while communicate() expects string data — presumably
        # ignored because stdin is not a PIPE here; confirm against
        # handlers that implement get_submit_stdin.
        out, err = proc.communicate(proc_stdin_value)
        ret_code = proc.wait()

    # Filter submit command output, if relevant
    # Get job ID, if possible
    job_id = None
    if out or err:
        try:
            out, err, job_id = self._filter_submit_output(
                job_file_path + ".status", batch_sys, out, err)
        except OSError:
            # Could not record the job ID: kill the submitted job and
            # report the submission as failed.
            ret_code = 1
            self.job_kill(job_file_path + ".status")

    return ret_code, out, err, job_id
def _jobs_submit_prep_by_args(self, job_log_root, job_log_dirs):
    """Prepare job files for submit by reading files in arguments.

    Job files are specified in the arguments in local mode. Extract job
    submission methods and job submission command templates from each job
    file.

    Return a list, where each element contains something like:
    (job_log_dir, batch_sys_name, submit_opts)
    """
    items = []
    for job_log_dir in job_log_dirs:
        job_file_path = os.path.join(job_log_root, job_log_dir, "job")
        batch_sys_name = None
        submit_opts = {}
        # FIX: close the job file deterministically (it was previously
        # iterated via a bare open() and left for the GC to close).
        with open(job_file_path) as handle:
            for line in handle:
                if line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
                    batch_sys_name = line.replace(
                        self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
                elif line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
                    submit_opts["batch_submit_cmd_tmpl"] = line.replace(
                        self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
                elif line.startswith(self.LINE_PREFIX_EXECUTION_TIME_LIMIT):
                    submit_opts["execution_time_limit"] = float(line.replace(
                        self.LINE_PREFIX_EXECUTION_TIME_LIMIT, "").strip())
        items.append((job_log_dir, batch_sys_name, submit_opts))
    return items
def _jobs_submit_prep_by_stdin(self, job_log_root, job_log_dirs):
    """Prepare job files for submit by reading from STDIN.

    Job files are uploaded via STDIN in remote mode. Modify job
    files' CYLC_DIR for this host. Extract job submission methods
    and job submission command templates from each job file.

    Return a list, where each element contains something like:
    (job_log_dir, batch_sys_name, submit_opts)
    """
    items = [[job_log_dir, None, {}] for job_log_dir in job_log_dirs]
    items_map = {}
    for item in items:
        items_map[item[0]] = item

    handle = None  # file handle of the job file currently being written
    batch_sys_name = None
    submit_opts = {}
    job_log_dir = None
    lines = []  # header lines buffered until the job log dir is known
    # Get job files from STDIN.
    # Modify CYLC_DIR in job file, if necessary.
    # Get batch system name and batch submit command template from each job
    # file.
    # Write job file in correct location.
    while True:  # Note: "for cur_line in sys.stdin:" may hang
        cur_line = sys.stdin.readline()
        if not cur_line:
            # EOF on STDIN: close any job file still open and stop.
            if handle is not None:
                handle.close()
            break

        if cur_line.startswith(self.LINE_PREFIX_CYLC_DIR):
            # Point CYLC_DIR at this host's installation.
            old_line = cur_line
            cur_line = "%s'%s'\n" % (
                self.LINE_PREFIX_CYLC_DIR, os.environ["CYLC_DIR"])
            if old_line != cur_line:
                lines.append(self.LINE_UPDATE_CYLC_DIR)
        elif cur_line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
            batch_sys_name = cur_line.replace(
                self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
        elif cur_line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
            submit_opts["batch_submit_cmd_tmpl"] = cur_line.replace(
                self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
        elif cur_line.startswith(self.LINE_PREFIX_EXECUTION_TIME_LIMIT):
            submit_opts["execution_time_limit"] = float(cur_line.replace(
                self.LINE_PREFIX_EXECUTION_TIME_LIMIT, "").strip())
        elif cur_line.startswith(self.LINE_PREFIX_JOB_LOG_DIR):
            # Start of a new job file: open "job.tmp" under its log dir.
            job_log_dir = cur_line.replace(
                self.LINE_PREFIX_JOB_LOG_DIR, "").strip()
            mkdir_p(os.path.join(job_log_root, job_log_dir))
            handle = open(
                os.path.join(job_log_root, job_log_dir, "job.tmp"), "wb")

        if handle is None:
            # Still in the header, before the job log dir line: buffer.
            lines.append(cur_line)
        else:
            # Flush any buffered header lines, then the current line.
            for line in lines + [cur_line]:
                handle.write(line)
            lines = []
            if cur_line.startswith(self.LINE_PREFIX_EOF + job_log_dir):
                # End of this job file.
                handle.close()
                # Make it executable
                os.chmod(handle.name, (
                    os.stat(handle.name).st_mode |
                    stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
                # Rename from "*/job.tmp" to "*/job"
                os.rename(handle.name, handle.name[:-4])
                try:
                    items_map[job_log_dir][1] = batch_sys_name
                    items_map[job_log_dir][2] = submit_opts
                except KeyError:
                    # Unexpected job log dir: nothing to record.
                    pass
                # Reset per-job state for the next job file.
                handle = None
                job_log_dir = None
                batch_sys_name = None
                submit_opts = {}
    return items
| gpl-3.0 |
srsman/odoo | addons/account/installer.py | 381 | 8404 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from dateutil.relativedelta import relativedelta
import logging
from operator import itemgetter
import time
import urllib2
import urlparse
try:
import simplejson as json
except ImportError:
import json # noqa
from openerp.release import serie
from openerp.tools.translate import _
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class account_installer(osv.osv_memory):
    """Configuration wizard to install a localized chart of accounts and
    create the initial fiscal year and its periods for a company."""
    _name = 'account.installer'
    _inherit = 'res.config.installer'

    def _get_charts(self, cr, uid, context=None):
        """Return the selection list of installable chart-of-accounts
        modules, merging the apps server list (best effort) with the
        locally installed 'Account Charts' category modules."""
        modules = self.pool.get('ir.module.module')
        # try get the list on apps server
        try:
            apps_server = self.pool.get('ir.module.module').get_apps_server(cr, uid, context=context)
            up = urlparse.urlparse(apps_server)
            url = '{0.scheme}://{0.netloc}/apps/charts?serie={1}'.format(up, serie)
            j = urllib2.urlopen(url, timeout=3).read()
            apps_charts = json.loads(j)
            charts = dict(apps_charts)
        except Exception:
            # Network/parse failure: fall back to local modules only.
            charts = dict()
        # Looking for the module with the 'Account Charts' category
        category_name, category_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'module_category_localization_account_charts')
        ids = modules.search(cr, uid, [('category_id', '=', category_id)], context=context)
        if ids:
            charts.update((m.name, m.shortdesc) for m in modules.browse(cr, uid, ids, context=context))
        charts = sorted(charts.items(), key=itemgetter(1))
        charts.insert(0, ('configurable', _('Custom')))
        return charts

    _columns = {
        # Accounting
        'charts': fields.selection(_get_charts, 'Accounting Package',
            required=True,
            help="Installs localized accounting charts to match as closely as "
                 "possible the accounting needs of your company based on your "
                 "country."),
        'date_start': fields.date('Start Date', required=True),
        'date_stop': fields.date('End Date', required=True),
        'period': fields.selection([('month', 'Monthly'), ('3months', '3 Monthly')], 'Periods', required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'has_default_company': fields.boolean('Has Default Company', readonly=True),
    }

    def _default_company(self, cr, uid, context=None):
        """Default the wizard company to the current user's company."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return user.company_id and user.company_id.id or False

    def _default_has_default_company(self, cr, uid, context=None):
        """True when exactly one company exists in the database."""
        count = self.pool.get('res.company').search_count(cr, uid, [], context=context)
        return bool(count == 1)

    _defaults = {
        'date_start': lambda *a: time.strftime('%Y-01-01'),
        'date_stop': lambda *a: time.strftime('%Y-12-31'),
        'period': 'month',
        'company_id': _default_company,
        'has_default_company': _default_has_default_company,
        'charts': 'configurable'
    }

    def get_unconfigured_cmp(self, cr, uid, context=None):
        """ get the list of companies that have not been configured yet
        but don't care about the demo chart of accounts """
        company_ids = self.pool.get('res.company').search(cr, uid, [], context=context)
        cr.execute("SELECT company_id FROM account_account WHERE active = 't' AND account_account.parent_id IS NULL AND name != %s", ("Chart For Automated Tests",))
        configured_cmp = [r[0] for r in cr.fetchall()]
        return list(set(company_ids)-set(configured_cmp))

    def check_unconfigured_cmp(self, cr, uid, context=None):
        """ check if there are still unconfigured companies """
        if not self.get_unconfigured_cmp(cr, uid, context=context):
            raise osv.except_osv(_('No Unconfigured Company!'), _("There is currently no company without chart of account. The wizard will therefore not be executed."))

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Restrict the company selector to unconfigured companies."""
        if context is None: context = {}
        res = super(account_installer, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
        cmp_select = []
        # display in the widget selection only the companies that haven't been configured yet
        unconfigured_cmp = self.get_unconfigured_cmp(cr, uid, context=context)
        for field in res['fields']:
            if field == 'company_id':
                res['fields'][field]['domain'] = [('id', 'in', unconfigured_cmp)]
                res['fields'][field]['selection'] = [('', '')]
                if unconfigured_cmp:
                    cmp_select = [(line.id, line.name) for line in self.pool.get('res.company').browse(cr, uid, unconfigured_cmp)]
                    res['fields'][field]['selection'] = cmp_select
        return res

    def on_change_start_date(self, cr, uid, id, start_date=False):
        """Default the end date to one year (minus a day) after start."""
        if start_date:
            start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
            end_date = (start_date + relativedelta(months=12)) - relativedelta(days=1)
            return {'value': {'date_stop': end_date.strftime('%Y-%m-%d')}}
        return {}

    def execute(self, cr, uid, ids, context=None):
        """Create the fiscal year/periods, then run the standard installer."""
        self.execute_simple(cr, uid, ids, context)
        return super(account_installer, self).execute(cr, uid, ids, context=context)

    def execute_simple(self, cr, uid, ids, context=None):
        """Create a fiscal year covering [date_start, date_stop] for the
        selected company (if none overlaps already), with monthly or
        3-monthly periods as requested."""
        if context is None:
            context = {}
        fy_obj = self.pool.get('account.fiscalyear')
        for res in self.read(cr, uid, ids, context=context):
            if 'date_start' in res and 'date_stop' in res:
                f_ids = fy_obj.search(cr, uid, [('date_start', '<=', res['date_start']), ('date_stop', '>=', res['date_stop']), ('company_id', '=', res['company_id'][0])], context=context)
                if not f_ids:
                    # Name/code span both years when the range crosses one.
                    name = code = res['date_start'][:4]
                    if int(name) != int(res['date_stop'][:4]):
                        name = res['date_start'][:4] + '-' + res['date_stop'][:4]
                        code = res['date_start'][2:4] + '-' + res['date_stop'][2:4]
                    vals = {
                        'name': name,
                        'code': code,
                        'date_start': res['date_start'],
                        'date_stop': res['date_stop'],
                        'company_id': res['company_id'][0]
                    }
                    fiscal_id = fy_obj.create(cr, uid, vals, context=context)
                    if res['period'] == 'month':
                        fy_obj.create_period(cr, uid, [fiscal_id])
                    elif res['period'] == '3months':
                        fy_obj.create_period3(cr, uid, [fiscal_id])

    def modules_to_install(self, cr, uid, ids, context=None):
        """Add the selected chart-of-accounts module to the install set."""
        modules = super(account_installer, self).modules_to_install(
            cr, uid, ids, context=context)
        chart = self.read(cr, uid, ids, ['charts'],
                          context=context)[0]['charts']
        _logger.debug('Installing chart of accounts %s', chart)
        # 'configurable' is the pseudo-choice for a custom chart; it is
        # not a real module so it must not be installed.
        return (modules | set([chart])) - set(['has_default_company', 'configurable'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jhseu/tensorflow | tensorflow/python/kernel_tests/random/stateless_random_ops_test.py | 1 | 7104 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.platform import test
def invert_philox(key, value):
    """Invert the Philox bijection.

    Runs the 10 Philox-4x32 rounds backwards so that applying the forward
    counter scrambling to the result reproduces ``value``.
    """
    key = np.array(key, dtype=np.uint32)
    state = np.array(value, dtype=np.uint32)
    bump = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
    for rnd in reversed(range(10)):
        # Round keys for round "rnd" of the forward direction.
        k0, k1 = key + rnd * bump
        # Multiply by the modular inverses of the Philox constants.
        w0 = state[3] * 0x991a7cdb & 0xffffffff
        w2 = state[1] * 0x6d7cae67 & 0xffffffff
        # Undo the xor with the high product halves and the round keys.
        w1 = (w2 * 0xCD9E8D57 >> 32) ^ state[0] ^ k0
        w3 = (w0 * 0xD2511F53 >> 32) ^ state[2] ^ k1
        state = w0, w1, w2, w3
    return np.array(state)
class StatelessOpsTest(test.TestCase):
    """Checks stateless random ops against their stateful counterparts."""

    def _test_match(self, cases):
        """Assert each stateless op reproduces its stateful op's first draw."""
        # Stateless ops should be the same as stateful ops on the first call
        # after seed scrambling.
        cases = tuple(cases)
        key = 0x3ec8f720, 0x02461e29
        for seed in (7, 17), (11, 5), (2, 3):
            # Invert the internal seed scrambling so both variants start
            # from the same Philox key.
            preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
            preseed = preseed[::2] | preseed[1::2] << 32
            random_seed.set_random_seed(seed[0])
            with test_util.use_gpu():
                for stateless_op, stateful_op in cases:
                    stateful = stateful_op(seed=seed[1])
                    pure = stateless_op(seed=preseed)
                    self.assertAllEqual(self.evaluate(stateful), self.evaluate(pure))

    def _test_determinism(self, cases):
        """Assert outputs are equal exactly when the seeds are equal."""
        # Stateless values should be equal iff the seeds are equal (roughly)
        cases = tuple(cases)
        with self.test_session(use_gpu=True):
            for seed_type in [dtypes.int32, dtypes.int64]:
                seed_t = array_ops.placeholder(seed_type, shape=[2])
                # 25 distinct seeds, each repeated 3 times.
                seeds = [(x, y) for x in range(5) for y in range(5)] * 3
                for stateless_op, _ in cases:
                    pure = stateless_op(seed=seed_t)
                    values = [
                        (seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds
                    ]
                    for s0, v0 in values:
                        for s1, v1 in values:
                            self.assertEqual(s0 == s1, np.all(v0 == v1))

    def _float_cases(self, shape_dtypes=(None,)):
        """Yield (stateless_op, stateful_op) pairs for float distributions."""
        float_cases = (
            # Uniform distribution, with and without range
            (stateless.stateless_random_uniform, random_ops.random_uniform, {}),
            (stateless.stateless_random_uniform, random_ops.random_uniform,
             dict(minval=2.2, maxval=7.1)),
            # Normal distribution, with and without mean+stddev
            (stateless.stateless_random_normal, random_ops.random_normal, {}),
            (stateless.stateless_random_normal, random_ops.random_normal,
             dict(mean=2, stddev=3)),
            # Truncated normal distribution, with and without mean+stddev
            (stateless.stateless_truncated_normal, random_ops.truncated_normal, {}),
            (stateless.stateless_truncated_normal, random_ops.truncated_normal,
             dict(mean=3, stddev=4)),
        )
        for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
            for shape_dtype in shape_dtypes:
                for shape in (), (3,), (2, 5):
                    if shape_dtype is not None:
                        shape = constant_op.constant(shape, dtype=shape_dtype)
                    for stateless_op, stateful_op, kwds in float_cases:
                        kwds = dict(shape=shape, dtype=dtype, **kwds)
                        yield (functools.partial(stateless_op, **kwds),
                               functools.partial(stateful_op, **kwds))

    def _int_cases(self, shape_dtypes=(None,)):
        """Yield (stateless_op, stateful_op) pairs for integer uniforms."""
        for shape_dtype in shape_dtypes:
            for shape in (), (3,), (2, 5):
                if shape_dtype is not None:
                    shape = constant_op.constant(shape, dtype=shape_dtype)
                for dtype in dtypes.int32, dtypes.int64:
                    kwds = dict(minval=2, maxval=11111, dtype=dtype, shape=shape)
                    yield (functools.partial(stateless.stateless_random_uniform, **kwds),
                           functools.partial(random_ops.random_uniform, **kwds))

    def _multinomial_cases(self):
        """Yield (stateless_op, stateful_op) pairs for multinomial draws."""
        num_samples = 10
        for logits_dtype in np.float16, np.float32, np.float64:
            for output_dtype in dtypes.int32, dtypes.int64:
                for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
                                                          [0.25, 0.75]]):
                    kwds = dict(
                        logits=constant_op.constant(logits, dtype=logits_dtype),
                        num_samples=num_samples,
                        output_dtype=output_dtype)
                    yield (functools.partial(stateless.stateless_multinomial, **kwds),
                           functools.partial(random_ops.multinomial, **kwds))

    def _gamma_cases(self):
        """Yield (stateless_op, stateful_op) pairs for gamma draws."""
        for dtype in np.float16, np.float32, np.float64:
            for alpha in ([[.5, 1., 2.]], [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]):
                kwds = dict(alpha=constant_op.constant(alpha, dtype=dtype), dtype=dtype)
                yield (functools.partial(
                    stateless.stateless_random_gamma,
                    shape=(10,) + tuple(np.shape(alpha)),
                    **kwds),
                       functools.partial(random_ops.random_gamma, shape=(10,), **kwds))

    @test_util.run_deprecated_v1
    def testMatchFloat(self):
        self._test_match(self._float_cases())

    @test_util.run_deprecated_v1
    def testMatchInt(self):
        self._test_match(self._int_cases())

    @test_util.run_deprecated_v1
    def testMatchMultinomial(self):
        self._test_match(self._multinomial_cases())

    @test_util.run_deprecated_v1
    def testMatchGamma(self):
        self._test_match(self._gamma_cases())

    @test_util.run_deprecated_v1
    def testDeterminismFloat(self):
        self._test_determinism(
            self._float_cases(shape_dtypes=(dtypes.int32, dtypes.int64)))

    @test_util.run_deprecated_v1
    def testDeterminismInt(self):
        self._test_determinism(
            self._int_cases(shape_dtypes=(dtypes.int32, dtypes.int64)))

    @test_util.run_deprecated_v1
    def testDeterminismMultinomial(self):
        self._test_determinism(self._multinomial_cases())

    @test_util.run_deprecated_v1
    def testDeterminismGamma(self):
        self._test_determinism(self._gamma_cases())
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    test.main()
| apache-2.0 |
kailIII/emaresa | trunk.pe.bk/web_printscreen_zb/__openerp__.py | 3 | 1527 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 ZestyBeanz Technologies Pvt. Ltd.
# (http://wwww.zbeanztech.com)
# contact@zbeanztech.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo module manifest: metadata read by the module loader.
{
    'name': 'Web Printscreen ZB',
    'version': '1.4',
    'category': 'Web',
    'description': """
Module to export current active tree view in to excel report
""",
    'author': 'Zesty Beanz Technologies',
    'website': 'http://www.zbeanztech.com',
    # Only the web client is required.
    'depends': ['web'],
    # Client-side assets loaded by the web client.
    'js': ['static/src/js/web_printscreen_export.js'],
    'qweb': ['static/src/xml/web_printscreen_export.xml'],
    'installable': True,
    'auto_install': False,
    'web_preload': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
akarki15/mozillians | vendor-local/lib/python/djcelery/tests/test_models.py | 13 | 3460 | from __future__ import absolute_import
from datetime import datetime, timedelta
from celery import current_app
from celery import states
from celery.utils import gen_unique_id
from djcelery.models import TaskMeta, TaskSetMeta
from djcelery.tests.utils import unittest
from djcelery.utils import now
class TestModels(unittest.TestCase):
    """Regression tests for the Django result models (TaskMeta/TaskSetMeta).

    Requires a configured Django database; rows are created through the
    model managers and checked for store/restore/expiry behaviour.
    """

    def createTaskMeta(self):
        """Create (or fetch) a TaskMeta row keyed by a fresh unique task id."""
        id = gen_unique_id()
        taskmeta, created = TaskMeta.objects.get_or_create(task_id=id)
        return taskmeta

    def createTaskSetMeta(self):
        """Create (or fetch) a TaskSetMeta row keyed by a fresh unique id."""
        id = gen_unique_id()
        tasksetmeta, created = TaskSetMeta.objects.get_or_create(taskset_id=id)
        return tasksetmeta

    def test_taskmeta(self):
        """TaskMeta: store results, look them up, and expire old entries."""
        m1 = self.createTaskMeta()
        m2 = self.createTaskMeta()
        m3 = self.createTaskMeta()
        self.assertTrue(unicode(m1).startswith("<Task:"))
        self.assertTrue(m1.task_id)
        self.assertIsInstance(m1.date_done, datetime)

        self.assertEqual(TaskMeta.objects.get_task(m1.task_id).task_id,
                         m1.task_id)
        # Freshly created rows must not already be marked SUCCESS.
        self.assertNotEqual(TaskMeta.objects.get_task(m1.task_id).status,
                            states.SUCCESS)
        TaskMeta.objects.store_result(m1.task_id, True, status=states.SUCCESS)
        TaskMeta.objects.store_result(m2.task_id, True, status=states.SUCCESS)
        self.assertEqual(TaskMeta.objects.get_task(m1.task_id).status,
                         states.SUCCESS)
        self.assertEqual(TaskMeta.objects.get_task(m2.task_id).status,
                         states.SUCCESS)

        # Have to avoid save() because it applies the auto_now=True.
        TaskMeta.objects.filter(task_id=m1.task_id).update(
            date_done=now() - timedelta(days=10))

        # Only the backdated row (m1) should be reported as expired.
        expired = TaskMeta.objects.get_all_expired(
            current_app.conf.CELERY_TASK_RESULT_EXPIRES)
        self.assertIn(m1, expired)
        self.assertNotIn(m2, expired)
        self.assertNotIn(m3, expired)

        TaskMeta.objects.delete_expired(
            current_app.conf.CELERY_TASK_RESULT_EXPIRES)
        self.assertNotIn(m1, TaskMeta.objects.all())

    def test_tasksetmeta(self):
        """TaskSetMeta: restore, expire, and explicitly delete taskset rows."""
        m1 = self.createTaskSetMeta()
        m2 = self.createTaskSetMeta()
        m3 = self.createTaskSetMeta()
        self.assertTrue(unicode(m1).startswith("<TaskSet:"))
        self.assertTrue(m1.taskset_id)
        self.assertIsInstance(m1.date_done, datetime)

        self.assertEqual(
            TaskSetMeta.objects.restore_taskset(m1.taskset_id).taskset_id,
            m1.taskset_id)

        # Have to avoid save() because it applies the auto_now=True.
        TaskSetMeta.objects.filter(taskset_id=m1.taskset_id).update(
            date_done=now() - timedelta(days=10))

        # Only the backdated row (m1) should be reported as expired.
        expired = TaskSetMeta.objects.get_all_expired(
            current_app.conf.CELERY_TASK_RESULT_EXPIRES)
        self.assertIn(m1, expired)
        self.assertNotIn(m2, expired)
        self.assertNotIn(m3, expired)

        TaskSetMeta.objects.delete_expired(
            current_app.conf.CELERY_TASK_RESULT_EXPIRES)
        self.assertNotIn(m1, TaskSetMeta.objects.all())

        m4 = self.createTaskSetMeta()
        self.assertEqual(
            TaskSetMeta.objects.restore_taskset(m4.taskset_id).taskset_id,
            m4.taskset_id)

        # After an explicit delete, restore must return None.
        TaskSetMeta.objects.delete_taskset(m4.taskset_id)
        self.assertIsNone(TaskSetMeta.objects.restore_taskset(m4.taskset_id))
| bsd-3-clause |
polysquare/polysquare-ci-scripts | setup.py | 1 | 1485 | # /setup.py
#
# Installation and setup script for polysquare-ci-scripts
#
# See /LICENCE.md for Copyright information
"""Installation and setup script for polysquare-ci-scripts."""
from setuptools import (find_packages, setup)
# Package metadata and dependency declaration for polysquare-ci-scripts.
setup(name="polysquare-ci-scripts",
      version="0.0.1",
      description="Polysquare Continuous Integration Scripts",
      long_description_markdown_filename="README.md",
      author="Sam Spilsbury",
      author_email="smspillaz@gmail.com",
      url="http://github.com/polysquare/polysquare-ci-scripts",
      classifiers=["Development Status :: 3 - Alpha",
                   "Programming Language :: Python :: 2",
                   "Programming Language :: Python :: 2.7",
                   "Programming Language :: Python :: 3",
                   "Programming Language :: Python :: 3.1",
                   "Programming Language :: Python :: 3.2",
                   "Programming Language :: Python :: 3.3",
                   "Programming Language :: Python :: 3.4",
                   "Intended Audience :: Developers",
                   "Topic :: Software Development :: Build Tools",
                   "License :: OSI Approved :: MIT License"],
      license="MIT",
      keywords="development linters",
      packages=find_packages(exclude=["test"]),
      # FIX: ``requires`` is a legacy distutils metadata field that pip and
      # setuptools ignore at install time; ``install_requires`` is the
      # keyword that actually declares runtime dependencies.
      install_requires=[
          "setuptools"
      ],
      extras_require={
          "upload": ["setuptools-markdown>=0.1"]
      },
      zip_safe=True,
      include_package_data=True)
| mit |
YOTOV-LIMITED/kuma | vendor/packages/translate/storage/test_properties.py | 24 | 16806 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pytest import deprecated_call, raises
from translate.misc import wStringIO
from translate.storage import properties, test_monolingual
def test_find_delimiter_pos_simple():
    """Each delimiter type (=, :, space) is found at the right position."""
    candidates = [u"=", u":", u" "]
    cases = (
        (u"key=value", ('=', 3)),
        (u"key:value", (':', 3)),
        (u"key value", (' ', 3)),
        # NOTE this is valid in Java properties, the key is then the empty
        # string.
        (u"= value", ('=', 0)),
    )
    for text, expected in cases:
        assert properties._find_delimiter(text, candidates) == expected
def test_find_delimiter_pos_multiple():
    """When several candidate delimiters appear, the earliest one wins."""
    candidates = [u"=", u":", u" "]
    for text, expected in ((u"key=value:value", ('=', 3)),
                           (u"key:value=value", (':', 3)),
                           (u"key value=value", (' ', 3))):
        assert properties._find_delimiter(text, candidates) == expected
def test_find_delimiter_pos_none():
    """Find delimiters when there isn't one"""
    # (None, -1) is the sentinel for "no delimiter present".
    assert properties._find_delimiter(u"key", [u"=", u":", u" "]) == (None, -1)
    # Every candidate delimiter is backslash-escaped, so none counts.
    assert properties._find_delimiter(u"key\=\:\ ", [u"=", u":", u" "]) == (None, -1)
def test_find_delimiter_pos_whitespace():
    """Find delimiters when whitespace is involved"""
    assert properties._find_delimiter(u"key = value", [u"=", u":", u" "]) == ('=', 4)
    assert properties._find_delimiter(u"key : value", [u"=", u":", u" "]) == (':', 4)
    assert properties._find_delimiter(u"key value", [u"=", u":", u" "]) == (' ', 3)
    # Once a space terminates the key, a later '=' no longer counts.
    assert properties._find_delimiter(u"key value = value", [u"=", u":", u" "]) == (' ', 3)
    assert properties._find_delimiter(u"key value value", [u"=", u":", u" "]) == (' ', 3)
    # Leading whitespace shifts the reported delimiter position.
    assert properties._find_delimiter(u" key = value", [u"=", u":", u" "]) == ('=', 5)
def test_find_delimiter_pos_escapes():
    """Find delimiters when potential earlier delimiters are escaped"""
    # A backslash-escaped delimiter belongs to the key; the first
    # unescaped candidate is the real separator.
    assert properties._find_delimiter(u"key\:=value", [u"=", u":", u" "]) == ('=', 5)
    assert properties._find_delimiter(u"key\=: value", [u"=", u":", u" "]) == (':', 5)
    assert properties._find_delimiter(u"key\ value", [u"=", u":", u" "]) == (' ', 5)
    assert properties._find_delimiter(u"key\ key\ key\: = value", [u"=", u":", u" "]) == ('=', 16)
def test_find_delimiter_deprecated_fn():
    """Test that the deprecated function still actually works"""
    # find_delimeter (note the historical typo) is kept for backward
    # compatibility: it must return the right answer AND emit a
    # DeprecationWarning (checked by pytest's deprecated_call).
    assert properties.find_delimeter(u"key=value") == ('=', 3)
    deprecated_call(properties.find_delimeter, u"key=value")
def test_is_line_continuation():
    """A line continues iff it ends in an odd number of backslashes."""
    assert not properties.is_line_continuation(u"")
    assert not properties.is_line_continuation(u"some text")
    assert properties.is_line_continuation(u"""some text\\""")
    assert not properties.is_line_continuation(u"""some text\\\\""")  # Escaped \
    assert properties.is_line_continuation(u"""some text\\\\\\""")  # Odd num. \ is line continuation
    assert properties.is_line_continuation(u"""\\\\\\""")
def test_key_strip():
    """Unescaped leading/trailing whitespace is stripped from keys."""
    assert properties._key_strip(u"key") == "key"
    assert properties._key_strip(u" key") == "key"
    # Backslash-escaped whitespace is significant and must survive.
    assert properties._key_strip(u"\ key") == "\ key"
    assert properties._key_strip(u"key ") == "key"
    assert properties._key_strip(u"key\ ") == "key\ "
def test_is_comment_one_line():
    """#, !, // and a closed /* ... */ all count as one-line comments."""
    assert properties.is_comment_one_line("# comment")
    assert properties.is_comment_one_line("! comment")
    assert properties.is_comment_one_line("// comment")
    assert properties.is_comment_one_line(" # comment")
    assert properties.is_comment_one_line("/* comment */")
    # A definition with a trailing comment is not itself a comment line.
    assert not properties.is_comment_one_line("not = comment_line /* comment */")
    # An unterminated /* opens a multi-line comment instead.
    assert not properties.is_comment_one_line("/* comment ")
def test_is_comment_start():
    """Only an unterminated /* starts a multi-line comment."""
    assert properties.is_comment_start("/* comment")
    # A /* ... */ closed on the same line is a one-line comment, not a start.
    assert not properties.is_comment_start("/* comment */")
def test_is_comment_end():
    """Only a bare */ (without an opener on the line) ends a comment."""
    assert properties.is_comment_end(" comment */")
    # A /* ... */ closed on the same line is a one-line comment, not an end.
    assert not properties.is_comment_end("/* comment */")
class TestPropUnit(test_monolingual.TestMonolingualUnit):
    """Unit-level tests for a single .properties entry (propunit)."""
    UnitClass = properties.propunit

    def test_rich_get(self):
        # Overrides the inherited check with a no-op, effectively disabling
        # the rich-string test for this unit class.
        pass

    def test_rich_set(self):
        # See test_rich_get: rich-string handling is deliberately not tested.
        pass
class TestProp(test_monolingual.TestMonolingualStore):
    """Store-level tests covering the Java, Mozilla and Mac OS X .strings
    dialects of the properties format (parsing, escaping, serialisation)."""
    StoreClass = properties.propfile

    def propparse(self, propsource, personality="java", encoding=None):
        """helper that parses properties source without requiring files"""
        dummyfile = wStringIO.StringIO(propsource)
        propfile = properties.propfile(dummyfile, personality, encoding)
        return propfile

    def propregen(self, propsource):
        """helper that converts properties source to propfile object and back"""
        return str(self.propparse(propsource))

    def test_simpledefinition(self):
        """checks that a simple properties definition is parsed correctly"""
        propsource = 'test_me=I can code!'
        propfile = self.propparse(propsource)
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == "test_me"
        assert propunit.source == "I can code!"

    def test_simpledefinition_source(self):
        """checks that a simple properties definition can be regenerated as source"""
        propsource = 'test_me=I can code!'
        propregen = self.propregen(propsource)
        assert propsource + '\n' == propregen

    def test_unicode_escaping(self):
        """check that escaped unicode is converted properly"""
        # \uXXXX escapes in the source must round-trip to raw UTF-8 output
        # (the mozilla personality writes real characters, not escapes).
        propsource = "unicode=\u0411\u0416\u0419\u0428"
        messagevalue = u'\u0411\u0416\u0419\u0428'.encode("UTF-8")
        propfile = self.propparse(propsource, personality="mozilla")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == "unicode"
        assert propunit.source.encode("UTF-8") == "БЖЙШ"
        regensource = str(propfile)
        assert messagevalue in regensource
        assert "\\u" not in regensource

    def test_newlines_startend(self):
        """check that we preserve \n that appear at start and end of properties"""
        propsource = "newlines=\\ntext\\n"
        propregen = self.propregen(propsource)
        assert propsource + '\n' == propregen

    def test_whitespace_handling(self):
        """check that we remove extra whitespace around property"""
        whitespaces = (
            ('key = value', 'key', 'value'),  # Standard for baseline
            (' key = value', 'key', 'value'),  # Extra \s before key and value
            ('\ key\ = value', '\ key\ ', 'value'),  # extra space at start and end of key
            ('key = \ value ', 'key', ' value '),  # extra space at start end end of value
        )
        for propsource, key, value in whitespaces:
            propfile = self.propparse(propsource)
            propunit = propfile.units[0]
            print(repr(propsource), repr(propunit.name), repr(propunit.source))
            assert propunit.name == key
            assert propunit.source == value
            # let's reparse the output to ensure good serialisation->parsing roundtrip:
            propfile = self.propparse(str(propunit))
            propunit = propfile.units[0]
            assert propunit.name == key
            assert propunit.source == value

    def test_key_value_delimiters_simple(self):
        """test that we can handle colon, equals and space delimiter
        between key and value.  We don't test any space removal or escaping"""
        delimiters = [":", "=", " "]
        for delimiter in delimiters:
            propsource = "key%svalue" % delimiter
            print("source: '%s'\ndelimiter: '%s'" % (propsource, delimiter))
            propfile = self.propparse(propsource)
            assert len(propfile.units) == 1
            propunit = propfile.units[0]
            assert propunit.name == "key"
            assert propunit.source == "value"

    def test_comments(self):
        """checks that we handle # and ! comments"""
        markers = ['#', '!']
        for comment_marker in markers:
            propsource = '''%s A comment
key=value
''' % comment_marker
            propfile = self.propparse(propsource)
            print(repr(propsource))
            print("Comment marker: '%s'" % comment_marker)
            assert len(propfile.units) == 1
            propunit = propfile.units[0]
            assert propunit.comments == ['%s A comment' % comment_marker]

    def test_latin1(self):
        """checks that we handle non-escaped latin1 text"""
        prop_source = u"key=valú".encode('latin1')
        prop_store = self.propparse(prop_source)
        assert len(prop_store.units) == 1
        unit = prop_store.units[0]
        assert unit.source == u"valú"

    def test_fullspec_delimiters(self):
        """test the full definiation as found in Java docs"""
        proplist = ['Truth = Beauty\n', ' Truth:Beauty', 'Truth :Beauty', 'Truth Beauty']
        for propsource in proplist:
            propfile = self.propparse(propsource)
            propunit = propfile.units[0]
            print(propunit)
            assert propunit.name == "Truth"
            assert propunit.source == "Beauty"

    def test_fullspec_escaped_key(self):
        """Escaped delimeters can be in the key"""
        prop_source = u"\:\="
        prop_store = self.propparse(prop_source)
        assert len(prop_store.units) == 1
        unit = prop_store.units[0]
        print(unit)
        assert unit.name == u"\:\="

    def test_fullspec_line_continuation(self):
        """Whitespace delimiter and pre whitespace in line continuation are dropped"""
        prop_source = ur"""fruits apple, banana, pear, \
cantaloupe, watermelon, \
kiwi, mango
"""
        prop_store = self.propparse(prop_source)
        print(prop_store)
        assert len(prop_store.units) == 1
        unit = prop_store.units[0]
        print(unit)
        assert properties._find_delimiter(prop_source, [u"=", u":", u" "]) == (' ', 6)
        assert unit.name == u"fruits"
        assert unit.source == u"apple, banana, pear, cantaloupe, watermelon, kiwi, mango"

    def test_fullspec_key_without_value(self):
        """A key can have no value in which case the value is the empty string"""
        prop_source = u"cheeses"
        prop_store = self.propparse(prop_source)
        assert len(prop_store.units) == 1
        unit = prop_store.units[0]
        print(unit)
        assert unit.name == u"cheeses"
        assert unit.source == u""

    def test_mac_strings(self):
        """test various items used in Mac OS X strings files"""
        propsource = ur'''"I am a \"key\"" = "I am a \"value\"";'''.encode('utf-16')
        propfile = self.propparse(propsource, personality="strings")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == ur'I am a "key"'
        assert propunit.source.encode('utf-8') == u'I am a "value"'

    def test_mac_strings_unicode(self):
        """Ensure we can handle Unicode"""
        propsource = ur'''"I am a “key”" = "I am a “value”";'''.encode('utf-16')
        propfile = self.propparse(propsource, personality="strings")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == ur'I am a “key”'
        assert propfile.personality.encode(propunit.source) == u'I am a “value”'

    def test_mac_strings_utf8(self):
        """Ensure we can handle Unicode"""
        # Same as test_mac_strings_unicode, but with the utf-8 variant of
        # the strings personality.
        propsource = ur'''"I am a “key”" = "I am a “value”";'''.encode('utf-8')
        propfile = self.propparse(propsource, personality="strings-utf8")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == ur'I am a “key”'
        assert propfile.personality.encode(propunit.source) == u'I am a “value”'

    def test_mac_strings_newlines(self):
        """test newlines \n within a strings files"""
        propsource = ur'''"key" = "value\nvalue";'''.encode('utf-16')
        propfile = self.propparse(propsource, personality="strings")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == u'key'
        assert propunit.source.encode('utf-8') == u'value\nvalue'
        # Re-encoding must restore the literal backslash-n escape.
        assert propfile.personality.encode(propunit.source) == ur'value\nvalue'

    def test_mac_strings_comments(self):
        """test .string comment types"""
        propsource = ur'''/* Comment */
// Comment
"key" = "value";'''.encode('utf-16')
        propfile = self.propparse(propsource, personality="strings")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == u'key'
        assert propunit.source.encode('utf-8') == u'value'
        assert propunit.getnotes() == u"/* Comment */\n// Comment"

    def test_mac_strings_multilines_comments(self):
        """test .string multiline comments"""
        propsource = (u'/* Foo\n'
                      u'Bar\n'
                      u'Baz */\n'
                      u'"key" = "value"').encode('utf-16')
        propfile = self.propparse(propsource, personality="strings")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == u'key'
        assert propunit.source.encode('utf-8') == u'value'
        assert propunit.getnotes() == u"/* Foo\nBar\nBaz */"

    def test_mac_strings_comments_dropping(self):
        """.string generic (and unuseful) comments should be dropped"""
        propsource = ur'''/* No comment provided by engineer. */
"key" = "value";'''.encode('utf-16')
        propfile = self.propparse(propsource, personality="strings")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == u'key'
        assert propunit.source.encode('utf-8') == u'value'
        assert propunit.getnotes() == u""

    def test_mac_strings_quotes(self):
        """test that parser unescapes characters used as wrappers"""
        propsource = ur'"key with \"quotes\"" = "value with \"quotes\"";'.encode('utf-16')
        propfile = self.propparse(propsource, personality="strings")
        propunit = propfile.units[0]
        assert propunit.name == ur'key with "quotes"'
        assert propunit.value == ur'value with "quotes"'

    def test_mac_strings_serialization(self):
        """test that serializer quotes mac strings properly"""
        propsource = ur'"key with \"quotes\"" = "value with \"quotes\"";'.encode('utf-16')
        propfile = self.propparse(propsource, personality="strings")
        # we don't care about leading and trailing newlines and zero bytes
        # in the assert, we just want to make sure that
        # - all quotes are in place
        # - quotes inside are escaped
        # - for the sake of beauty a pair of spaces encloses the equal mark
        # - every line ends with ";"
        assert str(propfile.units[0]).strip('\n\x00') == propsource.strip('\n\x00')
        assert str(propfile).strip('\n\x00') == propsource.strip('\n\x00')

    def test_override_encoding(self):
        """test that we can override the encoding of a properties file"""
        propsource = u"key = value".encode("cp1252")
        propfile = self.propparse(propsource, personality="strings", encoding="cp1252")
        assert len(propfile.units) == 1
        propunit = propfile.units[0]
        assert propunit.name == u'key'
        assert propunit.source == u'value'

    def test_trailing_comments(self):
        """test that we handle non-unit data at the end of a file"""
        # The trailing comment becomes a second, empty unit carrying the note.
        propsource = u"key = value\n# END"
        propfile = self.propparse(propsource)
        assert len(propfile.units) == 2
        propunit = propfile.units[1]
        assert propunit.name == u''
        assert propunit.source == u''
        assert propunit.getnotes() == u"# END"

    def test_utf16_byte_order_mark(self):
        """test that BOM appears in the resulting text once only"""
        propsource = u"key1 = value1\nkey2 = value2\n".encode('utf-16')
        propfile = self.propparse(propsource, encoding='utf-16')
        result = str(propfile)
        # The first two bytes of the utf-16 source are the BOM.
        bom = propsource[:2]
        assert result.startswith(bom)
        assert bom not in result[2:]

    def test_raise_ioerror_if_cannot_detect_encoding(self):
        """Test that IOError is thrown if file encoding cannot be detected."""
        propsource = u"key = ąćęłńóśźż".encode("cp1250")
        with raises(IOError):
            self.propparse(propsource, personality="strings")
| mpl-2.0 |
jinghaomiao/apollo | cyber/python/cyber_py3/record.py | 3 | 7813 | #!/usr/bin/env python3
# ****************************************************************************
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
# -*- coding: utf-8 -*-
"""Module for wrapper cyber record."""
import collections
import importlib
import os
import sys
from google.protobuf.descriptor_pb2 import FileDescriptorProto
# Refer to the _cyber_record_wrapper.so with relative path so that it can be
# always addressed as a part of the runfiles.
wrapper_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                '../internal'))
sys.path.append(wrapper_lib_path)  # make the native wrapper importable
_CYBER_RECORD = importlib.import_module('_cyber_record_wrapper')

# One record entry: channel name ("topic"), serialized message payload,
# message type name, and its timestamp.
PyBagMessage = collections.namedtuple('PyBagMessage',
                                      'topic message data_type timestamp')
class RecordReader(object):
    """
    Class for cyber RecordReader wrapper.

    Thin Python facade over the native PyRecordReader object exposed by
    _cyber_record_wrapper; every method delegates to the C extension.
    """

    ##
    # @brief the constructor function.
    #
    # @param file_name the record file name.
    def __init__(self, file_name):
        # Owns a native reader handle; released in __del__.
        self.record_reader = _CYBER_RECORD.new_PyRecordReader(file_name)

    def __del__(self):
        # Free the underlying native reader object.
        _CYBER_RECORD.delete_PyRecordReader(self.record_reader)

    ##
    # @brief Read message from bag file.
    #
    # @param start_time the start time to read.
    # @param end_time the end time to read.
    #
    # @return return (channnel, data, data_type, timestamp)
    def read_messages(self, start_time=0, end_time=18446744073709551615):
        # Generator yielding PyBagMessage tuples; end_time defaults to
        # UINT64_MAX, i.e. no upper bound.
        while True:
            message = _CYBER_RECORD.PyRecordReader_ReadMessage(
                self.record_reader, start_time, end_time)
            if not message["end"]:
                yield PyBagMessage(message["channel_name"], message["data"],
                                   message["data_type"], message["timestamp"])
            else:
                # The wrapper sets "end" once no more messages are available.
                break

    ##
    # @brief Return message count of the channel in current record file.
    #
    # @param channel_name the channel name.
    #
    # @return return the message count.
    def get_messagenumber(self, channel_name):
        return _CYBER_RECORD.PyRecordReader_GetMessageNumber(
            self.record_reader, channel_name)

    ##
    # @brief Get the corresponding message type of channel.
    #
    # @param channel_name channel name.
    #
    # @return the message type name as a str.
    def get_messagetype(self, channel_name):
        return _CYBER_RECORD.PyRecordReader_GetMessageType(
            self.record_reader, channel_name).decode('utf-8')

    def get_protodesc(self, channel_name):
        """
        Return the serialized proto descriptor for the given channel.
        """
        return _CYBER_RECORD.PyRecordReader_GetProtoDesc(
            self.record_reader, channel_name)

    def get_headerstring(self):
        """
        Return the record file header string.
        """
        return _CYBER_RECORD.PyRecordReader_GetHeaderString(self.record_reader)

    def reset(self):
        """
        Reset the underlying native reader (PyRecordReader_Reset).
        """
        return _CYBER_RECORD.PyRecordReader_Reset(self.record_reader)

    def get_channellist(self):
        """
        Return current channel names list.
        """
        return _CYBER_RECORD.PyRecordReader_GetChannelList(self.record_reader)
class RecordWriter(object):
    """
    Class for cyber RecordWriter wrapper.

    Thin Python facade over the native PyRecordWriter object exposed by
    _cyber_record_wrapper; every method delegates to the C extension.
    """

    ##
    # @brief the constructor function.
    #
    # @param file_segmentation_size_kb size to segment the file, 0 is no segmentation.
    # @param file_segmentation_interval_sec size to segment the file, 0 is no segmentation.
    def __init__(self, file_segmentation_size_kb=0,
                 file_segmentation_interval_sec=0):
        # Owns a native writer handle; released in __del__.
        self.record_writer = _CYBER_RECORD.new_PyRecordWriter()
        _CYBER_RECORD.PyRecordWriter_SetSizeOfFileSegmentation(
            self.record_writer, file_segmentation_size_kb)
        _CYBER_RECORD.PyRecordWriter_SetIntervalOfFileSegmentation(
            self.record_writer, file_segmentation_interval_sec)

    def __del__(self):
        # Free the underlying native writer object.
        _CYBER_RECORD.delete_PyRecordWriter(self.record_writer)

    ##
    # @brief Open record file for write.
    #
    # @param path the file path.
    #
    # @return Success is True, other False.
    def open(self, path):
        return _CYBER_RECORD.PyRecordWriter_Open(self.record_writer, path)

    ##
    # @brief Close record file.
    def close(self):
        """
        Close record file.
        """
        _CYBER_RECORD.PyRecordWriter_Close(self.record_writer)

    ##
    # @brief Writer channel by channelname, typename, protodesc.
    #
    # @param channel_name the channel name to write
    # @param type_name a string of message type name.
    # @param proto_desc the message descriptor.
    #
    # @return Success is True, other False.
    def write_channel(self, channel_name, type_name, proto_desc):
        """
        Register a channel (name, type name, proto descriptor) in the record.
        """
        return _CYBER_RECORD.PyRecordWriter_WriteChannel(
            self.record_writer, channel_name, type_name, proto_desc)

    ##
    # @brief Writer msg: channelname, data, writer time.
    #
    # @param channel_name channel name to write.
    # @param data when raw is True, data processed as a rawdata, other it needs to SerializeToString
    # @param time message time.
    # @param raw the flag implies data whether or not a rawdata.
    #
    # @return Success is True, other False.
    def write_message(self, channel_name, data, time, raw=True):
        """
        Write one message; ``data`` is raw bytes when ``raw`` is True,
        otherwise a protobuf message that is serialized here.
        """
        if raw:
            # Raw mode: bytes are written as-is with an empty proto descriptor.
            return _CYBER_RECORD.PyRecordWriter_WriteMessage(
                self.record_writer, channel_name, data, time, "")

        # Protobuf mode: serialize the message and attach its serialized
        # FileDescriptorProto so readers can decode it later.
        file_desc = data.DESCRIPTOR.file
        proto = FileDescriptorProto()
        file_desc.CopyToProto(proto)
        proto.name = file_desc.name
        desc_str = proto.SerializeToString()
        return _CYBER_RECORD.PyRecordWriter_WriteMessage(
            self.record_writer,
            channel_name, data.SerializeToString(), time, desc_str)

    def set_size_fileseg(self, size_kilobytes):
        """
        Set the file-segmentation size in kilobytes (0 disables segmentation).
        """
        return _CYBER_RECORD.PyRecordWriter_SetSizeOfFileSegmentation(
            self.record_writer, size_kilobytes)

    def set_intervaltime_fileseg(self, time_sec):
        """
        Set the file-segmentation interval in seconds (0 disables segmentation).
        """
        return _CYBER_RECORD.PyRecordWriter_SetIntervalOfFileSegmentation(
            self.record_writer, time_sec)

    def get_messagenumber(self, channel_name):
        """
        Return message count for the given channel.
        """
        return _CYBER_RECORD.PyRecordWriter_GetMessageNumber(
            self.record_writer, channel_name)

    def get_messagetype(self, channel_name):
        """
        Return the message type name for the given channel as a str.
        """
        return _CYBER_RECORD.PyRecordWriter_GetMessageType(
            self.record_writer, channel_name).decode('utf-8')

    def get_protodesc(self, channel_name):
        """
        Return the serialized proto descriptor for the given channel.
        """
        return _CYBER_RECORD.PyRecordWriter_GetProtoDesc(
            self.record_writer, channel_name)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.