repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
achals/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/monkeypatch.py | 177 | 8939 | """ monkeypatching and mocking functionality. """
import os, sys
import re
from py.builtin import _basestring
RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
def pytest_funcarg__monkeypatch(request):
    """The returned ``monkeypatch`` funcarg provides these
    helper methods to modify objects, dictionaries or os.environ::

        monkeypatch.setattr(obj, name, value, raising=True)
        monkeypatch.delattr(obj, name, raising=True)
        monkeypatch.setitem(mapping, name, value)
        monkeypatch.delitem(obj, name, raising=True)
        monkeypatch.setenv(name, value, prepend=False)
        monkeypatch.delenv(name, raising=True)
        monkeypatch.syspath_prepend(path)
        monkeypatch.chdir(path)

    All modifications will be undone after the requesting
    test function has finished. The ``raising``
    parameter determines if a KeyError or AttributeError
    will be raised if the set/deletion operation has no target.
    """
    mpatch = monkeypatch()
    # undo() restores every recorded change once the test is torn down
    request.addfinalizer(mpatch.undo)
    return mpatch
def resolve(name):
    """Resolve dotted ``name`` ("pkg.mod.attr") to the final object,
    importing intermediate modules as needed.

    Raises the original ImportError if the failing segment itself is a
    missing module, otherwise wraps it; missing attributes are reported
    via annotated_getattr with the dotted prefix resolved so far.
    """
    # simplified from zope.dottedname
    parts = name.split('.')

    used = parts.pop(0)
    found = __import__(used)
    for part in parts:
        used += '.' + part
        try:
            found = getattr(found, part)
        except AttributeError:
            pass
        else:
            continue
        # we use explicit un-nesting of the handling block in order
        # to avoid nested exceptions on python 3
        try:
            __import__(used)
        except ImportError as ex:
            # str is used for py2 vs py3
            expected = str(ex).split()[-1]
            if expected == used:
                # the segment itself is the missing module: propagate as-is
                raise
            else:
                raise ImportError(
                    'import error in %s: %s' % (used, ex)
                )
        # module imported fine, so the attribute lookup must now succeed
        # (or fail with an annotated AttributeError)
        found = annotated_getattr(found, part, used)
    return found
def annotated_getattr(obj, name, ann):
    """Return ``getattr(obj, name)``, raising an AttributeError whose
    message includes the dotted location ``ann`` when the lookup fails.
    """
    try:
        value = getattr(obj, name)
    except AttributeError:
        message = '%r object at %s has no attribute %r' % (
            type(obj).__name__, ann, name
        )
        raise AttributeError(message)
    return value
def derive_importpath(import_path, raising):
    """Split a dotted ``"pkg.mod.attr"`` string into ``(attr, module)``,
    resolving the module part via resolve().

    Raises TypeError for non-string or dot-free input.  When ``raising``
    is true, verify up front (via annotated_getattr) that the attribute
    exists, so callers fail early with an annotated error.
    """
    not_a_path = (not isinstance(import_path, _basestring)
                  or "." not in import_path)
    if not_a_path:
        raise TypeError("must be absolute import path string, not %r" %
                        (import_path,))
    module_name, attr_name = import_path.rsplit('.', 1)
    module = resolve(module_name)
    if raising:
        # eager existence check; result is discarded on purpose
        annotated_getattr(module, attr_name, ann=module_name)
    return attr_name, module
class Notset:
    """Sentinel type; the module-level ``notset`` below is its only
    intended instance, marking "no value was passed"."""
    def __repr__(self):
        return "<notset>"

# sentinel default distinguishing "absent" from legitimate None values
notset = Notset()
class monkeypatch:
    """ Object keeping a record of setattr/item/env/syspath changes. """

    def __init__(self):
        # undo stacks: (target, name, old-value) for attributes and
        # (mapping, key, old-value) for dict/env entries; ``notset``
        # as old-value means "did not exist before".
        self._setattr = []
        self._setitem = []
        self._cwd = None
        self._savesyspath = None

    def setattr(self, target, name, value=notset, raising=True):
        """ Set attribute value on target, memorizing the old value.
        By default raise AttributeError if the attribute did not exist.

        For convenience you can specify a string as ``target`` which
        will be interpreted as a dotted import path, with the last part
        being the attribute name. Example:
        ``monkeypatch.setattr("os.getcwd", lambda x: "/")``
        would set the ``getcwd`` function of the ``os`` module.

        The ``raising`` value determines if the setattr should fail
        if the attribute is not already present (defaults to True
        which means it will raise).
        """
        __tracebackhide__ = True
        import inspect

        if value is notset:
            # two-argument form: ``target`` must be a dotted import string
            # and ``name`` actually holds the replacement value
            if not isinstance(target, _basestring):
                raise TypeError("use setattr(target, name, value) or "
                                "setattr(target, value) with target being a dotted "
                                "import string")
            value = name
            name, target = derive_importpath(target, raising)

        oldval = getattr(target, name, notset)
        if raising and oldval is notset:
            raise AttributeError("%r has no attribute %r" % (target, name))

        # avoid class descriptors like staticmethod/classmethod:
        # record the raw class-dict entry, not the bound/unwrapped form
        if inspect.isclass(target):
            oldval = target.__dict__.get(name, notset)
        self._setattr.append((target, name, oldval))
        setattr(target, name, value)

    def delattr(self, target, name=notset, raising=True):
        """ Delete attribute ``name`` from ``target``, by default raise
        AttributeError if the attribute did not previously exist.

        If no ``name`` is specified and ``target`` is a string
        it will be interpreted as a dotted import path with the
        last part being the attribute name.

        If ``raising`` is set to False, no exception will be raised if the
        attribute is missing.
        """
        __tracebackhide__ = True
        if name is notset:
            if not isinstance(target, _basestring):
                raise TypeError("use delattr(target, name) or "
                                "delattr(target) with target being a dotted "
                                "import string")
            name, target = derive_importpath(target, raising)

        if not hasattr(target, name):
            if raising:
                raise AttributeError(name)
        else:
            self._setattr.append((target, name, getattr(target, name, notset)))
            delattr(target, name)

    def setitem(self, dic, name, value):
        """ Set dictionary entry ``name`` to value. """
        self._setitem.append((dic, name, dic.get(name, notset)))
        dic[name] = value

    def delitem(self, dic, name, raising=True):
        """ Delete ``name`` from dict. Raise KeyError if it doesn't exist.

        If ``raising`` is set to False, no exception will be raised if the
        key is missing.
        """
        if name not in dic:
            if raising:
                raise KeyError(name)
        else:
            self._setitem.append((dic, name, dic.get(name, notset)))
            del dic[name]

    def setenv(self, name, value, prepend=None):
        """ Set environment variable ``name`` to ``value``.  If ``prepend``
        is a character, read the current environment variable value
        and prepend the ``value`` adjoined with the ``prepend`` character."""
        # os.environ only holds strings, so coerce eagerly
        value = str(value)
        if prepend and name in os.environ:
            value = value + prepend + os.environ[name]
        self.setitem(os.environ, name, value)

    def delenv(self, name, raising=True):
        """ Delete ``name`` from the environment. Raise KeyError if it does
        not exist.

        If ``raising`` is set to False, no exception will be raised if the
        environment variable is missing.
        """
        self.delitem(os.environ, name, raising=raising)

    def syspath_prepend(self, path):
        """ Prepend ``path`` to ``sys.path`` list of import locations. """
        if self._savesyspath is None:
            # snapshot the pristine sys.path only on the first call
            self._savesyspath = sys.path[:]
        sys.path.insert(0, str(path))

    def chdir(self, path):
        """ Change the current working directory to the specified path.
        Path can be a string or a py.path.local object.
        """
        if self._cwd is None:
            # remember where we started only once; undo() restores it
            self._cwd = os.getcwd()
        if hasattr(path, "chdir"):
            path.chdir()
        else:
            os.chdir(path)

    def undo(self):
        """ Undo previous changes.  This call consumes the
        undo stack. Calling it a second time has no effect unless
        you do more monkeypatching after the undo call.

        There is generally no need to call `undo()`, since it is
        called automatically during tear-down.

        Note that the same `monkeypatch` fixture is used across a
        single test function invocation. If `monkeypatch` is used both by
        the test function itself and one of the test fixtures,
        calling `undo()` will undo all of the changes made in
        both functions.
        """
        # restore in reverse order so stacked patches of the same
        # target unwind back to the very first recorded value
        for obj, name, value in reversed(self._setattr):
            if value is not notset:
                setattr(obj, name, value)
            else:
                delattr(obj, name)
        self._setattr[:] = []
        for dictionary, name, value in reversed(self._setitem):
            if value is notset:
                try:
                    del dictionary[name]
                except KeyError:
                    pass  # was already deleted, so we have the desired state
            else:
                dictionary[name] = value
        self._setitem[:] = []
        if self._savesyspath is not None:
            sys.path[:] = self._savesyspath
            self._savesyspath = None
        if self._cwd is not None:
            os.chdir(self._cwd)
            self._cwd = None
| mpl-2.0 |
fidomason/kbengine | kbe/res/scripts/common/Lib/test/test_numeric_tower.py | 118 | 7447 | # test interactions between int, float, Decimal and Fraction
import unittest
import random
import math
import sys
import operator
from test.support import run_unittest
from decimal import Decimal as D
from fractions import Fraction as F
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo the prime _PyHASH_MODULUS.
_PyHASH_MODULUS = sys.hash_info.modulus
# hash value used for positive infinity (negated for -inf)
_PyHASH_INF = sys.hash_info.inf
class HashTest(unittest.TestCase):
    """Verify that numerically equal int/float/Decimal/Fraction/complex
    values hash equal, as required by Python's numeric hash contract."""

    def check_equal_hash(self, x, y):
        # check both that x and y are equal and that their hashes are equal
        self.assertEqual(hash(x), hash(y),
                         "got different hashes for {!r} and {!r}".format(x, y))
        self.assertEqual(x, y)

    def test_bools(self):
        # bools are ints: False/0 and True/1 must agree
        self.check_equal_hash(False, 0)
        self.check_equal_hash(True, 1)

    def test_integers(self):
        # check that equal values hash equal

        # exact integers
        for i in range(-1000, 1000):
            self.check_equal_hash(i, float(i))
            self.check_equal_hash(i, D(i))
            self.check_equal_hash(i, F(i))

        # the current hash is based on reduction modulo 2**n-1 for some
        # n, so pay special attention to numbers of the form 2**n and 2**n-1.
        for i in range(100):
            n = 2**i - 1
            if n == int(float(n)):
                # only compare with float when n is exactly representable
                self.check_equal_hash(n, float(n))
                self.check_equal_hash(-n, -float(n))
            self.check_equal_hash(n, D(n))
            self.check_equal_hash(n, F(n))
            self.check_equal_hash(-n, D(-n))
            self.check_equal_hash(-n, F(-n))

            n = 2**i
            self.check_equal_hash(n, float(n))
            self.check_equal_hash(-n, -float(n))
            self.check_equal_hash(n, D(n))
            self.check_equal_hash(n, F(n))
            self.check_equal_hash(-n, D(-n))
            self.check_equal_hash(-n, F(-n))

        # random values of various sizes
        for _ in range(1000):
            e = random.randrange(300)
            n = random.randrange(-10**e, 10**e)
            self.check_equal_hash(n, D(n))
            self.check_equal_hash(n, F(n))
            if n == int(float(n)):
                self.check_equal_hash(n, float(n))

    def test_binary_floats(self):
        # check that floats hash equal to corresponding Fractions and Decimals

        # floats that are distinct but numerically equal should hash the same
        self.check_equal_hash(0.0, -0.0)

        # zeros
        self.check_equal_hash(0.0, D(0))
        self.check_equal_hash(-0.0, D(0))
        self.check_equal_hash(-0.0, D('-0.0'))
        self.check_equal_hash(0.0, F(0))

        # infinities and nans
        self.check_equal_hash(float('inf'), D('inf'))
        self.check_equal_hash(float('-inf'), D('-inf'))

        # random representable floats across a wide magnitude range
        for _ in range(1000):
            x = random.random() * math.exp(random.random()*200.0 - 100.0)
            self.check_equal_hash(x, D.from_float(x))
            self.check_equal_hash(x, F.from_float(x))

    def test_complex(self):
        # complex numbers with zero imaginary part should hash equal to
        # the corresponding float
        test_values = [0.0, -0.0, 1.0, -1.0, 0.40625, -5136.5,
                       float('inf'), float('-inf')]
        for zero in -0.0, 0.0:
            for value in test_values:
                self.check_equal_hash(value, complex(value, zero))

    def test_decimals(self):
        # check that Decimal instances that have different representations
        # but equal values give the same hash
        zeros = ['0', '-0', '0.0', '-0.0e10', '000e-10']
        for zero in zeros:
            self.check_equal_hash(D(zero), D(0))

        self.check_equal_hash(D('1.00'), D(1))
        self.check_equal_hash(D('1.00000'), D(1))
        self.check_equal_hash(D('-1.00'), D(-1))
        self.check_equal_hash(D('-1.00000'), D(-1))
        self.check_equal_hash(D('123e2'), D(12300))
        self.check_equal_hash(D('1230e1'), D(12300))
        self.check_equal_hash(D('12300'), D(12300))
        self.check_equal_hash(D('12300.0'), D(12300))
        self.check_equal_hash(D('12300.00'), D(12300))
        self.check_equal_hash(D('12300.000'), D(12300))

    def test_fractions(self):
        # check special case for fractions where either the numerator
        # or the denominator is a multiple of _PyHASH_MODULUS
        self.assertEqual(hash(F(1, _PyHASH_MODULUS)), _PyHASH_INF)
        self.assertEqual(hash(F(-1, 3*_PyHASH_MODULUS)), -_PyHASH_INF)
        self.assertEqual(hash(F(7*_PyHASH_MODULUS, 1)), 0)
        self.assertEqual(hash(F(-_PyHASH_MODULUS, 1)), 0)

    def test_hash_normalization(self):
        # Test for a bug encountered while changing long_hash.
        #
        # Given objects x and y, it should be possible for y's
        # __hash__ method to return hash(x) in order to ensure that
        # hash(x) == hash(y). But hash(x) is not exactly equal to the
        # result of x.__hash__(): there's some internal normalization
        # to make sure that the result fits in a C long, and is not
        # equal to the invalid hash value -1. This internal
        # normalization must therefore not change the result of
        # hash(x) for any x.
        class HalibutProxy:
            def __hash__(self):
                return hash('halibut')
            def __eq__(self, other):
                return other == 'halibut'
        # equal + equal-hash objects must collapse to one set entry
        x = {'halibut', HalibutProxy()}
        self.assertEqual(len(x), 1)
class ComparisonTest(unittest.TestCase):
    """Verify ordering across mixed numeric types, and that complex
    supports equality but rejects order comparisons."""

    def test_mixed_comparisons(self):

        # ordered list of distinct test values of various types:
        # int, float, Fraction, Decimal
        test_values = [
            float('-inf'),
            D('-1e425000000'),
            -1e308,
            F(-22, 7),
            -3.14,
            -2,
            0.0,
            1e-320,
            True,
            F('1.2'),
            D('1.3'),
            float('1.4'),
            F(275807, 195025),
            D('1.414213562373095048801688724'),
            F(114243, 80782),
            F(473596569, 84615),
            7e200,
            D('infinity'),
            ]
        # every earlier value must compare strictly less than every later one,
        # in all four ordering directions
        for i, first in enumerate(test_values):
            for second in test_values[i+1:]:
                self.assertLess(first, second)
                self.assertLessEqual(first, second)
                self.assertGreater(second, first)
                self.assertGreaterEqual(second, first)

    def test_complex(self):
        # comparisons with complex are special: equality and inequality
        # comparisons should always succeed, but order comparisons should
        # raise TypeError.
        z = 1.0 + 0j
        w = -3.14 + 2.7j
        for v in 1, 1.0, F(1), D(1), complex(1):
            self.assertEqual(z, v)
            self.assertEqual(v, z)
        for v in 2, 2.0, F(2), D(2), complex(2):
            self.assertNotEqual(z, v)
            self.assertNotEqual(v, z)
            self.assertNotEqual(w, v)
            self.assertNotEqual(v, w)
        for v in (1, 1.0, F(1), D(1), complex(1),
                  2, 2.0, F(2), D(2), complex(2), w):
            for op in operator.le, operator.lt, operator.ge, operator.gt:
                self.assertRaises(TypeError, op, z, v)
                self.assertRaises(TypeError, op, v, z)
def test_main():
    # entry point for the CPython regression-test driver
    run_unittest(HashTest, ComparisonTest)

if __name__ == '__main__':
    test_main()
| lgpl-3.0 |
liangazhou/django-rdp | packages/eclipse/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/xml/etree/ElementTree.py | 22 | 56589 | #
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
    # public symbols
    "Comment",
    "dump",
    "Element", "ElementTree",
    "fromstring", "fromstringlist",
    "iselement", "iterparse",
    "parse", "ParseError",
    "PI", "ProcessingInstruction",
    "QName",
    "SubElement",
    "tostring", "tostringlist",
    "TreeBuilder",
    "VERSION",
    "XML",
    "XMLParser", "XMLTreeBuilder",
    ]

# toolkit version (ElementTree 1.3 API)
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import sys
import re
import warnings
class _SimpleElementPath(object):
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag, namespaces=None):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None, namespaces=None):
elem = self.find(element, tag)
if elem is None:
return default
return elem.text or ""
def iterfind(self, element, tag, namespaces=None):
if tag[:3] == ".//":
for elem in element.iter(tag[3:]):
yield elem
for elem in element:
if elem.tag == tag:
yield elem
def findall(self, element, tag, namespaces=None):
return list(self.iterfind(element, tag, namespaces))
try:
    from . import ElementPath
except ImportError:
    # running outside the package: fall back to the limited emulation above
    ElementPath = _SimpleElementPath()
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
    # parser sets .code (expat error code) and .position (line, column)
    pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
    """Return a true value if *element* appears to be a valid element
    object (an Element instance, or anything with a ``tag`` attribute)."""
    # FIXME: not sure about this; might be a better idea to look
    # for tag/attrib/text attributes
    if isinstance(element, Element):
        return True
    return hasattr(element, "tag")
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element(object):
    # <tag attrib>text<child/>...</tag>tail

    ##
    # (Attribute) Element tag.

    tag = None

    ##
    # (Attribute) Element attribute dictionary. Where possible, use
    # {@link #Element.get},
    # {@link #Element.set},
    # {@link #Element.keys}, and
    # {@link #Element.items} to access
    # element attributes.

    attrib = None

    ##
    # (Attribute) Text before first subelement. This is either a
    # string or the value None. Note that if there was no text, this
    # attribute may be either None or an empty string, depending on
    # the parser.

    text = None

    ##
    # (Attribute) Text after this element's end tag, but before the
    # next sibling element's start tag. This is either a string or
    # the value None. Note that if there was no text, this attribute
    # may be either None or an empty string, depending on the parser.

    tail = None # text after end tag, if any

    # constructor

    def __init__(self, tag, attrib={}, **extra):
        # copy before updating so the caller's dict (and the shared
        # default) is never mutated
        attrib = attrib.copy()
        attrib.update(extra)
        self.tag = tag
        self.attrib = attrib
        self._children = []

    def __repr__(self):
        return "<Element %s at 0x%x>" % (repr(self.tag), id(self))

    ##
    # Creates a new element object of the same type as this element.
    #
    # @param tag Element tag.
    # @param attrib Element attributes, given as a dictionary.
    # @return A new element instance.

    def makeelement(self, tag, attrib):
        return self.__class__(tag, attrib)

    ##
    # (Experimental) Copies the current element. This creates a
    # shallow copy; subelements will be shared with the original tree.
    #
    # @return A new element instance.

    def copy(self):
        elem = self.makeelement(self.tag, self.attrib)
        elem.text = self.text
        elem.tail = self.tail
        elem[:] = self
        return elem

    ##
    # Returns the number of subelements. Note that this only counts
    # full elements; to check if there's any content in an element, you
    # have to check both the length and the <b>text</b> attribute.
    #
    # @return The number of subelements.

    def __len__(self):
        return len(self._children)

    def __nonzero__(self):
        # Python 2 truth test; an element with no children is falsy,
        # which is surprising -- hence the FutureWarning
        warnings.warn(
            "The behavior of this method will change in future versions. "
            "Use specific 'len(elem)' or 'elem is not None' test instead.",
            FutureWarning, stacklevel=2
            )
        return len(self._children) != 0 # emulate old behaviour, for now

    ##
    # Returns the given subelement, by index.
    #
    # @param index What subelement to return.
    # @return The given subelement.
    # @exception IndexError If the given element does not exist.

    def __getitem__(self, index):
        return self._children[index]

    ##
    # Replaces the given subelement, by index.
    #
    # @param index What subelement to replace.
    # @param element The new element value.
    # @exception IndexError If the given element does not exist.

    def __setitem__(self, index, element):
        # if isinstance(index, slice):
        #     for elt in element:
        #         assert iselement(elt)
        # else:
        #     assert iselement(element)
        self._children[index] = element

    ##
    # Deletes the given subelement, by index.
    #
    # @param index What subelement to delete.
    # @exception IndexError If the given element does not exist.

    def __delitem__(self, index):
        del self._children[index]

    ##
    # Adds a subelement to the end of this element. In document order,
    # the new element will appear after the last existing subelement (or
    # directly after the text, if it's the first subelement), but before
    # the end tag for this element.
    #
    # @param element The element to add.

    def append(self, element):
        # assert iselement(element)
        self._children.append(element)

    ##
    # Appends subelements from a sequence.
    #
    # @param elements A sequence object with zero or more elements.
    # @since 1.3

    def extend(self, elements):
        # for element in elements:
        #     assert iselement(element)
        self._children.extend(elements)

    ##
    # Inserts a subelement at the given position in this element.
    #
    # @param index Where to insert the new subelement.

    def insert(self, index, element):
        # assert iselement(element)
        self._children.insert(index, element)

    ##
    # Removes a matching subelement. Unlike the <b>find</b> methods,
    # this method compares elements based on identity, not on tag
    # value or contents. To remove subelements by other means, the
    # easiest way is often to use a list comprehension to select what
    # elements to keep, and use slice assignment to update the parent
    # element.
    #
    # @param element What element to remove.
    # @exception ValueError If a matching element could not be found.

    def remove(self, element):
        # assert iselement(element)
        self._children.remove(element)

    ##
    # (Deprecated) Returns all subelements. The elements are returned
    # in document order.
    #
    # @return A list of subelements.
    # @defreturn list of Element instances

    def getchildren(self):
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'list(elem)' or iteration over elem instead.",
            DeprecationWarning, stacklevel=2
            )
        return self._children

    ##
    # Finds the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None

    def find(self, path, namespaces=None):
        return ElementPath.find(self, path, namespaces)

    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The text content of the first matching element, or the
    #     default value no element was found. Note that if the element
    #     is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string

    def findtext(self, path, default=None, namespaces=None):
        return ElementPath.findtext(self, path, default, namespaces)

    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return A list or other sequence containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances

    def findall(self, path, namespaces=None):
        return ElementPath.findall(self, path, namespaces)

    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return An iterator or sequence containing all matching elements,
    #    in document order.
    # @defreturn a generated sequence of Element instances

    def iterfind(self, path, namespaces=None):
        return ElementPath.iterfind(self, path, namespaces)

    ##
    # Resets an element. This function removes all subelements, clears
    # all attributes, and sets the <b>text</b> and <b>tail</b> attributes
    # to None.

    def clear(self):
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None

    ##
    # Gets an element attribute. Equivalent to <b>attrib.get</b>, but
    # some implementations may handle this a bit more efficiently.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    #     attribute was not found.
    # @defreturn string or None

    def get(self, key, default=None):
        return self.attrib.get(key, default)

    ##
    # Sets an element attribute. Equivalent to <b>attrib[key] = value</b>,
    # but some implementations may handle this a bit more efficiently.
    #
    # @param key What attribute to set.
    # @param value The attribute value.

    def set(self, key, value):
        self.attrib[key] = value

    ##
    # Gets a list of attribute names. The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    # Equivalent to <b>attrib.keys()</b>.
    #
    # @return A list of element attribute names.
    # @defreturn list of strings

    def keys(self):
        return self.attrib.keys()

    ##
    # Gets element attributes, as a sequence. The attributes are
    # returned in an arbitrary order. Equivalent to <b>attrib.items()</b>.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples

    def items(self):
        return self.attrib.items()

    ##
    # Creates a tree iterator. The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, new or removed
    # elements may or may not be included. To get a stable set, use the
    # list() function on the iterator, and loop over the resulting list.
    #
    # @param tag What tags to look for (default is to return all elements).
    # @return An iterator containing all the matching elements.
    # @defreturn iterator

    def iter(self, tag=None):
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            yield self
        # depth-first, pre-order recursion over the subtree
        for e in self._children:
            for e in e.iter(tag):
                yield e

    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'elem.iter()' or 'list(elem.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
            )
        return list(self.iter(tag))

    ##
    # Creates a text iterator. The iterator loops over this element
    # and all subelements, in document order, and returns all inner
    # text.
    #
    # @return An iterator containing all inner text.
    # @defreturn iterator

    def itertext(self):
        tag = self.tag
        # skip comment/PI nodes, whose tag is a function rather than a string
        if not isinstance(tag, basestring) and tag is not None:
            return
        if self.text:
            yield self.text
        for e in self:
            for s in e.itertext():
                yield s
            if e.tail:
                yield e.tail
# compatibility aliases for code written against older ElementTree releases
_Element = _ElementInterface = Element
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
    """Create an element named *tag* with the given attributes, append
    it to *parent*, and return the new element.

    The mutable ``attrib={}`` default is kept for API compatibility; it
    is safe because the dict is copied before being updated.
    """
    attributes = dict(attrib)
    attributes.update(extra)
    child = parent.makeelement(tag, attributes)
    parent.append(child)
    return child
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
    """Return an element that the standard serializer writes out as an
    XML comment holding *text*; its tag is the Comment factory itself."""
    node = Element(Comment)
    node.text = text
    return node
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
    """Return a processing-instruction element; its tag is the
    ProcessingInstruction factory and its text is ``target`` or
    ``"target text"`` when *text* is given."""
    node = Element(ProcessingInstruction)
    node.text = target if not text else target + " " + text
    return node

# traditional short alias
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName(object):
    """Wrap a QName value ("{uri}local") so the serializer can apply
    proper namespace handling on output.

    ``QName(text)`` stores *text* as given; ``QName(uri, tag)`` builds
    the "{uri}tag" form.
    """

    def __init__(self, text_or_uri, tag=None):
        if tag:
            text_or_uri = "{%s}%s" % (text_or_uri, tag)
        self.text = text_or_uri

    def __str__(self):
        return self.text

    def __hash__(self):
        return hash(self.text)

    def __cmp__(self, other):
        # Python 2 ordering: compare by the underlying text value
        other_text = other.text if isinstance(other, QName) else other
        return cmp(self.text, other_text)
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree(object):
    """ElementTree wrapper class.

    Represents an entire element hierarchy, and adds some extra support
    for serialization to and from standard XML.
    """
    def __init__(self, element=None, file=None):
        """Create a tree, optionally rooted at *element* or loaded from *file*.

        element -- optional root element.
        file -- optional file handle or file name; if given, the tree is
            initialized with the contents of this XML file.
        """
        # assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)
    def getroot(self):
        """Return the root element for this tree (an Element, or None)."""
        return self._root
    def _setroot(self, element):
        """Replace the root element for this tree.

        This discards the current contents of the tree, and replaces it
        with the given element.  Use with care.
        """
        # assert iselement(element)
        self._root = element
    def parse(self, source, parser=None):
        """Load an external XML document into this element tree.

        source -- a file name, or a file object implementing read(n).
        parser -- optional parser instance; the standard XMLParser is
            used when omitted.
        Returns the document root element.
        Raises ParseError if the parser fails to parse the document.
        """
        close_source = False
        if not hasattr(source, "read"):
            # we opened the file, so we are responsible for closing it
            source = open(source, "rb")
            close_source = True
        try:
            if not parser:
                parser = XMLParser(target=TreeBuilder())
            while 1:
                data = source.read(65536)
                if not data:
                    break
                parser.feed(data)
            self._root = parser.close()
            return self._root
        finally:
            if close_source:
                source.close()
    def iter(self, tag=None):
        """Return an iterator over all matching elements, in document order.

        tag -- what tags to look for (default is to return all elements).
        """
        # assert self._root is not None
        return self._root.iter(tag)
    # compatibility
    def getiterator(self, tag=None):
        """Deprecated compatibility alias for iter(); returns a list."""
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'tree.iter()' or 'list(tree.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
        )
        return list(self.iter(tag))
    def find(self, path, namespaces=None):
        """Return the first toplevel element matching *path*, or None.

        Same as getroot().find(path).
        namespaces -- optional namespace prefix map.
        """
        # assert self._root is not None
        if path[:1] == "/":
            # absolute paths are misinterpreted by 1.3 and earlier;
            # rewrite them as relative to the root and warn
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.find(path, namespaces)
    def findtext(self, path, default=None, namespaces=None):
        """Return the text of the first toplevel element matching *path*.

        Same as getroot().findtext(path).
        default -- what to return if the element was not found.
        namespaces -- optional namespace prefix map.
        Note: if the element is found but has no text content, this
        method returns an empty string.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findtext(path, default, namespaces)
    def findall(self, path, namespaces=None):
        """Return all toplevel elements matching *path*, in document order.

        Same as getroot().findall(path).
        namespaces -- optional namespace prefix map.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findall(path, namespaces)
    def iterfind(self, path, namespaces=None):
        """Return an iterator over all subelements matching *path*.

        Same as getroot().iterfind(path).
        namespaces -- optional namespace prefix map.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.iterfind(path, namespaces)
    def write(self, file_or_filename,
              # keyword arguments
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None):
        """Write the element tree to a file, as XML.

        file_or_filename -- a file name, or a file object opened for writing.
        encoding -- optional output encoding (default is US-ASCII).
        xml_declaration -- controls if an XML declaration should be added
            to the file.  Use False for never, True for always, None
            (the default) for only if not US-ASCII or UTF-8.
        default_namespace -- sets the default XML namespace (for "xmlns").
        method -- optional output method ("xml", "html", "text" or
            "c14n"; default is "xml").
        """
        # assert self._root is not None
        if not method:
            method = "xml"
        elif method not in _serialize:
            # FIXME: raise an ImportError for c14n if ElementC14N is missing?
            raise ValueError("unknown method %r" % method)
        if hasattr(file_or_filename, "write"):
            file = file_or_filename
        else:
            file = open(file_or_filename, "wb")
        # BUGFIX: close the file we opened even when serialization raises,
        # instead of leaking the handle on error.
        try:
            write = file.write
            if not encoding:
                if method == "c14n":
                    encoding = "utf-8"
                else:
                    encoding = "us-ascii"
            elif xml_declaration or (xml_declaration is None and
                                     encoding not in ("utf-8", "us-ascii")):
                # NOTE: the declaration is only emitted when an encoding was
                # explicitly passed in (matches historical behaviour).
                if method == "xml":
                    write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
            if method == "text":
                _serialize_text(write, self._root, encoding)
            else:
                qnames, namespaces = _namespaces(
                    self._root, encoding, default_namespace
                    )
                serialize = _serialize[method]
                serialize(write, self._root, encoding, qnames, namespaces)
        finally:
            if file_or_filename is not file:
                file.close()
    def write_c14n(self, file):
        """lxml.etree compatibility; same as write(file, method="c14n")."""
        # lxml.etree compatibility. use output method instead
        return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
    # Build the lookup tables used by the XML serializer for the tree
    # rooted at *elem*.  Returns (qnames, namespaces) where:
    #   qnames     maps each qname string used in the tree to its
    #              *encoded* "prefix:local" serialized form
    #              (None maps to None),
    #   namespaces maps namespace URIs to the prefixes assigned to them.
    # identify namespaces used in this tree
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}
    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        # the default namespace is serialized with an empty prefix
        namespaces[default_namespace] = ""
    def encode(text):
        return text.encode(encoding)
    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                # universal name: split "{uri}local" into its parts
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    # try the well-known prefixes before inventing "nsN"
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        # "xml" is implicitly declared; never emit it
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = encode("%s:%s" % (prefix, tag))
                else:
                    qnames[qname] = encode(tag) # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = encode(qname)
        except TypeError:
            _raise_serialization_error(qname)
    # populate qname and namespaces table
    try:
        iterate = elem.iter
    except AttributeError:
        iterate = elem.getiterator # cET compatibility
    for elem in iterate():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, basestring):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            # anything else (except comments/PIs) cannot be serialized
            _raise_serialization_error(tag)
        # attribute names, and QName attribute values, also need qnames
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        # a QName stored as element text must be mapped too
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def _serialize_xml(write, elem, encoding, qnames, namespaces):
    # Recursively serialize *elem* (and its subtree) as XML, calling
    # *write* with encoded string fragments.  *namespaces* (uri -> prefix)
    # is only supplied on the outermost call; recursive calls pass None
    # so the xmlns declarations are emitted once, on the root tag.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _encode(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _encode(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # a None tag means "no tag": emit text and children only
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_xml(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    # emit xmlns declarations on the root element
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]): # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items): # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v, encoding)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text, encoding))
                for e in elem:
                    _serialize_xml(write, e, encoding, qnames, None)
                write("</" + tag + ">")
            else:
                # no text and no children: use a self-closing tag
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
# HTML elements that must not get a closing tag when serialized as HTML.
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")
try:
    # use a set for O(1) membership tests where the builtin is available
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:
    # very old Pythons have no built-in set(); fall back to the tuple
    pass
def _serialize_html(write, elem, encoding, qnames, namespaces):
    # Recursively serialize *elem* as HTML.  Differs from the XML
    # serializer: start tags are never self-closed, tag names are
    # lowercased, HTML_EMPTY elements get no end tag, and script/style
    # content is emitted without CDATA escaping.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # a None tag means "no tag": emit text and children only
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    # emit xmlns declarations on the outermost element
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]): # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items): # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            # HTML never uses self-closing start tags
            write(">")
            tag = tag.lower()
            if text:
                if tag == "script" or tag == "style":
                    # script/style bodies are raw text in HTML; don't escape
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            if tag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
def _serialize_text(write, elem, encoding):
    # Emit only the text content of the subtree (the "text" output
    # method), followed by the root element's tail, all encoded.
    for chunk in elem.itertext():
        write(chunk.encode(encoding))
    tail = elem.tail
    if tail:
        write(tail.encode(encoding))
# Dispatch table mapping the write() "method" argument to the
# corresponding serializer function.
_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
    # this optional method is imported at the end of the module
    # "c14n": _serialize_c14n,
}
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
    """Register a namespace prefix in the global map.

    Any existing mapping for either the given prefix or the namespace
    URI is removed first.

    prefix -- namespace prefix.
    uri -- namespace uri; tags and attributes in this namespace will be
        serialized with the given prefix, if at all possible.
    Raises ValueError if the prefix matches the internal "nsN" format,
    which is reserved for automatically generated prefixes.
    """
    # BUGFIX: use a raw string for the pattern ("\d" is an invalid escape
    # sequence in a plain string literal on newer Pythons).
    if re.match(r"ns\d+$", prefix):
        raise ValueError("Prefix format reserved for internal use")
    # BUGFIX: snapshot the items before deleting, so the dict is never
    # mutated while being iterated.
    for k, v in list(_namespace_map.items()):
        if k == uri or v == prefix:
            del _namespace_map[k]
    _namespace_map[uri] = prefix
# Global registry of "well-known" namespace prefixes, mapping namespace
# URI -> preferred prefix.  Consulted by the serializer before inventing
# an automatic "nsN" prefix; extended via register_namespace().
_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
    # Common error path for the serializer helpers: *text* is not a
    # string (or otherwise cannot be serialized as XML character data).
    raise TypeError(
        "cannot serialize %r (type %s)" % (text, type(text).__name__)
        )
def _encode(text, encoding):
    # Encode *text* with *encoding*; characters not representable in the
    # target encoding become numeric character references.
    try:
        return text.encode(encoding, "xmlcharrefreplace")
    except (TypeError, AttributeError):
        # non-string input (no .encode method, or wrong argument types)
        _raise_serialization_error(text)
def _escape_cdata(text, encoding):
    """Escape '&', '<' and '>' in character data, then encode it.

    BUGFIX: the replacement strings had lost their XML entity escapes
    (e.g. "&" was being replaced with itself, a no-op), producing
    malformed output; restore the proper entities.
    """
    # escape character data
    try:
        # it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 character, or so. assume that's, by far,
        # the most common case in most applications.
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text.encode(encoding, "xmlcharrefreplace")
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib(text, encoding):
    """Escape an attribute value ('&', '<', '>', '"', newline) and encode it.

    BUGFIX: the entity replacement strings had been garbled to no-ops
    (including a broken triple-quote for the '"' replacement); restore
    the proper XML entities.
    """
    # escape attribute value
    try:
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        if "\n" in text:
            # keep literal newlines out of attribute values
            text = text.replace("\n", "&#10;")
        return text.encode(encoding, "xmlcharrefreplace")
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
    """Escape an HTML attribute value ('&', '>', '"') and encode it.

    BUGFIX: the entity replacement strings had been garbled to no-ops;
    restore the proper entities.  ('<' is intentionally left alone in
    HTML attribute values.)
    """
    # escape attribute value
    try:
        if "&" in text:
            text = text.replace("&", "&amp;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        return text.encode(encoding, "xmlcharrefreplace")
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None, method=None):
    """Return an encoded string with the XML data for *element*.

    element -- an Element instance (all subelements are included).
    encoding -- optional output encoding (default is US-ASCII).
    method -- optional output method ("xml", "html", "text" or "c14n";
        default is "xml").
    """
    fragments = []
    class _Sink:
        pass
    # a minimal write-only "file": collect every fragment in a list
    sink = _Sink()
    sink.write = fragments.append
    ElementTree(element).write(sink, encoding, method=method)
    return "".join(fragments)
##
# Generates a string representation of an XML element, including all
# subelements. The string is returned as a sequence of string fragments.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3
def tostringlist(element, encoding=None, method=None):
    """Return the XML data for *element* as a sequence of string fragments.

    element -- an Element instance (all subelements are included).
    encoding -- optional output encoding (default is US-ASCII).
    method -- optional output method ("xml", "html", "text" or "c14n";
        default is "xml").
    """
    fragments = []
    class _Sink:
        pass
    # a minimal write-only "file": collect every fragment in a list
    sink = _Sink()
    sink.write = fragments.append
    ElementTree(element).write(sink, encoding, method=method)
    # FIXME: merge small fragments into larger parts
    return fragments
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
    """Write an element tree or element structure to sys.stdout.

    For debugging only.  The exact output format is implementation
    dependent; in this version it is written as an ordinary XML file,
    with a trailing newline added when the output does not end with one.
    """
    # debugging
    tree = elem if isinstance(elem, ElementTree) else ElementTree(elem)
    tree.write(sys.stdout)
    tail = tree.getroot().tail
    if not tail or tail[-1] != "\n":
        sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
    # build an empty tree, then let ElementTree.parse do the real work
    # (including opening/closing the file when *source* is a filename)
    tree = ElementTree()
    tree.parse(source, parser)
    return tree
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.
def iterparse(source, events=None, parser=None):
    """Incrementally parse *source* and report (event, elem) pairs.

    source -- a filename or file object containing XML data.
    events -- a list of event names to report; only "end" events are
        reported when omitted.
    parser -- optional parser instance (the standard XMLParser is used
        when omitted).
    Returns an iterator of (event, elem) pairs.
    """
    close_source = not hasattr(source, "read")
    if close_source:
        # we opened the file, so the iterator must close it when done
        source = open(source, "rb")
    if not parser:
        parser = XMLParser(target=TreeBuilder())
    return _IterParseIterator(source, events, parser, close_source)
class _IterParseIterator(object):
    """Iterator returned by iterparse(): drives the parser incrementally
    and yields buffered (event, elem) pairs as input is consumed."""
    def __init__(self, source, events, parser, close_source=False):
        self._file = source
        self._close_file = close_source  # true if we own (and must close) the file
        self._events = []                # buffered (event, elem) pairs
        self._index = 0                  # read position within _events
        self._error = None               # deferred parse error, raised from next()
        self.root = self._root = None
        self._parser = parser
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            if event == "start":
                try:
                    # prefer list-style attribute reporting when expat
                    # supports it (ordered/specified attributes)
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        uri = (uri or "").encode("ascii")
                    except UnicodeError:
                        pass
                    append((event, (prefix or "", uri or "")))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler
            else:
                raise ValueError("unknown event %r" % event)
    def next(self):
        # Python 2 iterator protocol.  Drain the buffered events first;
        # when they run out, feed the parser another chunk of input.
        while 1:
            try:
                item = self._events[self._index]
                self._index += 1
                return item
            except IndexError:
                pass
            if self._error:
                # a parse error was recorded while feeding; deliver it
                # only after all already-buffered events have been seen
                e = self._error
                self._error = None
                raise e
            if self._parser is None:
                # input exhausted and parser closed: publish the root
                # element and stop the iteration
                self.root = self._root
                if self._close_file:
                    self._file.close()
                raise StopIteration
            # load event buffer
            del self._events[:]
            self._index = 0
            data = self._file.read(16384)
            if data:
                try:
                    self._parser.feed(data)
                except SyntaxError as exc:
                    self._error = exc
            else:
                self._root = self._parser.close()
                self._parser = None
    def __iter__(self):
        return self
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
def XML(text, parser=None):
    """Parse an XML document from a string constant.

    Can be used to embed "XML literals" in Python code.

    text -- a string containing XML data.
    parser -- optional parser instance (the standard XMLParser is used
        when omitted).
    Returns an Element instance.
    """
    xml_parser = parser
    if not xml_parser:
        xml_parser = XMLParser(target=TreeBuilder())
    xml_parser.feed(text)
    return xml_parser.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text, parser=None):
    """Parse XML from a string and collect elements carrying an "id".

    text -- a string containing XML data.
    parser -- optional parser instance (the standard XMLParser is used
        when omitted).
    Returns a (root element, dictionary) tuple; the dictionary maps each
    id value found in the document to its element.
    """
    if not parser:
        parser = XMLParser(target=TreeBuilder())
    parser.feed(text)
    root = parser.close()
    ids = {}
    for node in root.iter():
        node_id = node.get("id")
        if node_id:
            ids[node_id] = node
    return root, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML # alias: fromstring() is exactly XML()
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3
def fromstringlist(sequence, parser=None):
    """Parse an XML document from a sequence of string fragments.

    sequence -- a list or other sequence containing XML data fragments.
    parser -- optional parser instance (the standard XMLParser is used
        when omitted).
    Returns an Element instance.
    """
    if not parser:
        parser = XMLParser(target=TreeBuilder())
    for fragment in sequence:
        parser.feed(fragment)
    return parser.close()
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder(object):
    """Generic element structure builder.

    Converts a sequence of start(), data() and end() calls into a
    well-formed element structure.  Useful for driving element creation
    from a custom XML parser, or a parser for some other XML-like format.

    element_factory -- optional factory called as factory(tag, attrs) to
        create new Element instances.
    """
    def __init__(self, element_factory=None):
        self._data = []    # pending text chunks, joined on flush
        self._elem = []    # stack of currently open elements
        self._last = None  # most recently opened or closed element
        self._tail = None  # true while collecting tail text (after an end tag)
        if element_factory is None:
            element_factory = Element
        self._factory = element_factory
    def close(self):
        """Flush the builder and return the toplevel document element."""
        assert len(self._elem) == 0, "missing end tags"
        assert self._last is not None, "missing toplevel element"
        return self._last
    def _flush(self):
        # Attach any buffered text to the last element, either as its
        # .text (inside the element) or .tail (after its end tag).
        if not self._data:
            return
        if self._last is not None:
            text = "".join(self._data)
            if self._tail:
                assert self._last.tail is None, "internal error (tail)"
                self._last.tail = text
            else:
                assert self._last.text is None, "internal error (text)"
                self._last.text = text
        self._data = []
    def data(self, data):
        """Add text to the current element.

        data -- a string (8-bit ASCII or Unicode).
        """
        self._data.append(data)
    def start(self, tag, attrs):
        """Open a new element and return it.

        tag -- the element name.
        attrs -- a dictionary containing element attributes.
        """
        self._flush()
        elem = self._factory(tag, attrs)
        self._last = elem
        if self._elem:
            # attach the new element to its parent (top of the stack)
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem
    def end(self, tag):
        """Close the current element and return it.

        tag -- the element name (must match the open element).
        """
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser(object):
    """Element structure builder for XML source data, based on the expat
    parser.

    html -- predefine HTML entities (not supported by this implementation).
    target -- target builder object; a standard TreeBuilder is used when
        omitted.
    encoding -- optional encoding, overriding the one declared in the
        XML file.
    """
    def __init__(self, html=0, target=None, encoding=None):
        try:
            from xml.parsers import expat
        except ImportError:
            try:
                # some platforms expose expat under this name instead
                import pyexpat as expat
            except ImportError:
                raise ImportError(
                    "No module named expat; use SimpleXMLTreeBuilder instead"
                    )
        # "}" makes expat report universal names as "uri}local"
        parser = expat.ParserCreate(encoding, "}")
        if target is None:
            target = TreeBuilder()
        # underscored names are provided for compatibility only
        self.parser = self._parser = parser
        self.target = self._target = target
        self._error = expat.error
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # optional callbacks
        parser.CommentHandler = self._comment
        parser.ProcessingInstructionHandler = self._pi
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        self._doctype = None  # list of doctype tokens while inside <!DOCTYPE ...>
        self.entity = {}      # user-supplied map for otherwise-undefined entities
        try:
            self.version = "Expat %d.%d.%d" % expat.version_info
        except AttributeError:
            pass # unknown
    def _raiseerror(self, value):
        # wrap an expat error in a ParseError, preserving position info
        err = ParseError(value)
        err.code = value.code
        err.position = value.lineno, value.offset
        raise err
    def _fixtext(self, text):
        # convert text string to ascii, if possible
        try:
            return text.encode("ascii")
        except UnicodeError:
            return text
    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                # expat reported "uri}local"; rewrite as "{uri}local"
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name
    def _start(self, tag, attrib_in):
        # expat start-tag callback, dict-style attributes
        fixname = self._fixname
        fixtext = self._fixtext
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = fixtext(value)
        return self.target.start(tag, attrib)
    def _start_list(self, tag, attrib_in):
        # expat start-tag callback, ordered [name, value, ...] attributes
        fixname = self._fixname
        fixtext = self._fixtext
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = fixtext(attrib_in[i+1])
        return self.target.start(tag, attrib)
    def _data(self, text):
        return self.target.data(self._fixtext(text))
    def _end(self, tag):
        return self.target.end(self._fixname(tag))
    def _comment(self, data):
        # forward comments only if the target implements comment()
        try:
            comment = self.target.comment
        except AttributeError:
            pass
        else:
            return comment(self._fixtext(data))
    def _pi(self, target, data):
        # forward processing instructions only if the target implements pi()
        try:
            pi = self.target.pi
        except AttributeError:
            pass
        else:
            return pi(self._fixtext(target), self._fixtext(data))
    def _default(self, text):
        # fallback handler: resolves entity references and accumulates
        # the tokens of a doctype declaration
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self.target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                err = expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                    self._parser.ErrorColumnNumber)
                    )
                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
                err.lineno = self._parser.ErrorLineNumber
                err.offset = self._parser.ErrorColumnNumber
                raise err
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = text.strip()
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    # strip the surrounding quotes
                    pubid = pubid[1:-1]
                if hasattr(self.target, "doctype"):
                    self.target.doctype(name, pubid, system[1:-1])
                elif self.doctype is not self._XMLParser__doctype:
                    # warn about deprecated call
                    self._XMLParser__doctype(name, pubid, system[1:-1])
                    self.doctype(name, pubid, system[1:-1])
                self._doctype = None
    def doctype(self, name, pubid, system):
        """This method of XMLParser is deprecated."""
        # (Deprecated) doctype-declaration hook; define doctype() on the
        # TreeBuilder target instead.
        # name -- doctype name; pubid -- public identifier;
        # system -- system identifier.
        warnings.warn(
            "This method of XMLParser is deprecated. Define doctype() "
            "method on the TreeBuilder target.",
            DeprecationWarning,
            )
    # sentinel, if doctype is redefined in a subclass
    __doctype = doctype
    def feed(self, data):
        # Feed encoded *data* to the parser; expat errors are re-raised
        # as ParseError.
        try:
            self._parser.Parse(data, 0)
        except self._error, v:
            self._raiseerror(v)
    def close(self):
        # Finish feeding data and return the root element structure.
        try:
            self._parser.Parse("", 1) # end of data
        except self._error, v:
            self._raiseerror(v)
        tree = self.target.close()
        del self.target, self._parser # get rid of circular references
        return tree
# compatibility: old public name for the XMLParser class
XMLTreeBuilder = XMLParser
# workaround circular import.
try:
    # register the optional C14N serializer when ElementC14N is available
    from ElementC14N import _serialize_c14n
    _serialize["c14n"] = _serialize_c14n
except ImportError:
    pass
| apache-2.0 |
sodafree/backend | build/lib.linux-i686-2.7/django/contrib/gis/geos/collections.py | 92 | 4637 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
from ctypes import c_int, c_uint, byref
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import get_pointer_arr, GEOS_PREPARE
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos import prototypes as capi
class GeometryCollection(GEOSGeometry):
    # GEOS geometry type id for GEOMETRYCOLLECTION.
    _typeid = 7
    def __init__(self, *args, **kwargs):
        "Initializes a Geometry Collection from a sequence of Geometry objects."
        # Checking the arguments
        if not args:
            raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
        if len(args) == 1:
            # If only one geometry provided or a list of geometries is provided
            # in the first argument.
            if isinstance(args[0], (tuple, list)):
                init_geoms = args[0]
            else:
                init_geoms = args
        else:
            init_geoms = args
        # Ensuring that only the permitted geometries are allowed in this collection
        # this is moved to list mixin super class
        self._check_allowed(init_geoms)
        # Creating the geometry pointer array.
        collection = self._create_collection(len(init_geoms), iter(init_geoms))
        super(GeometryCollection, self).__init__(collection, **kwargs)
    def __iter__(self):
        "Iterates over each Geometry in the Collection."
        for i in xrange(len(self)):
            yield self[i]
    def __len__(self):
        "Returns the number of geometries in this Collection."
        return self.num_geom
    ### Methods for compatibility with ListMixin ###
    def _create_collection(self, length, items):
        # Creating the geometry pointer array.
        geoms = get_pointer_arr(length)
        for i, g in enumerate(items):
            # this is a little sloppy, but makes life easier
            # allow GEOSGeometry types (python wrappers) or pointer types
            geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
        # NOTE: every input geometry is cloned, so the new collection owns
        # its members independently of the caller's objects.
        return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
    def _get_single_internal(self, index):
        # Returns the raw GEOS pointer (not a wrapper) for the geometry
        # at *index*; ownership stays with this collection.
        return capi.get_geomn(self.ptr, index)
    def _get_single_external(self, index):
        "Returns the Geometry from this Collection at the given index (0-based)."
        # Checking the index and returning the corresponding GEOS geometry.
        return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
    def _set_list(self, length, items):
        "Create a new collection, and destroy the contents of the previous pointer."
        prev_ptr = self.ptr
        srid = self.srid
        self.ptr = self._create_collection(length, items)
        # restore the SRID, which is not carried over to the new pointer
        if srid: self.srid = srid
        capi.destroy_geom(prev_ptr)
    # ListMixin hooks that rebuild the whole collection on item assignment.
    _set_single = GEOSGeometry._set_single_rebuild
    _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
    @property
    def kml(self):
        "Returns the KML for this Geometry Collection."
        return '<MultiGeometry>%s</MultiGeometry>' % ''.join([g.kml for g in self])
    @property
    def tuple(self):
        "Returns a tuple of all the coordinates in this Geometry Collection"
        return tuple([g.tuple for g in self])
    coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
    # GEOS MultiPoint: a collection restricted to Point members.
    _allowed = Point
    _typeid = 4
class MultiLineString(GeometryCollection):
    # GEOS MultiLineString: members may be LineStrings or LinearRings.
    _allowed = (LineString, LinearRing)
    _typeid = 5
    @property
    def merged(self):
        """
        Returns a LineString representing the line merge of this
        MultiLineString.
        """
        return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
    # GEOS MultiPolygon: members must be Polygons.
    _allowed = Polygon
    _typeid = 6
    @property
    def cascaded_union(self):
        "Returns a cascaded union of this MultiPolygon."
        # cascaded union is only exposed by GEOS 3.1+ (GEOS_PREPARE flag)
        if GEOS_PREPARE:
            return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
        else:
            raise GEOSException('The cascaded union operation requires GEOS 3.1+.')
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.  A generic collection may therefore contain any geometry
# type, including the Multi* collections themselves.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
| bsd-3-clause |
DmitryLoki/airtribune | sites/all/libraries/OpenLayers-2.12/tools/exampleparser.py | 111 | 8324 | #!/usr/bin/env python
import sys
import os
import re
import urllib2
import time
from xml.dom.minidom import Document
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
import elementtree.ElementTree as ElementTree
except ImportError:
import lxml.etree as ElementTree
missing_deps = False
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError, E:
missing_deps = E
try:
from BeautifulSoup import BeautifulSoup
except ImportError, E:
missing_deps = E
feedName = "example-list.xml"
feedPath = "http://openlayers.org/dev/examples/"
def getListOfOnlineExamples(baseUrl):
    """Scrape an examples index page and return its .html hrefs.

    Useful if you want to get a list of examples from a url; not used by
    default.
    """
    page = urllib2.urlopen(baseUrl)
    soup = BeautifulSoup(page)
    hrefs = [item.find('a').get('href') for item in soup.findAll('li')]
    return [href for href in hrefs if href.endswith('.html')]
def getListOfExamples(relPath):
    """Return the .html filenames in relPath, excluding example-list.html."""
    return [name for name in os.listdir(relPath)
            if name.endswith('.html') and name != "example-list.html"]
def getExampleHtml(location):
    """
    returns html of a specific example that is available online or locally
    """
    # Progress indicator: one dot per example (Python 2 print statement).
    print '.',
    if location.startswith('http'):
        # Remote example: fetch over HTTP.
        return urllib2.urlopen(location).read()
    else:
        # Local example: read the file from disk.
        f = open(location)
        html = f.read()
        f.close()
        return html
def extractById(soup, tagId, value=None):
    """Return the cleaned inner contents of the element with id `tagId`.

    Tabs and newlines are stripped from the rendered contents. Falls back to
    `value` (default None) when the element is missing or empty.
    """
    tag = soup.find(id=tagId)
    if tag and tag.contents:
        rendered = str(tag.renderContents()).strip()
        value = rendered.replace('\t', '').replace('\n', '')
    return value
def getRelatedClasses(html):
    """List every OpenLayers class invoked in the page's javascript.

    Matches `OpenLayers.<dotted.name>(` occurrences, i.e. which parts of the
    OL library the example actually uses.
    """
    pattern = re.compile(r'(?P<class>OpenLayers\..*?)\(')
    return pattern.findall(html)
def parseHtml(html, ids):
    """Extract the requested tag ids plus referenced OL classes from html."""
    soup = BeautifulSoup(html)
    details = dict((tagId, extractById(soup, tagId)) for tagId in ids)
    # classes should eventually be parsed from docs - not automatically created.
    details['classes'] = getRelatedClasses(html)
    return details
def getSvnInfo(path):
    """Return url/author/date of the last commit for path via `svn info --xml`."""
    pipe = os.popen("svn info %s --xml" % path)
    xml_text = pipe.read()
    pipe.close()
    tree = ElementTree.fromstring(xml_text)
    return {
        'url': tree.findtext('entry/url'),
        'author': tree.findtext('entry/commit/author'),
        'date': tree.findtext('entry/commit/date'),
    }
def createFeed(examples):
    """Build an Atom feed Document for the given example records.

    Each example dict must provide 'title', 'tags', 'shortdesc', 'modified',
    'author' and 'example' keys; entries are emitted newest-first.
    """
    doc = Document()
    atomuri = "http://www.w3.org/2005/Atom"
    feed = doc.createElementNS(atomuri, "feed")
    feed.setAttribute("xmlns", atomuri)

    title = doc.createElementNS(atomuri, "title")
    title.appendChild(doc.createTextNode("OpenLayers Examples"))
    feed.appendChild(title)

    link = doc.createElementNS(atomuri, "link")
    link.setAttribute("rel", "self")
    link.setAttribute("href", feedPath + feedName)
    # Bug fix: the rel="self" link element was built but never attached to
    # the feed, so it silently disappeared from the output.
    feed.appendChild(link)

    # Bug fix: %H (24-hour clock) instead of %I -- a 12-hour hour with no
    # AM/PM marker produced ambiguous RFC3339 timestamps.
    modtime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    id = doc.createElementNS(atomuri, "id")
    id.appendChild(doc.createTextNode("%s%s#%s" % (feedPath, feedName, modtime)))
    feed.appendChild(id)

    updated = doc.createElementNS(atomuri, "updated")
    updated.appendChild(doc.createTextNode(modtime))
    feed.appendChild(updated)

    # sorted() already orders a copy newest-first; the previous extra
    # in-place examples.sort() was redundant and mutated the caller's list.
    for example in sorted(examples, key=lambda x: x["modified"], reverse=True):
        entry = doc.createElementNS(atomuri, "entry")

        title = doc.createElementNS(atomuri, "title")
        title.appendChild(doc.createTextNode(example["title"] or example["example"]))
        entry.appendChild(title)

        # NOTE(review): "tags" is not a standard Atom element (atom:category
        # is); kept as-is because existing consumers may rely on it.
        tags = doc.createElementNS(atomuri, "tags")
        tags.appendChild(doc.createTextNode(example["tags"] or example["example"]))
        entry.appendChild(tags)

        link = doc.createElementNS(atomuri, "link")
        link.setAttribute("href", "%s%s" % (feedPath, example["example"]))
        entry.appendChild(link)

        summary = doc.createElementNS(atomuri, "summary")
        summary.appendChild(doc.createTextNode(example["shortdesc"] or example["example"]))
        entry.appendChild(summary)

        updated = doc.createElementNS(atomuri, "updated")
        updated.appendChild(doc.createTextNode(example["modified"]))
        entry.appendChild(updated)

        author = doc.createElementNS(atomuri, "author")
        name = doc.createElementNS(atomuri, "name")
        name.appendChild(doc.createTextNode(example["author"]))
        author.appendChild(name)
        entry.appendChild(author)

        id = doc.createElementNS(atomuri, "id")
        id.appendChild(doc.createTextNode("%s%s#%s" % (feedPath, example["example"], example["modified"])))
        entry.appendChild(id)

        feed.appendChild(entry)

    doc.appendChild(feed)
    return doc
def wordIndex(examples):
    """
    Create an inverted index based on words in title and shortdesc. Keys are
    lower cased words. Values are dictionaries with example index keys and
    count values.
    """
    index = {}
    unword = re.compile(r"\W+")
    keys = ["shortdesc", "title", "tags"]
    for i in range(len(examples)):
        for key in keys:
            text = examples[i][key]
            if not text:
                continue
            for word in unword.split(text):
                if word:
                    word = word.lower()
                    # dict.has_key was removed in Python 3; setdefault/get
                    # is the portable (and idiomatic) equivalent.
                    counts = index.setdefault(word, {})
                    counts[i] = counts.get(i, 0) + 1
    return index
if __name__ == "__main__":
if missing_deps:
print "This script requires json or simplejson and BeautifulSoup. You don't have them. \n(%s)" % E
sys.exit()
if len(sys.argv) > 1:
outFile = open(sys.argv[1],'w')
else:
outFile = open('../examples/example-list.js','w')
examplesLocation = '../examples'
print 'Reading examples from %s and writing out to %s' % (examplesLocation, outFile.name)
exampleList = []
docIds = ['title','shortdesc','tags']
#comment out option to create docs from online resource
#examplesLocation = 'http://svn.openlayers.org/sandbox/docs/examples/'
#examples = getListOfOnlineExamples(examplesLocation)
examples = getListOfExamples(examplesLocation)
modtime = time.strftime("%Y-%m-%dT%I:%M:%SZ", time.gmtime())
for example in examples:
url = os.path.join(examplesLocation,example)
html = getExampleHtml(url)
tagvalues = parseHtml(html,docIds)
tagvalues['example'] = example
# add in svn info
d = getSvnInfo(url)
tagvalues["modified"] = d["date"] or modtime
tagvalues["author"] = d["author"] or "anonymous"
tagvalues['link'] = example
exampleList.append(tagvalues)
print
exampleList.sort(key=lambda x:x['example'].lower())
index = wordIndex(exampleList)
json = json.dumps({"examples": exampleList, "index": index})
#give the json a global variable we can use in our js. This should be replaced or made optional.
json = 'var info=' + json
outFile.write(json)
outFile.close()
print "writing feed to ../examples/%s " % feedName
atom = open('../examples/%s' % feedName, 'w')
doc = createFeed(exampleList)
atom.write(doc.toxml())
atom.close()
print 'complete'
| gpl-2.0 |
richardcs/ansible | lib/ansible/modules/cloud/google/gcp_compute_image.py | 7 | 27882 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
# Ansible module metadata: interface stability and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ["preview"],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_image
description:
- Represents an Image resource.
- Google Compute Engine uses operating system images to create the root persistent
disks for your instances. You specify an image when you create an instance. Images
contain a boot loader, an operating system, and a root file system. Linux operating
system images are also capable of running containers on Compute Engine.
- Images can be either public or custom.
- Public images are provided and maintained by Google, open-source communities, and
third-party vendors. By default, all projects have access to these images and can
use them to create instances. Custom images are available only to your project.
You can create a custom image from root persistent disks and other images. Then,
use the custom image to create an instance.
short_description: Creates a GCP Image
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
disk_size_gb:
description:
- Size of the image when restored onto a persistent disk (in GB).
required: false
family:
description:
- The name of the image family to which this image belongs. You can create disks
by specifying an image family instead of a specific image name. The image family
always returns its latest image that is not deprecated. The name of the image
family must comply with RFC1035.
required: false
guest_os_features:
description:
- A list of features to enable on the guest OS. Applicable for bootable images
only. Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which
allows each virtual CPU to have its own queue. For Windows images, you can only
enable VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher.
Linux images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.
- For new Windows images, the server might also populate this field with the value
WINDOWS, to indicate that this is a Windows image.
- This value is purely informational and does not enable or disable any features.
required: false
suboptions:
type:
description:
- The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported.
For newer Windows images, the server might also populate this property with
the value WINDOWS to indicate that this is a Windows image. This value is
purely informational and does not enable or disable any features.
required: false
choices:
- VIRTIO_SCSI_MULTIQUEUE
image_encryption_key:
description:
- Encrypts the image using a customer-supplied encryption key.
- After you encrypt an image with a customer-supplied key, you must provide the
same key if you use the image later (e.g. to create a disk from the image) .
required: false
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
required: false
licenses:
description:
- Any applicable license URI.
required: false
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
raw_disk:
description:
- The parameters of the raw disk image.
required: false
suboptions:
container_type:
description:
- The format used to encode and transmit the block device, which should be
TAR. This is just a container and transmission format and not a runtime
format. Provided by the client when the disk image is created.
required: false
choices:
- TAR
sha1_checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
required: false
source:
description:
- The full Google Cloud Storage URL where disk storage is stored You must
provide either this property or the sourceDisk property but not both.
required: false
source_disk:
description:
- Refers to a gcompute_disk object You must provide either this property or the
rawDisk.source property but not both to create an image.
- 'This field represents a link to a Disk resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_compute_disk
task and then set this source_disk field to "{{ name-of-resource }}" Alternatively,
you can set this source_disk to a dictionary with the selfLink key where the
value is the selfLink of your Disk'
required: false
source_disk_encryption_key:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
required: false
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
required: false
source_disk_id:
description:
- The ID value of the disk used to create this image. This value may be used to
determine whether the image was taken from the current or a previous instance
of a given disk name.
required: false
source_type:
description:
- The type of the image used to create this disk. The default and only value is
RAW .
required: false
choices:
- RAW
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a disk
gcp_compute_disk:
name: "disk-image"
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: disk
- name: create a image
gcp_compute_image:
name: "test_object"
source_disk: "{{ disk }}"
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
archiveSizeBytes:
description:
- Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
deprecated:
description:
- The deprecation status associated with this image.
returned: success
type: complex
contains:
deleted:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to DELETED. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
deprecated:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to DEPRECATED. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
obsolete:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to OBSOLETE. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
replacement:
description:
- The URL of the suggested replacement for a deprecated resource.
- The suggested replacement resource must be the same kind of resource as the
deprecated resource.
returned: success
type: str
state:
description:
- The deprecation state of this resource. This can be DEPRECATED, OBSOLETE,
or DELETED. Operations which create a new resource using a DEPRECATED resource
will return successfully, but with a warning indicating the deprecated resource
and recommending its replacement. Operations which use OBSOLETE or DELETED
resources will be rejected and result in an error.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
diskSizeGb:
description:
- Size of the image when restored onto a persistent disk (in GB).
returned: success
type: int
family:
description:
- The name of the image family to which this image belongs. You can create disks
by specifying an image family instead of a specific image name. The image family
always returns its latest image that is not deprecated. The name of the image
family must comply with RFC1035.
returned: success
type: str
guestOsFeatures:
description:
- A list of features to enable on the guest OS. Applicable for bootable images only.
Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows
each virtual CPU to have its own queue. For Windows images, you can only enable
VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux
images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.
- For new Windows images, the server might also populate this field with the value
WINDOWS, to indicate that this is a Windows image.
- This value is purely informational and does not enable or disable any features.
returned: success
type: complex
contains:
type:
description:
- The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported.
For newer Windows images, the server might also populate this property with
the value WINDOWS to indicate that this is a Windows image. This value is
purely informational and does not enable or disable any features.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
imageEncryptionKey:
description:
- Encrypts the image using a customer-supplied encryption key.
- After you encrypt an image with a customer-supplied key, you must provide the
same key if you use the image later (e.g. to create a disk from the image) .
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
licenses:
description:
- Any applicable license URI.
returned: success
type: list
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
rawDisk:
description:
- The parameters of the raw disk image.
returned: success
type: complex
contains:
containerType:
description:
- The format used to encode and transmit the block device, which should be TAR.
This is just a container and transmission format and not a runtime format.
Provided by the client when the disk image is created.
returned: success
type: str
sha1Checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
returned: success
type: str
source:
description:
- The full Google Cloud Storage URL where disk storage is stored You must provide
either this property or the sourceDisk property but not both.
returned: success
type: str
sourceDisk:
description:
- Refers to a gcompute_disk object You must provide either this property or the
rawDisk.source property but not both to create an image.
returned: success
type: dict
sourceDiskEncryptionKey:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceDiskId:
description:
- The ID value of the disk used to create this image. This value may be used to
determine whether the image was taken from the current or a previous instance
of a given disk name.
returned: success
type: str
sourceType:
description:
- The type of the image used to create this disk. The default and only value is
RAW .
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
    """Main function"""
    # Argument schema mirrors the GCP Image resource; nested objects
    # (guest_os_features, image_encryption_key, raw_disk,
    # source_disk_encryption_key) are expressed as dict/list suboptions.
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            description=dict(type='str'),
            disk_size_gb=dict(type='int'),
            family=dict(type='str'),
            guest_os_features=dict(type='list', elements='dict', options=dict(
                type=dict(type='str', choices=['VIRTIO_SCSI_MULTIQUEUE'])
            )),
            image_encryption_key=dict(type='dict', options=dict(
                raw_key=dict(type='str'),
                sha256=dict(type='str')
            )),
            licenses=dict(type='list', elements='str'),
            name=dict(required=True, type='str'),
            raw_disk=dict(type='dict', options=dict(
                container_type=dict(type='str', choices=['TAR']),
                sha1_checksum=dict(type='str'),
                source=dict(type='str')
            )),
            source_disk=dict(type='dict'),
            source_disk_encryption_key=dict(type='dict', options=dict(
                raw_key=dict(type='str'),
                sha256=dict(type='str')
            )),
            source_disk_id=dict(type='str'),
            source_type=dict(type='str', choices=['RAW'])
        )
    )

    # Default OAuth scope when the playbook did not supply one.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#image'

    # Fetch the current remote state, then converge it to the desired state:
    # present+different -> update, present+missing -> create,
    # absent+existing -> delete.
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the image body and block until the create operation finishes."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
    """PUT the image body and block until the update operation finishes."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link, kind):
    """DELETE the image and block until the delete operation finishes."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
    """Translate Ansible module params into a GCP Image request body."""
    request = {
        u'kind': 'compute#image',
        u'description': module.params.get('description'),
        u'diskSizeGb': module.params.get('disk_size_gb'),
        u'family': module.params.get('family'),
        u'guestOsFeatures': ImageGuestosfeaturesArray(module.params.get('guest_os_features', []), module).to_request(),
        u'imageEncryptionKey': ImageImageencryptionkey(module.params.get('image_encryption_key', {}), module).to_request(),
        u'licenses': module.params.get('licenses'),
        u'name': module.params.get('name'),
        u'rawDisk': ImageRawdisk(module.params.get('raw_disk', {}), module).to_request(),
        u'sourceDisk': replace_resource_dict(module.params.get(u'source_disk', {}), 'selfLink'),
        u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(module.params.get('source_disk_encryption_key', {}), module).to_request(),
        u'sourceDiskId': module.params.get('source_disk_id'),
        u'sourceType': module.params.get('source_type')
    }
    # Drop keys with falsy values so the API only receives fields the user
    # explicitly set.
    return_vals = {}
    for k, v in request.items():
        if v:
            return_vals[k] = v

    return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource at `link`; returns None on 404 when allow_not_found."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
    """URL of this specific image resource (global, keyed by project/name)."""
    return "https://www.googleapis.com/compute/v1/projects/{project}/global/images/{name}".format(**module.params)
def collection(module):
    """URL of the project's images collection (used for create/insert)."""
    return "https://www.googleapis.com/compute/v1/projects/{project}/global/images".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an HTTP response to a dict, or None for 404/204; fail on errors."""
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        # json.decoder.JSONDecodeError does not exist on Python 2; fall back
        # to ValueError (its base class) there.
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # Surface an API-level error payload as a task failure.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    """Return True when desired params differ from the current remote state.

    Only keys present on both sides are compared, so output-only response
    fields and unset request fields do not cause false positives.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Normalize an API response into the comparable field dictionary."""
    return {
        u'archiveSizeBytes': response.get(u'archiveSizeBytes'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'deprecated': ImageDeprecated(response.get(u'deprecated', {}), module).from_response(),
        u'description': response.get(u'description'),
        u'diskSizeGb': response.get(u'diskSizeGb'),
        u'family': response.get(u'family'),
        u'guestOsFeatures': ImageGuestosfeaturesArray(response.get(u'guestOsFeatures', []), module).from_response(),
        u'id': response.get(u'id'),
        u'imageEncryptionKey': ImageImageencryptionkey(response.get(u'imageEncryptionKey', {}), module).from_response(),
        u'licenses': response.get(u'licenses'),
        u'name': response.get(u'name'),
        u'rawDisk': ImageRawdisk(response.get(u'rawDisk', {}), module).from_response(),
        u'sourceDisk': response.get(u'sourceDisk'),
        u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(response.get(u'sourceDiskEncryptionKey', {}), module).from_response(),
        u'sourceDiskId': response.get(u'sourceDiskId'),
        u'sourceType': response.get(u'sourceType')
    }
def async_op_url(module, extra_data=None):
    """URL for polling a global compute operation; extra_data supplies op_id."""
    if extra_data is None:
        extra_data = {}
    url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)
def wait_for_operation(module, response):
    """Block until the compute operation completes, then fetch the image."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}

    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    # The finished operation's targetLink points at the affected image.
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#image')
def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until DONE, failing on errors.

    Returns the final operation resource.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        # Bug fix: the literal string 'message' was previously passed where
        # raise_if_errors expects the module object, so any operation error
        # crashed with AttributeError instead of failing the task cleanly.
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation')
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module when the payload carries errors at `err_path`."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)
class ImageDeprecated(object):
    """(De)serializer for the Image.deprecated nested object."""

    def __init__(self, request, module):
        self.module = module
        # Normalize a missing/None payload to an empty dict.
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        # Build the API-bound dict, dropping unset (None) fields.
        return remove_nones_from_dict({
            u'deleted': self.request.get('deleted'),
            u'deprecated': self.request.get('deprecated'),
            u'obsolete': self.request.get('obsolete'),
            u'replacement': self.request.get('replacement'),
            u'state': self.request.get('state')
        })

    def from_response(self):
        # Same field names in the response; drop unset (None) fields.
        return remove_nones_from_dict({
            u'deleted': self.request.get(u'deleted'),
            u'deprecated': self.request.get(u'deprecated'),
            u'obsolete': self.request.get(u'obsolete'),
            u'replacement': self.request.get(u'replacement'),
            u'state': self.request.get(u'state')
        })
class ImageGuestosfeaturesArray(object):
    """(De)serializer for the repeated Image.guestOsFeatures objects."""

    def __init__(self, request, module):
        self.module = module
        # Normalize a missing/None payload to an empty list.
        if request:
            self.request = request
        else:
            self.request = []

    def to_request(self):
        # Serialize each list element for the outgoing API request.
        items = []
        for item in self.request:
            items.append(self._request_for_item(item))
        return items

    def from_response(self):
        # Deserialize each list element from the API response.
        items = []
        for item in self.request:
            items.append(self._response_from_item(item))
        return items

    def _request_for_item(self, item):
        return remove_nones_from_dict({
            u'type': item.get('type')
        })

    def _response_from_item(self, item):
        return remove_nones_from_dict({
            u'type': item.get(u'type')
        })
class ImageImageencryptionkey(object):
    """(De)serializer for the Image.imageEncryptionKey nested object."""

    def __init__(self, request, module):
        self.module = module
        # Normalize a missing/None payload to an empty dict.
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        # Ansible snake_case param (raw_key) -> API camelCase field (rawKey).
        return remove_nones_from_dict({
            u'rawKey': self.request.get('raw_key'),
            u'sha256': self.request.get('sha256')
        })

    def from_response(self):
        return remove_nones_from_dict({
            u'rawKey': self.request.get(u'rawKey'),
            u'sha256': self.request.get(u'sha256')
        })
class ImageRawdisk(object):
    """(De)serializer for the Image.rawDisk nested object."""

    def __init__(self, request, module):
        self.module = module
        # Normalize a missing/None payload to an empty dict.
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        # Ansible snake_case params -> API camelCase fields.
        return remove_nones_from_dict({
            u'containerType': self.request.get('container_type'),
            u'sha1Checksum': self.request.get('sha1_checksum'),
            u'source': self.request.get('source')
        })

    def from_response(self):
        return remove_nones_from_dict({
            u'containerType': self.request.get(u'containerType'),
            u'sha1Checksum': self.request.get(u'sha1Checksum'),
            u'source': self.request.get(u'source')
        })
class ImageSourcediskencryptionkey(object):
    """(De)serializer for the Image.sourceDiskEncryptionKey nested object."""

    def __init__(self, request, module):
        self.module = module
        # Normalize a missing/None payload to an empty dict.
        if request:
            self.request = request
        else:
            self.request = {}

    def to_request(self):
        # Ansible snake_case param (raw_key) -> API camelCase field (rawKey).
        return remove_nones_from_dict({
            u'rawKey': self.request.get('raw_key'),
            u'sha256': self.request.get('sha256')
        })

    def from_response(self):
        return remove_nones_from_dict({
            u'rawKey': self.request.get(u'rawKey'),
            u'sha256': self.request.get(u'sha256')
        })
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
google-research/valan | touchdown/sdr/train_eager.py | 1 | 14627 | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SDR model training with TensorFlow eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
import numpy as np
from valan.touchdown.sdr import lingunet
FLAGS = flags.FLAGS

flags.DEFINE_string(
    "train_input_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "dev_input_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "test_input_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string("model_dir", "/tmp/tensorflow/generalization/checkpoints/",
                    "Directory to write TensorBoard summaries")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
flags.DEFINE_integer(
    "batch_size", 64, "Batch size for training and evaluation. When using "
    "multiple gpus, this is the global batch size for "
    "all devices. For example, if the batch size is 32 "
    "and there are 4 GPUs, each GPU will get 8 examples on "
    "each step.")
flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded. Must match data generation.")
# Fixed: the help text previously read "Name of the TPU to use.", copied from
# the "tpu" flag below; this flag actually toggles TPU execution.
flags.DEFINE_boolean("use_tpu", True, "Whether to use a TPU for training.")
flags.DEFINE_string("tpu", None, "Name of the TPU to use.")
# copybara:strip_begin
flags.DEFINE_integer("vm_config", 2, "Running in 2VM or 1VM mode.")
# copybara:strip_end
# Panorama geometry: each pano is built from 8 perspective images placed
# side by side, so the pano width is 8x the single-image width.
NUM_PERSPECTIVE_IMAGES = 8
DOWNSCALED_IMAGE_HEIGHT = 100
DOWNSCALED_IMAGE_WIDTH = 58
DOWNSCALED_PANO_HEIGHT = DOWNSCALED_IMAGE_HEIGHT
DOWNSCALED_PANO_WIDTH = DOWNSCALED_IMAGE_WIDTH * NUM_PERSPECTIVE_IMAGES
# Depth of the precomputed pano feature maps stored in the TF examples.
NUM_CHANNELS = 128
# Dataset sizes used to derive steps per epoch / per eval below.
NUM_EXAMPLES_TRAIN = 17000
NUM_EXAMPLES_EVAL = 3835
NUM_EPOCHS = 15
# Scale factor applied when converting distances measured on the downscaled
# feature map back to full-resolution units (see distance_metric).
RESOLUTION_MULTIPLIER = 8
# Increase num_cpu_threads if the perf is input bound.
def get_batched_dataset(pattern,
                        max_seq_length,
                        batch_size,
                        is_training=True,
                        num_cpu_threads=64):
  """Returns a batched tf.data.Dataset of parsed SDR TF examples.

  (The previous docstring said "MNIST training data", a copy-paste leftover;
  this pipeline reads the SDR panorama/instruction examples defined below.)

  Args:
    pattern: glob pattern (or comma-separated list) of input TFRecord files.
    max_seq_length: fixed token length of the "input_ids" feature.
    batch_size: number of examples per batch.
    is_training: if True, repeat/shuffle files and records for training.
    num_cpu_threads: parallelism for file interleaving and batching.

  Returns:
    A tf.data.Dataset yielding dicts of batched feature tensors.
  """
  input_files = tf.io.gfile.glob(pattern)
  logging.info("*** Input Files ***")
  for input_file in input_files:
    logging.info("  %s", input_file)
  # For training, we want a lot of parallel reading and shuffling.
  # For eval, we want no shuffling and parallel reading doesn't matter.
  if is_training:
    d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
    d = d.repeat()
    d = d.shuffle(buffer_size=len(input_files))
    # `cycle_length` is the number of parallel files that get read.
    cycle_length = min(num_cpu_threads, len(input_files))
    # `sloppy` mode means that the interleaving is not exact. This adds
    # even more randomness to the training pipeline.
    d = d.apply(
        tf.data.experimental.parallel_interleave(
            tf.data.TFRecordDataset,
            sloppy=is_training,
            cycle_length=cycle_length))
    d = d.shuffle(buffer_size=128)
  else:
    d = tf.data.TFRecordDataset(input_files)
    # Since we evaluate for a fixed number of steps we don't want to encounter
    # out-of-range exceptions.
    d = d.repeat()
  # Create a description of the features.
  feature_description = {
      "input_ids":
          tf.io.FixedLenFeature([max_seq_length], tf.int64),
      "input_ids_length":
          tf.io.FixedLenFeature([], tf.int64),
      "pano_features":
          tf.io.FixedLenFeature(
              [DOWNSCALED_PANO_HEIGHT * DOWNSCALED_PANO_WIDTH * NUM_CHANNELS],
              tf.float32),
      "target_features":
          tf.io.FixedLenFeature(
              [DOWNSCALED_PANO_HEIGHT * DOWNSCALED_PANO_WIDTH], tf.float32),
  }
  def parse_function(example_proto):
    # Parse the input tf.Example proto using the dictionary above, then
    # restore the flattened pano features to their (H, W, C) layout.
    features = tf.io.parse_single_example(example_proto, feature_description)
    features["pano_features"] = tf.reshape(
        features["pano_features"],
        [DOWNSCALED_PANO_HEIGHT, DOWNSCALED_PANO_WIDTH, NUM_CHANNELS])
    return features
  # We must `drop_remainder` on training because the TPU requires fixed
  # size dimensions. For eval, we assume we are evaluating on the CPU or GPU
  # and we *don't* want to drop the remainder, otherwise we wont cover
  # every sample.
  d = d.apply(
      tf.data.experimental.map_and_batch(
          parse_function,
          batch_size=batch_size,
          num_parallel_batches=num_cpu_threads,
          drop_remainder=True))
  # Prefetch 1 batch.
  d = d.prefetch(1)
  return d
def distance_metric(preds, targets):
  """Calculate distances between model predictions and targets within a batch.

  Args:
    preds: batch of predicted heatmaps, reshapeable to
        [batch, DOWNSCALED_PANO_HEIGHT, DOWNSCALED_PANO_WIDTH].
    targets: batch of target heatmaps with the same layout.

  Returns:
    List (length batch) of scalar distances between the argmax cell of each
    prediction and the argmax cell of the corresponding target, scaled by
    RESOLUTION_MULTIPLIER.
  """
  batch_size = preds.shape[0]
  preds = tf.reshape(
      preds, [batch_size, DOWNSCALED_PANO_HEIGHT, DOWNSCALED_PANO_WIDTH])
  targets = tf.reshape(
      targets, [batch_size, DOWNSCALED_PANO_HEIGHT, DOWNSCALED_PANO_WIDTH])
  distances = []
  for pred, target in zip(preds, targets):
    # Argmax over the 2-D heatmap gives the predicted / true cell coordinates.
    pred_coord = np.unravel_index(np.argmax(pred), pred.shape)
    target_coord = np.unravel_index(np.argmax(target), target.shape)
    # Euclidean distance in feature-map cells...
    dist = np.sqrt((target_coord[0] - pred_coord[0])**2 +
                   (target_coord[1] - pred_coord[1])**2)
    # ...scaled back towards full-resolution units (presumably pixels,
    # given the accuracy@N-pixel summaries below — TODO confirm).
    dist = dist * RESOLUTION_MULTIPLIER
    distances.append(dist)
  return distances
def accuracy(distances, margin=10):
  """Returns the fraction of distances strictly below `margin`.

  Fixed: the previous docstring claimed "accuracy at 80 pixel by default",
  but the actual default margin is 10; all callers in this file pass
  `margin` explicitly anyway.

  Args:
    distances: non-empty list of distances (see distance_metric). An empty
        list raises ZeroDivisionError, matching the original behavior.
    margin: exclusive threshold below which a prediction counts as correct.

  Returns:
    Fraction of entries in `distances` that are < margin.
  """
  num_correct = sum(1 for distance in distances if distance < margin)
  return num_correct / len(distances)
def get_features(features):
  """Unpacks the parsed feature dict into a fixed-order tuple.

  Args:
    features: dict with keys "input_ids", "input_ids_length",
        "pano_features" and "target_features".

  Returns:
    Tuple (input_ids, input_ids_length, pano_features, target_features).
  """
  return (features["input_ids"], features["input_ids_length"],
          features["pano_features"], features["target_features"])
def run_eager():
  """Runs the SDR LingUNet training and eval loop in eager mode.

  (Docstring fixed: it previously said "MNIST", a copy-paste leftover.)

  Builds distributed train/dev/test input pipelines, trains for NUM_EPOCHS
  epochs under a TPU or mirrored-GPU strategy, writes TensorBoard summaries
  for loss / mean distance / accuracy@{40,80,120}, and checkpoints once per
  epoch (resuming from the latest checkpoint in FLAGS.model_dir if present).
  """
  tf.enable_v2_behavior()
  if FLAGS.use_tpu:
    job_name = "worker"
    primary_cpu_task = "/job:%s" % job_name
    logging.info("Use TPU at %s",
                 FLAGS.tpu if FLAGS.tpu is not None else "local")
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)
  else:
    primary_cpu_task = "/task:0"
    strategy = tf.distribute.MirroredStrategy()
  logging.info("Distribution strategy: %s", strategy)
  model_dir = FLAGS.model_dir
  logging.info("Saving checkpoints at %s", model_dir)
  steps_per_epoch = int(NUM_EXAMPLES_TRAIN // FLAGS.batch_size)
  steps_per_eval = int(NUM_EXAMPLES_EVAL // FLAGS.batch_size)
  with tf.device(primary_cpu_task):
    def get_dataset_fn(input_file):
      # Builds a per-replica dataset fn that shards by input pipeline id.
      def dataset_fn(input_context):
        batch_size = input_context.get_per_replica_batch_size(FLAGS.batch_size)
        d = get_batched_dataset(input_file, FLAGS.max_seq_length, batch_size)
        return d.shard(input_context.num_input_pipelines,
                       input_context.input_pipeline_id)
      return dataset_fn
    train_dataset = strategy.experimental_distribute_datasets_from_function(
        get_dataset_fn(FLAGS.train_input_file))
    test_dataset = strategy.experimental_distribute_datasets_from_function(
        get_dataset_fn(FLAGS.test_input_file))
    dev_dataset = strategy.experimental_distribute_datasets_from_function(
        get_dataset_fn(FLAGS.dev_input_file))
    with strategy.scope():
      # Create the model and optimizer
      model = lingunet.LingUNet(num_channels=NUM_CHANNELS)
      for var in model.trainable_variables:
        # Fixed: this used `tf.logging.info`, which does not exist under
        # tensorflow.compat.v2 and would raise AttributeError; use the absl
        # logging module already imported by this file.
        logging.info("  name = %s, shape = %s", var.name, var.shape)
      optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
      training_loss = tf.keras.metrics.Mean("training_loss", dtype=tf.float32)
      test_loss = tf.keras.metrics.Mean("test_loss", dtype=tf.float32)
      logging.info("Finished building Keras LingUNet model")
      checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
      latest_checkpoint = tf.train.latest_checkpoint(model_dir)
      initial_epoch = 0
      if latest_checkpoint:
        # checkpoint.restore must be within a strategy.scope() so that optimizer
        # slot variables are mirrored.
        checkpoint.restore(latest_checkpoint)
        logging.info("Loaded checkpoint %s", latest_checkpoint)
        initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
    # Create summary writers
    train_summary_writer = tf.summary.create_file_writer(
        os.path.join(model_dir, "summaries/train"))
    test_summary_writer = tf.summary.create_file_writer(
        os.path.join(model_dir, "summaries/test"))
    dev_summary_writer = tf.summary.create_file_writer(
        os.path.join(model_dir, "summaries/dev"))

    @tf.function
    def train_step(iterator):
      """Training StepFn."""
      def step_fn(inputs):
        """Per-Replica StepFn."""
        input_ids, input_ids_length, pano_features, target_features = get_features(
            inputs)
        with tf.GradientTape() as tape:
          predicted_targets = model(pano_features, input_ids, input_ids_length,
                                    True)
          # KL divergence between target and predicted location heatmaps,
          # scaled so the summed per-replica losses equal the global mean.
          loss = tf.keras.losses.kullback_leibler_divergence(
              target_features, predicted_targets)
          loss = tf.reduce_mean(loss) / strategy.num_replicas_in_sync
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        training_loss.update_state(loss)
        return predicted_targets, target_features
      predicted_targets, target_features = strategy.run(
          step_fn, args=(next(iterator),))
      # Concatenate per-replica results so metrics see the whole batch.
      predicted_targets = tf.concat(
          strategy.experimental_local_results(predicted_targets), axis=0)
      target_features = tf.concat(
          strategy.experimental_local_results(target_features), axis=0)
      return predicted_targets, target_features

    @tf.function
    def test_step(iterator):
      """Evaluation StepFn."""
      def step_fn(inputs):
        input_ids, input_ids_length, pano_features, target_features = get_features(
            inputs)
        predicted_targets = model(pano_features, input_ids, input_ids_length,
                                  False)
        loss = tf.keras.losses.kullback_leibler_divergence(
            target_features, predicted_targets)
        loss = tf.reduce_mean(loss) / strategy.num_replicas_in_sync
        test_loss.update_state(loss)
        return predicted_targets, target_features
      predicted_targets, target_features = strategy.run(
          step_fn, args=(next(iterator),))
      predicted_targets = tf.concat(
          strategy.experimental_local_results(predicted_targets), axis=0)
      target_features = tf.concat(
          strategy.experimental_local_results(target_features), axis=0)
      return predicted_targets, target_features

    train_iterator = iter(train_dataset)
    for epoch in range(initial_epoch, NUM_EPOCHS):
      logging.info("Starting to run epoch: %s", epoch)
      with train_summary_writer.as_default():
        for step in range(steps_per_epoch):
          if step % 5 == 0:
            logging.info("Running step %s in epoch %s", step, epoch)
          predicted_targets, target_features = train_step(train_iterator)
          distances = distance_metric(predicted_targets, target_features)
          mean_dist = np.mean(distances)
          tf.summary.scalar(
              "mean_dist", mean_dist, step=optimizer.iterations)
          tf.summary.scalar(
              "loss", training_loss.result(), step=optimizer.iterations)
          tf.summary.scalar(
              "accuracy@40",
              accuracy(distances, margin=40),
              step=optimizer.iterations)
          tf.summary.scalar(
              "accuracy@80",
              accuracy(distances, margin=80),
              step=optimizer.iterations)
          tf.summary.scalar(
              "accuracy@120",
              accuracy(distances, margin=120),
              step=optimizer.iterations)
          training_loss.reset_states()
      # Evaluate on test and dev with the same step fn, separate writers.
      test_iterator = iter(test_dataset)
      dev_iterator = iter(dev_dataset)
      for iterator, summary_writer in zip(
          [test_iterator, dev_iterator],
          [test_summary_writer, dev_summary_writer]):
        with summary_writer.as_default():
          eval_distances = []
          for step in range(steps_per_eval):
            if step % 5 == 0:
              logging.info("Starting to run eval step %s of epoch: %s", step,
                           epoch)
            predicted_targets, target_features = test_step(iterator)
            distances = distance_metric(predicted_targets, target_features)
            eval_distances.extend(distances)
          tf.summary.scalar(
              "mean_dist", np.mean(eval_distances), step=optimizer.iterations)
          tf.summary.scalar(
              "loss", test_loss.result(), step=optimizer.iterations)
          tf.summary.scalar(
              "accuracy@40",
              accuracy(eval_distances, margin=40),
              step=optimizer.iterations)
          tf.summary.scalar(
              "accuracy@80",
              accuracy(eval_distances, margin=80),
              step=optimizer.iterations)
          tf.summary.scalar(
              "accuracy@120",
              accuracy(eval_distances, margin=120),
              step=optimizer.iterations)
          test_loss.reset_states()
      checkpoint_name = checkpoint.save(os.path.join(model_dir, "checkpoint"))
      logging.info("Saved checkpoint to %s", checkpoint_name)
def main(_):
  """absl app entry point; the unused argument receives leftover argv."""
  run_eager()


if __name__ == "__main__":
  app.run(main)
| apache-2.0 |
Omegaphora/external_chromium_org_third_party_skia | gm/rebaseline_server/compare_rendered_pictures.py | 20 | 21541 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Compare results of two render_pictures runs.
TODO(epoger): Start using this module to compare ALL images (whether they
were generated from GMs or SKPs), and rename it accordingly.
"""
# System-level imports
import logging
import os
import shutil
import subprocess
import tempfile
import time
# Must fix up PYTHONPATH before importing from within Skia
import rs_fixpypath # pylint: disable=W0611
# Imports from within Skia
from py.utils import git_utils
from py.utils import gs_utils
from py.utils import url_utils
import buildbot_globals
import column
import gm_json
import imagediffdb
import imagepair
import imagepairset
import results
# URL under which all render_pictures images can be found in Google Storage.
#
# TODO(epoger): In order to allow live-view of GMs and other images, read this
# from the input summary files, or allow the caller to set it within the
# GET_live_results call.
DEFAULT_IMAGE_BASE_GS_URL = 'gs://' + buildbot_globals.Get('skp_images_bucket')

# Column descriptors, and display preferences for them.
COLUMN__RESULT_TYPE = results.KEY__EXTRACOLUMNS__RESULT_TYPE
COLUMN__SOURCE_SKP = 'sourceSkpFile'
COLUMN__TILED_OR_WHOLE = 'tiledOrWhole'
COLUMN__TILENUM = 'tilenum'
COLUMN__BUILDER_A = 'builderA'
COLUMN__RENDER_MODE_A = 'renderModeA'
COLUMN__BUILDER_B = 'builderB'
COLUMN__RENDER_MODE_B = 'renderModeB'
# Known values for some of those columns.
COLUMN__TILED_OR_WHOLE__TILED = 'tiled'
COLUMN__TILED_OR_WHOLE__WHOLE = 'whole'

# Columns whose UI filter accepts freeform text rather than a fixed value set
# (see the ColumnHeaderFactory overrides in _load_result_pairs).
FREEFORM_COLUMN_IDS = [
    COLUMN__SOURCE_SKP,
    COLUMN__TILENUM,
]
# Left-to-right display order of the columns in the generated result dicts.
ORDERED_COLUMN_IDS = [
    COLUMN__RESULT_TYPE,
    COLUMN__SOURCE_SKP,
    COLUMN__TILED_OR_WHOLE,
    COLUMN__TILENUM,
    COLUMN__BUILDER_A,
    COLUMN__RENDER_MODE_A,
    COLUMN__BUILDER_B,
    COLUMN__RENDER_MODE_B,
]

# A special "repo:" URL type that we use to refer to Skia repo contents.
# (Useful for comparing against expectations files we store in our repo.)
REPO_URL_PREFIX = 'repo:'
# Two levels up from this file's directory, i.e. the repo checkout root.
REPO_BASEPATH = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))

# Which sections within a JSON summary file can contain results.
ALLOWED_SECTION_NAMES = [
    gm_json.JSONKEY_ACTUALRESULTS,
    gm_json.JSONKEY_EXPECTEDRESULTS,
]
class RenderedPicturesComparisons(results.BaseComparisons):
  """Loads results from multiple render_pictures runs into an ImagePairSet.

  Copies JSON summaries from two directory trees ("setA" and "setB"), pairs
  up the images they describe, and records pixel-diff results via ImageDiffDB.
  """
  def __init__(self,
               setA_dir, setB_dir,
               setA_section, setB_section,
               image_diff_db,
               image_base_gs_url=DEFAULT_IMAGE_BASE_GS_URL, diff_base_url=None,
               setA_label=None, setB_label=None,
               gs=None, truncate_results=False, prefetch_only=False,
               download_all_images=False):
    """Constructor: downloads images and generates diffs.

    Once the object has been created (which may take a while), you can call its
    get_packaged_results_of_type() method to quickly retrieve the results...
    unless you have set prefetch_only to True, in which case we will
    asynchronously warm up the ImageDiffDB cache but not fill in self._results.

    Args:
      setA_dir: root directory to copy all JSON summaries from, and to use as
          setA within the comparisons. This directory may be specified as a
          gs:// URL, special "repo:" URL, or local filepath.
      setB_dir: root directory to copy all JSON summaries from, and to use as
          setB within the comparisons. This directory may be specified as a
          gs:// URL, special "repo:" URL, or local filepath.
      setA_section: which section within setA to examine; must be one of
          ALLOWED_SECTION_NAMES
      setB_section: which section within setB to examine; must be one of
          ALLOWED_SECTION_NAMES
      image_diff_db: ImageDiffDB instance
      image_base_gs_url: "gs://" URL pointing at the Google Storage bucket/dir
          under which all render_pictures result images can
          be found; this will be used to read images for comparison within
          this code, and included in the ImagePairSet (as an HTTP URL) so its
          consumers know where to download the images from
      diff_base_url: base URL within which the client should look for diff
          images; if not specified, defaults to a "file:///" URL representation
          of image_diff_db's storage_root
      setA_label: description to use for results in setA; if None, will be
          set to a reasonable default
      setB_label: description to use for results in setB; if None, will be
          set to a reasonable default
      gs: instance of GSUtils object we can use to download summary files
      truncate_results: FOR MANUAL TESTING: if True, truncate the set of images
          we process, to speed up testing.
      prefetch_only: if True, return the new object as quickly as possible
          with empty self._results (just queue up all the files to process,
          don't wait around for them to be processed and recorded); otherwise,
          block until the results have been assembled and recorded in
          self._results.
      download_all_images: if True, download all images, even if we don't
          need them to generate diffs. This will take much longer to complete,
          but is useful for warming up the bitmap cache on local disk.
    """
    super(RenderedPicturesComparisons, self).__init__()
    self._image_diff_db = image_diff_db
    self._image_base_gs_url = image_base_gs_url
    self._diff_base_url = (
        diff_base_url or
        url_utils.create_filepath_url(image_diff_db.storage_root))
    self._gs = gs
    self.truncate_results = truncate_results
    self._prefetch_only = prefetch_only
    self._download_all_images = download_all_images
    # If we are comparing two different section types, we can use those
    # as the default labels for setA and setB.
    if setA_section != setB_section:
      self._setA_label = setA_label or setA_section
      self._setB_label = setB_label or setB_section
    else:
      self._setA_label = setA_label or 'setA'
      self._setB_label = setB_label or 'setB'
    tempdir = tempfile.mkdtemp()
    try:
      setA_root = os.path.join(tempdir, 'setA')
      setB_root = os.path.join(tempdir, 'setB')
      # TODO(stephana): There is a potential race condition here... we copy
      # the contents out of the source_dir, and THEN we get the commithash
      # of source_dir. If source_dir points at a git checkout, and that
      # checkout is updated (by a different thread/process) during this
      # operation, then the contents and commithash will be out of sync.
      self._copy_dir_contents(source_dir=setA_dir, dest_dir=setA_root)
      setA_repo_revision = self._get_repo_revision(source_dir=setA_dir)
      self._copy_dir_contents(source_dir=setB_dir, dest_dir=setB_root)
      setB_repo_revision = self._get_repo_revision(source_dir=setB_dir)
      self._setA_descriptions = {
          results.KEY__SET_DESCRIPTIONS__DIR: setA_dir,
          results.KEY__SET_DESCRIPTIONS__REPO_REVISION: setA_repo_revision,
          results.KEY__SET_DESCRIPTIONS__SECTION: setA_section,
      }
      self._setB_descriptions = {
          results.KEY__SET_DESCRIPTIONS__DIR: setB_dir,
          results.KEY__SET_DESCRIPTIONS__REPO_REVISION: setB_repo_revision,
          results.KEY__SET_DESCRIPTIONS__SECTION: setB_section,
      }
      time_start = int(time.time())
      # _load_result_pairs returns None in prefetch_only mode, in which case
      # self._timestamp is deliberately left unset here.
      self._results = self._load_result_pairs(
          setA_root=setA_root, setB_root=setB_root,
          setA_section=setA_section, setB_section=setB_section)
      if self._results:
        self._timestamp = int(time.time())
        logging.info('Number of download file collisions: %s' %
                     imagediffdb.global_file_collisions)
        logging.info('Results complete; took %d seconds.' %
                     (self._timestamp - time_start))
    finally:
      # Always remove the temporary summary copies, even on failure.
      shutil.rmtree(tempdir)
  def _load_result_pairs(self, setA_root, setB_root,
                         setA_section, setB_section):
    """Loads all JSON image summaries from 2 directory trees and compares them.

    TODO(stephana): This method is only called from within __init__(); it might
    make more sense to just roll the content of this method into __init__().

    Args:
      setA_root: root directory containing JSON summaries of rendering results
      setB_root: root directory containing JSON summaries of rendering results
      setA_section: which section (gm_json.JSONKEY_ACTUALRESULTS or
          gm_json.JSONKEY_EXPECTEDRESULTS) to load from the summaries in setA
      setB_section: which section (gm_json.JSONKEY_ACTUALRESULTS or
          gm_json.JSONKEY_EXPECTEDRESULTS) to load from the summaries in setB

    Returns the summary of all image diff results (or None, depending on
    self._prefetch_only).
    """
    logging.info('Reading JSON image summaries from dirs %s and %s...' % (
        setA_root, setB_root))
    setA_dicts = self.read_dicts_from_root(setA_root)
    setB_dicts = self.read_dicts_from_root(setB_root)
    logging.info('Comparing summary dicts...')
    # Two accumulators: every pair, and the subset that needs attention.
    all_image_pairs = imagepairset.ImagePairSet(
        descriptions=(self._setA_label, self._setB_label),
        diff_base_url=self._diff_base_url)
    failing_image_pairs = imagepairset.ImagePairSet(
        descriptions=(self._setA_label, self._setB_label),
        diff_base_url=self._diff_base_url)
    # Override settings for columns that should be filtered using freeform text.
    for column_id in FREEFORM_COLUMN_IDS:
      factory = column.ColumnHeaderFactory(
          header_text=column_id, use_freeform_filter=True)
      all_image_pairs.set_column_header_factory(
          column_id=column_id, column_header_factory=factory)
      failing_image_pairs.set_column_header_factory(
          column_id=column_id, column_header_factory=factory)
    all_image_pairs.ensure_extra_column_values_in_summary(
        column_id=COLUMN__RESULT_TYPE, values=[
            results.KEY__RESULT_TYPE__FAILED,
            results.KEY__RESULT_TYPE__NOCOMPARISON,
            results.KEY__RESULT_TYPE__SUCCEEDED,
        ])
    failing_image_pairs.ensure_extra_column_values_in_summary(
        column_id=COLUMN__RESULT_TYPE, values=[
            results.KEY__RESULT_TYPE__FAILED,
            results.KEY__RESULT_TYPE__NOCOMPARISON,
        ])
    logging.info('Starting to add imagepairs to queue.')
    self._image_diff_db.log_queue_size_if_changed(limit_verbosity=False)
    # NOTE(review): `dict.keys() + dict.keys()` relies on Python 2, where
    # keys() returns a list; this module predates a Python 3 port.
    union_dict_paths = sorted(set(setA_dicts.keys() + setB_dicts.keys()))
    num_union_dict_paths = len(union_dict_paths)
    dict_num = 0
    for dict_path in union_dict_paths:
      dict_num += 1
      logging.info(
          'Asynchronously requesting pixel diffs for dict #%d of %d, "%s"...' %
          (dict_num, num_union_dict_paths, dict_path))
      dictA = self.get_default(setA_dicts, None, dict_path)
      self._validate_dict_version(dictA)
      dictA_results = self.get_default(dictA, {}, setA_section)
      dictB = self.get_default(setB_dicts, None, dict_path)
      self._validate_dict_version(dictB)
      dictB_results = self.get_default(dictB, {}, setB_section)
      # Each summary may override the default image base URL.
      image_A_base_url = self.get_default(
          setA_dicts, self._image_base_gs_url, dict_path,
          gm_json.JSONKEY_IMAGE_BASE_GS_URL)
      image_B_base_url = self.get_default(
          setB_dicts, self._image_base_gs_url, dict_path,
          gm_json.JSONKEY_IMAGE_BASE_GS_URL)
      # get the builders and render modes for each set
      builder_A = self.get_default(dictA, None,
                                   gm_json.JSONKEY_DESCRIPTIONS,
                                   gm_json.JSONKEY_DESCRIPTIONS_BUILDER)
      render_mode_A = self.get_default(dictA, None,
                                       gm_json.JSONKEY_DESCRIPTIONS,
                                       gm_json.JSONKEY_DESCRIPTIONS_RENDER_MODE)
      builder_B = self.get_default(dictB, None,
                                   gm_json.JSONKEY_DESCRIPTIONS,
                                   gm_json.JSONKEY_DESCRIPTIONS_BUILDER)
      render_mode_B = self.get_default(dictB, None,
                                       gm_json.JSONKEY_DESCRIPTIONS,
                                       gm_json.JSONKEY_DESCRIPTIONS_RENDER_MODE)
      skp_names = sorted(set(dictA_results.keys() + dictB_results.keys()))
      # Just for manual testing... truncate to an arbitrary subset.
      if self.truncate_results:
        skp_names = skp_names[1:3]
      for skp_name in skp_names:
        imagepairs_for_this_skp = []
        # Whole-image rendering result (may be absent on either side).
        whole_image_A = self.get_default(
            dictA_results, None,
            skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE)
        whole_image_B = self.get_default(
            dictB_results, None,
            skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE)
        imagepairs_for_this_skp.append(self._create_image_pair(
            image_dict_A=whole_image_A, image_dict_B=whole_image_B,
            image_A_base_url=image_A_base_url,
            image_B_base_url=image_B_base_url,
            builder_A=builder_A, render_mode_A=render_mode_A,
            builder_B=builder_B, render_mode_B=render_mode_B,
            source_json_file=dict_path,
            source_skp_name=skp_name, tilenum=None))
        # Tiled rendering results: pair tiles by index; a tile missing on
        # either side is passed as None.
        tiled_images_A = self.get_default(
            dictA_results, [],
            skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES)
        tiled_images_B = self.get_default(
            dictB_results, [],
            skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES)
        if tiled_images_A or tiled_images_B:
          num_tiles_A = len(tiled_images_A)
          num_tiles_B = len(tiled_images_B)
          num_tiles = max(num_tiles_A, num_tiles_B)
          for tile_num in range(num_tiles):
            imagepairs_for_this_skp.append(self._create_image_pair(
                image_dict_A=(tiled_images_A[tile_num]
                              if tile_num < num_tiles_A else None),
                image_dict_B=(tiled_images_B[tile_num]
                              if tile_num < num_tiles_B else None),
                image_A_base_url=image_A_base_url,
                image_B_base_url=image_B_base_url,
                builder_A=builder_A, render_mode_A=render_mode_A,
                builder_B=builder_B, render_mode_B=render_mode_B,
                source_json_file=dict_path,
                source_skp_name=skp_name, tilenum=tile_num))
        for one_imagepair in imagepairs_for_this_skp:
          if one_imagepair:
            all_image_pairs.add_image_pair(one_imagepair)
            result_type = one_imagepair.extra_columns_dict\
                [COLUMN__RESULT_TYPE]
            if result_type != results.KEY__RESULT_TYPE__SUCCEEDED:
              failing_image_pairs.add_image_pair(one_imagepair)
    logging.info('Finished adding imagepairs to queue.')
    self._image_diff_db.log_queue_size_if_changed(limit_verbosity=False)
    if self._prefetch_only:
      return None
    else:
      return {
          results.KEY__HEADER__RESULTS_ALL: all_image_pairs.as_dict(
              column_ids_in_order=ORDERED_COLUMN_IDS),
          results.KEY__HEADER__RESULTS_FAILURES: failing_image_pairs.as_dict(
              column_ids_in_order=ORDERED_COLUMN_IDS),
      }
def _validate_dict_version(self, result_dict):
"""Raises Exception if the dict is not the type/version we know how to read.
Args:
result_dict: dictionary holding output of render_pictures; if None,
this method will return without raising an Exception
"""
# TODO(stephana): These values should be defined as constants somewhere,
# to be kept in sync between this file and writable_expectations.py
expected_header_type = 'ChecksummedImages'
expected_header_revision = 1
if result_dict == None:
return
header = result_dict[gm_json.JSONKEY_HEADER]
header_type = header[gm_json.JSONKEY_HEADER_TYPE]
if header_type != expected_header_type:
raise Exception('expected header_type "%s", but got "%s"' % (
expected_header_type, header_type))
header_revision = header[gm_json.JSONKEY_HEADER_REVISION]
if header_revision != expected_header_revision:
raise Exception('expected header_revision %d, but got %d' % (
expected_header_revision, header_revision))
def _create_image_pair(self, image_dict_A, image_dict_B,
image_A_base_url, image_B_base_url,
builder_A, render_mode_A,
builder_B, render_mode_B,
source_json_file,
source_skp_name, tilenum):
"""Creates an ImagePair object for this pair of images.
Args:
image_dict_A: dict with JSONKEY_IMAGE_* keys, or None if no image
image_dict_B: dict with JSONKEY_IMAGE_* keys, or None if no image
image_A_base_url: base URL for image A
image_B_base_url: base URL for image B
builder_A: builder that created image set A or None if unknow
render_mode_A: render mode used to generate image set A or None if
unknown.
builder_B: builder that created image set A or None if unknow
render_mode_B: render mode used to generate image set A or None if
unknown.
source_json_file: string; relative path of the JSON file where this
result came from, within setA and setB.
source_skp_name: string; name of the source SKP file
tilenum: which tile, or None if a wholeimage
Returns:
An ImagePair object, or None if both image_dict_A and image_dict_B are
None.
"""
if (not image_dict_A) and (not image_dict_B):
return None
def _checksum_and_relative_url(dic):
if dic:
return ((dic[gm_json.JSONKEY_IMAGE_CHECKSUMALGORITHM],
int(dic[gm_json.JSONKEY_IMAGE_CHECKSUMVALUE])),
dic[gm_json.JSONKEY_IMAGE_FILEPATH])
else:
return None, None
imageA_checksum, imageA_relative_url = _checksum_and_relative_url(
image_dict_A)
imageB_checksum, imageB_relative_url = _checksum_and_relative_url(
image_dict_B)
if not imageA_checksum:
result_type = results.KEY__RESULT_TYPE__NOCOMPARISON
elif not imageB_checksum:
result_type = results.KEY__RESULT_TYPE__NOCOMPARISON
elif imageA_checksum == imageB_checksum:
result_type = results.KEY__RESULT_TYPE__SUCCEEDED
else:
result_type = results.KEY__RESULT_TYPE__FAILED
extra_columns_dict = {
COLUMN__RESULT_TYPE: result_type,
COLUMN__SOURCE_SKP: source_skp_name,
COLUMN__BUILDER_A: builder_A,
COLUMN__RENDER_MODE_A: render_mode_A,
COLUMN__BUILDER_B: builder_B,
COLUMN__RENDER_MODE_B: render_mode_B,
}
if tilenum == None:
extra_columns_dict[COLUMN__TILED_OR_WHOLE] = COLUMN__TILED_OR_WHOLE__WHOLE
extra_columns_dict[COLUMN__TILENUM] = 'N/A'
else:
extra_columns_dict[COLUMN__TILED_OR_WHOLE] = COLUMN__TILED_OR_WHOLE__TILED
extra_columns_dict[COLUMN__TILENUM] = str(tilenum)
try:
return imagepair.ImagePair(
image_diff_db=self._image_diff_db,
imageA_base_url=image_A_base_url,
imageB_base_url=image_B_base_url,
imageA_relative_url=imageA_relative_url,
imageB_relative_url=imageB_relative_url,
extra_columns=extra_columns_dict,
source_json_file=source_json_file,
download_all_images=self._download_all_images)
except (KeyError, TypeError):
logging.exception(
'got exception while creating ImagePair for'
' urlPair=("%s","%s"), source_skp_name="%s", tilenum="%s"' % (
imageA_relative_url, imageB_relative_url, source_skp_name,
tilenum))
return None
def _copy_dir_contents(self, source_dir, dest_dir):
"""Copy all contents of source_dir into dest_dir, recursing into subdirs.
Args:
source_dir: path to source dir (GS URL, local filepath, or a special
"repo:" URL type that points at a file within our Skia checkout)
dest_dir: path to destination dir (local filepath)
The copy operates as a "merge with overwrite": any files in source_dir will
be "overlaid" on top of the existing content in dest_dir. Existing files
with the same names will be overwritten.
"""
if gs_utils.GSUtils.is_gs_url(source_dir):
(bucket, path) = gs_utils.GSUtils.split_gs_url(source_dir)
self._gs.download_dir_contents(source_bucket=bucket, source_dir=path,
dest_dir=dest_dir)
elif source_dir.lower().startswith(REPO_URL_PREFIX):
repo_dir = os.path.join(REPO_BASEPATH, source_dir[len(REPO_URL_PREFIX):])
shutil.copytree(repo_dir, dest_dir)
else:
shutil.copytree(source_dir, dest_dir)
def _get_repo_revision(self, source_dir):
"""Get the commit hash of source_dir, IF it refers to a git checkout.
Args:
source_dir: path to source dir (GS URL, local filepath, or a special
"repo:" URL type that points at a file within our Skia checkout;
only the "repo:" URL type will have a commit hash.
"""
if source_dir.lower().startswith(REPO_URL_PREFIX):
repo_dir = os.path.join(REPO_BASEPATH, source_dir[len(REPO_URL_PREFIX):])
return subprocess.check_output(
args=[git_utils.GIT, 'rev-parse', 'HEAD'], cwd=repo_dir).strip()
else:
return None
| bsd-3-clause |
mo-g/iris | lib/iris/tests/unit/fileformats/grib/load_convert/test_grid_definition_template_0_and_1.py | 13 | 2355 | # (C) British Crown Copyright 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test function
:func:`iris.fileformats.grib._load_convert.grid_definition_template_0_and_1`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests
from iris.exceptions import TranslationError
from iris.fileformats.grib._load_convert import \
grid_definition_template_0_and_1
class Test(tests.IrisTest):
    """Checks that quasi-regular grids are rejected with TranslationError."""

    def test_unsupported_quasi_regular__number_of_octets(self):
        # A non-zero octet count marks the grid as quasi-regular, which the
        # loader does not support.
        section = {'numberOfOctectsForNumberOfPoints': 1}
        cs = None
        metadata = None
        with self.assertRaisesRegexp(TranslationError, 'quasi-regular'):
            grid_definition_template_0_and_1(section,
                                             metadata,
                                             'latitude',
                                             'longitude',
                                             cs)

    def test_unsupported_quasi_regular__interpretation(self):
        # Supplying an interpretation value must not change the outcome:
        # quasi-regular grids are still rejected.
        section = {'numberOfOctectsForNumberOfPoints': 1,
                   'interpretationOfNumberOfPoints': 1}
        cs = None
        metadata = None
        with self.assertRaisesRegexp(TranslationError, 'quasi-regular'):
            grid_definition_template_0_and_1(section,
                                             metadata,
                                             'latitude',
                                             'longitude',
                                             cs)
if __name__ == '__main__':
tests.main()
| gpl-3.0 |
Tschis/NabBot | utils/context.py | 1 | 10749 | import asyncio
import functools
import re
from typing import Union, Optional, Callable, TypeVar, List, Any, Sequence
import discord
from discord.ext import commands
from utils.config import config
from utils.database import get_server_property
_mention = re.compile(r'<@!?([0-9]{1,19})>')
T = TypeVar('T')
class NabCtx(commands.Context):
    """An override of :class:`commands.Context` that provides properties and methods for NabBot."""
    # Narrowed attribute annotations for the members inherited from
    # commands.Context, purely for editors/type-checkers.
    guild: discord.Guild
    message: discord.Message
    channel: discord.TextChannel
    author: Union[discord.User, discord.Member]
    me: Union[discord.Member, discord.ClientUser]
    command: commands.Command
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Emoji pairs used by react_confirm, ordered (affirmative, negative).
        # check_reactions comes from config so servers may use custom emojis.
        self.yes_no_reactions = ("🇾", "🇳")
        self.check_reactions = (config.true_emoji, config.false_emoji)
    # Properties
    @property
    def author_permissions(self) -> discord.Permissions:
        """Shortcut to check the command author's permission to the current channel.
        :return: The permissions for the author in the current channel.
        """
        return self.channel.permissions_for(self.author)
    @property
    def ask_channel_name(self) -> Optional[str]:
        """Gets the name of the ask channel for the current server.
        :return: The name of the ask channel if applicable
        :rtype: str or None"""
        if self.guild is None:
            return None
        ask_channel_id = get_server_property(self.guild.id, "ask_channel", is_int=True)
        ask_channel = self.guild.get_channel(ask_channel_id)
        # Stored channel may have been deleted; fall back to the default name.
        if ask_channel is None:
            return config.ask_channel_name
        return ask_channel.name
    @property
    def bot_permissions(self) -> discord.Permissions:
        """Shortcut to check the bot's permission to the current channel.
        :return: The permissions for the author in the current channel."""
        return self.channel.permissions_for(self.me)
    @property
    def clean_prefix(self) -> str:
        """Gets the clean prefix used in the command invocation.
        This is used to clean mentions into plain text."""
        # If the prefix was a bot mention (<@id> / <@!id>), render it as
        # a readable @name instead of the raw mention markup.
        m = _mention.match(self.prefix)
        if m:
            user = self.bot.get_user(int(m.group(1)))
            if user:
                return f'@{user.name} '
        return self.prefix
    @property
    def is_askchannel(self):
        """Checks if the current channel is the command channel"""
        # NOTE(review): reads self.guild.id without a None guard -- callers
        # appear to check for DMs first (see `long`); confirm before reuse.
        ask_channel_id = get_server_property(self.guild.id, "ask_channel", is_int=True)
        ask_channel = self.guild.get_channel(ask_channel_id)
        if ask_channel is None:
            # No stored channel: match by the configured default channel name.
            return self.channel.name == config.ask_channel_name
        return ask_channel == self.channel
    @property
    def is_lite(self) -> bool:
        """Checks if the current context is limited to lite mode.
        If the guild is in the lite_guilds list, the context is in lite mode.
        If the guild is in private message, and the message author is in at least ONE guild that is not in lite_guilds,
        then context is not lite"""
        if self.guild is not None:
            return self.guild.id in config.lite_servers
        if self.is_private:
            for g in self.bot.get_user_guilds(self.author.id):
                if g.id not in config.lite_servers:
                    return False
            # NOTE(review): this final False is returned even when ALL of the
            # author's guilds are lite servers, which contradicts the
            # docstring above (it implies True in that case) -- confirm intent.
        return False
    @property
    def is_private(self) -> bool:
        """Whether the current context is a private channel or not."""
        return self.guild is None
    @property
    def long(self) -> bool:
        """Whether the current context allows long replies or not
        Private messages and command channels allow long replies.
        """
        if self.guild is None:
            return True
        return self.is_askchannel
    @property
    def usage(self) -> str:
        """Shows the parameters signature of the invoked command"""
        # Prefer an explicit usage string set on the command, otherwise build
        # one from the command's cleaned parameter list.
        if self.command.usage:
            return self.command.usage
        else:
            params = self.command.clean_params
            if not params:
                return ''
            result = []
            for name, param in params.items():
                if param.default is not param.empty:
                    # We don't want None or '' to trigger the [name=value] case and instead it should
                    # do [name] since [name=None] or [name=] are not exactly useful for the user.
                    should_print = param.default if isinstance(param.default, str) else param.default is not None
                    if should_print:
                        result.append(f'[{name}={param.default!r}]')
                    else:
                        result.append(f'[{name}]')
                elif param.kind == param.VAR_POSITIONAL:
                    # *args-style parameter: render as repeatable.
                    result.append(f'[{name}...]')
                else:
                    # Required parameter.
                    result.append(f'<{name}>')
            return ' '.join(result)
    @property
    def world(self) -> Optional[str]:
        """Check the world that is currently being tracked by the guild
        :return: The world that the server is tracking.
        :rtype: str | None
        """
        if self.guild is None:
            return None
        else:
            return self.bot.tracked_worlds.get(self.guild.id, None)
    async def choose(self, matches: Sequence[Any], title="Suggestions"):
        """Let the author pick one of several matches by replying with a number.

        Returns the chosen element, or None on cancel (0) or timeout.
        Raises ValueError when `matches` is empty.
        """
        if len(matches) == 0:
            raise ValueError('No results found.')
        if len(matches) == 1:
            # Single match: no need to ask.
            return matches[0]
        embed = discord.Embed(colour=discord.Colour.blurple(), title=title,
                              description='\n'.join(f'{index}: {item}' for index, item in enumerate(matches, 1)))
        msg = await self.send("I couldn't find what you were looking for, maybe you mean one of these?\n"
                              "**Only say the number** (*0 to cancel*)", embed=embed)
        def check(m: discord.Message):
            # Only accept a digits-only reply from the same author and channel.
            return m.content.isdigit() and m.author.id == self.author.id and m.channel.id == self.channel.id
        message = None
        try:
            message = await self.bot.wait_for('message', check=check, timeout=30.0)
            index = int(message.content)
            if index == 0:
                await self.send("Alright, choosing cancelled.", delete_after=10)
                return None
            try:
                # The suggestion prompt is only deleted on a valid selection;
                # the author's numeric reply is always deleted in `finally`.
                await msg.delete()
                return matches[index - 1]
            except IndexError:
                await self.send(f"{self.tick(False)} That wasn't in the choices.", delete_after=10)
        except asyncio.TimeoutError:
            return None
        finally:
            try:
                if message:
                    await message.delete()
            except (discord.Forbidden, discord.NotFound):
                # Missing permissions or already deleted: nothing to clean up.
                pass
    async def execute_async(self, func: Callable[..., T], *args, **kwargs) -> T:
        """Executes a synchronous function inside an executor.
        :param func: The function to call inside the executor.
        :param args: The function's arguments
        :param kwargs: The function's keyword arguments.
        :return: The value returned by the function, if any.
        """
        ret = await self.bot.loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
        return ret
    async def input(self, *, timeout=60.0, clean=False, delete_response=False) \
            -> Optional[str]:
        """Waits for text input from the author.
        :param timeout: Maximum time to wait for a message.
        :param clean: Whether the content should be cleaned or not.
        :param delete_response: Whether to delete the author's message after.
        :return: The content of the message replied by the author
        """
        def check(_message):
            return _message.channel == self.channel and _message.author == self.author
        try:
            value = await self.bot.wait_for("message", timeout=timeout, check=check)
            if clean:
                ret = value.clean_content
            else:
                ret = value.content
            if delete_response:
                try:
                    await value.delete()
                except discord.HTTPException:
                    # Best-effort cleanup; the reply content was already read.
                    pass
            return ret
        except asyncio.TimeoutError:
            # No reply within the window.
            return None
    async def react_confirm(self, message: discord.Message, *, timeout=60.0, delete_after=False,
                            use_checkmark=False) -> Optional[bool]:
        """Waits for the command author to reply with a Y or N reaction.
        Returns True if the user reacted with Y
        Returns False if the user reacted with N
        Returns None if the user didn't react at all
        :param message: The message that will contain the reactions.
        :param timeout: The maximum time to wait for reactions
        :param delete_after: Whether to delete or not the message after finishing.
        :param use_checkmark: Whether to use or not checkmarks instead of Y/N
        :return: True if reacted with Y, False if reacted with N, None if timeout.
        """
        if not self.channel.permissions_for(self.me).add_reactions:
            raise RuntimeError('Bot does not have Add Reactions permission.')
        reactions = self.check_reactions if use_checkmark else self.yes_no_reactions
        for emoji in reactions:
            # Strip <...> so custom-emoji config entries like <:name:id> can be
            # passed to add_reaction as name:id.
            emoji = emoji.replace("<", "").replace(">", "")
            await message.add_reaction(emoji)
        def check_react(reaction: discord.Reaction, user: discord.User):
            if reaction.message.id != message.id:
                return False
            if user.id != self.author.id:
                return False
            # NOTE(review): reaction.emoji is compared against the UNstripped
            # `reactions` tuple; for custom emojis (stripped above) this
            # comparison looks like it can never match -- confirm.
            if reaction.emoji not in reactions:
                return False
            return True
        try:
            react = await self.bot.wait_for("reaction_add", timeout=timeout, check=check_react)
            # reactions[1] is the negative option (N / cross).
            if react[0].emoji == reactions[1]:
                return False
        except asyncio.TimeoutError:
            return None
        finally:
            if delete_after:
                await message.delete()
            elif self.guild is not None:
                try:
                    await message.clear_reactions()
                except discord.Forbidden:
                    pass
        # Reaching here means the affirmative reaction was used.
        return True
    def tick(self, value: bool = True, label: str = None) -> str:
        """Displays a checkmark or a cross depending on the value.
        :param value: The value to evaluate
        :param label: An optional label to display
        :return: A checkmark or cross
        """
        # int(not value): True -> index 0 (check emoji), False -> index 1 (cross).
        emoji = self.check_reactions[int(not value)]
        if label:
            return emoji + label
        return emoji
| apache-2.0 |
netscaler/neutron | neutron/plugins/openvswitch/ovs_models_v2.py | 7 | 3764 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Aaron Rosen, Nicira Networks, Inc.
# @author: Bob Kukura, Red Hat, Inc.
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.schema import UniqueConstraint
from neutron.db.models_v2 import model_base
class VlanAllocation(model_base.BASEV2):
    """Tracks which (physical_network, vlan_id) pairs are allocated."""
    __tablename__ = 'ovs_vlan_allocations'
    physical_network = Column(String(64), nullable=False, primary_key=True)
    vlan_id = Column(Integer, nullable=False, primary_key=True,
                     autoincrement=False)
    allocated = Column(Boolean, nullable=False)

    def __init__(self, physical_network, vlan_id):
        # Rows always start out unallocated; allocation flips the flag later.
        self.allocated = False
        self.physical_network = physical_network
        self.vlan_id = vlan_id

    def __repr__(self):
        return "<VlanAllocation(%s,%d,%s)>" % (
            self.physical_network, self.vlan_id, self.allocated)
class TunnelAllocation(model_base.BASEV2):
    """Tracks which tunnel IDs are currently allocated."""
    __tablename__ = 'ovs_tunnel_allocations'
    tunnel_id = Column(Integer, nullable=False, primary_key=True,
                       autoincrement=False)
    allocated = Column(Boolean, nullable=False)

    def __init__(self, tunnel_id):
        # New rows are created unallocated.
        self.allocated = False
        self.tunnel_id = tunnel_id

    def __repr__(self):
        return "<TunnelAllocation(%d,%s)>" % (self.tunnel_id, self.allocated)
class NetworkBinding(model_base.BASEV2):
    """Represents binding of virtual network to physical realization."""
    __tablename__ = 'ovs_network_bindings'
    network_id = Column(String(36),
                        ForeignKey('networks.id', ondelete="CASCADE"),
                        primary_key=True)
    # 'gre', 'vlan', 'flat', 'local'
    network_type = Column(String(32), nullable=False)
    physical_network = Column(String(64))
    segmentation_id = Column(Integer)  # tunnel_id or vlan_id

    def __init__(self, network_id, network_type, physical_network,
                 segmentation_id):
        self.network_id = network_id
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id

    def __repr__(self):
        # Use %s (not %d) for segmentation_id: the column is nullable and is
        # None for 'flat'/'local' networks, which would make %d raise a
        # TypeError.  %s renders integers identically.
        return "<NetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
                                                  self.network_type,
                                                  self.physical_network,
                                                  self.segmentation_id)
class TunnelEndpoint(model_base.BASEV2):
    """Represents tunnel endpoint in RPC mode."""
    __tablename__ = 'ovs_tunnel_endpoints'
    __table_args__ = (
        UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'),
    )
    ip_address = Column(String(64), primary_key=True)
    id = Column(Integer, nullable=False)

    def __init__(self, ip_address, id):
        # Each endpoint pairs a unique integer id with its tunnel IP address.
        self.id = id
        self.ip_address = ip_address

    def __repr__(self):
        return "<TunnelEndpoint(%s,%s)>" % (self.ip_address, self.id)
| apache-2.0 |
icereval/osf.io | osf/migrations/0032_unquote_gd_nodesettings_folder_path.py | 14 | 1331 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-30 19:21
from __future__ import unicode_literals
from urllib2 import quote, unquote
from django_bulk_update.helper import bulk_update
from django.db import migrations
def unquote_folder_paths(state, schema):
    """Percent-decode every stored Google Drive folder path (forward step)."""
    try:
        NodeSettings = state.get_model('addons_googledrive', 'nodesettings')
        targets = NodeSettings.objects.filter(folder_path__isnull=False)
    except LookupError:
        # The googledrive addon model is absent in this state: nothing to do.
        return
    for node_settings in targets:
        decoded = unquote(node_settings.folder_path)
        try:
            node_settings.folder_path = decoded.decode('utf-8')
        except UnicodeEncodeError:
            # Already-unicode paths (py2) fail the implicit ascii encode in
            # .decode(); keep the unquoted value as-is.
            node_settings.folder_path = decoded
    bulk_update(targets, update_fields=['folder_path'])
def quote_folder_paths(state, schema):
    """Re-apply percent-encoding to folder paths (reverse migration step)."""
    try:
        NodeSettings = state.get_model('addons_googledrive', 'nodesettings')
        targets = NodeSettings.objects.filter(folder_path__isnull=False)
    except LookupError:
        # Addon model not present in this migration state: nothing to do.
        return
    for node_settings in targets:
        node_settings.folder_path = quote(node_settings.folder_path.encode('utf-8'))
    bulk_update(targets, update_fields=['folder_path'])
class Migration(migrations.Migration):
    # Data migration: percent-decodes Google Drive folder paths, with the
    # quoting function as the reverse operation so the migration is revertible.
    dependencies = [
        ('osf', '0031_preprintprovider_share_source'),
    ]
    operations = [
        migrations.RunPython(unquote_folder_paths, quote_folder_paths),
    ]
| apache-2.0 |
dubourg/openturns | python/test/t_features.py | 1 | 2344 | #! /usr/bin/env python
from __future__ import print_function
import os
# Feature-availability smoke test: each numbered probe exercises one optional
# OpenTURNS capability and prints 'OK' or 'no'.  The script never aborts on a
# missing feature, so it doubles as a quick installation report.
# Probes catch Exception (not a bare except) so Ctrl-C / SystemExit still work.
width = 40

# check that python can load OpenTURNS module
print('1: Python module load'.ljust(width), end=' ')
try:
    import openturns as ot
    print('OK')
except Exception:
    print('no')

# check that python can find the Viewer module
# If it fails, check that matplotlib package is installed
print('2: Viewer (matplotlib)'.ljust(width), end=' ')
try:
    import openturns.viewer
    print('OK')
except Exception:
    print('no')

# check that OpenTURNS can run R
# It should produce a file named testDraw.png
print('3: drawing (R)'.ljust(width), end=' ')
try:
    graph = ot.Normal().drawPDF()
    fname = 'testDraw.png'
    # Any failure in draw() or the cleanup is reported as 'no'.
    graph.draw(fname)
    os.remove(fname)
    print('OK')
except Exception:
    print('no')

# check that rot package is installed
print('4: linear model (R.rot)'.ljust(width), end=' ')
try:
    lm = ot.LinearModelFactory().build(
        ot.Normal(2).getSample(10), ot.Normal().getSample(10))
    print('OK')
except Exception:
    print('no')

# check XML support
print('5: serialization (LibXML2)'.ljust(width), end=' ')
try:
    storageManager = ot.XMLStorageManager('myFile.xml')
    print('OK')
except Exception:
    print('no')

# check that analytical function are available
print('6: analytical function (muParser)'.ljust(width), end=' ')
try:
    f = ot.NumericalMathFunction(['x1', 'x2'], ['y'], ['x1+x2'])
    print('OK')
except Exception:
    print('no')

# check that hmat library was found
print('7: HMatrix (hmat-oss)'.ljust(width), end=' ')
try:
    # This is a little bit tricky because HMat 1.0 fails with 1x1 matrices
    ot.ResourceMap.SetAsUnsignedInteger(
        'TemporalNormalProcess-SamplingMethod', 1)
    vertices = [[0.0, 0.0, 0.0]]
    vertices.append([1.0, 0.0, 0.0])
    vertices.append([0.0, 1.0, 0.0])
    vertices.append([0.0, 0.0, 1.0])
    simplices = [[0, 1, 2, 3]]
    # Discard messages from HMat
    ot.Log.Show(0)
    process = ot.TemporalNormalProcess(
        ot.ExponentialModel(3), ot.Mesh(vertices, simplices))
    f = process.getRealization()
    print('OK')
except Exception:
    print('no')

# check that nlopt library was found
print('8: optimization (NLopt)'.ljust(width), end=' ')
try:
    problem = ot.OptimizationProblem()
    algo = ot.SLSQP()
    algo.setProblem(problem)
    print('OK')
except Exception:
    print('no')
lnielsen/invenio-records-rest | invenio_records_rest/serializers.py | 1 | 5710 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""REST API resources."""
from __future__ import absolute_import, print_function
import pytz
from flask import jsonify, url_for
def record_self_link(pid_value, pid_type, record, **kwargs):
    """Create self link to a given record.
    :param pid_value: pid value.
    :type pid_value: str
    :param pid_type: type of the pid.
    :type pid_type: str
    :param record: record to which the generated link will point.
    :type record: Record
    :param **kwargs: additional parameters given to flask.url_for.
    :Returns: link pointing to the given record.
    :Returns Type: str
    """
    # Item endpoints are registered per pid type as '<pid_type>_item'.
    endpoint = 'invenio_records_rest.{0}_item'.format(pid_type)
    return url_for(endpoint, pid_value=pid_value, **kwargs)
def record_to_json_serializer(pid, record, code=200,
                              headers=None):
    """Serialize a single record into a JSON flask response.
    :param pid: record's pid.
    :param record: record to serialize.
    :type record: Record
    :param code: http code of the response.
    :type code: int
    :param headers: headers to extend the response with.
    :Returns: A flask response with json data.
    :Returns Type: :py:class:`flask.Response`
    """
    # FIXME: use a formatter instead once it is implemented
    self_link = record_self_link(pid.pid_value, pid.pid_type, record,
                                 _external=True)
    response = jsonify({
        'id': pid.pid_value,
        'metadata': record,
        'links': {'self': self_link},
        # FIXME: ISO8601 encoded timestamps in UTC
        'created': pytz.utc.localize(record.created).isoformat(),
        'updated': pytz.utc.localize(record.updated).isoformat(),
        'revision': record.revision_id,
    })
    response.status_code = code
    if headers is not None:
        response.headers.extend(headers)
    response.headers['location'] = self_link
    # The SQLAlchemy row version doubles as a cheap ETag.
    response.set_etag(str(record.model.version_id))
    return response
def search_to_json_serializer_factory(hit_formatter,
                                      aggregations_formatter=None):
    """Create a search result to flask response serializers.
    :param hit_formatter: function formatting a single hit; returns a dict.
    :param aggregations_formatter: function formatting aggregations returned
        by invenio_search.
    :Return: a function formatting search results.
    """
    def serializer(pid_fetcher, search_result, links=None,
                   code=200, headers=None):
        """Build a json flask response using the given search result.
        :param pid_fetcher: function extracting pid type and value from a
            record metadata.
        :param search_result: search result as returned by invenio_search.
        :param code: http code of the response.
        :type code: int
        :param headers: headers to extend the response with.
        """
        raw_hits = search_result['hits']['hits']
        result = {
            'hits': {
                'hits': [hit_formatter(hit, pid_fetcher) for hit in raw_hits],
                'total': search_result['hits']['total'],
            },
            'links': links or {},
        }
        if 'aggregations' in search_result:
            aggregations = search_result['aggregations']
            # Pass aggregations through verbatim unless a formatter was given.
            if aggregations_formatter:
                aggregations = aggregations_formatter(aggregations)
            result['aggregations'] = aggregations
        response = jsonify(result)
        response.status_code = code
        if headers is not None:
            response.headers.extend(headers)
        return response
    return serializer
def record_hit_formatter(hit, pid_fetcher):
    """Format a single record returned by a search result.
    :param hit: record returned by a search result.
    :param pid_fetcher: function fetching pid from a search hit.
    """
    metadata = hit['_source']
    # Retrieve the pid value and pid type of the hit.
    fetched_pid = pid_fetcher(hit['_id'], metadata)
    self_link = record_self_link(fetched_pid.pid_value, fetched_pid.pid_type,
                                 metadata, _external=True)
    data = {
        'id': fetched_pid.pid_value,
        'metadata': metadata,
        'links': {'self': self_link},
        'revision': hit['_version'],
    }
    # Promote harvested timestamps out of the metadata payload into the
    # top-level response (mutates the hit's _source dict, as before).
    for key in ('_created', '_updated'):
        if key in metadata:
            data[key[1:]] = metadata.pop(key)
    return data
# Default search serializer, wired with the record hit formatter above.
search_to_json_serializer = search_to_json_serializer_factory(
    hit_formatter=record_hit_formatter
)
"""Example of search result formatting function."""
| gpl-2.0 |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/pyasn1-modules/pyasn1_modules/rfc1905.py | 114 | 4230 | #
# SNMPv2c PDU syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc1905.txt
#
from pyasn1.type import univ, namedtype, namedval, tag, constraint
from pyasn1_modules import rfc1902
# RFC 1905 max-bindings: upper bound on the number of variable bindings.
max_bindings = rfc1902.Integer(2147483647)
class _BindValue(univ.Choice):
    # Value half of a VarBind: either an actual value, an unSpecified NULL,
    # or one of the SNMPv2 exception markers (context tags 0-2).
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('value', rfc1902.ObjectSyntax()),
        namedtype.NamedType('unSpecified', univ.Null()),
        namedtype.NamedType('noSuchObject', univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('noSuchInstance', univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('endOfMibView', univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
    )
class VarBind(univ.Sequence):
    # Pairs an OID with its (choice of) value.  The empty component name
    # mirrors the untagged inline CHOICE of the ASN.1 source.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('name', rfc1902.ObjectName()),
        namedtype.NamedType('', _BindValue())
    )
class VarBindList(univ.SequenceOf):
    # SEQUENCE (SIZE (0..max-bindings)) OF VarBind.
    componentType = VarBind()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(
        0, max_bindings
    )
class PDU(univ.Sequence):
    # Common PDU layout shared by all non-bulk SNMPv2 operations; concrete
    # operations below only re-tag this structure.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('request-id', rfc1902.Integer32()),
        namedtype.NamedType('error-status', univ.Integer(namedValues=namedval.NamedValues(('noError', 0), ('tooBig', 1), ('noSuchName', 2), ('badValue', 3), ('readOnly', 4), ('genErr', 5), ('noAccess', 6), ('wrongType', 7), ('wrongLength', 8), ('wrongEncoding', 9), ('wrongValue', 10), ('noCreation', 11), ('inconsistentValue', 12), ('resourceUnavailable', 13), ('commitFailed', 14), ('undoFailed', 15), ('authorizationError', 16), ('notWritable', 17), ('inconsistentName', 18)))),
        namedtype.NamedType('error-index', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
        namedtype.NamedType('variable-bindings', VarBindList())
    )
class BulkPDU(univ.Sequence):
    # GetBulk variant: error-status/error-index are replaced by
    # non-repeaters/max-repetitions.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('request-id', rfc1902.Integer32()),
        namedtype.NamedType('non-repeaters', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
        namedtype.NamedType('max-repetitions', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
        namedtype.NamedType('variable-bindings', VarBindList())
    )
class GetRequestPDU(PDU):
    # GetRequest-PDU ::= [0] IMPLICIT PDU
    tagSet = PDU.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
    )
class GetNextRequestPDU(PDU):
    # GetNextRequest-PDU ::= [1] IMPLICIT PDU
    tagSet = PDU.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
    )
class ResponsePDU(PDU):
    # Response-PDU ::= [2] IMPLICIT PDU
    tagSet = PDU.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
    )
class SetRequestPDU(PDU):
    # SetRequest-PDU ::= [3] IMPLICIT PDU
    tagSet = PDU.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
    )
class GetBulkRequestPDU(BulkPDU):
    # GetBulkRequest-PDU ::= [5] IMPLICIT BulkPDU.  (Tag [4] is the obsolete
    # SNMPv1 Trap-PDU and is deliberately unused here.)
    tagSet = PDU.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
    )
class InformRequestPDU(PDU):
    # InformRequest-PDU ::= [6] IMPLICIT PDU
    tagSet = PDU.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
    )
class SNMPv2TrapPDU(PDU):
    # SNMPv2-Trap-PDU ::= [7] IMPLICIT PDU
    tagSet = PDU.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
    )
class ReportPDU(PDU):
    # Report-PDU ::= [8] IMPLICIT PDU
    tagSet = PDU.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
    )
class PDUs(univ.Choice):
    # Top-level CHOICE over every SNMPv2 PDU type; the implicit tags above
    # disambiguate the alternatives on the wire.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('get-request', GetRequestPDU()),
        namedtype.NamedType('get-next-request', GetNextRequestPDU()),
        namedtype.NamedType('get-bulk-request', GetBulkRequestPDU()),
        namedtype.NamedType('response', ResponsePDU()),
        namedtype.NamedType('set-request', SetRequestPDU()),
        namedtype.NamedType('inform-request', InformRequestPDU()),
        namedtype.NamedType('snmpV2-trap', SNMPv2TrapPDU()),
        namedtype.NamedType('report', ReportPDU())
    )
| bsd-3-clause |
newtonne/trellis | lib/trellis/plugins/vars/version.py | 4 | 2049 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import __version__
from ansible.errors import AnsibleError
from distutils.version import LooseVersion
from operator import eq, ge, gt
from sys import version_info
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Supported Ansible version window for this release of Trellis.
version_requirement = '2.8.0'
version_tested_max = '2.9.10'
# Oldest Ansible with working Python 3 support.
python3_required_version = '2.5.3'
# Hard failure: Python 3 with a too-old Ansible.
if version_info[0] == 3 and not ge(LooseVersion(__version__), LooseVersion(python3_required_version)):
    raise AnsibleError(('Ansible >= {} is required when using Python 3.\n'
                        'Either downgrade to Python 2 or update your Ansible version to {}.').format(python3_required_version, python3_required_version))
# Hard failure: Ansible below the supported minimum.
if not ge(LooseVersion(__version__), LooseVersion(version_requirement)):
    raise AnsibleError(('Trellis no longer supports Ansible {}.\n'
                        'Please upgrade to Ansible {} or higher.').format(__version__, version_requirement))
# Soft warning: newer than the most recent version Trellis was tested against.
elif gt(LooseVersion(__version__), LooseVersion(version_tested_max)):
    display.warning(u'Your Ansible version is {} but this version of Trellis has only been tested for '
                    u'compatability with Ansible {} -> {}. It is advisable to check for Trellis updates or '
                    u'downgrade your Ansible version.'.format(__version__, version_requirement, version_tested_max))
# Soft warning: Ansible 2.5.0 specifically is known to emit spurious warnings.
if eq(LooseVersion(__version__), LooseVersion('2.5.0')):
    display.warning(u'Your Ansible version is {}. Consider upgrading your Ansible version to avoid '
                    u'erroneous warnings such as `Removed restricted key from module data...`'.format(__version__))
# Import BaseVarsPlugin after Ansible version check.
# Otherwise import error for Ansible versions older than 2.4 would prevent display of version check message.
from ansible.plugins.vars import BaseVarsPlugin
class VarsModule(BaseVarsPlugin):
    # Contributes no variables; per the comment above, this plugin appears to
    # exist so the module-level version checks run when Ansible loads it.
    def get_vars(self, loader, path, entities, cache=True):
        return {}
| mit |
jordanemedlock/psychtruths | temboo/Library/Kiva/Loans/GetLoanUpdates.py | 5 | 3471 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetLoanUpdates
# Returns all status updates for a loan, newest first.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetLoanUpdates(Choreography):
    # Generated Temboo SDK wrapper for the Kiva "GetLoanUpdates" Choreo.
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetLoanUpdates Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetLoanUpdates, self).__init__(temboo_session, '/Library/Kiva/Loans/GetLoanUpdates')

    def new_input_set(self):
        # Factory for this Choreo's input container.
        return GetLoanUpdatesInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return GetLoanUpdatesResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for polling an asynchronous execution.
        return GetLoanUpdatesChoreographyExecution(session, exec_id, path)
class GetLoanUpdatesInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetLoanUpdates
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AppID(self, value):
        """
        Set the value of the AppID input for this Choreo. ((optional, string) Your unique application ID, usually in reverse DNS notation.)
        """
        super(GetLoanUpdatesInputSet, self)._set_input('AppID', value)
    def set_LoanID(self, value):
        """
        Set the value of the LoanID input for this Choreo. ((required, string) The ID of the loan for which to get details.)
        """
        super(GetLoanUpdatesInputSet, self)._set_input('LoanID', value)
    def set_ResponseType(self, value):
        """
        Set the value of the ResponseType input for this Choreo. ((optional, string) Output returned can be XML or JSON. Defaults to JSON.)
        """
        super(GetLoanUpdatesInputSet, self)._set_input('ResponseType', value)
class GetLoanUpdatesResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetLoanUpdates Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    # NOTE(review): the parameter name `str` shadows the builtin; it is kept
    # because this is generated code and renaming would break keyword callers.
    def getJSONFromString(self, str):
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Kiva.)
        """
        return self._output.get('Response', None)
class GetLoanUpdatesChoreographyExecution(ChoreographyExecution):
    # Asynchronous execution handle; wraps poll responses in the result set.
    def _make_result_set(self, response, path):
        return GetLoanUpdatesResultSet(response, path)
| apache-2.0 |
goliate/sarakha63-persomov | couchpotato/core/media/movie/providers/automation/moviemeter.py | 38 | 1179 | from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'Moviemeter'
class Moviemeter(Automation, RSS):
    """Automation provider importing movies from moviemeter.nl's cinema RSS feed."""

    interval = 1800
    rss_url = 'http://www.moviemeter.nl/rss/cinema'

    def getIMDBids(self):
        # Collect IMDB ids for every feed entry that resolves to a movie
        # meeting the minimal-quality criteria.
        imdb_ids = []
        for entry in self.getRSSData(self.rss_url):
            movie = self.search(self.getTextElement(entry, 'title'))
            if not movie or not self.isMinimalMovie(movie):
                continue
            imdb_ids.append(movie['imdb'])
        return imdb_ids
# Settings section rendered on CouchPotato's Automation tab; only exposes an
# on/off toggle for this provider.
config = [{
    'name': 'moviemeter',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'moviemeter_automation',
            'label': 'Moviemeter',
            'description': 'Imports movies from the current top 10 of moviemeter.nl.',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
| gpl-3.0 |
israelbenatar/boto | boto/dynamodb2/exceptions.py | 164 | 1990 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class ProvisionedThroughputExceededException(JSONResponseError):
    """DynamoDB ``ProvisionedThroughputExceededException`` error response."""
    pass
class LimitExceededException(JSONResponseError):
pass
class ConditionalCheckFailedException(JSONResponseError):
pass
class ResourceInUseException(JSONResponseError):
pass
class ResourceNotFoundException(JSONResponseError):
pass
class InternalServerError(JSONResponseError):
pass
class ValidationException(JSONResponseError):
pass
class ItemCollectionSizeLimitExceededException(JSONResponseError):
pass
class DynamoDBError(Exception):
pass
class UnknownSchemaFieldError(DynamoDBError):
pass
class UnknownIndexFieldError(DynamoDBError):
pass
class UnknownFilterTypeError(DynamoDBError):
pass
class QueryError(DynamoDBError):
pass
class ItemNotFound(DynamoDBError):
pass
| mit |
UXJera/JeremiahNyman.com | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/__init__.py | 574 | 21473 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Mutable registry of enabled debug modes; DebugOutput() consults it and
# gyp_main() populates it from the -d/--debug command-line flag.
debug = {}

# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
  """Print *message* (optionally %-formatted with *args*) when debug mode
  *mode* -- or the catch-all 'all' -- is enabled in gyp.debug."""
  if 'all' in gyp.debug or mode in gyp.debug:
    ctx = ('unknown', 0, 'unknown')
    try:
      # Identify the caller (filename, line, function) for the log prefix.
      f = traceback.extract_stack(limit=2)
      if f:
        ctx = f[0][:3]
    except:
      # Best effort only; fall back to the 'unknown' placeholder context.
      pass
    if args:
      message %= args
    print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
                              ctx[1], ctx[2], message)
def FindBuildFiles():
  """Return the names of all '.gyp' files in the current directory."""
  suffix = '.gyp'
  return [name for name in os.listdir(os.getcwd())
          if name.endswith(suffix)]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.
  """
  # Note: the mutable defaults above are safe because both are copied
  # (copy.copy / includes[:]) before any mutation.
  if params is None:
    params = {}
  flavor = None
  # A 'format-flavor' spec (e.g. 'make-android') splits into the generator
  # module name and a flavor passed through params.
  # NOTE(review): the local 'flavor' is never read after this point.
  if '-' in format:
    format, params['flavor'] = format.split('-', 1)
  default_variables = copy.copy(default_variables)
  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
  # avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format
  # Format can be a custom python file, or by default the name of a module
  # within gyp.generator.
  if format.endswith('.py'):
    generator_name = os.path.splitext(format)[0]
    path, generator_name = os.path.split(generator_name)
    # Make sure the path to the custom generator is in sys.path
    # Don't worry about removing it once we are done. Keeping the path
    # to each generator that is used in sys.path is likely harmless and
    # arguably a good idea.
    path = os.path.abspath(path)
    if path not in sys.path:
      sys.path.insert(0, path)
  else:
    generator_name = 'gyp.generator.' + format
  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  for (key, val) in generator.generator_default_variables.items():
    default_variables.setdefault(key, val)
  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)
  # Give the generator the opportunity to set generator_input_info based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateGeneratorInputInfo', None):
    generator.CalculateGeneratorInputInfo(params)
  # Fetch the generator specific info that gets fed to input, we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
    'non_configuration_keys':
        getattr(generator, 'generator_additional_non_configuration_keys', []),
    'path_sections':
        getattr(generator, 'generator_additional_path_sections', []),
    'extra_sources_for_rules':
        getattr(generator, 'generator_extra_sources_for_rules', []),
    'generator_supports_multiple_toolsets':
        getattr(generator, 'generator_supports_multiple_toolsets', False),
    'generator_wants_static_library_dependencies_adjusted':
        getattr(generator,
                'generator_wants_static_library_dependencies_adjusted', True),
    'generator_wants_sorted_dependencies':
        getattr(generator, 'generator_wants_sorted_dependencies', False),
    'generator_filelist_paths':
        getattr(generator, 'generator_filelist_paths', None),
  }
  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check, circular_check,
                          params['parallel'], params['root_targets'])
  return [generator] + result
def NameValueListToDict(name_value_list):
  """Convert a list of 'NAME=VALUE' strings into a dict.

  A bare 'NAME' entry maps to True; a VALUE that parses as an integer is
  stored as an int, otherwise as the original string.
  """
  parsed = {}
  for entry in name_value_list:
    name, sep, value = entry.partition('=')
    if not sep:
      # No '=' at all: treat as a boolean switch.
      parsed[name] = True
      continue
    try:
      parsed[name] = int(value)
    except ValueError:
      parsed[name] = value
  return parsed
def ShlexEnv(env_name):
  """Shell-tokenize the value of environment variable *env_name*.

  Returns the token list; when the variable is unset (or set to an empty
  string) the falsy default is returned unsplit, so callers that iterate
  or extend the result see no flags.
  """
  raw = os.environ.get(env_name, [])
  return shlex.split(raw) if raw else raw
def FormatOpt(opt, value):
  """Join a flag and its value: '--opt=value' for long options,
  'optvalue' for short ones."""
  long_form = opt.startswith('--')
  return '%s=%s' % (opt, value) if long_form else opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Rebuild command-line flags for an option with action='append'.

  Environment-derived values (from *env_name*, when the environment is in
  use) come first, de-duplicated so the last occurrence wins; the explicit
  command-line *values* are appended afterwards. This matches the handling
  where command-line flags override the environment, without requiring the
  environment to be set when the flags are replayed.
  """
  regenerated = []
  if env_name and options.use_environment:
    for raw in ShlexEnv(env_name):
      formatted = FormatOpt(flag, predicate(raw))
      # Keep only the final occurrence of a repeated value.
      if formatted in regenerated:
        regenerated.remove(formatted)
      regenerated.append(formatted)
  for raw in (values or []):
    regenerated.append(FormatOpt(flag, predicate(raw)))
  return regenerated
def RegenerateFlags(options):
  """Given a parsed options object, and taking the environment variables into
  account, returns a list of flags that should regenerate an equivalent options
  object (even in the absence of the environment variables.)

  Any path options will be normalized relative to depth.

  The format flag is not included, as it is assumed the calling generator will
  set that as appropriate.
  """
  def FixPath(path):
    # Re-express a path relative to the build depth.
    path = gyp.common.FixIfRelativePath(path, options.depth)
    if not path:
      return os.path.curdir
    return path

  def Noop(value):
    return value

  # We always want to ignore the environment when regenerating, to avoid
  # duplicate or changed flags in the environment at the time of regeneration.
  flags = ['--ignore-environment']
  for name, metadata in options._regeneration_metadata.iteritems():
    opt = metadata['opt']
    value = getattr(options, name)
    # Pre-ternary idiom: FixPath for path-typed options, identity otherwise.
    value_predicate = metadata['type'] == 'path' and FixPath or Noop
    action = metadata['action']
    env_name = metadata['env_name']
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None): # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      if ((action == 'store_true' and value) or
          (action == 'store_false' and not value)):
        flags.append(opt)
      elif options.use_environment and env_name:
        print >>sys.stderr, ('Warning: environment regeneration unimplemented '
                             'for %s flag %r env_name %r' % (action, opt,
                                                             env_name))
    else:
      print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
                           'flag %r' % (action, opt))
  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """OptionParser that records enough metadata about each option to let
  RegenerateFlags() rebuild an equivalent command line later."""

  def __init__(self):
    # Maps option dest -> metadata consumed during regeneration.
    self._regen_options = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    Accepts the same arguments as OptionParser.add_option, plus:

    regenerate: set to False to exclude this option from regeneration.
    env_name: environment variable that seeds additional values for this
        option.
    type: 'path' marks values that must be re-relativized against
        options.depth when regenerating.
    """
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      dest = kw['dest']
      # optparse itself has no 'path' type; downgrade it to a plain string
      # and remember the real type for the regenerator.
      opt_type = kw.get('type')
      if opt_type == 'path':
        kw['type'] = 'string'
      self._regen_options[dest] = {
          'action': kw.get('action'),
          'type': opt_type,
          'env_name': env_name,
          'opt': args[0],
      }
    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    values, args = optparse.OptionParser.parse_args(self, *args)
    # Expose the recorded metadata on the values object for RegenerateFlags.
    values._regeneration_metadata = self._regen_options
    return values, args
def gyp_main(args):
  """Parse *args*, fold in environment defaults, then run every requested
  generator over the given (or auto-discovered) build files.

  Returns 0 on success; raises GypError for usage/configuration errors.
  """
  my_name = os.path.basename(sys.argv[0])

  parser = RegeneratableOptionParser()
  usage = 'usage: %s [options ...] [build_file ...]'
  parser.set_usage(usage.replace('%s', '%prog'))
  parser.add_option('--build', dest='configs', action='append',
                    help='configuration for build after project generation')
  parser.add_option('--check', dest='check', action='store_true',
                    help='check format of gyp files')
  parser.add_option('--config-dir', dest='config_dir', action='store',
                    env_name='GYP_CONFIG_DIR', default=None,
                    help='The location for configuration files like '
                    'include.gypi.')
  parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
                    action='append', default=[], help='turn on a debugging '
                    'mode for debugging GYP. Supported modes are "variables", '
                    '"includes" and "general" or "all" for all of them.')
  parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
                    env_name='GYP_DEFINES',
                    help='sets variable VAR to value VAL')
  parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
                    help='set DEPTH gyp variable to a relative path to PATH')
  parser.add_option('-f', '--format', dest='formats', action='append',
                    env_name='GYP_GENERATORS', regenerate=False,
                    help='output formats to generate')
  parser.add_option('-G', dest='generator_flags', action='append', default=[],
                    metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
                    help='sets generator flag FLAG to VAL')
  parser.add_option('--generator-output', dest='generator_output',
                    action='store', default=None, metavar='DIR', type='path',
                    env_name='GYP_GENERATOR_OUTPUT',
                    help='puts generated build files under DIR')
  parser.add_option('--ignore-environment', dest='use_environment',
                    action='store_false', default=True, regenerate=False,
                    help='do not read options from environment variables')
  parser.add_option('-I', '--include', dest='includes', action='append',
                    metavar='INCLUDE', type='path',
                    help='files to include in all loaded .gyp files')
  # --no-circular-check disables the check for circular relationships between
  # .gyp files. These relationships should not exist, but they've only been
  # observed to be harmful with the Xcode generator. Chromium's .gyp files
  # currently have some circular relationships on non-Mac platforms, so this
  # option allows the strict behavior to be used on Macs and the lenient
  # behavior to be used elsewhere.
  # TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
  parser.add_option('--no-circular-check', dest='circular_check',
                    action='store_false', default=True, regenerate=False,
                    help="don't check for circular relationships between files")
  parser.add_option('--no-parallel', action='store_true', default=False,
                    help='Disable multiprocessing')
  parser.add_option('-S', '--suffix', dest='suffix', default='',
                    help='suffix to add to generated files')
  parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
                    default=None, metavar='DIR', type='path',
                    help='directory to use as the root of the source tree')
  parser.add_option('-R', '--root-target', dest='root_targets',
                    action='append', metavar='TARGET',
                    help='include only TARGET and its deep dependencies')

  options, build_files_arg = parser.parse_args(args)
  build_files = build_files_arg

  # Set up the configuration directory (defaults to ~/.gyp)
  if not options.config_dir:
    home = None
    home_dot_gyp = None
    if options.use_environment:
      home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
      if home_dot_gyp:
        home_dot_gyp = os.path.expanduser(home_dot_gyp)
    if not home_dot_gyp:
      # Fall back to $HOME (and on Windows/Cygwin, %USERPROFILE%).
      home_vars = ['HOME']
      if sys.platform in ('cygwin', 'win32'):
        home_vars.append('USERPROFILE')
      for home_var in home_vars:
        home = os.getenv(home_var)
        if home != None:
          home_dot_gyp = os.path.join(home, '.gyp')
          if not os.path.exists(home_dot_gyp):
            home_dot_gyp = None
          else:
            break
  else:
    home_dot_gyp = os.path.expanduser(options.config_dir)

  # A nonexistent config dir is treated the same as no config dir at all.
  if home_dot_gyp and not os.path.exists(home_dot_gyp):
    home_dot_gyp = None

  if not options.formats:
    # If no format was given on the command line, then check the env variable.
    generate_formats = []
    if options.use_environment:
      generate_formats = os.environ.get('GYP_GENERATORS', [])
    if generate_formats:
      generate_formats = re.split('[\s,]', generate_formats)
    if generate_formats:
      options.formats = generate_formats
    else:
      # Nothing in the variable, default based on platform.
      if sys.platform == 'darwin':
        options.formats = ['xcode']
      elif sys.platform in ('win32', 'cygwin'):
        options.formats = ['msvs']
      else:
        options.formats = ['make']

  if not options.generator_output and options.use_environment:
    g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
    if g_o:
      options.generator_output = g_o

  options.parallel = not options.no_parallel

  # Record the requested debug modes in the module-level registry.
  for mode in options.debug:
    gyp.debug[mode] = 1

  # Do an extra check to avoid work when we're not debugging.
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL, 'running with these options:')
    for option, value in sorted(options.__dict__.items()):
      if option[0] == '_':
        continue
      if isinstance(value, basestring):
        DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
      else:
        DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)

  if not build_files:
    build_files = FindBuildFiles()
  if not build_files:
    raise GypError((usage + '\n\n%s: error: no build_file') %
                   (my_name, my_name))

  # TODO(mark): Chromium-specific hack!
  # For Chromium, the gyp "depth" variable should always be a relative path
  # to Chromium's top-level "src" directory. If no depth variable was set
  # on the command line, try to find a "src" directory by looking at the
  # absolute path to each build file's directory. The first "src" component
  # found will be treated as though it were the path used for --depth.
  if not options.depth:
    for build_file in build_files:
      build_file_dir = os.path.abspath(os.path.dirname(build_file))
      build_file_dir_components = build_file_dir.split(os.path.sep)
      components_len = len(build_file_dir_components)
      # Walk the path components from the leaf upwards, looking for 'src'.
      for index in xrange(components_len - 1, -1, -1):
        if build_file_dir_components[index] == 'src':
          options.depth = os.path.sep.join(build_file_dir_components)
          break
        del build_file_dir_components[index]

      # If the inner loop found something, break without advancing to another
      # build file.
      if options.depth:
        break

    if not options.depth:
      raise GypError('Could not automatically locate src directory. This is'
                     'a temporary Chromium feature that will be removed. Use'
                     '--depth as a workaround.')

  # If toplevel-dir is not set, we assume that depth is the root of our source
  # tree.
  if not options.toplevel_dir:
    options.toplevel_dir = options.depth

  # -D on the command line sets variable defaults - D isn't just for define,
  # it's for default. Perhaps there should be a way to force (-F?) a
  # variable's value so that it can't be overridden by anything else.
  cmdline_default_variables = {}
  defines = []
  if options.use_environment:
    defines += ShlexEnv('GYP_DEFINES')
  if options.defines:
    defines += options.defines
  cmdline_default_variables = NameValueListToDict(defines)
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL,
                "cmdline_default_variables: %s", cmdline_default_variables)

  # Set up includes.
  includes = []

  # If ~/.gyp/include.gypi exists, it'll be forcibly included into every
  # .gyp file that's loaded, before anything else is included.
  if home_dot_gyp != None:
    default_include = os.path.join(home_dot_gyp, 'include.gypi')
    if os.path.exists(default_include):
      print 'Using overrides found in ' + default_include
      includes.append(default_include)

  # Command-line --include files come after the default include.
  if options.includes:
    includes.extend(options.includes)

  # Generator flags should be prefixed with the target generator since they
  # are global across all generator runs.
  gen_flags = []
  if options.use_environment:
    gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
  if options.generator_flags:
    gen_flags += options.generator_flags
  generator_flags = NameValueListToDict(gen_flags)
  if DEBUG_GENERAL in gyp.debug.keys():
    DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)

  # Generate all requested formats (use a set in case we got one format request
  # twice)
  for format in set(options.formats):
    params = {'options': options,
              'build_files': build_files,
              'generator_flags': generator_flags,
              'cwd': os.getcwd(),
              'build_files_arg': build_files_arg,
              'gyp_binary': sys.argv[0],
              'home_dot_gyp': home_dot_gyp,
              'parallel': options.parallel,
              'root_targets': options.root_targets}

    # Start with the default variables from the command line.
    [generator, flat_list, targets, data] = Load(build_files, format,
                                                 cmdline_default_variables,
                                                 includes, options.depth,
                                                 params, options.check,
                                                 options.circular_check)

    # TODO(mark): Pass |data| for now because the generator needs a list of
    # build files that came in. In the future, maybe it should just accept
    # a list, and not the whole data dict.
    # NOTE: flat_list is the flattened dependency graph specifying the order
    # that targets may be built. Build systems that operate serially or that
    # need to have dependencies defined before dependents reference them should
    # generate targets in the order specified in flat_list.
    generator.GenerateOutput(flat_list, targets, data, params)

    if options.configs:
      # --build requested: validate each config name, then hand off.
      valid_configs = targets[flat_list[0]]['configurations'].keys()
      for conf in options.configs:
        if conf not in valid_configs:
          raise GypError('Invalid config specified via --build: %s' % conf)
      generator.PerformBuild(data, options.configs, params)

  # Done
  return 0
def main(args):
  """Entry point: run gyp_main, converting a GypError into a message on
  stderr and a non-zero exit status instead of a traceback."""
  try:
    return gyp_main(args)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
  # Zero-argument wrapper: forward the process arguments (minus argv[0]).
  return main(sys.argv[1:])

if __name__ == '__main__':
  sys.exit(script_main())
| cc0-1.0 |
soarpenguin/ansible | lib/ansible/modules/remote_management/foreman/katello.py | 9 | 16607 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Eric D Helms <ericdhelms@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: katello
short_description: Manage Katello Resources
description:
- Allows the management of Katello resources inside your Foreman server
version_added: "2.3"
author: "Eric D Helms (@ehelms)"
requirements:
- "nailgun >= 0.28.0"
- "python >= 2.6"
- datetime
options:
server_url:
description:
- URL of Foreman server
required: true
username:
description:
- Username on Foreman server
required: true
password:
description:
- Password for user accessing Foreman server
required: true
entity:
description:
- The Foreman resource that the action will be performed on (e.g. organization, host)
required: true
params:
description:
- Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description)
required: true
'''
EXAMPLES = '''
---
# Simple Example:
- name: "Create Product"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "product"
params:
name: "Centos 7"
# Abstraction Example:
# katello.yml
---
- name: "{{ name }}"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "{{ entity }}"
params: "{{ params }}"
# tasks.yml
---
- include: katello.yml
vars:
name: "Create Dev Environment"
entity: "lifecycle_environment"
params:
name: "Dev"
prior: "Library"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create Centos Product"
entity: "product"
params:
name: "Centos 7"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create 7.2 Repository"
entity: "repository"
params:
name: "Centos 7.2"
product: "Centos 7"
organization: "Default Organization"
content_type: "yum"
url: "http://mirror.centos.org/centos/7/os/x86_64/"
- include: katello.yml
vars:
name: "Create Centos 7 View"
entity: "content_view"
params:
name: "Centos 7 View"
organization: "Default Organization"
repositories:
- name: "Centos 7.2"
product: "Centos 7"
- include: katello.yml
vars:
name: "Enable RHEL Product"
entity: "repository_set"
params:
name: "Red Hat Enterprise Linux 7 Server (RPMs)"
product: "Red Hat Enterprise Linux Server"
organization: "Default Organization"
basearch: "x86_64"
releasever: "7"
'''
RETURN = '''# '''
import datetime
import os
import traceback
try:
from nailgun import entities, entity_fields, entity_mixins
from nailgun.config import ServerConfig
HAS_NAILGUN_PACKAGE = True
except:
HAS_NAILGUN_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NailGun(object):
    """Wrapper around the nailgun API implementing the Katello operations
    exposed by this module.

    The ``find_*`` helpers either return exactly one matching entity or
    abort the run via the Ansible module's ``fail_json``; the mutating
    methods follow a search-then-update-or-create pattern and return True.
    """

    def __init__(self, server, entities, module):
        self._server = server      # nailgun ServerConfig for API calls
        self._entities = entities  # nailgun.entities module
        self._module = module      # AnsibleModule, used to report failures
        # Katello tasks such as repository syncs can be slow; give
        # nailgun's task polling a generous timeout.
        entity_mixins.TASK_TIMEOUT = 1000

    def find_organization(self, name, **params):
        """Return the single organization named *name*, or fail the run."""
        org = self._entities.Organization(self._server, name=name, **params)
        response = org.search(set(), {'search': 'name={}'.format(name)})

        if len(response) == 1:
            return response[0]
        else:
            self._module.fail_json(msg="No organization found for %s" % name)

    def find_lifecycle_environment(self, name, organization):
        """Return the lifecycle environment *name* inside *organization*."""
        org = self.find_organization(organization)

        lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
        response = lifecycle_env.search()

        if len(response) == 1:
            return response[0]
        else:
            # Message fixed: originally read "No Lifecycle Found found".
            self._module.fail_json(msg="No Lifecycle Environment found for %s" % name)

    def find_product(self, name, organization):
        """Return the product *name* inside *organization*."""
        org = self.find_organization(organization)

        product = self._entities.Product(self._server, name=name, organization=org)
        response = product.search()

        if len(response) == 1:
            return response[0]
        else:
            self._module.fail_json(msg="No Product found for %s" % name)

    def find_repository(self, name, product, organization):
        """Return the repository *name* belonging to *product*."""
        product = self.find_product(product, organization)

        repository = self._entities.Repository(self._server, name=name, product=product)
        # Repository has no organization field by default; graft one on so
        # the search is scoped to the product's organization.
        repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
        repository.organization = product.organization
        response = repository.search()

        if len(response) == 1:
            return response[0]
        else:
            self._module.fail_json(msg="No Repository found for %s" % name)

    def find_content_view(self, name, organization):
        """Return the content view *name* inside *organization*."""
        org = self.find_organization(organization)

        content_view = self._entities.ContentView(self._server, name=name, organization=org)
        response = content_view.search()

        if len(response) == 1:
            return response[0]
        else:
            self._module.fail_json(msg="No Content View found for %s" % name)

    def organization(self, params):
        """Create or update an organization. Returns True."""
        name = params['name']
        del params['name']
        org = self.find_organization(name, **params)

        if org:
            # Rebuild with the known id so update() targets the right record.
            org = self._entities.Organization(self._server, name=name, id=org.id, **params)
            org.update()
        else:
            org = self._entities.Organization(self._server, name=name, **params)
            org.create()

        return True

    def manifest(self, params):
        """Upload a subscription manifest into an organization.

        Returns True when the manifest was imported and False when the
        server reports it is identical to the one already in place.
        """
        org = self.find_organization(params['organization'])
        params['organization'] = org.id

        # 'content' is a path relative to the current working directory.
        # The with-block replaces a try/finally that raised NameError
        # (file never bound) whenever open() itself failed.
        with open(os.getcwd() + params['content'], 'r') as manifest_file:
            content = manifest_file.read()

        manifest = self._entities.Subscription(self._server)

        try:
            manifest.upload(
                data={'organization_id': org.id},
                files={'content': content}
            )
            return True
        except Exception as e:
            # str(e) instead of e.message: BaseException.message does not
            # exist on Python 3, where the attribute access itself raised
            # AttributeError and masked the real failure.
            if "Import is the same as existing data" in str(e):
                return False
            else:
                self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
                                       exception=traceback.format_exc())

    def product(self, params):
        """Create or update a product. Returns True."""
        org = self.find_organization(params['organization'])
        params['organization'] = org.id

        product = self._entities.Product(self._server, **params)
        response = product.search()

        if len(response) == 1:
            product.id = response[0].id
            product.update()
        else:
            product.create()

        return True

    def sync_product(self, params):
        """Trigger a sync of the named product's repositories."""
        org = self.find_organization(params['organization'])
        product = self.find_product(params['name'], org.name)

        return product.sync()

    def repository(self, params):
        """Create or update a repository under its product. Returns True."""
        product = self.find_product(params['product'], params['organization'])
        params['product'] = product.id
        del params['organization']

        repository = self._entities.Repository(self._server, **params)
        # Scope the search to the product's organization (see find_repository).
        repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
        repository.organization = product.organization
        response = repository.search()

        if len(response) == 1:
            repository.id = response[0].id
            repository.update()
        else:
            repository.create()

        return True

    def sync_repository(self, params):
        """Trigger a sync of a single repository."""
        org = self.find_organization(params['organization'])
        repository = self.find_repository(params['name'], params['product'], org.name)

        return repository.sync()

    def repository_set(self, params):
        """Enable a Red Hat repository set when not already enabled.

        Returns True, or False when the product cannot be found.
        """
        product = self.find_product(params['product'], params['organization'])
        del params['product']
        del params['organization']

        if not product:
            return False
        else:
            reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
            reposet = reposet.search()[0]

            # Katello names the enabled repository after the set name with
            # parentheses stripped, plus arch (and release version).
            formatted_name = [params['name'].replace('(', '').replace(')', '')]
            formatted_name.append(params['basearch'])

            if 'releasever' in params:
                formatted_name.append(params['releasever'])

            formatted_name = ' '.join(formatted_name)

            repository = self._entities.Repository(self._server, product=product, name=formatted_name)
            repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
            repository.organization = product.organization
            repository = repository.search()

            # Only enable when the resulting repository does not exist yet.
            if len(repository) == 0:
                if 'releasever' in params:
                    reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
                else:
                    reposet.enable(data={'basearch': params['basearch']})

        return True

    def sync_plan(self, params):
        """Create or update a sync plan and attach the listed products.

        Returns True.
        """
        org = self.find_organization(params['organization'])
        params['organization'] = org.id
        # 'sync_date' arrives as wall-clock 'HH:MM'.
        params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")

        products = params['products']
        del params['products']

        sync_plan = self._entities.SyncPlan(
            self._server,
            name=params['name'],
            organization=org
        )
        response = sync_plan.search()

        sync_plan.sync_date = params['sync_date']
        sync_plan.interval = params['interval']

        if len(response) == 1:
            sync_plan.id = response[0].id
            sync_plan.update()
        else:
            response = sync_plan.create()
            sync_plan.id = response[0].id

        if products:
            ids = []

            for name in products:
                product = self.find_product(name, org.name)
                ids.append(product.id)

            sync_plan.add_products(data={'product_ids': ids})

        return True

    def content_view(self, params):
        """Create or update a content view and its repository list.

        Returns True.
        """
        org = self.find_organization(params['organization'])

        content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
        response = content_view.search()

        if len(response) == 1:
            content_view.id = response[0].id
            content_view.update()
        else:
            content_view = content_view.create()

        if params['repositories']:
            repos = []

            for repository in params['repositories']:
                repository = self.find_repository(repository['name'], repository['product'], org.name)
                repos.append(repository)

            content_view.repository = repos
            content_view.update(['repository'])

        # Return True like every other mutator; the original fell off the
        # end and returned None, making main() report changed=None.
        return True

    def find_content_view_version(self, name, organization, environment):
        """Return the version of content view *name* present in *environment*."""
        env = self.find_lifecycle_environment(environment, organization)
        content_view = self.find_content_view(name, organization)

        content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
        response = content_view_version.search(['content_view'], {'environment_id': env.id})

        if len(response) == 1:
            return response[0]
        else:
            self._module.fail_json(msg="No Content View version found for %s" % response)

    def publish(self, params):
        """Publish a new version of a content view."""
        content_view = self.find_content_view(params['name'], params['organization'])

        return content_view.publish()

    def promote(self, params):
        """Promote a content view version from one lifecycle environment
        to another."""
        to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
        version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])

        data = {'environment_id': to_environment.id}
        return version.promote(data=data)

    def lifecycle_environment(self, params):
        """Create or update a lifecycle environment. Returns True."""
        org = self.find_organization(params['organization'])
        prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])

        lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
        response = lifecycle_env.search()

        if len(response) == 1:
            lifecycle_env.id = response[0].id
            lifecycle_env.update()
        else:
            lifecycle_env.create()

        return True

    def activation_key(self, params):
        """Create or update an activation key; when a content view is given,
        bind the key to it and its lifecycle environment. Returns True."""
        org = self.find_organization(params['organization'])

        activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
        response = activation_key.search()

        if len(response) == 1:
            activation_key.id = response[0].id
            activation_key.update()
        else:
            activation_key.create()

        if params['content_view']:
            content_view = self.find_content_view(params['content_view'], params['organization'])
            lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])

            activation_key.content_view = content_view
            activation_key.environment = lifecycle_environment
            activation_key.update()

        return True
def main():
    """Ansible module entry point.

    Builds the argument spec, connects to the Foreman/Katello server via
    nailgun, then dispatches to the NailGun helper that matches the
    requested entity (and optional action).  Terminates through
    module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True),
            username=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            entity=dict(required=True, no_log=False),
            action=dict(required=False, no_log=False),
            verify_ssl=dict(required=False, type='bool', default=False),
            params=dict(required=True, no_log=True, type='dict'),
        ),
        supports_check_mode=True
    )
    if not HAS_NAILGUN_PACKAGE:
        # Bug fix: the parenthesis in the hint text was unbalanced.
        module.fail_json(
            msg="Missing required nailgun module (check docs or install with: pip install nailgun)")
    server_url = module.params['server_url']
    username = module.params['username']
    password = module.params['password']
    entity = module.params['entity']
    action = module.params['action']
    params = module.params['params']
    verify_ssl = module.params['verify_ssl']
    server = ServerConfig(
        url=server_url,
        auth=(username, password),
        verify=verify_ssl
    )
    ng = NailGun(server, entities, module)
    # Smoke-test the connection and credentials before doing any real work.
    try:
        org = entities.Organization(server)
        org.search()
    except Exception as e:
        module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
    result = False
    if entity == 'product':
        if action == 'sync':
            result = ng.sync_product(params)
        else:
            result = ng.product(params)
    elif entity == 'repository':
        if action == 'sync':
            result = ng.sync_repository(params)
        else:
            result = ng.repository(params)
    elif entity == 'manifest':
        result = ng.manifest(params)
    elif entity == 'repository_set':
        result = ng.repository_set(params)
    elif entity == 'sync_plan':
        result = ng.sync_plan(params)
    elif entity == 'content_view':
        if action == 'publish':
            result = ng.publish(params)
        elif action == 'promote':
            result = ng.promote(params)
        else:
            result = ng.content_view(params)
    elif entity == 'lifecycle_environment':
        result = ng.lifecycle_environment(params)
    elif entity == 'activation_key':
        result = ng.activation_key(params)
    else:
        module.fail_json(changed=False, result="Unsupported entity supplied")
    module.exit_json(changed=result, result="%s updated" % entity)


if __name__ == '__main__':
    main()
| gpl-3.0 |
poo12138/gem5-stable | src/arch/x86/isa/insts/x87/no_operation.py | 91 | 2149 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# x87 FNOP performs no operation, so the microcode body is intentionally
# empty apart from the marker comment inside the assembler string.
microcode = '''
# FNOP
'''
| bsd-3-clause |
comjoy91/SKorean-Election_result-Crawler | crawlers/electorates/local_eduParliament.py | 2 | 2825 | #!/usr/bin/env python
# -*- encoding=utf-8 -*-
from crawlers.electorates.base_provincePage import *
from utils import sanitize, InvalidCrawlerError
def Crawler(nth, election_name, electionType, target):
    """Return the elector crawler for the *nth* educational-parliament
    election.

    Elections 1-3 were never held (raises NotImplementedError); 4-6 use
    the legacy NEC pages; anything else (including 7, whose
    "recent elections" page path is not wired up yet) raises
    InvalidCrawlerError.
    """
    _not_held = {
        1: 'Educational Parliament Election was not held in 1995.',
        2: 'Educational Parliament Election was not held in 1998.',
        3: 'Educational Parliament Election was not held in 2002.',
    }
    if nth in _not_held:
        raise NotImplementedError(_not_held[nth])
    if 4 <= nth <= 6:
        return Constituency_ElectorCrawler_Old(int(nth), election_name, electionType, target)
    # nth == 7 would use LocalDivision_ElectorCrawler_Recent when entered
    # via the "recent elections" page, but that path is not supported yet.
    raise InvalidCrawlerError('townCode', nth, election_name, electionType, target)
class Constituency_ElectorCrawler_Old(MultiCityCrawler_province):
    """Crawler for the legacy (4th-6th) educational-parliament elector
    pages on the old NEC info service."""
    def __init__(self, nth, _election_name, _election_type, _target):
        self.nth = nth
        self.target = _target
        # Crawled records are grouped per educational-parliament
        # constituency inside each province.
        self.elemType = 'constituency_in_province'
        self.isRecent = False
        self.urlPath_city_codes = 'http://info.nec.go.kr/bizcommon/selectbox/selectbox_cityCodeBySgJson_Old.json'
        self.urlParam_city_codes = dict(
            electionId='0000000000',
            electionCode=_election_name,
            subElectionCode=_election_type,
        )
        self.urlPath_sgg_list = 'http://info.nec.go.kr/electioninfo/electionInfo_report.xhtml'
        self.urlParam_sgg_list = dict(
            electionId='0000000000',
            electionName=_election_name,
            requestURI='/WEB-INF/jsp/electioninfo/0000000000/bi/bipb02.jsp',
            statementId='BIPB02_#3_10',
            oldElectionType=1,
            electionType=4,
            electionCode=_election_type,
            searchType=3,
            townCode=-1,
            sggCityCode=-1,
        )
class Constituency_ElectorCrawler_Recent(MultiCityCrawler_province):
    """Crawler for the "recent elections" NEC pages (per-province
    educational-parliament constituencies)."""
    def __init__(self, nth, _election_name, _election_type, _target):
        self.nth = nth
        self.target = _target
        # Crawled records are grouped per educational-parliament
        # constituency inside each province.
        self.elemType = 'constituency_in_province'
        self.isRecent = True
        self.urlPath_city_codes = 'http://info.nec.go.kr/bizcommon/selectbox/selectbox_cityCodeBySgJson.json'
        self.urlParam_city_codes = dict(electionCode=_election_type)
        self.urlPath_town_list = 'http://info.nec.go.kr/electioninfo/electionInfo_report.xhtml'
        self.urlParam_town_list = dict(
            electionId=_election_name,
            statementId='BIPB02_#3_10',
            requestURI='/WEB-INF/jsp/electioninfo/' + _election_name + '/bi/bipb02.jsp',
            electionCode=-1,
            searchType=3,
            townCode=-1,
        )
| apache-2.0 |
rversteegen/commandergenius | project/jni/python/src/Lib/ssl.py | 57 | 15427 | # Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
"""
import textwrap
import _ssl # if we can't import it, let the error propagate
from _ssl import SSLError
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _ssl import RAND_status, RAND_egd, RAND_add
from _ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from socket import socket, _fileobject
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
class SSLSocket (socket):
    """This class implements a subtype of socket.socket that wraps
    the underlying OS socket in an SSL context when necessary, and
    provides read and write methods over that channel."""
    def __init__(self, sock, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True,
                 suppress_ragged_eofs=True):
        socket.__init__(self, _sock=sock._sock)
        # the initializer for socket trashes the methods (tsk, tsk), so...
        # rebind the I/O methods as per-instance lambdas so they dispatch
        # to the SSL-aware implementations defined below.
        self.send = lambda data, flags=0: SSLSocket.send(self, data, flags)
        self.sendto = lambda data, addr, flags=0: SSLSocket.sendto(self, data, addr, flags)
        self.recv = lambda buflen=1024, flags=0: SSLSocket.recv(self, buflen, flags)
        self.recvfrom = lambda addr, buflen=1024, flags=0: SSLSocket.recvfrom(self, addr, buflen, flags)
        self.recv_into = lambda buffer, nbytes=None, flags=0: SSLSocket.recv_into(self, buffer, nbytes, flags)
        self.recvfrom_into = lambda buffer, nbytes=None, flags=0: SSLSocket.recvfrom_into(self, buffer, nbytes, flags)
        if certfile and not keyfile:
            # A single file may carry both the private key and the cert.
            keyfile = certfile
        # see if it's connected
        try:
            socket.getpeername(self)
        except:
            # no, no connection yet
            # NOTE(review): bare except also masks KeyboardInterrupt etc.;
            # presumably only socket.error is expected here -- confirm.
            self._sslobj = None
        else:
            # yes, create the SSL object
            self._sslobj = _ssl.sslwrap(self._sock, server_side,
                                        keyfile, certfile,
                                        cert_reqs, ssl_version, ca_certs)
            if do_handshake_on_connect:
                # Handshake in blocking mode regardless of the socket's
                # configured timeout, then restore the previous timeout.
                timeout = self.gettimeout()
                try:
                    self.settimeout(None)
                    self.do_handshake()
                finally:
                    self.settimeout(timeout)
        self.keyfile = keyfile
        self.certfile = certfile
        self.cert_reqs = cert_reqs
        self.ssl_version = ssl_version
        self.ca_certs = ca_certs
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Count of file-like objects handed out by makefile(); close()
        # only really closes once the last reference is gone.
        self._makefile_refs = 0
    def read(self, len=1024):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        # (Parameter name shadows the builtin len(); kept for
        # backward compatibility.)
        try:
            return self._sslobj.read(len)
        except SSLError, x:
            if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                # Peer closed without a proper SSL shutdown; treat as EOF.
                return ''
            else:
                raise
    def write(self, data):
        """Write DATA to the underlying SSL channel.  Returns
        number of bytes of DATA actually transmitted."""
        return self._sslobj.write(data)
    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""
        return self._sslobj.peer_certificate(binary_form)
    def cipher (self):
        # Active session's cipher description, or None when the SSL
        # channel has not been established yet.
        if not self._sslobj:
            return None
        else:
            return self._sslobj.cipher()
    def send (self, data, flags=0):
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s" %
                    self.__class__)
            while True:
                try:
                    v = self._sslobj.write(data)
                except SSLError, x:
                    # Non-blocking socket not ready yet: report 0 sent.
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        return 0
                    elif x.args[0] == SSL_ERROR_WANT_WRITE:
                        return 0
                    else:
                        raise
                else:
                    return v
        else:
            return socket.send(self, data, flags)
    def sendto (self, data, addr, flags=0):
        # Datagram-style sends make no sense over an SSL stream.
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.sendto(self, data, addr, flags)
    def sendall (self, data, flags=0):
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            # Keep calling send() until every byte has been transmitted.
            amount = len(data)
            count = 0
            while (count < amount):
                v = self.send(data[count:])
                count += v
            return amount
        else:
            return socket.sendall(self, data, flags)
    def recv (self, buflen=1024, flags=0):
        if self._sslobj:
            if flags != 0:
                # NOTE(review): message says sendall() but this is recv().
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            while True:
                try:
                    return self.read(buflen)
                except SSLError, x:
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        continue
                    else:
                        raise x
        else:
            return socket.recv(self, buflen, flags)
    def recv_into (self, buffer, nbytes=None, flags=0):
        # Default the read size to the buffer length (or 1024 without one).
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv_into() on %s" %
                    self.__class__)
            while True:
                try:
                    tmp_buffer = self.read(nbytes)
                    v = len(tmp_buffer)
                    buffer[:v] = tmp_buffer
                    return v
                except SSLError as x:
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        continue
                    else:
                        raise x
        else:
            return socket.recv_into(self, buffer, nbytes, flags)
    def recvfrom (self, addr, buflen=1024, flags=0):
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom(self, addr, buflen, flags)
    def recvfrom_into (self, buffer, nbytes=None, flags=0):
        if self._sslobj:
            raise ValueError("recvfrom_into not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom_into(self, buffer, nbytes, flags)
    def pending (self):
        # Bytes already decrypted and buffered inside OpenSSL.
        if self._sslobj:
            return self._sslobj.pending()
        else:
            return 0
    def unwrap (self):
        # Shut the SSL layer down and return the plain transport object.
        if self._sslobj:
            s = self._sslobj.shutdown()
            self._sslobj = None
            return s
        else:
            raise ValueError("No SSL wrapper around " + str(self))
    def shutdown (self, how):
        # Drop the SSL state before shutting down the transport.
        self._sslobj = None
        socket.shutdown(self, how)
    def close (self):
        # Defer the real close while makefile() objects are outstanding.
        if self._makefile_refs < 1:
            self._sslobj = None
            socket.close(self)
        else:
            self._makefile_refs -= 1
    def do_handshake (self):
        """Perform a TLS/SSL handshake."""
        self._sslobj.do_handshake()
    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call. We connect it, then wrap it.
        if self._sslobj:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        socket.connect(self, addr)
        self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
                                    self.cert_reqs, self.ssl_version,
                                    self.ca_certs)
        if self.do_handshake_on_connect:
            self.do_handshake()
    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        newsock, addr = socket.accept(self)
        return (SSLSocket(newsock,
                          keyfile=self.keyfile,
                          certfile=self.certfile,
                          server_side=True,
                          cert_reqs=self.cert_reqs,
                          ssl_version=self.ssl_version,
                          ca_certs=self.ca_certs,
                          do_handshake_on_connect=self.do_handshake_on_connect,
                          suppress_ragged_eofs=self.suppress_ragged_eofs),
                addr)
    def makefile(self, mode='r', bufsize=-1):
        """Make and return a file-like object that
        works with the SSL connection. Just use the code
        from the socket module."""
        # Track outstanding file objects; close() waits for them.
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize)
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True):
    """Construct and return an SSLSocket wrapping *sock*, forwarding the
    keyword configuration unchanged (thin convenience wrapper)."""
    return SSLSocket(sock,
                     keyfile=keyfile,
                     certfile=certfile,
                     server_side=server_side,
                     cert_reqs=cert_reqs,
                     ssl_version=ssl_version,
                     ca_certs=ca_certs,
                     do_handshake_on_connect=do_handshake_on_connect,
                     suppress_ragged_eofs=suppress_ragged_eofs)
# some utility functions
def cert_time_to_seconds(cert_time):
    """Convert an ASN1_print-style timestamp
    ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") into seconds past the
    epoch.  The value is interpreted with mktime's local-time semantics.
    """
    import time
    parsed = time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT")
    return time.mktime(parsed)
# Encapsulation boundaries used by the PEM textual certificate format.
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
    """Takes a certificate in binary DER format and returns the
    PEM version of it as a string."""
    if hasattr(base64, 'standard_b64encode'):
        # preferred because older API gets line-length wrong
        f = base64.standard_b64encode(der_cert_bytes)
        # Bug fix: textwrap.fill() never ends with a newline, so without
        # the explicit '\n' the PEM footer was glued onto the last base64
        # line (the encodestring branch below already got this right).
        return (PEM_HEADER + '\n' +
                textwrap.fill(f, 64) + '\n' +
                PEM_FOOTER + '\n')
    else:
        # Fallback for very old Pythons: encodestring inserts its own
        # newlines (including a trailing one).
        return (PEM_HEADER + '\n' +
                base64.encodestring(der_cert_bytes) +
                PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
    """Takes a certificate in ASCII PEM format and returns the
    DER-encoded version of it as a byte sequence"""
    stripped = pem_cert_string.strip()
    # Validate both PEM delimiters before attempting to decode.
    if not pem_cert_string.startswith(PEM_HEADER):
        raise ValueError("Invalid PEM encoding; must start with %s"
                         % PEM_HEADER)
    if not stripped.endswith(PEM_FOOTER):
        raise ValueError("Invalid PEM encoding; must end with %s"
                         % PEM_FOOTER)
    # Drop the delimiters and decode the base64 payload between them.
    d = stripped[len(PEM_HEADER):-len(PEM_FOOTER)]
    return base64.decodestring(d)
def get_server_certificate (addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt."""
    # Unpacking also validates that addr is a (host, port) pair.
    host, port = addr
    # Require certificate validation only when a CA bundle was supplied.
    if ca_certs is not None:
        cert_reqs = CERT_REQUIRED
    else:
        cert_reqs = CERT_NONE
    conn = wrap_socket(socket(), ssl_version=ssl_version,
                       cert_reqs=cert_reqs, ca_certs=ca_certs)
    conn.connect(addr)
    der_cert = conn.getpeercert(True)
    conn.close()
    return DER_cert_to_PEM_cert(der_cert)
def get_protocol_name (protocol_code):
    """Map a PROTOCOL_* constant to its human-readable name, or
    "<unknown>" for unrecognized codes."""
    names = {
        PROTOCOL_TLSv1: "TLSv1",
        PROTOCOL_SSLv23: "SSLv23",
        PROTOCOL_SSLv2: "SSLv2",
        PROTOCOL_SSLv3: "SSLv3",
    }
    return names.get(protocol_code, "<unknown>")
# a replacement for the old socket.ssl function
def sslwrap_simple (sock, keyfile=None, certfile=None):
    """A replacement for the old socket.ssl function.  Designed
    for compability with Python 2.5 and earlier.  Will disappear in
    Python 3.0.

    Wraps *sock* (or its underlying _sock) in a raw _ssl object using
    SSLv23 and no certificate validation; the handshake is performed
    immediately when the socket is already connected.
    """
    if hasattr(sock, "_sock"):
        sock = sock._sock
    ssl_sock = _ssl.sslwrap(sock, 0, keyfile, certfile, CERT_NONE,
                            PROTOCOL_SSLv23, None)
    try:
        sock.getpeername()
    except Exception:
        # Not connected yet; the handshake happens later, on connect.
        # (Bug fix: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    else:
        # Already connected -- complete the handshake now.
        ssl_sock.do_handshake()
    return ssl_sock
| lgpl-2.1 |
noironetworks/nova | nova/tests/functional/api_sample_tests/test_evacuate.py | 21 | 4531 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
from nova.servicegroup import api as service_group_api
from nova.tests.functional.api_sample_tests import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class EvacuateJsonTest(test_servers.ServersSampleBase):
    # Run the API sample tests with an admin context; evacuate is an
    # admin-only action.
    ADMIN_API = True
    extension_name = "os-evacuate"
    def _get_flags(self):
        # Enable the evacuate extensions on top of the base sample flags.
        f = super(EvacuateJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.evacuate.Evacuate')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.extended_evacuate_find_host.'
            'Extended_evacuate_find_host')
        return f
    def _test_evacuate(self, req_subs, server_req, server_resp,
                       expected_resp_code):
        # Boot a server, stub out host/service checks so the evacuate
        # request is accepted, then verify request/response templates.
        self.uuid = self._post_server()
        def fake_service_is_up(self, service):
            """Simulate validation of instance host is down."""
            return False
        def fake_service_get_by_compute_host(self, context, host):
            """Simulate that given host is a valid host."""
            return {
                'host_name': host,
                'service': 'compute',
                'zone': 'nova'
                }
        def fake_check_instance_exists(self, context, instance):
            """Simulate validation of instance does not exist."""
            return False
        self.stubs.Set(service_group_api.API, 'service_is_up',
                       fake_service_is_up)
        self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
                       fake_service_get_by_compute_host)
        self.stubs.Set(compute_manager.ComputeManager,
                       '_check_instance_exists',
                       fake_check_instance_exists)
        response = self._do_post('servers/%s/action' % self.uuid,
                                 server_req, req_subs)
        subs = self._get_regexes()
        self._verify_response(server_resp, subs, response, expected_resp_code)
    @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
    def test_server_evacuate(self, rebuild_mock):
        # Note (wingwj): The host can't be the same one
        req_subs = {
            'host': 'testHost',
            "adminPass": "MySecretPass",
            "onSharedStorage": 'False'
        }
        self._test_evacuate(req_subs, 'server-evacuate-req',
                            'server-evacuate-resp', 200)
        # The explicit target host must be forwarded to the conductor.
        rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
                orig_image_ref=mock.ANY, image_ref=mock.ANY,
                injected_files=mock.ANY, new_pass="MySecretPass",
                orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
                on_shared_storage=False, preserve_ephemeral=mock.ANY,
                host='testHost')
    @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
    def test_server_evacuate_find_host(self, rebuild_mock):
        req_subs = {
            "adminPass": "MySecretPass",
            "onSharedStorage": 'False'
        }
        self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
                            'server-evacuate-find-host-resp', 200)
        # No host supplied: the scheduler should pick one (host=None).
        rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
                orig_image_ref=mock.ANY, image_ref=mock.ANY,
                injected_files=mock.ANY, new_pass="MySecretPass",
                orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
                on_shared_storage=False, preserve_ephemeral=mock.ANY,
                host=None)
| apache-2.0 |
beardypig/streamlink | src/streamlink/packages/flashmedia/ordereddict.py | 11 | 8879 | # Source: http://code.activestate.com/recipes/576693/
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: build the circular sentinel node.
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward from the sentinel.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward from the sentinel.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the link cycles explicitly to help reference counting.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    # -- the following methods do not depend on the internal structure --
    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)
    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)
    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]
    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])
    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        # self is taken from *args so "self" can legally appear in kwds.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    __update = update # let subclasses override update without breaking __init__
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an
        # OrderedDict (indirectly) contains itself.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the bookkeeping attributes; they are rebuilt on unpickle.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
    # -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| bsd-2-clause |
maratonato/slides | node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py | 1789 | 10585 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large number
of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp, params):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
options = params['options']
if options.generator_output:
workspace_path = os.path.join(options.generator_output, workspace_path)
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
  """Limit targets for Xcode wrapper.
  Xcode sometimes performs poorly with too many targets, so only include
  proper executable targets, with filters to customize.
  Arguments:
    target_extras: Regular expression to always add, matching any target.
    executable_target_pattern: Regular expression limiting executable targets.
    spec: Specifications for target.
  """
  name = spec.get('target_name')
  # Targets matching target_extras are accepted unconditionally.
  if target_extras is not None and re.search(target_extras, name):
    return True
  # Everything else must be a non-bundle executable.
  if spec.get('type', '') != 'executable' or \
     spec.get('product_extension', '') == 'bundle':
    return False
  # Apply the optional executable-name filter.
  if executable_target_pattern is not None and \
     not re.search(executable_target_pattern, name):
    return False
  return True
def CreateWrapper(target_list, target_dicts, data, params):
  """Initialize targets for the ninja wrapper.
  This sets up the necessary variables in the targets to generate Xcode projects
  that use ninja as an external builder.
  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dict of flattened build files keyed on gyp path.
    params: Dict of global options for gyp.
  Returns:
    A (new_target_list, new_target_dicts, new_data) triple describing the
    wrapper project plus a synthetic 'sources_for_indexing' target.
  """
  orig_gyp = params['build_files'][0]
  # .items() instead of .iteritems() so this works on Python 2 and 3.
  # NOTE(review): 'depth' is only bound when orig_gyp is present in |data|;
  # gyp always records the root build file there.
  for gyp_name, gyp_dict in data.items():
    if gyp_name == orig_gyp:
      depth = gyp_dict['_DEPTH']
  # Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
  # and prepend .ninja before the .gyp extension.
  generator_flags = params.get('generator_flags', {})
  main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
  if main_gyp is None:
    (build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
    main_gyp = build_file_root + ".ninja" + build_file_ext
  # Create new |target_list|, |target_dicts| and |data| data structures.
  new_target_list = []
  new_target_dicts = {}
  new_data = {}
  # Set base keys needed for |data|.
  new_data[main_gyp] = {}
  new_data[main_gyp]['included_files'] = []
  new_data[main_gyp]['targets'] = []
  new_data[main_gyp]['xcode_settings'] = \
      data[orig_gyp].get('xcode_settings', {})
  # Normally the xcode-ninja generator includes only valid executable targets.
  # If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
  # executable targets that match the pattern. (Default all)
  executable_target_pattern = \
      generator_flags.get('xcode_ninja_executable_target_pattern', None)
  # For including other non-executable targets, add the matching target name
  # to the |xcode_ninja_target_pattern| regular expression. (Default none)
  target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
  for old_qualified_target in target_list:
    spec = target_dicts[old_qualified_target]
    if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
      # Add to new_target_list.
      target_name = spec.get('target_name')
      new_target_name = '%s:%s#target' % (main_gyp, target_name)
      new_target_list.append(new_target_name)
      # Add to new_target_dicts.
      new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
      # Add to new_data.
      for old_target in data[old_qualified_target.split(':')[0]]['targets']:
        if old_target['target_name'] == target_name:
          new_data_target = {}
          new_data_target['target_name'] = old_target['target_name']
          new_data_target['toolset'] = old_target['toolset']
          new_data[main_gyp]['targets'].append(new_data_target)
  # Create sources target.
  sources_target_name = 'sources_for_indexing'
  sources_target = _TargetFromSpec(
    { 'target_name' : sources_target_name,
      'toolset': 'target',
      'default_configuration': 'Default',
      'mac_bundle': '0',
      'type': 'executable'
    }, None)
  # Tell Xcode to look everywhere for headers.
  sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
  sources = []
  for target, target_dict in target_dicts.items():
    base = os.path.dirname(target)
    files = target_dict.get('sources', []) + \
            target_dict.get('mac_bundle_resources', [])
    for action in target_dict.get('actions', []):
      files.extend(action.get('inputs', []))
    # Remove files starting with $. These are mostly intermediate files for the
    # build system.
    files = [ file for file in files if not file.startswith('$')]
    # Make sources relative to root build file.
    relative_path = os.path.dirname(main_gyp)
    sources += [ os.path.relpath(os.path.join(base, file), relative_path)
                    for file in files ]
  sources_target['sources'] = sorted(set(sources))
  # Put sources_to_index in its own gyp.
  sources_gyp = \
      os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
  fully_qualified_target_name = \
      '%s:%s#target' % (sources_gyp, sources_target_name)
  # Add to new_target_list, new_target_dicts and new_data.
  new_target_list.append(fully_qualified_target_name)
  new_target_dicts[fully_qualified_target_name] = sources_target
  new_data_target = {}
  new_data_target['target_name'] = sources_target['target_name']
  new_data_target['_DEPTH'] = depth
  new_data_target['toolset'] = "target"
  new_data[sources_gyp] = {}
  new_data[sources_gyp]['targets'] = []
  new_data[sources_gyp]['included_files'] = []
  new_data[sources_gyp]['xcode_settings'] = \
      data[orig_gyp].get('xcode_settings', {})
  new_data[sources_gyp]['targets'].append(new_data_target)
  # Write workspace to file.
  _WriteWorkspace(main_gyp, sources_gyp, params)
  return (new_target_list, new_target_dicts, new_data)
| mit |
AnhellO/DAS_Sistemas | Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/pip/_vendor/distlib/util.py | 327 | 52991 | #
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote)
logger = logging.getLogger(__name__)

#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#

COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)

IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'

RELOP = '([<>=!~]=)|[<>]'

#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
                    RELOP + r')\s*(' + VERSPEC + '))*')

# Raw string: '\s' in a plain string literal is an invalid escape sequence
# (DeprecationWarning on Python 3.6+); the pattern itself is unchanged.
DIRECT_REF = r'(from\s+(?P<diref>.*))'

#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')

EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
               CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)

#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
    """Parse a requirement like 'foo [bar, baz] (>= 1.2, < 2.0)' or
    'foo (from <url>)' into a Container with attributes: name, constraints
    (list of (op, version) pairs or None), extras (list or None),
    requirement (normalized string), source (the input) and url.
    Returns None when *s* does not match REQUIREMENT_RE.
    """
    def get_constraint(m):
        # Convert one RELOP_IDENT match into an (operator, version) pair.
        d = m.groupdict()
        return d['op'], d['vn']
    result = None
    m = REQUIREMENT_RE.match(s)
    if m:
        d = m.groupdict()
        name = d['dn']
        cons = d['c1'] or d['c2']
        if not d['diref']:
            url = None
        else:
            # direct reference
            cons = None
            url = d['diref'].strip()
        if not cons:
            cons = None
            constr = ''  # NOTE(review): unused local, kept as-is
            rs = d['dn']
        else:
            # A bare version with no relational operator defaults to '~='.
            if cons[0] not in '<>!=':
                cons = '~=' + cons
            iterator = RELOP_IDENT_RE.finditer(cons)
            cons = [get_constraint(m) for m in iterator]
            rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
        if not d['ex']:
            extras = None
        else:
            extras = COMMA_RE.split(d['ex'])
        result = Container(name=name, constraints=cons, extras=extras,
                           requirement=rs, source=s, url=url)
    return result
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files.

    *rules* is an iterable of (base, suffix, dest) triples; glob patterns
    under resources_root/base/suffix are mapped to dest-relative paths.
    A dest of None removes a previously mapped resource.
    NOTE(review): relies on 'iglob' defined elsewhere in this module (not
    the stdlib glob.iglob, which is imported as std_iglob) — presumably an
    extended version; confirm its pattern semantics before reuse.
    """
    def get_rel_path(base, path):
        # normalizes and returns a lstripped-/-separated path
        base = base.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(base)
        return path[len(base):].lstrip('/')
    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:    # remove the entry if it was here
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
def in_venv():
    """Return True when running inside a virtualenv or a PEP 405 venv."""
    if hasattr(sys, 'real_prefix'):
        # Classic virtualenv marks itself with sys.real_prefix.
        return True
    # PEP 405 venvs: prefix differs from base_prefix.
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the normalized path of the running Python, as text.

    (Historical note: a __PYVENV_LAUNCHER__ workaround used to live here;
    it is no longer needed because sys.executable now always points to the
    stub launcher on macOS.)
    """
    exe = os.path.normcase(sys.executable)
    if not isinstance(exe, text_type):
        exe = fsdecode(exe)
    return exe
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt the user until the first character of the reply (lowercased)
    is one of *allowed_chars*; return that character.

    An empty reply falls back to *default* (if given).  On an invalid
    reply, *error_prompt* (if given) is prefixed to the next prompt.
    """
    message = prompt
    while True:
        reply = raw_input(message)
        message = prompt
        if not reply and default:
            reply = default
        if reply:
            choice = reply[0].lower()
            if choice in allowed_chars:
                return choice
            if error_prompt:
                message = '%c: %s\n%s' % (choice, error_prompt, prompt)
def extract_by_key(d, keys):
    """Return a new dict with the subset of *d* whose keys appear in *keys*.

    *keys* may be a whitespace-separated string or an iterable of keys;
    keys absent from *d* are silently skipped.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return dict((key, d[key]) for key in keys if key in d)
def read_exports(stream):
    """Read exports (entry points) from *stream*.

    First tries the JSON metadata layout
    (extensions -> python.exports -> exports); if that fails, falls back
    to the legacy INI format.  Returns a dict mapping group name ->
    {entry name: ExportEntry}.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        jdata = json.load(stream)
        result = jdata['extensions']['python.exports']['exports']
        # Replace each 'name = value' string with a parsed ExportEntry.
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # Not JSON (or malformed): rewind and parse as INI below.
        stream.seek(0, 0)
    def read_stream(cp, stream):
        # ConfigParser.readfp was renamed read_file; support both.
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            cp.readfp(stream)
    cp = configparser.ConfigParser()
    try:
        read_stream(cp, stream)
    except configparser.MissingSectionHeaderError:
        # Possibly indented legacy data: dedent and retry once.
        stream.close()
        data = textwrap.dedent(data)
        stream = StringIO(data)
        read_stream(cp, stream)
    result = {}
    for key in cp.sections():
        result[key] = entries = {}
        for name, value in cp.items(key):
            s = '%s = %s' % (name, value)
            entry = get_export_entry(s)
            assert entry is not None
            #entry.dist = self
            entries[name] = entry
    return result
def write_exports(exports, stream):
    """Write *exports* (group -> {name: ExportEntry}) to *stream* in the
    legacy INI format: one section per group, 'name = prefix:suffix [flags]'
    per entry.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            # Render 'prefix' or 'prefix:suffix', then append ' [flags]'.
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory which is
    removed (with its contents) on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager that changes into directory *d* and restores the
    previous working directory on exit."""
    saved = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(saved)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Context manager temporarily setting the global socket default
    timeout to *seconds*, restoring the old value on exit."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
class cached_property(object):
    """Descriptor that computes a value on first access and caches it.

    The computed value is stored on the instance under the function's name
    via object.__setattr__, so subsequent lookups bypass this descriptor
    entirely.
    """
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        if obj is None:
            # Accessed on the class: return the descriptor itself.
            return self
        result = self.func(obj)
        # Shadow the descriptor with the computed value on the instance.
        object.__setattr__(obj, self.func.__name__, result)
        return result
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.
    The path is split on '/' and put back together again using the current
    directory separator. Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem. Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    # Nothing to do on Unix-like systems, or for an empty path.
    if os.sep == '/' or not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # Drop any '.' components before joining with the native separator.
    parts = [part for part in pathname.split('/') if part != os.curdir]
    if not parts:
        return os.curdir
    return os.path.join(*parts)
class FileOperator(object):
    """Perform file-system operations (copy, write, chmod, byte-compile,
    remove) honouring a dry-run flag, and optionally recording the files
    written and directories created so an installation can be committed
    or rolled back.
    """
    def __init__(self, dry_run=False):
        # When dry_run is True, operations are logged but not performed.
        self.dry_run = dry_run
        # Directories already checked/created by ensure_dir.
        self.ensured = set()
        self._init_record()
    def _init_record(self):
        """Reset recording state: recording off, nothing recorded."""
        self.record = False
        self.files_written = set()
        self.dirs_created = set()
    def record_as_written(self, path):
        """Remember *path* as written, if recording is enabled."""
        if self.record:
            self.files_written.add(path)
    def newer(self, source, target):
        """Tell if the target is newer than the source.
        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.
        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.
        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True
        return os.stat(source).st_mtime > os.stat(target).st_mtime
    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # Refuse to overwrite symlinks or non-regular files.
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)
    def copy_stream(self, instream, outfile, encoding=None):
        """Copy the contents of *instream* to the file *outfile*; binary
        when *encoding* is None, text otherwise."""
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)
    def write_binary_file(self, path, data):
        """Write bytes *data* to *path*, creating parent dirs as needed."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)
    def write_text_file(self, path, data, encoding):
        """Write text *data* to *path*, encoded with *encoding*."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)
    def set_mode(self, bits, mask, files):
        """OR *bits* into each file's mode and AND with *mask* (POSIX only)."""
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)
    # Convenience: mark files r-x for everyone (no write bits).
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
    def ensure_dir(self, path):
        """Create *path* (and parents) if needed; cache what's been ensured."""
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)
    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        """Byte-compile *path* to its cache location and return that path.
        NOTE(review): py_compile.compile is outside the 'if force or newer'
        block, so 'diagpath' can be unbound when neither condition holds —
        confirm callers always pass force or newer sources.
        """
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath
    def ensure_removed(self, path):
        """Remove *path* (file, link or directory tree) if it exists, and
        un-record it if it was recorded."""
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)
    def is_writable(self, path):
        """Return True if *path* (or its nearest existing ancestor) is
        writable."""
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            # Reached the filesystem root without finding an existing dir.
            if parent == path:
                break
            path = parent
        return result
    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result
    def rollback(self):
        """Undo recorded changes: delete written files, then remove the
        created directories (deepest first)."""
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """Import *module_name* and resolve the optional *dotted_path* of
    attributes on it; return the resulting object (the module itself when
    *dotted_path* is None)."""
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
    if dotted_path is None:
        return mod
    attrs = dotted_path.split('.')
    obj = getattr(mod, attrs[0])
    for attr in attrs[1:]:
        obj = getattr(obj, attr)
    return obj
class ExportEntry(object):
    """A parsed export / entry-point: ``name = prefix:suffix [flags]``."""
    def __init__(self, name, prefix, suffix, flags):
        self.name = name        # public name of the export
        self.prefix = prefix    # dotted module path
        self.suffix = suffix    # optional attribute path within the module
        self.flags = flags      # list of flag strings from the [...] part
    @cached_property
    def value(self):
        # Resolved lazily (import prefix, walk suffix) and cached.
        return resolve(self.prefix, self.suffix)
    def __repr__(self):  # pragma: no cover
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)
    def __eq__(self, other):
        # Field-by-field equality; anything else compares unequal.
        if not isinstance(other, ExportEntry):
            result = False
        else:
            result = (self.name == other.name and
                      self.prefix == other.prefix and
                      self.suffix == other.suffix and
                      self.flags == other.flags)
        return result
    # Keep identity-based hashing despite the custom __eq__ (Python 3
    # would otherwise set __hash__ to None, making instances unhashable).
    __hash__ = object.__hash__
# 'name = module[:attrs] [flag, flag=value, ...]' in verbose-regex form.
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)

def get_export_entry(specification):
    """Parse 'name = prefix:suffix [flags]' into an ExportEntry.

    Returns None for a non-matching string without brackets; raises
    DistlibException when the specification is malformed.
    """
    m = ENTRY_RE.search(specification)
    if not m:
        # Brackets without a valid entry indicate a malformed spec.
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
        return None
    d = m.groupdict()
    path = d['callable']
    colons = path.count(':')
    if colons == 0:
        prefix, suffix = path, None
    elif colons == 1:
        prefix, suffix = path.split(':')
    else:
        raise DistlibException("Invalid specification "
                               "'%s'" % specification)
    flags = d['flags']
    if flags is None:
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
        flags = []
    else:
        flags = [f.strip() for f in flags.split(',')]
    return ExportEntry(d['name'], prefix, suffix, flags)
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.
    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.
    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        parent = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        parent = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(parent):
        usable = os.access(parent, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', parent)
    else:
        try:
            os.makedirs(parent)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', parent, exc_info=True)
            usable = False
    if not usable:
        # Last resort: a throwaway temporary directory.
        parent = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', parent)
    return os.path.join(parent, suffix)
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.
    The algorithm used is:
    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, tail = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    return drive + tail.replace(os.sep, '--') + '.cache'
def ensure_slash(s):
    """Return *s*, with a trailing '/' appended if not already present."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """Split a 'user:password@host' netloc into (username, password, host).

    Missing pieces come back as None.  The user-info is split off at the
    *last* '@' (rsplit) so that passwords containing '@' are handled
    correctly; only the first ':' separates username from password, so
    passwords may contain ':' too.
    """
    username = password = None
    if '@' in netloc:
        prefix, netloc = netloc.rsplit('@', 1)
        if ':' not in prefix:
            username = prefix
        else:
            username, password = prefix.split(':', 1)
    return username, password, netloc
def get_process_umask():
    """Return the current process umask without permanently changing it."""
    # os.umask both sets a new mask and returns the previous one, so set a
    # throwaway value and immediately restore what was there before.
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    NOTE(review): an empty *seq* leaves i as None and trips the assert
    below; callers appear to pass non-empty sequences — confirm before
    reusing with possibly-empty input.
    """
    result = True
    i = None
    for i, s in enumerate(seq):
        if not isinstance(s, string_types):
            result = False
            break
    assert i is not None
    return result
# 'name-version' where name is letters/digits/underscores with .- separators.
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
# Trailing '-pyX[.Y]' python-version tag in archive filenames.
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)
    Return name, version, pyver or None
    """
    result = None
    pyver = None
    # Spaces in legacy filenames are treated as '-' separators.
    filename = unquote(filename).replace(' ', '-')
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    # If the caller knows the project name, split on it directly.
    if project_name and len(filename) > len(project_name) + 1:
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            result = filename[:n], filename[n + 1:], pyver
    if result is None:
        # Fall back to the generic name-version pattern.
        m = PROJECT_NAME_AND_VERSION.match(filename)
        if m:
            result = m.group(1), m.group(3), pyver
    return result
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')

def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.
    From e.g. a Provides-Dist value.
    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    :raises DistlibException: if *p* is not in 'name (version)' form.
    """
    m = NAME_VERSION_RE.match(p)
    if not m:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = m.groupdict()
    # Names are normalized to lower case with surrounding whitespace removed.
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """Resolve the set of *requested* extras against the *available* ones.

    '*' selects everything available; '-name' removes a previously
    selected extra; a literal '-' is kept as-is.  Undeclared extras are
    logged but still honoured.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.remove('*')
        result |= available
    for item in requested:
        if item == '-':
            result.add(item)
        elif item.startswith('-'):
            unwanted = item[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            # discard == remove-if-present
            result.discard(unwanted)
        else:
            if item not in available:
                logger.warning('undeclared extra: %s' % item)
            result.add(item)
    return result
#
# Extended metadata functionality
#
def _get_external_data(url):
    """Fetch and decode JSON metadata from *url*.

    Returns the decoded object, or an empty dict on any failure (network
    error, non-JSON content type, bad JSON); failures are logged rather
    than raised.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        ct = headers.get('Content-Type')
        if not ct.startswith('application/json'):
            logger.debug('Unexpected response for JSON request: %s', ct)
        else:
            reader = codecs.getreader('utf-8')(resp)
            #data = reader.read().decode('utf-8')
            #result = json.loads(data)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'

def get_project_data(name):
    """Fetch the JSON project metadata for *name* from the external index;
    returns {} on failure (see _get_external_data)."""
    relative = '%s/%s/project.json' % (name[0].upper(), name)
    return _get_external_data(urljoin(_external_data_base_url, relative))
def get_package_data(name, version):
    """Fetch the JSON metadata for the (name, version) package from the
    external index; returns {} on failure (see _get_external_data)."""
    relative = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
    return _get_external_data(urljoin(_external_data_base_url, relative))
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """
    def __init__(self, base):
        """
        Initialise an instance.
        :param base: The base directory where the cache should be located.
        """
        # we use 'isdir' instead of 'exists', because we want to
        # fail if there's a file with that name
        if not os.path.isdir(base):  # pragma: no cover
            os.makedirs(base)
        # Warn when group/other permission bits are set on the cache dir.
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))
    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)
    def clear(self):
        """
        Clear the cache.

        Returns the list of entries that could not be removed.
        """
        not_removed = []
        for fn in os.listdir(self.base):
            fn = os.path.join(self.base, fn)
            try:
                if os.path.islink(fn) or os.path.isfile(fn):
                    os.remove(fn)
                elif os.path.isdir(fn):
                    shutil.rmtree(fn)
            except Exception:
                # Best-effort removal: remember failures, keep going.
                not_removed.append(fn)
        return not_removed
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.
        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        subscribers = self._subscribers
        if event in subscribers:
            queue = subscribers[event]
            if append:
                queue.append(subscriber)
            else:
                queue.appendleft(subscriber)
        else:
            # First subscriber for this event: start a fresh deque.
            subscribers[event] = deque([subscriber])

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.
        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        """
        subscribers = self._subscribers
        if event not in subscribers:
            raise ValueError('No subscribers: %r' % event)
        subscribers[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.
        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.
        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        results = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                # A failing subscriber contributes None, not a crash.
                logger.exception('Exception during event publication')
                value = None
            results.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, results)
        return results
#
# Simple sequencing
#
class Sequencer(object):
    """Maintain a precedence graph of steps (predecessors/successors plus
    isolated nodes) and derive orderings and strongly connected components
    from it.
    """
    def __init__(self):
        self._preds = {}        # successor -> set of its predecessors
        self._succs = {}        # predecessor -> set of its successors
        self._nodes = set()     # nodes with no preds/succs
    def add_node(self, node):
        """Register an isolated node (no edges)."""
        self._nodes.add(node)
    def remove_node(self, node, edges=False):
        """Forget *node*; with edges=True also drop all its edges."""
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]
    def add(self, pred, succ):
        """Add an edge: *pred* must run before *succ*."""
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)
    def remove(self, pred, succ):
        """Remove the pred -> succ edge; raise ValueError if absent."""
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of %r' % (succ, pred))
    def is_step(self, step):
        """Return True if *step* is known, as an edge endpoint or node."""
        return (step in self._preds or step in self._succs or
                step in self._nodes)
    def get_steps(self, final):
        """Return the steps leading to *final*, in execution order."""
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        # Breadth-first walk over predecessors; the collected list is
        # reversed on return so predecessors come first.
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)
    @property
    def strong_connections(self):
        """Return the strongly connected components of the successor graph
        (Tarjan's algorithm), as a list of tuples."""
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []
        graph = self._succs
        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)
            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])
            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []
                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)
        for node in graph:
            if node not in lowlinks:
                strongconnect(node)
        return result
    @property
    def dot(self):
        """Return a Graphviz 'dot' rendering of the graph as a string."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('  %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('  %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
# Archive suffixes that unarchive() knows how to infer a format from.
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """Extract an archive into *dest_dir*.

    :param archive_filename: Path to a .zip/.whl/.tar/.tgz/.tar.gz/.tbz/
                             .tar.bz2 archive.
    :param dest_dir: Directory to extract into.
    :param format: One of 'zip', 'tgz', 'tbz', 'tar'. Inferred from the
                   filename when None.
    :param check: When True, reject member paths that would escape
                  *dest_dir* (path traversal).
    :raises ValueError: for an unknown format or an unsafe member path.
    """
    def check_path(path):
        # Reject absolute or '..'-style member names that would resolve
        # outside dest_dir.
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # Allow a member resolving exactly to dest_dir; previously that
        # case raised IndexError on p[plen].
        if p != dest_dir and (not p.startswith(dest_dir) or p[plen] != os.sep):
            raise ValueError('path outside destination: %r' % p)
    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:      # pragma: no cover
            raise ValueError('Unknown format for %r' % archive_filename)
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            # Bug fix: the tar open mode used to be assigned only when the
            # format was inferred from the filename, so passing format
            # explicitly ('tgz'/'tbz'/'tar') raised UnboundLocalError.
            # Derive the mode from the format instead.
            try:
                mode = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}[format]
            except KeyError:
                raise ValueError('Unknown format for %r' % archive_filename)
            archive = tarfile.open(archive_filename, mode)
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """Archive the whole tree under *directory* into an in-memory zip.

    Returns an ``io.BytesIO`` containing the zip data.  Member names are
    *directory*-relative (entries under subdirectories keep a leading
    separator, preserving the historical behaviour of ``root[dlen:]``).
    """
    prefix_len = len(directory)
    buf = io.BytesIO()
    with ZipFile(buf, "w") as archive:
        for root, _subdirs, filenames in os.walk(directory):
            arc_root = root[prefix_len:]
            for filename in filenames:
                archive.write(os.path.join(root, filename),
                              os.path.join(arc_root, filename))
    return buf
#
# Simple progress bar
#
# Metric prefixes used by Progress.speed (scaled by factors of 1000).
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
    """Track progress of an operation between a minimum and a (possibly
    unknown) maximum value, and render percentage / ETA / speed strings."""
    unknown = 'UNKNOWN'
    def __init__(self, minval=0, maxval=100):
        # maxval may be None to mean "extent not known in advance".
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None     # wall-clock time of the first update()
        self.elapsed = 0        # seconds since the first update()
        self.done = False
    def update(self, curval):
        """Record the current progress value and refresh elapsed time."""
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started
    def increment(self, incr):
        """Advance the current value by a non-negative increment."""
        assert incr >= 0
        self.update(self.cur + incr)
    def start(self):
        """Mark the start of the operation; returns self for chaining."""
        self.update(self.min)
        return self
    def stop(self):
        """Mark completion (snapping the value to max when max is known)."""
        if self.max is not None:
            self.update(self.max)
        self.done = True
    @property
    def maximum(self):
        """The configured maximum, or 'UNKNOWN' when open-ended."""
        return self.unknown if self.max is None else self.max
    @property
    def percentage(self):
        """Progress as a fixed-width percentage string (' ?? %' if unknown)."""
        # NOTE(review): if max == min (allowed by the __init__ assert) the
        # division below raises ZeroDivisionError - confirm callers never
        # construct a zero-width range.
        if self.done:
            result = '100 %'
        elif self.max is None:
            result = ' ?? %'
        else:
            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
            result = '%3d %%' % v
        return result
    def format_duration(self, duration):
        """Format *duration* seconds as HH:MM:SS, or '??:??:??' when it
        cannot be meaningful yet."""
        # Precedence note: 'and' binds tighter than 'or', so this reads as
        # ((duration <= 0) and (self.max is None)) or (self.cur == self.min)
        if (duration <= 0) and self.max is None or self.cur == self.min:
            result = '??:??:??'
        #elif duration < 1:
        #    result = '--:--:--'
        else:
            result = time.strftime('%H:%M:%S', time.gmtime(duration))
        return result
    @property
    def ETA(self):
        """'Done: <elapsed>' when finished, else 'ETA : <estimate>'."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
            #import pdb; pdb.set_trace()
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                #import pdb; pdb.set_trace()
                # Linear extrapolation: total = elapsed * span / progress;
                # remaining = total - elapsed.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))
    @property
    def speed(self):
        """Throughput as a humanised 'N [K|M|G|T|P]B/s' string."""
        if self.elapsed == 0:
            result = 0.0
        else:
            result = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if result < 1000:
                break
            result /= 1000.0
        return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
# {a,b,c} alternation marker used by iglob()/_iglob() below.
RICH_GLOB = re.compile(r'\{([^}]*)\}')
# Matches a '**' adjacent to something other than a separator/brace,
# i.e. a recursive glob that is not used alone within its path segment.
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
# Matches an unbalanced '{' or '}' set marker.
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    # Validate the pattern up front, then hand the real work to _iglob().
    checks = (
        (_CHECK_RECURSIVE_GLOB,
         """invalid glob %r: recursive glob "**" must be used alone"""),
        (_CHECK_MISMATCH_SET,
         """invalid glob %r: mismatching set marker '{' or '}'"""),
    )
    for pattern, msg in checks:
        if pattern.search(path_glob):
            raise ValueError(msg % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    """Recursive worker for iglob(); lazily yields matching paths."""
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        # Expand one {a,b,c} alternation and recurse on each variant.
        assert len(rich_path_glob) == 3, rich_path_glob
        # Renamed from 'set': the original shadowed the builtin.
        prefix, alternatives, suffix = rich_path_glob
        for alternative in alternatives.split(','):
            for path in _iglob(''.join((prefix, alternative, suffix))):
                yield path
    else:
        if '**' not in path_glob:
            # No recursive component: defer to the stdlib glob.
            for item in std_iglob(path_glob):
                yield item
        else:
            prefix, radical = path_glob.split('**', 1)
            if prefix == '':
                prefix = '.'
            if radical == '':
                radical = '*'
            else:
                # we support both '**/x' and '**\\x' spellings
                radical = radical.lstrip('/')
                radical = radical.lstrip('\\')
            # Renamed from 'dir': the original shadowed the builtin.
            for path, dirs, files in os.walk(prefix):
                path = os.path.normpath(path)
                for fn in _iglob(os.path.join(path, radical)):
                    yield fn
if ssl:
    from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
                         CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
    class HTTPSConnection(httplib.HTTPSConnection):
        ca_certs = None # set this to the path to the certs file (.pem)
        check_domain = True # only used if ca_certs is not None
        # noinspection PyPropertyAccess
        def connect(self):
            """Open the TCP connection, wrap it in TLS and - when ca_certs
            is set - verify the server certificate and (optionally) that
            it matches self.host."""
            sock = socket.create_connection((self.host, self.port), self.timeout)
            if getattr(self, '_tunnel_host', False):
                self.sock = sock
                self._tunnel()
            if not hasattr(ssl, 'SSLContext'):
                # For 2.x
                if self.ca_certs:
                    cert_reqs = ssl.CERT_REQUIRED
                else:
                    cert_reqs = ssl.CERT_NONE
                # NOTE(review): PROTOCOL_SSLv23 negotiates the highest
                # protocol both ends support; on old interpreters this can
                # still permit SSLv3/early TLS - consider tightening if
                # this legacy path is still exercised.
                self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                            cert_reqs=cert_reqs,
                                            ssl_version=ssl.PROTOCOL_SSLv23,
                                            ca_certs=self.ca_certs)
            else: # pragma: no cover
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                # Only SSLv2 is explicitly disabled here.
                context.options |= ssl.OP_NO_SSLv2
                if self.cert_file:
                    context.load_cert_chain(self.cert_file, self.key_file)
                kwargs = {}
                if self.ca_certs:
                    context.verify_mode = ssl.CERT_REQUIRED
                    context.load_verify_locations(cafile=self.ca_certs)
                if getattr(ssl, 'HAS_SNI', False):
                    # Pass SNI so name-based virtual hosts present the
                    # right certificate.
                    kwargs['server_hostname'] = self.host
                self.sock = context.wrap_socket(sock, **kwargs)
            if self.ca_certs and self.check_domain:
                try:
                    match_hostname(self.sock.getpeercert(), self.host)
                    logger.debug('Host verified: %s', self.host)
                except CertificateError: # pragma: no cover
                    # Close the socket before propagating the failure.
                    self.sock.shutdown(socket.SHUT_RDWR)
                    self.sock.close()
                    raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
    class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
        def http_open(self, req):
            """Refuse plain-HTTP requests outright (see the comment block
            above for the rationale)."""
            raise URLError('Unexpected HTTP request on what should be a secure '
                           'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
# NOTE(review): these 2.6-only shims re-run _setup() with a connection
# built from **kwargs, which presumably lets a timeout keyword reach the
# underlying connection class on that old release - confirm before
# touching this legacy path.
if _ver_info == (2, 6):
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0: # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
    if ssl:
        class HTTPS(httplib.HTTPS):
            def __init__(self, host='', port=None, **kwargs):
                if port == 0: # 0 means use port 0, not the default port
                    port = None
                self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
    """xmlrpclib Transport that applies a connection timeout.

    :param timeout: socket timeout in seconds for the HTTP connection.
    :param use_datetime: passed through to xmlrpclib.Transport.
    """
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        """Return a (possibly cached) HTTP connection to *host* with
        self.timeout applied."""
        h, eh, x509 = self.get_host_info(host)
        if sys.version_info[:2] == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                # Bug fix: the timeout was never passed to the connection,
                # so on Python != 2.6 this Transport silently behaved like
                # a plain one (SafeTransport below does pass it).
                self._connection = host, httplib.HTTPConnection(
                    h, timeout=self.timeout)
            result = self._connection[1]
        return result
if ssl:
    class SafeTransport(xmlrpclib.SafeTransport):
        """xmlrpclib SafeTransport (HTTPS) honouring a connection timeout."""
        def __init__(self, timeout, use_datetime=0):
            self.timeout = timeout
            xmlrpclib.SafeTransport.__init__(self, use_datetime)
        def make_connection(self, host):
            """Return a (possibly cached) HTTPS connection to *host* with
            self.timeout injected into the connection kwargs."""
            h, eh, kwargs = self.get_host_info(host)
            if not kwargs:
                kwargs = {}
            kwargs['timeout'] = self.timeout
            if _ver_info == (2, 6):
                # NOTE(review): this passes the raw 'host' where the other
                # branch uses the parsed 'h' - looks inconsistent with
                # Transport above; confirm before changing this 2.6 path.
                result = HTTPS(host, None, **kwargs)
            else:
                if not self._connection or host != self._connection[0]:
                    self._extra_headers = eh
                    self._connection = host, httplib.HTTPSConnection(h, None,
                                                                     **kwargs)
                result = self._connection[1]
            return result
class ServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy accepting an optional ``timeout`` keyword argument.

    When a timeout is given, one of the timeout-aware transports defined
    above is created to honour it; otherwise behaviour is identical to
    ``xmlrpclib.ServerProxy``.
    """
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The timeout-aware transports only come into play if a timeout
        # is specified.
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            transport_class = SafeTransport if scheme == 'https' else Transport
            self.transport = transport = transport_class(
                timeout, use_datetime=use_datetime)
            kwargs['transport'] = transport
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
    """Open *fn* for csv use with version-appropriate options.

    The csv module wants binary-mode files on Python 2 and text files
    opened with newline='' on Python 3.
    """
    if sys.version_info[0] >= 3:
        kwargs['newline'] = ''
    else:
        mode += 'b'
    return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared dialect options and context-manager plumbing for the csv
    reader/writer wrappers below."""
    defaults = {
                'delimiter': str(','),      # The strs are used because we need native
                'quotechar': str('"'),      # str in the csv API (2.x won't take
                'lineterminator': str('\n') # Unicode)
               }
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        # Close the underlying stream; exceptions are not suppressed.
        self.stream.close()
class CSVReader(CSVBase):
    """Iterate rows of a csv source as lists of unicode text.

    Construct with either ``stream=<binary stream>`` or ``path=<filename>``.
    """
    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            # On 3.x the csv module needs a text stream, so wrap the
            # binary one in a UTF-8 decoder.
            if sys.version_info[0] >= 3:
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        """Return the next row, decoding 2.x byte cells to unicode."""
        row = next(self.reader)
        if sys.version_info[0] < 3:
            row = [cell if isinstance(cell, text_type)
                   else cell.decode('utf-8') for cell in row]
        return row

    __next__ = next
class CSVWriter(CSVBase):
    """Write rows of unicode text to *fn* as csv (UTF-8 encoded on 2.x)."""
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        """Write one row, encoding unicode cells to bytes on Python 2."""
        if sys.version_info[0] < 3:
            row = [cell.encode('utf-8') if isinstance(cell, text_type)
                   else cell for cell in row]
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """BaseConfigurator extension that understands '()' (custom-object
    spec), '.' (post-construction attributes), '[]' (positional args)
    keys, plus an inc:// converter that includes JSON from a file."""
    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'
    def __init__(self, config, base=None):
        # *base* is the directory used to resolve relative inc:// paths;
        # defaults to the current working directory.
        super(Configurator, self).__init__(config)
        self.base = base or os.getcwd()
    def configure_custom(self, config):
        """Instantiate the object described by *config*: resolve and call
        the '()' factory with converted '[]' args and remaining keys as
        kwargs, then set any '.' attributes on the result."""
        def convert(o):
            # Recursively convert containers; dicts containing a '()' key
            # are themselves treated as custom-object specs.
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result
    def __getitem__(self, key):
        """Item access that lazily materialises '()' specs in place."""
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            self.config[key] = result = self.configure_custom(result)
        return result
    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
    """Mixin for running subprocesses while draining and reporting their
    output (via a progress callable or dots/echo on sys.stderr)."""
    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        notify = self.progress
        chatty = self.verbose
        while True:
            line = stream.readline()
            if not line:
                break
            if notify is not None:
                notify(line, context)
            elif chatty:
                sys.stderr.write(line.decode('utf-8'))
                sys.stderr.flush()
            else:
                sys.stderr.write('.')
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """Run *cmd*, draining stdout and stderr on two threads; returns
        the completed Popen object."""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, **kwargs)
        drainers = [
            threading.Thread(target=self.reader, args=(proc.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(proc.stderr, 'stderr')),
        ]
        for worker in drainers:
            worker.start()
        proc.wait()
        for worker in drainers:
            worker.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return proc
def normalize_name(name):
    """Normalize a python package name a la PEP 503"""
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    # Lowercase first, then collapse runs of '-', '_' and '.' to one '-'.
    return re.sub('[-_.]+', '-', name.lower())
| mit |
shiblon/pytour | static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/_sha512.py | 10 | 14164 | """
This code was ported from CPython's sha512module.c
"""
import struct
SHA_BLOCKSIZE = 128     # SHA-512 processes 1024-bit (128-byte) message blocks
SHA_DIGESTSIZE = 64     # SHA-512 produces a 512-bit (64-byte) digest
def new_shaobject():
    """Return a fresh, zeroed SHA-512 state dictionary."""
    state = {}
    state['digest'] = [0] * 8               # eight 64-bit hash words
    state['count_lo'] = 0                   # low 32 bits of the bit count
    state['count_hi'] = 0                   # overflow/high part of the bit count
    state['data'] = [0] * SHA_BLOCKSIZE     # pending input block (byte values)
    state['local'] = 0                      # number of valid bytes in 'data'
    state['digestsize'] = 0                 # set by sha_init()/sha384_init()
    return state
# SHA-512 primitive functions (FIPS 180-4).  Converted from lambda
# assignments to defs (PEP 8 E731) - same names, same call signatures.
def ROR64(x, y):
    """Rotate the 64-bit value *x* right by *y* bits (y taken mod 64)."""
    return (((x & 0xffffffffffffffff) >> (y & 63)) |
            (x << (64 - (y & 63)))) & 0xffffffffffffffff

def Ch(x, y, z):
    """'Choose': bits of *y* where *x* is set, bits of *z* elsewhere."""
    return z ^ (x & (y ^ z))

def Maj(x, y, z):
    """'Majority' of the corresponding bits of *x*, *y*, *z*."""
    return ((x | y) & z) | (x & y)

def S(x, n):
    """64-bit right rotation (used by the big Sigma functions)."""
    return ROR64(x, n)

def R(x, n):
    """Logical right shift of the 64-bit value *x* by *n* bits."""
    return (x & 0xffffffffffffffff) >> n

def Sigma0(x):
    """Big Sigma-0: rotations by 28, 34 and 39."""
    return S(x, 28) ^ S(x, 34) ^ S(x, 39)

def Sigma1(x):
    """Big Sigma-1: rotations by 14, 18 and 41."""
    return S(x, 14) ^ S(x, 18) ^ S(x, 41)

def Gamma0(x):
    """Small sigma-0 (message schedule): rot 1, rot 8, shift 7."""
    return S(x, 1) ^ S(x, 8) ^ R(x, 7)

def Gamma1(x):
    """Small sigma-1 (message schedule): rot 19, rot 61, shift 6."""
    return S(x, 19) ^ S(x, 61) ^ R(x, 6)
def sha_transform(sha_info):
    """Run the SHA-512 compression function over the 128-byte block in
    sha_info['data'], folding the result into sha_info['digest'].
    (Python 2 module: note the use of xrange.)"""
    # Message schedule: W[0..15] are the block's big-endian 64-bit words,
    # W[16..79] are expanded with the Gamma functions.
    W = []
    d = sha_info['data']
    for i in xrange(0,16):
        W.append( (d[8*i]<<56) + (d[8*i+1]<<48) + (d[8*i+2]<<40) + (d[8*i+3]<<32) + (d[8*i+4]<<24) + (d[8*i+5]<<16) + (d[8*i+6]<<8) + d[8*i+7])
    for i in xrange(16,80):
        W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffffffffffff )
    # Working variables a..h, kept as a rotating 8-element list.
    ss = sha_info['digest'][:]
    def RND(a,b,c,d,e,f,g,h,i,ki):
        """One SHA-512 round; returns the updated (d, h) values."""
        t0 = (h + Sigma1(e) + Ch(e, f, g) + ki + W[i]) & 0xffffffffffffffff
        t1 = (Sigma0(a) + Maj(a, b, c)) & 0xffffffffffffffff
        d = (d + t0) & 0xffffffffffffffff
        h = (t0 + t1) & 0xffffffffffffffff
        return d & 0xffffffffffffffff, h & 0xffffffffffffffff
    # 80 fully unrolled rounds: the working variables rotate one position
    # per round, and the hex literal in each call is that round's K[i]
    # constant from FIPS 180-4.  Do not reorder these lines.
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98d728ae22)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x7137449123ef65cd)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcfec4d3b2f)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba58189dbbc)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25bf348b538)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1b605d019)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4af194f9b)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5da6d8118)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98a3030242)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b0145706fbe)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be4ee4b28c)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3d5ffb4e2)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74f27b896f)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe3b1696b1)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a725c71235)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174cf692694)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c19ef14ad2)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786384f25e3)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc68b8cd5b5)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc77ac9c65)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f592b0275)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa6ea6e483)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dcbd41fbd4)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da831153b5)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152ee66dfab)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d2db43210)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c898fb213f)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7beef0ee4)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf33da88fc2)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147930aa725)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351e003826f)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x142929670a0e6e70)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a8546d22ffc)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b21385c26c926)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc5ac42aed)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d139d95b3df)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a73548baf63de)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb3c77b2a8)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e47edaee6)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c851482353b)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a14cf10364)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664bbc423001)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70d0f89791)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a30654be30)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819d6ef5218)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd69906245565a910)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e35855771202a)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa07032bbd1b8)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116b8d2d0c8)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c085141ab53)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774cdf8eeb99)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5e19b48a8)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3c5c95a63)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4ae3418acb)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f7763e373)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3d6b2b8a3)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee5defb2fc)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f43172f60)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814a1f0ab72)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc702081a6439ec)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa23631e28)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506cebde82bde9)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7b2c67915)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2e372532b)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],64,0xca273eceea26619c)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],65,0xd186b8c721c0c207)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],66,0xeada7dd6cde0eb1e)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],67,0xf57d4f7fee6ed178)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],68,0x06f067aa72176fba)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],69,0x0a637dc5a2c898a6)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],70,0x113f9804bef90dae)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],71,0x1b710b35131c471b)
    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],72,0x28db77f523047d84)
    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],73,0x32caab7b40c72493)
    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],74,0x3c9ebe0a15c9bebc)
    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],75,0x431d67c49c100d4c)
    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],76,0x4cc5d4becb3e42b6)
    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],77,0x597f299cfc657e2a)
    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],78,0x5fcb6fab3ad6faec)
    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],79,0x6c44198c4a475817)
    # Feed-forward: add the working variables back into the digest
    # (rebinds sha_info['digest'] to a new list).
    dig = []
    for i, x in enumerate(sha_info['digest']):
        dig.append( (x + ss[i]) & 0xffffffffffffffff )
    sha_info['digest'] = dig
def sha_init():
    """Return a SHA-512 state seeded with the standard initial hash values
    (FIPS 180-4, section 5.3.5)."""
    sha_info = new_shaobject()
    sha_info['digest'] = [
        0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
        0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
        0x510e527fade682d1, 0x9b05688c2b3e6c1f,
        0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
    ]
    sha_info['count_lo'] = 0
    sha_info['count_hi'] = 0
    sha_info['local'] = 0
    sha_info['digestsize'] = 64
    return sha_info
def sha384_init():
    """Return a SHA-384 state: SHA-512 machinery with the SHA-384 initial
    hash values (FIPS 180-4, section 5.3.4) and a 48-byte digest size."""
    sha_info = new_shaobject()
    sha_info['digest'] = [
        0xcbbb9d5dc1059ed8, 0x629a292a367cd507,
        0x9159015a3070dd17, 0x152fecd8f70e5939,
        0x67332667ffc00b31, 0x8eb44a8768581511,
        0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4,
    ]
    sha_info['count_lo'] = 0
    sha_info['count_hi'] = 0
    sha_info['local'] = 0
    sha_info['digestsize'] = 48
    return sha_info
def getbuf(s):
    """Coerce *s* to something sha_update() can index byte-by-byte.

    Python 2 semantics: native str passes through, unicode is converted
    with str() (which is ASCII-only on 2.x), anything else is wrapped in
    a buffer.
    """
    if isinstance(s, str):
        return s
    if isinstance(s, unicode):
        return str(s)
    return buffer(s)
def sha_update(sha_info, buffer):
    """Feed *buffer* (a Python 2 byte string / buffer) into the running
    SHA-512 state *sha_info*, transforming each full 128-byte block."""
    count = len(buffer)
    buffer_idx = 0
    # Maintain the message bit length as a 32-bit low word plus an
    # overflow/high word (count is in bytes, hence the << 3 and >> 29).
    clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff
    if clo < sha_info['count_lo']:
        sha_info['count_hi'] += 1
    sha_info['count_lo'] = clo
    sha_info['count_hi'] += (count >> 29)
    if sha_info['local']:
        # Top up a partially filled block left over from a previous call.
        i = SHA_BLOCKSIZE - sha_info['local']
        if i > count:
            i = count
        # copy buffer
        for x in enumerate(buffer[buffer_idx:buffer_idx+i]):
            sha_info['data'][sha_info['local']+x[0]] = struct.unpack('B', x[1])[0]
        count -= i
        buffer_idx += i
        sha_info['local'] += i
        if sha_info['local'] == SHA_BLOCKSIZE:
            sha_transform(sha_info)
            sha_info['local'] = 0
        else:
            # Still not a full block; wait for more input.
            return
    # Process as many whole blocks as remain in the input.
    while count >= SHA_BLOCKSIZE:
        # copy buffer
        sha_info['data'] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]]
        count -= SHA_BLOCKSIZE
        buffer_idx += SHA_BLOCKSIZE
        sha_transform(sha_info)
    # Stash the trailing partial block for the next call / finalization.
    # copy buffer
    pos = sha_info['local']
    sha_info['data'][pos:pos+count] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + count]]
    sha_info['local'] = count
def sha_final(sha_info):
    """Apply SHA-512 padding to *sha_info* and return the raw 64-byte
    digest as a native string.  Mutates the state it is given - callers
    (sha512.digest) pass in a copy."""
    lo_bit_count = sha_info['count_lo']
    hi_bit_count = sha_info['count_hi']
    # Number of message bytes already sitting in the current block.
    count = (lo_bit_count >> 3) & 0x7f
    # Append the mandatory 0x80 padding byte.
    sha_info['data'][count] = 0x80;
    count += 1
    if count > SHA_BLOCKSIZE - 16:
        # No room left for the 128-bit length field: zero-pad and process
        # this block, then put the length into a fresh all-zero block.
        # zero the bytes in data after the count
        sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
        sha_transform(sha_info)
        # zero bytes in data
        sha_info['data'] = [0] * SHA_BLOCKSIZE
    else:
        sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
    # Write the big-endian bit count into the last 16 bytes of the block
    # (bytes 112-119 carry only zeros/overflow beyond count_hi's range).
    sha_info['data'][112] = 0;
    sha_info['data'][113] = 0;
    sha_info['data'][114] = 0;
    sha_info['data'][115] = 0;
    sha_info['data'][116] = 0;
    sha_info['data'][117] = 0;
    sha_info['data'][118] = 0;
    sha_info['data'][119] = 0;
    sha_info['data'][120] = (hi_bit_count >> 24) & 0xff
    sha_info['data'][121] = (hi_bit_count >> 16) & 0xff
    sha_info['data'][122] = (hi_bit_count >> 8) & 0xff
    sha_info['data'][123] = (hi_bit_count >> 0) & 0xff
    sha_info['data'][124] = (lo_bit_count >> 24) & 0xff
    sha_info['data'][125] = (lo_bit_count >> 16) & 0xff
    sha_info['data'][126] = (lo_bit_count >> 8) & 0xff
    sha_info['data'][127] = (lo_bit_count >> 0) & 0xff
    sha_transform(sha_info)
    # Serialize the eight 64-bit digest words big-endian into a string.
    dig = []
    for i in sha_info['digest']:
        dig.extend([ ((i>>56) & 0xff), ((i>>48) & 0xff), ((i>>40) & 0xff), ((i>>32) & 0xff), ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
    return ''.join([chr(i) for i in dig])
class sha512(object):
    """Pure-Python SHA-512 hash object mirroring the hashlib interface
    (update / digest / hexdigest / copy)."""
    digest_size = digestsize = SHA_DIGESTSIZE
    block_size = SHA_BLOCKSIZE

    def __init__(self, s=None):
        """Create a new hash object, optionally hashing initial data *s*."""
        self._sha = sha_init()
        if s:
            sha_update(self._sha, getbuf(s))

    def update(self, s):
        """Hash more data into the running state."""
        sha_update(self._sha, getbuf(s))

    def _snapshot(self):
        # Bug fix: dict.copy() is shallow, so the mutable 'data' (and
        # 'digest') lists were shared between the original state and the
        # "copy".  sha_update and sha_final both write into 'data' in
        # place, which let updates on one object corrupt another's
        # pending block (visible after copy()).  Copy the lists too.
        state = dict(self._sha)
        state['data'] = list(self._sha['data'])
        state['digest'] = list(self._sha['digest'])
        return state

    def digest(self):
        """Return the raw digest of the data hashed so far; the object
        remains usable for further update() calls."""
        return sha_final(self._snapshot())[:self._sha['digestsize']]

    def hexdigest(self):
        """Return the digest as a lowercase hex string."""
        return ''.join(['%.2x' % ord(i) for i in self.digest()])

    def copy(self):
        """Return an independent clone of the current hash state."""
        new = sha512.__new__(sha512)
        new._sha = self._snapshot()
        return new
class sha384(sha512):
    """SHA-384: SHA-512 machinery with different initial values and a
    truncated 48-byte digest."""
    digest_size = digestsize = 48

    def __init__(self, s=None):
        """Create a new hash object, optionally hashing initial data *s*."""
        self._sha = sha384_init()
        if s:
            sha_update(self._sha, getbuf(s))

    def copy(self):
        """Return an independent clone of the current hash state."""
        new = sha384.__new__(sha384)
        # Bug fix: dict.copy() is shallow, so the clone shared the mutable
        # 'data' list with the original and sha_update's in-place writes
        # on one object corrupted the other's pending block.
        new._sha = dict(self._sha)
        new._sha['data'] = list(self._sha['data'])
        new._sha['digest'] = list(self._sha['digest'])
        return new
def test():
    """Self-test: compare this implementation's digests against the C
    accelerator module `_sha512` for empty, short, multi-block and
    incrementally-updated inputs."""
    import _sha512
    a_str = "just a test string"
    assert _sha512.sha512().hexdigest() == sha512().hexdigest()
    assert _sha512.sha512(a_str).hexdigest() == sha512(a_str).hexdigest()
    assert _sha512.sha512(a_str*7).hexdigest() == sha512(a_str*7).hexdigest()
    # Updating an existing object must equal hashing the concatenation.
    s = sha512(a_str)
    s.update(a_str)
    assert _sha512.sha512(a_str+a_str).hexdigest() == s.hexdigest()
if __name__ == "__main__":
    test()
| apache-2.0 |
t794104/ansible | lib/ansible/modules/network/netvisor/pn_access_list_ip.py | 38 | 4598 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_access_list_ip
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to add/remove access-list-ip
description:
- This modules can be used to add and remove IPs associated with access list.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use 'present' to add access-list-ip and
'absent' to remove access-list-ip.
required: True
choices: ["present", "absent"]
pn_ip:
description:
- IP associated with the access list.
required: False
default: '::'
type: str
pn_name:
description:
- Access List Name.
required: False
type: str
"""
EXAMPLES = """
- name: access list ip functionality
pn_access_list_ip:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_ip: "172.16.3.1"
state: "present"
- name: access list ip functionality
pn_access_list_ip:
pn_cliswitch: "sw01"
pn_name: "foo"
pn_ip: "172.16.3.1"
state: "absent"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the access-list-ip command.
returned: always
type: list
stderr:
description: set of error responses from the access-list-ip command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
    """
    This method checks for idempotency using the access-list-ip-show command.
    If ip exists, return True else False.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    """
    list_name = module.params['pn_name']
    target_ip = module.params['pn_ip']
    # First make sure the access list itself exists; fail hard otherwise.
    show_cli = cli + ' access-list-show name %s no-show-headers ' % list_name
    if list_name not in run_commands(module, show_cli)[1]:
        module.fail_json(
            failed=True,
            msg='access-list with name %s does not exist' % list_name
        )
    # Then check whether the IP is already attached to that list.
    ip_cli = cli + ' access-list-ip-show name %s format ip no-show-headers' % list_name
    existing_ips = run_commands(module, ip_cli)[1].split()
    return target_ip in existing_ips
def main():
    """ This section is for arguments parsing """
    state_map = dict(
        present='access-list-ip-add',
        absent='access-list-ip-remove',
    )
    argument_spec = dict(
        pn_cliswitch=dict(required=False, type='str'),
        state=dict(required=True, type='str',
                   choices=state_map.keys()),
        pn_ip=dict(required=False, type='str', default='::'),
        pn_name=dict(required=False, type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=(
            ["state", "present", ["pn_name"]],
            ["state", "absent", ["pn_name", "pn_ip"]],
        ),
    )

    # Accessing the arguments
    cliswitch = module.params['pn_cliswitch']
    ip = module.params['pn_ip']
    name = module.params['pn_name']
    command = state_map[module.params['state']]

    # Building the CLI command string
    cli = pn_cli(module, cliswitch)
    ip_exists = check_cli(module, cli)
    cli += ' %s name %s ' % (command, name)

    # Idempotency: skip when the requested change is already in effect.
    if command == 'access-list-ip-remove' and not ip_exists:
        module.exit_json(
            skipped=True,
            msg='access-list with ip %s does not exist' % ip
        )
    if command == 'access-list-ip-add' and ip_exists:
        module.exit_json(
            skipped=True,
            msg='access list with ip %s already exists' % ip
        )
    if ip:
        cli += ' ip ' + ip

    run_cli(module, cli, state_map)


if __name__ == '__main__':
    main()
| gpl-3.0 |
gtrdotmcs/gunicorn | docs/sitemap_gen.py | 24 | 70786 | #!/usr/bin/env python
#
# Copyright (c) 2004, 2005 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Google nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The sitemap_gen.py script is written in Python 2.2 and released to
# the open source community for continuous improvements under the BSD
# 2.0 new license, which can be found at:
#
# http://www.opensource.org/licenses/bsd-license.php
#
# Command-line usage text (runtime string; printed by the --help path
# elsewhere in this file, kept verbatim).
__usage__ = \
"""A simple script to automatically produce sitemaps for a webserver,
in the Google Sitemap Protocol (GSP).
Usage: python sitemap_gen.py --config=config.xml [--help] [--testing]
            --config=config.xml, specifies config file location
            --help, displays usage message
            --testing, specified when user is experimenting
"""
# Please be careful that all syntax used in this file can be parsed on
# Python 1.5 -- this version check is not evaluated until after the
# entire file has been parsed.
import sys
if sys.hexversion < 0x02020000:
  # Python 2 print statements are intentional: they must still parse on 1.5.
  print 'This script requires Python 2.2 or later.'
  print 'Currently run with version: %s' % sys.version
  sys.exit(1)
import fnmatch
import glob
import gzip
import hashlib
import os
import re
import stat
import time
import types
import urllib
import urlparse
import xml.sax
# True and False were introduced in Python2.2.2
# Probe for the builtins; on older interpreters fall back to plain ints.
try:
  testTrue=True
  del testTrue
except NameError:
  True=1
  False=0
# Text encodings
ENC_ASCII = 'ASCII'
ENC_UTF8 = 'UTF-8'
ENC_IDNA = 'IDNA'
# Known aliases for 7-bit ASCII.  A missing comma previously fused
# 'ISO646-US' and 'ISO_646.IRV:1991' into one bogus entry via implicit
# string-literal concatenation; both names are now distinct list items.
ENC_ASCII_LIST = ['ASCII', 'US-ASCII', 'US', 'IBM367', 'CP367', 'ISO646-US',
                  'ISO_646.IRV:1991', 'ISO-IR-6', 'ANSI_X3.4-1968',
                  'ANSI_X3.4-1986', 'CPASCII' ]
ENC_DEFAULT_LIST = ['ISO-8859-1', 'ISO-8859-2', 'ISO-8859-5']
# Maximum number of urls in each sitemap, before next Sitemap is created
MAXURLS_PER_SITEMAP = 50000
# Suffix on a Sitemap index file
SITEINDEX_SUFFIX = '_index.xml'
# Regular expressions tried for extracting URLs from access logs.
# Matches Common Logfile Format lines whose HTTP status is 200; groups are
# (method, request-path).
ACCESSLOG_CLF_PATTERN = re.compile(
  r'.+\s+"([^\s]+)\s+([^\s]+)\s+HTTP/\d+\.\d+"\s+200\s+.*'
  )
# Match patterns for lastmod attributes
# NOTE: map() returns a list on Python 2 (this file targets Python 2);
# on Python 3 this would be a one-shot iterator and break reuse.
LASTMOD_PATTERNS = map(re.compile, [
  r'^\d\d\d\d$',
  r'^\d\d\d\d-\d\d$',
  r'^\d\d\d\d-\d\d-\d\d$',
  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\dZ$',
  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d[+-]\d\d:\d\d$',
  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$',
  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?[+-]\d\d:\d\d$',
  ])
# Match patterns for changefreq attributes
CHANGEFREQ_PATTERNS = [
  'always', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'never'
  ]
# XML formats
# Header/footer/entry templates for the two GSP 0.84 document kinds:
# the sitemap index and the urlset (runtime strings, kept verbatim).
SITEINDEX_HEADER = \
  '<?xml version="1.0" encoding="UTF-8"?>\n' \
  '<sitemapindex\n' \
  ' xmlns="http://www.google.com/schemas/sitemap/0.84"\n' \
  ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
  ' xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84\n' \
  ' http://www.google.com/schemas/sitemap/0.84/' \
  'siteindex.xsd">\n'
SITEINDEX_FOOTER = '</sitemapindex>\n'
SITEINDEX_ENTRY = \
  ' <sitemap>\n' \
  ' <loc>%(loc)s</loc>\n' \
  ' <lastmod>%(lastmod)s</lastmod>\n' \
  ' </sitemap>\n'
SITEMAP_HEADER = \
  '<?xml version="1.0" encoding="UTF-8"?>\n' \
  '<urlset\n' \
  ' xmlns="http://www.google.com/schemas/sitemap/0.84"\n' \
  ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
  ' xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84\n' \
  ' http://www.google.com/schemas/sitemap/0.84/' \
  'sitemap.xsd">\n'
SITEMAP_FOOTER = '</urlset>\n'
SITEURL_XML_PREFIX = ' <url>\n'
SITEURL_XML_SUFFIX = ' </url>\n'
# Search engines to notify with the updated sitemaps
#
# This list is very non-obvious in what's going on. Here's the gist:
# Each item in the list is a 6-tuple of items. The first 5 are "almost"
# the same as the input arguments to urlparse.urlunsplit():
# 0 - schema
# 1 - netloc
# 2 - path
# 3 - query <-- EXCEPTION: specify a query map rather than a string
# 4 - fragment
# Additionally, add item 5:
# 5 - query attribute that should be set to the new Sitemap URL
# Clear as mud, I know.
# In short: each entry is pinged by setting query attribute (item 5) to the
# new sitemap URL and unsplitting the rest into a request URL.
NOTIFICATION_SITES = [
  ('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap')
  ]
class Error(Exception):
  """Root of this module's exception hierarchy.

  Most failures here are reported through the logger rather than raised,
  but exceptions of this family are very handy during SAX XML parsing.
  """
  pass
#end class Error
class SchemaError(Error):
  """Failure to process an XML file according to the schema we know."""
  pass
#end class SchemaError
class Encoder:
  """
  Manages wide-character/narrow-character conversions for just about all
  text that flows into or out of the script.
  You should always use this class for string coercion, as opposed to
  letting Python handle coercions automatically. Reason: Python
  usually assumes ASCII (7-bit) as a default narrow character encoding,
  which is not the kind of data we generally deal with.
  General high-level methodologies used in sitemap_gen:
  [PATHS]
  File system paths may be wide or narrow, depending on platform.
  This works fine, just be aware of it and be very careful to not
  mix them. That is, if you have to pass several file path arguments
  into a library call, make sure they are all narrow or all wide.
  This class has MaybeNarrowPath() which should be called on every
  file system path you deal with.
  [URLS]
  URL locations are stored in Narrow form, already escaped. This has the
  benefit of keeping escaping and encoding as close as possible to the format
  we read them in. The downside is we may end up with URLs that have
  intermingled encodings -- the root path may be encoded in one way
  while the filename is encoded in another. This is obviously wrong, but
  it should hopefully be an issue hit by very few users. The workaround
  from the user level (assuming they notice) is to specify a default_encoding
  parameter in their config file.
  [OTHER]
  Other text, such as attributes of the URL class, configuration options,
  etc, are generally stored in Unicode for simplicity.
  """
  def __init__(self):
    self._user = None # User-specified default encoding
    self._learned = [] # Learned default encodings
    self._widefiles = False # File system can be wide
    # Can the file system be Unicode?
    try:
      self._widefiles = os.path.supports_unicode_filenames
    except AttributeError:
      try:
        # NOTE(review): comparing getwindowsversion() (a tuple) against
        # VER_PLATFORM_WIN32_NT (a scalar) looks suspect -- presumably
        # element [3] was intended; confirm before changing behavior.
        self._widefiles = sys.getwindowsversion() == os.VER_PLATFORM_WIN32_NT
      except AttributeError:
        pass
    # Try to guess a working default
    try:
      encoding = sys.getfilesystemencoding()
      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
        self._learned = [ encoding ]
    except AttributeError:
      pass
    if not self._learned:
      encoding = sys.getdefaultencoding()
      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
        self._learned = [ encoding ]
    # If we had no guesses, start with some European defaults
    if not self._learned:
      self._learned = ENC_DEFAULT_LIST
  #end def __init__
  def SetUserEncoding(self, encoding):
    # Encoding name from the user's config file; tried before learned ones.
    self._user = encoding
  #end def SetUserEncoding
  def NarrowText(self, text, encoding):
    """ Narrow a piece of arbitrary text """
    if type(text) != types.UnicodeType:
      return text
    # Fallback chain: caller's encoding, then user default, then learned
    # defaults, then UTF-8, then lossy ASCII as a last resort.
    # Try the passed in preference
    if encoding:
      try:
        result = text.encode(encoding)
        if not encoding in self._learned:
          self._learned.append(encoding)
        return result
      except UnicodeError:
        pass
      except LookupError:
        output.Warn('Unknown encoding: %s' % encoding)
    # Try the user preference
    if self._user:
      try:
        return text.encode(self._user)
      except UnicodeError:
        pass
      except LookupError:
        temp = self._user
        self._user = None
        output.Warn('Unknown default_encoding: %s' % temp)
    # Look through learned defaults, knock any failing ones out of the list
    while self._learned:
      try:
        return text.encode(self._learned[0])
      except:
        del self._learned[0]
    # When all other defaults are exhausted, use UTF-8
    try:
      return text.encode(ENC_UTF8)
    except UnicodeError:
      pass
    # Something is seriously wrong if we get to here
    return text.encode(ENC_ASCII, 'ignore')
  #end def NarrowText
  def MaybeNarrowPath(self, text):
    """ Paths may be allowed to stay wide """
    if self._widefiles:
      return text
    return self.NarrowText(text, None)
  #end def MaybeNarrowPath
  def WidenText(self, text, encoding):
    """ Widen a piece of arbitrary text """
    if type(text) != types.StringType:
      return text
    # Same fallback chain as NarrowText, in the decode direction.
    # Try the passed in preference
    if encoding:
      try:
        result = unicode(text, encoding)
        if not encoding in self._learned:
          self._learned.append(encoding)
        return result
      except UnicodeError:
        pass
      except LookupError:
        output.Warn('Unknown encoding: %s' % encoding)
    # Try the user preference
    if self._user:
      try:
        return unicode(text, self._user)
      except UnicodeError:
        pass
      except LookupError:
        temp = self._user
        self._user = None
        output.Warn('Unknown default_encoding: %s' % temp)
    # Look through learned defaults, knock any failing ones out of the list
    while self._learned:
      try:
        return unicode(text, self._learned[0])
      except:
        del self._learned[0]
    # When all other defaults are exhausted, use UTF-8
    try:
      return unicode(text, ENC_UTF8)
    except UnicodeError:
      pass
    # Getting here means it wasn't UTF-8 and we had no working default.
    # We really don't have anything "right" we can do anymore.
    output.Warn('Unrecognized encoding in text: %s' % text)
    if not self._user:
      output.Warn('You may need to set a default_encoding in your '
                  'configuration file.')
    return text.decode(ENC_ASCII, 'ignore')
  #end def WidenText
#end class Encoder
# Module-wide singleton used by all text coercion in this script.
encoder = Encoder()
class Output:
  """
  Exposes logging functionality, and tracks how many errors
  we have thus output.
  Logging levels should be used as thus:
  Fatal -- extremely sparingly
  Error -- config errors, entire blocks of user 'intention' lost
  Warn -- individual URLs lost
  Log(,0) -- Un-suppressable text that's not an error
  Log(,1) -- touched files, major actions
  Log(,2) -- parsing notes, filtered or duplicated URLs
  Log(,3) -- each accepted URL
  """
  def __init__(self):
    self.num_errors = 0 # Count of errors
    self.num_warns = 0 # Count of warnings
    self._errors_shown = {} # Shown errors
    self._warns_shown = {} # Shown warnings
    self._verbose = 0 # Level of verbosity
  #end def __init__
  def Log(self, text, level):
    """ Output a blurb of diagnostic text, if the verbose level allows it """
    if text:
      text = encoder.NarrowText(text, None)
      if self._verbose >= level:
        print text
  #end def Log
  def Warn(self, text):
    """ Output and count a warning. Suppress duplicate warnings. """
    if text:
      text = encoder.NarrowText(text, None)
      # md5 of the message is used as the dedup key ('hash' shadows the
      # builtin; kept for fidelity with the original Python 2 code).
      hash = hashlib.md5(text).hexdigest()
      if not self._warns_shown.has_key(hash):
        self._warns_shown[hash] = 1
        print '[WARNING] ' + text
      else:
        self.Log('(suppressed) [WARNING] ' + text, 3)
      self.num_warns = self.num_warns + 1
  #end def Warn
  def Error(self, text):
    """ Output and count an error. Suppress duplicate errors. """
    if text:
      text = encoder.NarrowText(text, None)
      hash = hashlib.md5(text).hexdigest()
      if not self._errors_shown.has_key(hash):
        self._errors_shown[hash] = 1
        print '[ERROR] ' + text
      else:
        self.Log('(suppressed) [ERROR] ' + text, 3)
      self.num_errors = self.num_errors + 1
  #end def Error
  def Fatal(self, text):
    """ Output an error and terminate the program. """
    if text:
      text = encoder.NarrowText(text, None)
      print '[FATAL] ' + text
    else:
      print 'Fatal error.'
    sys.exit(1)
  #end def Fatal
  def SetVerbose(self, level):
    """ Sets the verbose level. """
    try:
      if type(level) != types.IntType:
        level = int(level)
      if (level >= 0) and (level <= 3):
        self._verbose = level
        return
    except ValueError:
      pass
    self.Error('Verbose level (%s) must be between 0 and 3 inclusive.' % level)
  #end def SetVerbose
#end class Output
# Module-wide logger singleton used throughout this script.
output = Output()
class URL(object):
  """ URL is a smart structure grouping together the properties we
  care about for a single web reference. """
  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority'
  def __init__(self):
    self.loc = None # URL -- in Narrow characters
    self.lastmod = None # ISO8601 timestamp of last modify
    self.changefreq = None # Text term for update frequency
    self.priority = None # Float between 0 and 1 (inc)
  #end def __init__
  def __cmp__(self, other):
    # Python 2 three-way comparison, ordering URLs by their location.
    # (__cmp__ is ignored on Python 3; this file targets Python 2.)
    if self.loc < other.loc:
      return -1
    if self.loc > other.loc:
      return 1
    return 0
  #end def __cmp__
  def TrySetAttribute(self, attribute, value):
    """ Attempt to set the attribute to the value, with a pretty try
    block around it. """
    if attribute == 'loc':
      self.loc = self.Canonicalize(value)
    else:
      try:
        setattr(self, attribute, value)
      except AttributeError:
        output.Warn('Unknown URL attribute: %s' % attribute)
  #end def TrySetAttribute
  def IsAbsolute(loc):
    """ Decide if the URL is absolute or not """
    if not loc:
      return False
    narrow = encoder.NarrowText(loc, None)
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    if (not scheme) or (not netloc):
      return False
    return True
  #end def IsAbsolute
  IsAbsolute = staticmethod(IsAbsolute)
  def Canonicalize(loc):
    """ Do encoding and canonicalization on a URL string """
    if not loc:
      return loc
    # Let the encoder try to narrow it
    narrow = encoder.NarrowText(loc, None)
    # Escape components individually
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    unr = '-._~'
    sub = '!$&\'()*+,;='
    netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
    path = urllib.quote(path, unr + sub + '%:@/')
    query = urllib.quote(query, unr + sub + '%:@/?')
    frag = urllib.quote(frag, unr + sub + '%:@/?')
    # Try built-in IDNA encoding on the netloc
    try:
      (ignore, widenetloc, ignore, ignore, ignore) = urlparse.urlsplit(loc)
      for c in widenetloc:
        if c >= unichr(128):
          netloc = widenetloc.encode(ENC_IDNA)
          netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
          break
    except UnicodeError:
      # urlsplit must have failed, based on implementation differences in the
      # library. There is not much we can do here, except ignore it.
      pass
    except LookupError:
      output.Warn('An International Domain Name (IDN) is being used, but this '
                  'version of Python does not have support for IDNA encoding. '
                  ' (IDNA support was introduced in Python 2.3) The encoding '
                  'we have used instead is wrong and will probably not yield '
                  'valid URLs.')
    bad_netloc = False
    if '%' in netloc:
      bad_netloc = True
    # Put it all back together
    narrow = urlparse.urlunsplit((scheme, netloc, path, query, frag))
    # I let '%' through. Fix any that aren't pre-existing escapes.
    # ('list' shadows the builtin; kept for fidelity with the original.)
    HEXDIG = '0123456789abcdefABCDEF'
    list = narrow.split('%')
    narrow = list[0]
    del list[0]
    for item in list:
      if (len(item) >= 2) and (item[0] in HEXDIG) and (item[1] in HEXDIG):
        narrow = narrow + '%' + item
      else:
        narrow = narrow + '%25' + item
    # Issue a warning if this is a bad URL
    if bad_netloc:
      output.Warn('Invalid characters in the host or domain portion of a URL: '
                  + narrow)
    return narrow
  #end def Canonicalize
  Canonicalize = staticmethod(Canonicalize)
  def Validate(self, base_url, allow_fragment):
    """ Verify the data in this URL is well-formed, and override if not. """
    assert type(base_url) == types.StringType
    # Test (and normalize) the ref
    if not self.loc:
      output.Warn('Empty URL')
      return False
    if allow_fragment:
      self.loc = urlparse.urljoin(base_url, self.loc)
    if not self.loc.startswith(base_url):
      output.Warn('Discarded URL for not starting with the base_url: %s' %
                  self.loc)
      self.loc = None
      return False
    # Test the lastmod
    if self.lastmod:
      match = False
      self.lastmod = self.lastmod.upper()
      for pattern in LASTMOD_PATTERNS:
        match = pattern.match(self.lastmod)
        if match:
          break
      if not match:
        output.Warn('Lastmod "%s" does not appear to be in ISO8601 format on '
                    'URL: %s' % (self.lastmod, self.loc))
        self.lastmod = None
    # Test the changefreq
    if self.changefreq:
      match = False
      self.changefreq = self.changefreq.lower()
      for pattern in CHANGEFREQ_PATTERNS:
        if self.changefreq == pattern:
          match = True
          break
      if not match:
        output.Warn('Changefreq "%s" is not a valid change frequency on URL '
                    ': %s' % (self.changefreq, self.loc))
        self.changefreq = None
    # Test the priority
    if self.priority:
      priority = -1.0
      try:
        priority = float(self.priority)
      except ValueError:
        pass
      if (priority < 0.0) or (priority > 1.0):
        output.Warn('Priority "%s" is not a number between 0 and 1 inclusive '
                    'on URL: %s' % (self.priority, self.loc))
        self.priority = None
    return True
  #end def Validate
  def MakeHash(self):
    """ Provides a uniform way of hashing URLs """
    if not self.loc:
      return None
    # Trailing slash is stripped so '/foo' and '/foo/' hash identically.
    if self.loc.endswith('/'):
      return hashlib.md5(self.loc[:-1]).hexdigest()
    return hashlib.md5(self.loc).hexdigest()
  #end def MakeHash
  def Log(self, prefix='URL', level=3):
    """ Dump the contents, empty or not, to the log. """
    out = prefix + ':'
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if not value:
        value = ''
      out = out + (' %s=[%s]' % (attribute, value))
    output.Log('%s' % encoder.NarrowText(out, None), level)
  #end def Log
  def WriteXML(self, file):
    """ Dump non-empty contents to the output file, in XML format. """
    if not self.loc:
      return
    out = SITEURL_XML_PREFIX
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if value:
        if type(value) == types.UnicodeType:
          value = encoder.NarrowText(value, None)
        elif type(value) != types.StringType:
          value = str(value)
        value = xml.sax.saxutils.escape(value)
        out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))
    out = out + SITEURL_XML_SUFFIX
    file.write(out)
  #end def WriteXML
#end class URL
class Filter:
  """
  A filter on the stream of URLs we find. A filter is, in essence,
  a wildcard applied to the stream. You can think of this as an
  operator that returns a tri-state when given a URL:
  True -- this URL is to be included in the sitemap
  None -- this URL is undecided
  False -- this URL is to be dropped from the sitemap
  """
  def __init__(self, attributes):
    self._wildcard = None # Pattern for wildcard match
    self._regexp = None # Pattern for regexp match
    self._pass = False # "Drop" filter vs. "Pass" filter
    if not ValidateAttributes('FILTER', attributes,
                              ('pattern', 'type', 'action')):
      return
    # Check error count on the way in
    num_errors = output.num_errors
    # Fetch the attributes
    # ('type' shadows the builtin; kept for fidelity with the original.)
    pattern = attributes.get('pattern')
    type = attributes.get('type', 'wildcard')
    action = attributes.get('action', 'drop')
    if type:
      type = type.lower()
    if action:
      action = action.lower()
    # Verify the attributes
    if not pattern:
      output.Error('On a filter you must specify a "pattern" to match')
    elif (not type) or ((type != 'wildcard') and (type != 'regexp')):
      output.Error('On a filter you must specify either \'type="wildcard"\' '
                   'or \'type="regexp"\'')
    elif (action != 'pass') and (action != 'drop'):
      output.Error('If you specify a filter action, it must be either '
                   '\'action="pass"\' or \'action="drop"\'')
    # Set the rule
    if action == 'drop':
      self._pass = False
    elif action == 'pass':
      self._pass = True
    if type == 'wildcard':
      self._wildcard = pattern
    elif type == 'regexp':
      try:
        self._regexp = re.compile(pattern)
      except re.error:
        output.Error('Bad regular expression: %s' % pattern)
    # Log the final results iff we didn't add any errors
    if num_errors == output.num_errors:
      output.Log('Filter: %s any URL that matches %s "%s"' %
                 (action, type, pattern), 2)
  #end def __init__
  def Apply(self, url):
    """ Process the URL, as above. """
    if (not url) or (not url.loc):
      return None
    if self._wildcard:
      if fnmatch.fnmatchcase(url.loc, self._wildcard):
        return self._pass
      return None
    if self._regexp:
      if self._regexp.search(url.loc):
        return self._pass
      return None
    # Construction guarantees exactly one of wildcard/regexp is set.
    assert False # unreachable
  #end def Apply
#end class Filter
class InputURL:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a single URL, manually specified in the config file.
  """
  def __init__(self, attributes):
    self._url = None # The lonely URL
    if not ValidateAttributes('URL', attributes,
                              ('href', 'lastmod', 'changefreq', 'priority')):
      return
    url = URL()
    for attr in attributes.keys():
      if attr == 'href':
        url.TrySetAttribute('loc', attributes[attr])
      else:
        url.TrySetAttribute(attr, attributes[attr])
    if not url.loc:
      output.Error('Url entries must have an href attribute.')
      return
    self._url = url
    output.Log('Input: From URL "%s"' % self._url.loc, 2)
  #end def __init__
  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Second argument True marks the URL as allowing a fragment/join pass.
    if self._url:
      consumer(self._url, True)
  #end def ProduceURLs
#end class InputURL
class InputURLList:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a text file with a list of URLs
  """
  def __init__(self, attributes):
    self._path = None # The file path
    self._encoding = None # Encoding of that file
    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
      return
    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From URLLIST "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Urllist entries must have a "path" attribute.')
  #end def __init__
  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
    if not file:
      return
    # Iterate lines
    linenum = 0
    for line in file.readlines():
      linenum = linenum + 1
      # Strip comments and empty lines
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      if (not line) or line[0] == '#':
        continue
      # Split the line on space
      # Format: URL followed by optional space-separated key=value attributes.
      url = URL()
      cols = line.split(' ')
      for i in range(0,len(cols)):
        cols[i] = cols[i].strip()
      url.TrySetAttribute('loc', cols[0])
      # Extract attributes from the other columns
      for i in range(1,len(cols)):
        if cols[i]:
          try:
            (attr_name, attr_val) = cols[i].split('=', 1)
            url.TrySetAttribute(attr_name, attr_val)
          except ValueError:
            output.Warn('Line %d: Unable to parse attribute: %s' %
                        (linenum, cols[i]))
      # Pass it on
      consumer(url, False)
    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputURLList
class InputDirectory:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a directory that acts as base for walking the filesystem.
  """
  def __init__(self, attributes, base_url):
    self._path = None # The directory
    self._url = None # The URL equivelant
    self._default_file = None
    if not ValidateAttributes('DIRECTORY', attributes, ('path', 'url',
                              'default_file')):
      return
    # Prep the path -- it MUST end in a sep
    path = attributes.get('path')
    if not path:
      output.Error('Directory entries must have both "path" and "url" '
                   'attributes')
      return
    path = encoder.MaybeNarrowPath(path)
    if not path.endswith(os.sep):
      path = path + os.sep
    if not os.path.isdir(path):
      output.Error('Can not locate directory: %s' % path)
      return
    # Prep the URL -- it MUST end in a sep
    url = attributes.get('url')
    if not url:
      output.Error('Directory entries must have both "path" and "url" '
                   'attributes')
      return
    url = URL.Canonicalize(url)
    if not url.endswith('/'):
      url = url + '/'
    if not url.startswith(base_url):
      url = urlparse.urljoin(base_url, url)
      if not url.startswith(base_url):
        output.Error('The directory URL "%s" is not relative to the '
                     'base_url: %s' % (url, base_url))
        return
    # Prep the default file -- it MUST be just a filename
    file = attributes.get('default_file')
    if file:
      file = encoder.MaybeNarrowPath(file)
      if os.sep in file:
        output.Error('The default_file "%s" can not include path information.'
                     % file)
        file = None
    self._path = path
    self._url = url
    self._default_file = file
    if file:
      output.Log('Input: From DIRECTORY "%s" (%s) with default file "%s"'
                 % (path, url, file), 2)
    else:
      output.Log('Input: From DIRECTORY "%s" (%s) with no default file'
                 % (path, url), 2)
  #end def __init__
  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    if not self._path:
      return
    root_path = self._path
    root_URL = self._url
    root_file = self._default_file
    def PerFile(dirpath, name):
      """
      Called once per file.
      Note that 'name' will occasionally be None -- for a directory itself
      """
      # Pull a timestamp
      url = URL()
      isdir = False
      try:
        if name:
          path = os.path.join(dirpath, name)
        else:
          path = dirpath
        isdir = os.path.isdir(path)
        time = None
        if isdir and root_file:
          file = os.path.join(path, root_file)
          try:
            time = os.stat(file)[stat.ST_MTIME];
          except OSError:
            pass
        if not time:
          time = os.stat(path)[stat.ST_MTIME];
        # TimestampISO8601 is defined elsewhere in this file.
        url.lastmod = TimestampISO8601(time)
      except OSError:
        pass
      except ValueError:
        pass
      # Build a URL
      middle = dirpath[len(root_path):]
      if os.sep != '/':
        middle = middle.replace(os.sep, '/')
      if middle:
        middle = middle + '/'
      if name:
        middle = middle + name
        if isdir:
          middle = middle + '/'
      url.TrySetAttribute('loc', root_URL + encoder.WidenText(middle, None))
      # Suppress default files. (All the way down here so we can log it.)
      if name and (root_file == name):
        url.Log(prefix='IGNORED (default file)', level=2)
        return
      consumer(url, False)
    #end def PerFile
    def PerDirectory(ignore, dirpath, namelist):
      """
      Called once per directory with a list of all the contained files/dirs.
      """
      ignore = ignore # Avoid warnings of an unused parameter
      if not dirpath.startswith(root_path):
        output.Warn('Unable to decide what the root path is for directory: '
                    '%s' % dirpath)
        return
      for name in namelist:
        PerFile(dirpath, name)
    #end def PerDirectory
    output.Log('Walking DIRECTORY "%s"' % self._path, 1)
    PerFile(self._path, None)
    # os.path.walk is a Python 2 construct (removed in Python 3).
    os.path.walk(self._path, PerDirectory, None)
  #end def ProduceURLs
#end class InputDirectory
class InputAccessLog:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles access logs. It's non-trivial in that we want to
  auto-detect log files in the Common Logfile Format (as used by Apache,
  for instance) and the Extended Log File Format (as used by IIS, for
  instance).
  """
  def __init__(self, attributes):
    self._path = None # The file path
    self._encoding = None # Encoding of that file
    self._is_elf = False # Extended Log File Format?
    self._is_clf = False # Common Logfile Format?
    self._elf_status = -1 # ELF field: '200'
    self._elf_method = -1 # ELF field: 'HEAD'
    self._elf_uri = -1 # ELF field: '/foo?bar=1'
    self._elf_urifrag1 = -1 # ELF field: '/foo'
    self._elf_urifrag2 = -1 # ELF field: 'bar=1'
    if not ValidateAttributes('ACCESSLOG', attributes, ('path', 'encoding')):
      return
    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From ACCESSLOG "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Accesslog entries must have a "path" attribute.')
  #end def __init__
  def RecognizeELFLine(self, line):
    """ Recognize the Fields directive that heads an ELF file """
    if not line.startswith('#Fields:'):
      return False
    # Record the column index of each field we care about.
    fields = line.split(' ')
    del fields[0]
    for i in range(0, len(fields)):
      field = fields[i].strip()
      if field == 'sc-status':
        self._elf_status = i
      elif field == 'cs-method':
        self._elf_method = i
      elif field == 'cs-uri':
        self._elf_uri = i
      elif field == 'cs-uri-stem':
        self._elf_urifrag1 = i
      elif field == 'cs-uri-query':
        self._elf_urifrag2 = i
    output.Log('Recognized an Extended Log File Format file.', 2)
    return True
  #end def RecognizeELFLine
  def GetELFLine(self, line):
    """ Fetch the requested URL from an ELF line """
    fields = line.split(' ')
    count = len(fields)
    # Verify status was Ok
    if self._elf_status >= 0:
      if self._elf_status >= count:
        return None
      if not fields[self._elf_status].strip() == '200':
        return None
    # Verify method was HEAD or GET
    if self._elf_method >= 0:
      if self._elf_method >= count:
        return None
      if not fields[self._elf_method].strip() in ('HEAD', 'GET'):
        return None
    # Pull the full URL if we can
    if self._elf_uri >= 0:
      if self._elf_uri >= count:
        return None
      url = fields[self._elf_uri].strip()
      if url != '-':
        return url
    # Put together a fragmentary URL
    if self._elf_urifrag1 >= 0:
      if self._elf_urifrag1 >= count or self._elf_urifrag2 >= count:
        return None
      urlfrag1 = fields[self._elf_urifrag1].strip()
      urlfrag2 = None
      if self._elf_urifrag2 >= 0:
        urlfrag2 = fields[self._elf_urifrag2]
      if urlfrag1 and (urlfrag1 != '-'):
        if urlfrag2 and (urlfrag2 != '-'):
          urlfrag1 = urlfrag1 + '?' + urlfrag2
        return urlfrag1
    return None
  #end def GetELFLine
  def RecognizeCLFLine(self, line):
    """ Try to tokenize a logfile line according to CLF pattern and see if
    it works. """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    recognize = match and (match.group(1) in ('HEAD', 'GET'))
    if recognize:
      output.Log('Recognized a Common Logfile Format file.', 2)
    return recognize
  #end def RecognizeCLFLine
  def GetCLFLine(self, line):
    """ Fetch the requested URL from a CLF line """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    if match:
      request = match.group(1)
      if request in ('HEAD', 'GET'):
        return match.group(2)
    return None
  #end def GetCLFLine
  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'ACCESSLOG')
    if not file:
      return
    # Iterate lines
    for line in file.readlines():
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      # If we don't know the format yet, try them both
      if (not self._is_clf) and (not self._is_elf):
        self._is_elf = self.RecognizeELFLine(line)
        self._is_clf = self.RecognizeCLFLine(line)
      # Digest the line
      match = None
      if self._is_elf:
        match = self.GetELFLine(line)
      elif self._is_clf:
        match = self.GetCLFLine(line)
      if not match:
        continue
      # Pass it on
      url = URL()
      url.TrySetAttribute('loc', match)
      consumer(url, True)
    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputAccessLog
class InputSitemap(xml.sax.handler.ContentHandler):
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles Sitemap files and Sitemap index files. For the sake
  of simplicity in design (and simplicity in interfacing with the SAX
  package), we do not handle these at the same time, recursively. Instead
  we read an index file completely and make a list of Sitemap files, then
  go back and process each Sitemap.

  Parsing is driven by a stack of "context" objects (one per level of XML
  nesting); each context validates its sub-tags and passes results up to
  its parent when it closes.
  """
  class _ContextBase(object):
    """Base class for context handlers in our SAX processing.  A context
    handler is a class that is responsible for understanding one level of
    depth in the XML schema.  The class knows what sub-tags are allowed,
    and doing any processing specific for the tag we're in.
    This base class is the API filled in by specific context handlers,
    all defined below.
    """
    def __init__(self, subtags):
      """Initialize with a sequence of the sub-tags that would be valid in
      this context."""
      self._allowed_tags = subtags          # Sequence of sub-tags we can have
      self._last_tag = None                 # Most recent seen sub-tag
    #end def __init__
    def AcceptTag(self, tag):
      """Returns True iff opening a sub-tag is valid in this context.
      Also remembers the tag so Return() knows which sub-tag produced a
      result."""
      valid = tag in self._allowed_tags
      if valid:
        self._last_tag = tag
      else:
        self._last_tag = None
      return valid
    #end def AcceptTag
    def AcceptText(self, text):
      """Returns True iff a blurb of text is valid in this context."""
      return False
    #end def AcceptText
    def Open(self):
      """The context is opening. Do initialization."""
      pass
    #end def Open
    def Close(self):
      """The context is closing. Return our result, if any."""
      pass
    #end def Close
    def Return(self, result):
      """We're returning to this context after handling a sub-tag.  This
      method is called with the result data from the sub-tag that just
      closed.  Here in _ContextBase, if we ever see a result it means
      the derived child class forgot to override this method."""
      if result:
        raise NotImplementedError
    #end def Return
  #end class _ContextBase
  class _ContextUrlSet(_ContextBase):
    """Context handler for the document node in a Sitemap."""
    def __init__(self):
      InputSitemap._ContextBase.__init__(self, ('url',))
    #end def __init__
  #end class _ContextUrlSet
  class _ContextUrl(_ContextBase):
    """Context handler for a URL node in a Sitemap."""
    def __init__(self, consumer):
      """Initialize this context handler with the callable consumer that
      wants our URLs."""
      # Any attribute of URL is an allowed sub-tag of a <url> node.
      InputSitemap._ContextBase.__init__(self, URL.__slots__)
      self._url = None                      # The URL object we're building
      self._consumer = consumer             # Who wants to consume it
    #end def __init__
    def Open(self):
      """Initialize the URL."""
      assert not self._url
      self._url = URL()
    #end def Open
    def Close(self):
      """Pass the URL to the consumer and reset it to None."""
      assert self._url
      # allow_fragment=False: URLs listed in a Sitemap must be complete.
      self._consumer(self._url, False)
      self._url = None
    #end def Close
    def Return(self, result):
      """A value context has closed, absorb the data it gave us."""
      assert self._url
      if result:
        self._url.TrySetAttribute(self._last_tag, result)
    #end def Return
  #end class _ContextUrl
  class _ContextSitemapIndex(_ContextBase):
    """Context handler for the document node in an index file."""
    def __init__(self):
      InputSitemap._ContextBase.__init__(self, ('sitemap',))
      self._loclist = []                    # List of accumulated Sitemap URLs
    #end def __init__
    def Open(self):
      """Just a quick verify of state."""
      assert not self._loclist
    #end def Open
    def Close(self):
      """Return our list of accumulated URLs (None when the index listed
      no Sitemaps)."""
      if self._loclist:
        temp = self._loclist
        self._loclist = []
        return temp
    #end def Close
    def Return(self, result):
      """Getting a new loc URL, add it to the collection."""
      if result:
        self._loclist.append(result)
    #end def Return
  #end class _ContextSitemapIndex
  class _ContextSitemap(_ContextBase):
    """Context handler for a Sitemap entry in an index file."""
    def __init__(self):
      InputSitemap._ContextBase.__init__(self, ('loc', 'lastmod'))
      self._loc = None                      # The URL to the Sitemap
    #end def __init__
    def Open(self):
      """Just a quick verify of state."""
      assert not self._loc
    #end def Open
    def Close(self):
      """Return our URL to our parent; warn if the entry had no loc."""
      if self._loc:
        temp = self._loc
        self._loc = None
        return temp
      output.Warn('In the Sitemap index file, a "sitemap" entry had no "loc".')
    #end def Close
    def Return(self, result):
      """A value has closed. If it was a 'loc', absorb it."""
      if result and (self._last_tag == 'loc'):
        self._loc = result
    #end def Return
  #end class _ContextSitemap
  class _ContextValue(_ContextBase):
    """Context handler for a single value. We return just the value. The
    higher level context has to remember what tag led into us."""
    def __init__(self):
      InputSitemap._ContextBase.__init__(self, ())
      self._text = None                     # Accumulated text; SAX may split
                                            # one value over several calls
    #end def __init__
    def AcceptText(self, text):
      """Allow all text, adding it to our buffer."""
      if self._text:
        self._text = self._text + text
      else:
        self._text = text
      return True
    #end def AcceptText
    def Open(self):
      """Initialize our buffer."""
      self._text = None
    #end def Open
    def Close(self):
      """Return what's in our buffer, stripped of surrounding whitespace."""
      text = self._text
      self._text = None
      if text:
        text = text.strip()
      return text
    #end def Close
  #end class _ContextValue
  def __init__(self, attributes):
    """Initialize with a dictionary of attributes from our entry in the
    config file."""
    xml.sax.handler.ContentHandler.__init__(self)
    self._pathlist = None                   # A list of files
    self._current = -1                      # Current context in _contexts
    self._contexts = None                   # The stack of contexts we allow
    self._contexts_idx = None               # ...contexts for index files
    self._contexts_stm = None               # ...contexts for Sitemap files
    if not ValidateAttributes('SITEMAP', attributes, ['path']):
      return
    # Init the first file path
    path = attributes.get('path')
    if path:
      path = encoder.MaybeNarrowPath(path)
      if os.path.isfile(path):
        output.Log('Input: From SITEMAP "%s"' % path, 2)
        self._pathlist = [path]
      else:
        output.Error('Can not locate file "%s"' % path)
    else:
      output.Error('Sitemap entries must have a "path" attribute.')
  #end def __init__
  def ProduceURLs(self, consumer):
    """In general: Produces URLs from our data source, hand them to the
    callable consumer.
    In specific: Iterate over our list of paths and delegate the actual
    processing to helper methods. This is a complexity no other data source
    needs to suffer. We are unique in that we can have files that tell us
    to bring in other files.
    Note the decision to allow an index file or not is made in this method.
    If we call our parser with (self._contexts == None) the parser will
    grab whichever context stack can handle the file. IE: index is allowed.
    If instead we set (self._contexts = ...) before parsing, the parser
    will only use the stack we specify. IE: index not allowed.
    """
    # Set up two stacks of contexts
    self._contexts_idx = [InputSitemap._ContextSitemapIndex(),
                          InputSitemap._ContextSitemap(),
                          InputSitemap._ContextValue()]
    self._contexts_stm = [InputSitemap._ContextUrlSet(),
                          InputSitemap._ContextUrl(consumer),
                          InputSitemap._ContextValue()]
    # Process the first file
    assert self._pathlist
    path = self._pathlist[0]
    self._contexts = None                   # We allow an index file here
    self._ProcessFile(path)
    # Iterate over remaining files.  If the first file was an index,
    # _MungeLocationListIntoFiles has appended to self._pathlist by now.
    self._contexts = self._contexts_stm     # No index files allowed
    for path in self._pathlist[1:]:
      self._ProcessFile(path)
  #end def ProduceURLs
  def _ProcessFile(self, path):
    """Do per-file reading/parsing/consuming for the file path passed in."""
    assert path
    # Open our file
    (frame, file) = OpenFileForRead(path, 'SITEMAP')
    if not file:
      return
    # Rev up the SAX engine.  SchemaError is raised by our own handler
    # methods below when the document doesn't fit the expected schema.
    try:
      self._current = -1
      xml.sax.parse(file, self)
    except SchemaError:
      output.Error('An error in file "%s" made us abort reading the Sitemap.'
                   % path)
    except IOError:
      output.Error('Cannot read from file "%s"' % path)
    except xml.sax._exceptions.SAXParseException, e:
      output.Error('XML error in the file "%s" (line %d, column %d): %s' %
                   (path, e._linenum, e._colnum, e.getMessage()))
    # Clean up
    file.close()
    if frame:
      frame.close()
  #end def _ProcessFile
  def _MungeLocationListIntoFiles(self, urllist):
    """Given a list of URLs, munge them into our self._pathlist property.
    We do this by assuming all the files live in the same directory as
    the first file in the existing pathlist.  That is, we assume a
    Sitemap index points to Sitemaps only in the same directory.  This
    is not true in general, but will be true for any output produced
    by this script.
    """
    assert self._pathlist
    path = self._pathlist[0]
    path = os.path.normpath(path)
    dir = os.path.dirname(path)
    wide = False
    if type(path) == types.UnicodeType:
      wide = True
    for url in urllist:
      url = URL.Canonicalize(url)
      output.Log('Index points to Sitemap file at: %s' % url, 2)
      (scheme, netloc, path, query, frag) = urlparse.urlsplit(url)
      file = os.path.basename(path)
      file = urllib.unquote(file)
      if wide:
        file = encoder.WidenText(file)
      if dir:
        file = dir + os.sep + file
      if file:
        self._pathlist.append(file)
        output.Log('Will attempt to read Sitemap file: %s' % file, 1)
  #end def _MungeLocationListIntoFiles
  def startElement(self, tag, attributes):
    """SAX processing, called per node in the config stream.
    As long as the new tag is legal in our current context, this
    becomes an Open call on one context deeper.
    """
    # If this is the document node, we may have to look for a context stack
    if (self._current < 0) and not self._contexts:
      assert self._contexts_idx and self._contexts_stm
      if tag == 'urlset':
        self._contexts = self._contexts_stm
      elif tag == 'sitemapindex':
        self._contexts = self._contexts_idx
        output.Log('File is a Sitemap index.', 2)
      else:
        output.Error('The document appears to be neither a Sitemap nor a '
                     'Sitemap index.')
        raise SchemaError
    # Display a kinder error on a common mistake
    if (self._current < 0) and (self._contexts == self._contexts_stm) and (
      tag == 'sitemapindex'):
      output.Error('A Sitemap index can not refer to another Sitemap index.')
      raise SchemaError
    # Verify no unexpected attributes
    if attributes:
      text = ''
      for attr in attributes.keys():
        # The document node will probably have namespaces
        if self._current < 0:
          if attr.find('xmlns') >= 0:
            continue
          if attr.find('xsi') >= 0:
            continue
        if text:
          text = text + ', '
        text = text + attr
      if text:
        output.Warn('Did not expect any attributes on any tag, instead tag '
                    '"%s" had attributes: %s' % (tag, text))
    # Switch contexts: push one level deeper if the current context (or the
    # empty stack, for the document node) accepts this tag
    if (self._current < 0) or (self._contexts[self._current].AcceptTag(tag)):
      self._current = self._current + 1
      assert self._current < len(self._contexts)
      self._contexts[self._current].Open()
    else:
      output.Error('Can not accept tag "%s" where it appears.' % tag)
      raise SchemaError
  #end def startElement
  def endElement(self, tag):
    """SAX processing, called per node in the config stream.
    This becomes a call to Close on one context followed by a call
    to Return on the previous.
    """
    tag = tag                               # Avoid warning on unused argument
    assert self._current >= 0
    retval = self._contexts[self._current].Close()
    self._current = self._current - 1
    if self._current >= 0:
      self._contexts[self._current].Return(retval)
    elif retval and (self._contexts == self._contexts_idx):
      # The document node of an index file just closed; its result is the
      # list of Sitemap locations to read next.
      self._MungeLocationListIntoFiles(retval)
  #end def endElement
  def characters(self, text):
    """SAX processing, called when text values are read.  Important to
    note that one single text value may be split across multiple calls
    of this method.
    """
    if (self._current < 0) or (
      not self._contexts[self._current].AcceptText(text)):
      if text.strip():
        output.Error('Can not accept text "%s" where it appears.' % text)
        raise SchemaError
  #end def characters
#end class InputSitemap
class FilePathGenerator:
  """
  Generates the family of filenames derived from one base output path.
  Iterations can be requested in any order, at any time.
  For a base path of '/path/foo.xml.gz' the series looks like:
    0           --> /path/foo.xml.gz
    1           --> /path/foo1.xml.gz
    2           --> /path/foo2.xml.gz
    _index.xml  --> /path/foo_index.xml
  """
  def __init__(self):
    self.is_gzip = False                 # Whether output should be GZipped
    self._path = None                    # Directory part, e.g. '/path/'
    self._prefix = None                  # Basename stem, e.g. 'foo'
    self._suffix = None                  # Extension, e.g. '.xml.gz'
  #end def __init__
  def Preload(self, path):
    """ Split 'path' into directory / stem / extension for recombination.
    Returns True on success; logs an error and returns False otherwise. """
    path = encoder.MaybeNarrowPath(path)
    path = os.path.normpath(path)
    base = os.path.basename(path).lower()
    if not base:
      output.Error('Couldn\'t parse the file path: %s' % path)
      return False
    # Find which of the supported extensions the name carries.
    lensuffix = 0
    for suffix in ['.xml', '.xml.gz', '.gz']:
      if base.endswith(suffix):
        lensuffix = len(suffix)
        break
    if not lensuffix:
      output.Error('The path "%s" doesn\'t end in a supported file '
                   'extension.' % path)
      return False
    self.is_gzip = suffix.endswith('.gz')
    # Slice the original, case-preserved path into its three parts.
    lenbase = len(base)
    lenpath = len(path)
    self._path = path[:lenpath - lenbase]
    self._prefix = path[lenpath - lenbase:lenpath - lensuffix]
    self._suffix = path[lenpath - lensuffix:]
    return True
  #end def Preload
  def GeneratePath(self, instance):
    """ Generate the file path for iteration 'instance' (int or string). """
    stem = self._path + self._prefix
    if type(instance) == types.IntType:
      if instance:
        return '%s%d%s' % (stem, instance, self._suffix)
      return stem + self._suffix
    return stem + instance
  #end def GeneratePath
  def GenerateURL(self, instance, root_url):
    """ Generate the URL for iteration 'instance', rooted at 'root_url'. """
    stem = root_url + self._prefix
    if type(instance) == types.IntType:
      if instance:
        result = '%s%d%s' % (stem, instance, self._suffix)
      else:
        result = stem + self._suffix
    else:
      result = stem + instance
    return URL.Canonicalize(result)
  #end def GenerateURL
  def GenerateWildURL(self, root_url):
    """ Generate a wildcard URL pattern that matches every iteration. """
    stem = URL.Canonicalize(root_url + self._prefix)
    full = URL.Canonicalize(stem + self._suffix)
    return stem + '*' + full[len(stem):]
  #end def GenerateWildURL
#end class FilePathGenerator
class PerURLStatistics:
  """ Keep track of some simple per-URL statistics, like file extension. """
  def __init__(self):
    self._extensions = {}                  # Maps extension -> instance count
  #end def __init__
  def Consume(self, url):
    """ Log some stats for the URL. At the moment, that means extension.
    Directories are counted under '/', files with no dot under
    '(no extension)'. """
    if url and url.loc:
      (scheme, netloc, path, query, frag) = urlparse.urlsplit(url.loc)
      if not path:
        return
      # Recognize directories
      if path.endswith('/'):
        self._extensions['/'] = self._extensions.get('/', 0) + 1
        return
      # Strip to a filename
      i = path.rfind('/')
      if i >= 0:
        assert i < len(path)
        path = path[i:]
      # Find extension
      i = path.rfind('.')
      if i > 0:
        assert i < len(path)
        ext = path[i:].lower()
        self._extensions[ext] = self._extensions.get(ext, 0) + 1
      else:
        self._extensions['(no extension)'] = self._extensions.get(
          '(no extension)', 0) + 1
  #end def Consume
  def Log(self):
    """ Dump out stats to the output. """
    if len(self._extensions):
      output.Log('Count of file extensions on URLs:', 1)
      # Note: avoid naming a local 'set' -- it shadows the builtin.
      for ext in sorted(self._extensions.keys()):
        output.Log(' %7d %s' % (self._extensions[ext], ext), 1)
  #end def Log
class Sitemap(xml.sax.handler.ContentHandler):
"""
This is the big workhorse class that processes your inputs and spits
out sitemap files. It is built as a SAX handler for set up purposes.
That is, it processes an XML stream to bring itself up.
"""
def __init__(self, suppress_notify):
xml.sax.handler.ContentHandler.__init__(self)
self._filters = [] # Filter objects
self._inputs = [] # Input objects
self._urls = {} # Maps URLs to count of dups
self._set = [] # Current set of URLs
self._filegen = None # Path generator for output files
self._wildurl1 = None # Sitemap URLs to filter out
self._wildurl2 = None # Sitemap URLs to filter out
self._sitemaps = 0 # Number of output files
# We init _dup_max to 2 so the default priority is 0.5 instead of 1.0
self._dup_max = 2 # Max number of duplicate URLs
self._stat = PerURLStatistics() # Some simple stats
self._in_site = False # SAX: are we in a Site node?
self._in_Site_ever = False # SAX: were we ever in a Site?
self._default_enc = None # Best encoding to try on URLs
self._base_url = None # Prefix to all valid URLs
self._store_into = None # Output filepath
self._suppress = suppress_notify # Suppress notify of servers
#end def __init__
def ValidateBasicConfig(self):
""" Verifies (and cleans up) the basic user-configurable options. """
all_good = True
if self._default_enc:
encoder.SetUserEncoding(self._default_enc)
# Canonicalize the base_url
if all_good and not self._base_url:
output.Error('A site needs a "base_url" attribute.')
all_good = False
if all_good and not URL.IsAbsolute(self._base_url):
output.Error('The "base_url" must be absolute, not relative: %s' %
self._base_url)
all_good = False
if all_good:
self._base_url = URL.Canonicalize(self._base_url)
if not self._base_url.endswith('/'):
self._base_url = self._base_url + '/'
output.Log('BaseURL is set to: %s' % self._base_url, 2)
# Load store_into into a generator
if all_good:
if self._store_into:
self._filegen = FilePathGenerator()
if not self._filegen.Preload(self._store_into):
all_good = False
else:
output.Error('A site needs a "store_into" attribute.')
all_good = False
# Ask the generator for patterns on what its output will look like
if all_good:
self._wildurl1 = self._filegen.GenerateWildURL(self._base_url)
self._wildurl2 = self._filegen.GenerateURL(SITEINDEX_SUFFIX,
self._base_url)
# Unify various forms of False
if all_good:
if self._suppress:
if (type(self._suppress) == types.StringType) or (type(self._suppress)
== types.UnicodeType):
if (self._suppress == '0') or (self._suppress.lower() == 'false'):
self._suppress = False
# Done
if not all_good:
output.Log('See "example_config.xml" for more information.', 0)
return all_good
#end def ValidateBasicConfig
def Generate(self):
""" Run over all the Inputs and ask them to Produce """
# Run the inputs
for input in self._inputs:
input.ProduceURLs(self.ConsumeURL)
# Do last flushes
if len(self._set):
self.FlushSet()
if not self._sitemaps:
output.Warn('No URLs were recorded, writing an empty sitemap.')
self.FlushSet()
# Write an index as needed
if self._sitemaps > 1:
self.WriteIndex()
# Notify
self.NotifySearch()
# Dump stats
self._stat.Log()
#end def Generate
def ConsumeURL(self, url, allow_fragment):
"""
All per-URL processing comes together here, regardless of Input.
Here we run filters, remove duplicates, spill to disk as needed, etc.
"""
if not url:
return
# Validate
if not url.Validate(self._base_url, allow_fragment):
return
# Run filters
accept = None
for filter in self._filters:
accept = filter.Apply(url)
if accept != None:
break
if not (accept or (accept == None)):
url.Log(prefix='FILTERED', level=2)
return
# Ignore our out output URLs
if fnmatch.fnmatchcase(url.loc, self._wildurl1) or fnmatch.fnmatchcase(
url.loc, self._wildurl2):
url.Log(prefix='IGNORED (output file)', level=2)
return
# Note the sighting
hash = url.MakeHash()
if self._urls.has_key(hash):
dup = self._urls[hash]
if dup > 0:
dup = dup + 1
self._urls[hash] = dup
if self._dup_max < dup:
self._dup_max = dup
url.Log(prefix='DUPLICATE')
return
# Acceptance -- add to set
self._urls[hash] = 1
self._set.append(url)
self._stat.Consume(url)
url.Log()
# Flush the set if needed
if len(self._set) >= MAXURLS_PER_SITEMAP:
self.FlushSet()
#end def ConsumeURL
def FlushSet(self):
"""
Flush the current set of URLs to the output. This is a little
slow because we like to sort them all and normalize the priorities
before dumping.
"""
# Sort and normalize
output.Log('Sorting and normalizing collected URLs.', 1)
self._set.sort()
for url in self._set:
hash = url.MakeHash()
dup = self._urls[hash]
if dup > 0:
self._urls[hash] = -1
if not url.priority:
url.priority = '%.4f' % (float(dup) / float(self._dup_max))
# Get the filename we're going to write to
filename = self._filegen.GeneratePath(self._sitemaps)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output filename.')
self._sitemaps = self._sitemaps + 1
output.Log('Writing Sitemap file "%s" with %d URLs' %
(filename, len(self._set)), 1)
# Write to it
frame = None
file = None
try:
if self._filegen.is_gzip:
basename = os.path.basename(filename);
frame = open(filename, 'wb')
file = gzip.GzipFile(fileobj=frame, filename=basename, mode='wt')
else:
file = open(filename, 'wt')
file.write(SITEMAP_HEADER)
for url in self._set:
url.WriteXML(file)
file.write(SITEMAP_FOOTER)
file.close()
if frame:
frame.close()
frame = None
file = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
# Flush
self._set = []
#end def FlushSet
def WriteIndex(self):
""" Write the master index of all Sitemap files """
# Make a filename
filename = self._filegen.GeneratePath(SITEINDEX_SUFFIX)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output index filename.')
output.Log('Writing index file "%s" with %d Sitemaps' %
(filename, self._sitemaps), 1)
# Make a lastmod time
lastmod = TimestampISO8601(time.time())
# Write to it
try:
fd = open(filename, 'wt')
fd.write(SITEINDEX_HEADER)
for mapnumber in range(0,self._sitemaps):
# Write the entry
mapurl = self._filegen.GenerateURL(mapnumber, self._base_url)
mapattributes = { 'loc' : mapurl, 'lastmod' : lastmod }
fd.write(SITEINDEX_ENTRY % mapattributes)
fd.write(SITEINDEX_FOOTER)
fd.close()
fd = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
#end def WriteIndex
def NotifySearch(self):
""" Send notification of the new Sitemap(s) to the search engines. """
if self._suppress:
output.Log('Search engine notification is suppressed.', 1)
return
output.Log('Notifying search engines.', 1)
# Override the urllib's opener class with one that doesn't ignore 404s
class ExceptionURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
output.Log('HTTP error %d: %s' % (errcode, errmsg), 2)
raise IOError
#end def http_error_default
#end class ExceptionURLOpener
old_opener = urllib._urlopener
urllib._urlopener = ExceptionURLopener()
# Build the URL we want to send in
if self._sitemaps > 1:
url = self._filegen.GenerateURL(SITEINDEX_SUFFIX, self._base_url)
else:
url = self._filegen.GenerateURL(0, self._base_url)
# Test if we can hit it ourselves
try:
u = urllib.urlopen(url)
u.close()
except IOError:
output.Error('When attempting to access our generated Sitemap at the '
'following URL:\n %s\n we failed to read it. Please '
'verify the store_into path you specified in\n'
' your configuration file is web-accessable. Consult '
'the FAQ for more\n information.' % url)
output.Warn('Proceeding to notify with an unverifyable URL.')
# Cycle through notifications
# To understand this, see the comment near the NOTIFICATION_SITES comment
for ping in NOTIFICATION_SITES:
query_map = ping[3]
query_attr = ping[5]
query_map[query_attr] = url
query = urllib.urlencode(query_map)
notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
# Send the notification
output.Log('Notifying: %s' % ping[1], 1)
output.Log('Notification URL: %s' % notify, 2)
try:
u = urllib.urlopen(notify)
u.read()
u.close()
except IOError:
output.Warn('Cannot contact: %s' % ping[1])
if old_opener:
urllib._urlopener = old_opener
#end def NotifySearch
def startElement(self, tag, attributes):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
if self._in_site:
output.Error('Can not nest Site entries in the configuration.')
else:
self._in_site = True
if not ValidateAttributes('SITE', attributes,
('verbose', 'default_encoding', 'base_url', 'store_into',
'suppress_search_engine_notify')):
return
verbose = attributes.get('verbose', 0)
if verbose:
output.SetVerbose(verbose)
self._default_enc = attributes.get('default_encoding')
self._base_url = attributes.get('base_url')
self._store_into = attributes.get('store_into')
if not self._suppress:
self._suppress = attributes.get('suppress_search_engine_notify',
False)
self.ValidateBasicConfig()
elif tag == 'filter':
self._filters.append(Filter(attributes))
elif tag == 'url':
self._inputs.append(InputURL(attributes))
elif tag == 'urllist':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputURLList(attributeset))
elif tag == 'directory':
self._inputs.append(InputDirectory(attributes, self._base_url))
elif tag == 'accesslog':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputAccessLog(attributeset))
elif tag == 'sitemap':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputSitemap(attributeset))
else:
output.Error('Unrecognized tag in the configuration: %s' % tag)
#end def startElement
def endElement(self, tag):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
assert self._in_site
self._in_site = False
self._in_site_ever = True
#end def endElement
def endDocument(self):
""" End of SAX, verify we can proceed. """
if not self._in_site_ever:
output.Error('The configuration must specify a "site" element.')
else:
if not self._inputs:
output.Warn('There were no inputs to generate a sitemap from.')
#end def endDocument
#end class Sitemap
def ValidateAttributes(tag, attributes, goodattributes):
  """ Makes sure 'attributes' does not contain any attribute not
  listed in 'goodattributes'.
  Args:
    tag: the name of the config tag being validated, for error messages
    attributes: a mapping of attribute name to value
    goodattributes: the collection of permitted attribute names
  Returns:
    True iff every attribute is permitted; otherwise logs an error for
    each unknown attribute and returns False.
  """
  all_good = True
  for attr in attributes.keys():
    if attr not in goodattributes:
      output.Error('Unknown %s attribute: %s' % (tag, attr))
      all_good = False
  return all_good
#end def ValidateAttributes
def ExpandPathAttribute(src, attrib):
  """ Given a dictionary of attributes, return a list of dictionaries
  with all the same attributes except for the one named attrib.
  That one, we treat as a file path and expand into all its possible
  variations.

  Returns [src] unchanged when the attribute is missing or the glob
  matches nothing. """
  # Do the path expansion. On any error, just return the source dictionary.
  path = src.get(attrib)
  if not path:
    return [src]
  path = encoder.MaybeNarrowPath(path);
  pathlist = glob.glob(path)
  if not pathlist:
    return [src]
  # If this isn't actually a dictionary, make it one.
  # NOTE(review): presumably 'src' can be a SAX attributes object that only
  # quacks like a dict and lacks copy() -- TODO confirm against the callers.
  if type(src) != types.DictionaryType:
    tmp = {}
    for key in src.keys():
      tmp[key] = src[key]
    src = tmp
  # Create N new dictionaries, one per glob match
  retval = []
  for path in pathlist:
    dst = src.copy()
    dst[attrib] = path
    retval.append(dst)
  return retval
#end def ExpandPathAttribute
def OpenFileForRead(path, logtext):
  """ Opens a text file for reading, transparently handling GZip ('.gz').
  Returns a (frame, file) pair: 'file' is the stream to read from, and
  'frame' is the underlying raw file when the input is GZipped (both need
  closing in that case), or None otherwise.  On failure both are None.
  """
  if not path:
    return (None, None)
  frame = None
  file = None
  try:
    if path.endswith('.gz'):
      frame = open(path, 'rb')
      file = gzip.GzipFile(fileobj=frame, mode='rt')
    else:
      file = open(path, 'rt')
  except IOError:
    output.Error('Can not open file: %s' % path)
    return (frame, file)
  if logtext:
    output.Log('Opened %s file: %s' % (logtext, path), 1)
  else:
    output.Log('Opened file: %s' % path, 1)
  return (frame, file)
#end def OpenFileForRead
def TimestampISO8601(t):
  """Convert seconds since epoch (1970-01-01) into an ISO 8601 UTC string."""
  utc_parts = time.gmtime(t)
  return time.strftime('%Y-%m-%dT%H:%M:%SZ', utc_parts)
#end def TimestampISO8601
def CreateSitemapFromFile(configpath, suppress_notify):
  """ Sets up a new Sitemap object from the specified configuration file.

  Returns the Sitemap on success, or None if parsing the configuration
  logged any new errors. """
  # Remember error count on the way in; any increase means the config
  # was bad and we must not return a half-configured Sitemap.
  num_errors = output.num_errors
  # Rev up SAX to parse the config
  sitemap = Sitemap(suppress_notify)
  try:
    output.Log('Reading configuration file: %s' % configpath, 0)
    xml.sax.parse(configpath, sitemap)
  except IOError:
    output.Error('Cannot read configuration file: %s' % configpath)
  except xml.sax._exceptions.SAXParseException, e:
    output.Error('XML error in the config file (line %d, column %d): %s' %
                 (e._linenum, e._colnum, e.getMessage()))
  except xml.sax._exceptions.SAXReaderNotAvailable:
    output.Error('Some installs of Python 2.2 did not include complete support'
                 ' for XML.\n Please try upgrading your version of Python'
                 ' and re-running the script.')
  # If we added any errors, return no sitemap
  if num_errors == output.num_errors:
    return sitemap
  return None
#end def CreateSitemapFromFile
def ProcessCommandFlags(args):
  """
  Parse command line flags per specified usage, pick off key, value pairs
  All flags of type "--key=value" will be processed as flags[key] = value,
  "--option" will be processed as flags[option] = option

  Args:
    args: sequence of raw command-line argument strings
  Returns:
    a dict of the parsed flags, or None if any argument fails to parse.
  """
  flags = {}
  rkeyval = r'--(?P<key>\S*)[=](?P<value>\S*)'  # --key=value
  roption = r'--(?P<option>\S*)'                # --option
  rc = re.compile('(' + rkeyval + ')|(' + roption + ')')
  for a in args:
    match = rc.search(a)
    if not match:
      return None
    rcg = match.groupdict()
    # groupdict() always contains every named group, mapped to None when
    # that alternative didn't participate in the match -- so test the
    # values for None rather than testing key presence (which previously
    # inserted a spurious {None: None} entry for plain "--option" flags).
    if rcg['key'] is not None:
      flags[rcg['key']] = rcg['value']
    if rcg['option'] is not None:
      flags[rcg['option']] = rcg['option']
  return flags
#end def ProcessCommandFlags
#
# __main__
#
if __name__ == '__main__':
  flags = ProcessCommandFlags(sys.argv[1:])
  # Require a parseable command line with a --config flag; --help or any
  # parse failure prints the usage text instead.  ('in' replaces the
  # deprecated dict.has_key and works on Python 2.2+.)
  if (not flags) or ('config' not in flags) or ('help' in flags):
    output.Log(__usage__, 0)
  else:
    # --testing suppresses pinging the search engines with the result
    suppress_notify = 'testing' in flags
    sitemap = CreateSitemapFromFile(flags['config'], suppress_notify)
    if not sitemap:
      output.Log('Configuration file errors -- exiting.', 0)
    else:
      sitemap.Generate()
      output.Log('Number of errors: %d' % output.num_errors, 1)
      output.Log('Number of warnings: %d' % output.num_warns, 1)
| mit |
kjedruczyk/phabricator-tools | py/abd/abdt_differresultcache__t.py | 2 | 4813 | """Test suite for abdt_differresultcache."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [ ]
# -----------------------------------------------------------------------------
# Tests:
# [ A] test_A_Breathing
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import unittest
import phlgitu_fixture
import phlgitx_refcache
import abdt_differ
import abdt_differresultcache
import abdt_exception
class _BreakableRepoUsedError(Exception):
    """Raised when a _BreakableRepo is invoked while calls are disabled."""
    pass
class _BreakableRepo(object):

    """Wrap a repo callable so that calls can be temporarily forbidden.

    Inside 'disabled_context', any invocation raises
    _BreakableRepoUsedError instead of reaching the wrapped repo.
    """

    def __init__(self, repo):
        super(_BreakableRepo, self).__init__()
        self._repo = repo
        self._enabled = True

    def __call__(self, *args):
        if not self._enabled:
            raise _BreakableRepoUsedError(str(args))
        return self._repo(*args)

    @contextlib.contextmanager
    def disabled_context(self):
        """Forbid repo calls for the duration of the 'with' block."""
        self._enabled = False
        try:
            yield
        finally:
            self._enabled = True
class Test(unittest.TestCase):

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_A_Breathing(self):
        with phlgitu_fixture.lone_worker_context() as worker:
            branch = 'diff_branch'
            guard_repo = _BreakableRepo(worker.repo)
            caching_repo = phlgitx_refcache.Repo(guard_repo)
            diff_cache = abdt_differresultcache.Cache(caching_repo)
            worker.repo('checkout', '-b', branch)

            def diff_up_to(byte_limit):
                return diff_cache.checkout_make_raw_diff(
                    "refs/heads/master",
                    "refs/heads/{}".format(branch),
                    byte_limit)

            # first, prove the guard really does fire when the repo is
            # used while disabled
            with self.assertRaises(_BreakableRepoUsedError):
                with guard_repo.disabled_context():
                    with self.assertRaises(abdt_differ.NoDiffError):
                        diff_up_to(1)

            # an empty diff raises abdt_differ.NoDiffError
            with self.assertRaises(abdt_differ.NoDiffError):
                diff_up_to(1)

            # asking again still raises; this time the cached repo is not
            # needed for diffing, so no cache reset is required
            with self.assertRaises(abdt_differ.NoDiffError):
                diff_up_to(1)

            # asking once more with the repo disabled proves the answer
            # comes entirely from the cache, never touching the repo
            with guard_repo.disabled_context():
                with self.assertRaises(abdt_differ.NoDiffError):
                    diff_up_to(1)

            # the differ detaches HEAD, so re-attach to the branch before
            # making any more commits on it
            worker.repo('checkout', branch)
            worker.commit_new_file(
                "make a test diff", "newfile", "test content")

            # a diff within the byte limit passes straight through
            result = diff_up_to(1000)
            self.assertIn("test content", result.diff)

            # and passes straight through again
            result = diff_up_to(1000)
            self.assertIn("test content", result.diff)

            # raise if a diff cannot be reduced to the limit
            with self.assertRaises(abdt_exception.LargeDiffException):
                diff_up_to(1)

            # and raise again on the repeat request
            with self.assertRaises(abdt_exception.LargeDiffException):
                diff_up_to(1)
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| apache-2.0 |
gerashegalov/Impala | tests/query_test/test_hdfs_caching.py | 13 | 8247 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Validates HDFS caching behavior end to end
#
import logging
import os
import re
from copy import copy
from subprocess import call

import pytest

from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import *
from tests.common.test_vector import *
from tests.common.impala_cluster import ImpalaCluster
from tests.common.test_dimensions import create_exec_option_dimension, \
    create_single_exec_option_dimension
from tests.common.skip import SkipIfS3, SkipIfIsilon
from tests.util.shell_util import exec_process
# End to end test that hdfs caching is working.
@SkipIfS3.caching # S3: missing coverage: verify SET CACHED gives error
@SkipIfIsilon.caching
class TestHdfsCaching(ImpalaTestSuite):
  """End-to-end check that HDFS caching is exercised when scanning a
  cached table."""
  @classmethod
  def get_workload(self):
    return 'tpch'
  @classmethod
  def add_test_dimensions(cls):
    super(TestHdfsCaching, cls).add_test_dimensions()
    cls.TestMatrix.add_constraint(
        lambda v: v.get_value('exec_option')['batch_size'] == 0)
    cls.TestMatrix.add_constraint(
        lambda v: v.get_value('table_format').file_format == "text")
  # The tpch nation table is cached as part of data loading. Issue a query
  # against it and verify the cached-read metric moves as expected.
  @pytest.mark.execute_serially
  def test_table_is_cached(self, vector):
    metric_name = "impala-server.io-mgr.cached-bytes-read"
    query = "select count(*) from tpch.nation"
    expected_delta = 2199
    cluster = ImpalaCluster()
    # Snapshot the cached-read metric on every impalad before the query runs.
    before = [d.service.get_metric_value(metric_name)
              for d in cluster.impalads]
    # Execute the query and sanity-check its result.
    result = self.execute_query(query)
    assert(len(result.data) == 1)
    assert(result.data[0] == '25')
    # Snapshot the metric again.
    after = [d.service.get_metric_value(metric_name)
             for d in cluster.impalads]
    # Exactly one impalad should have read the expected number of cached
    # bytes; every other one must be unchanged.
    assert(len(before) == len(after))
    num_increased = 0
    for old, new in zip(before, after):
      assert(old == new or old + expected_delta == new)
      if new > old:
        num_increased += 1
    if num_increased != 1:
      # Test failed, dump the collected metrics for debugging.
      for old, new in zip(before, after):
        print("%d %d" % (old, new))
      assert(False)
  def test_cache_cancellation(self, vector):
    """Run a query over a mix of cached and uncached tables with a limit,
    so the cancellation paths get exercised. Regression test for
    IMPALA-1019."""
    iterations = 100
    query_string = """
      with t1 as (select int_col x, bigint_col y from functional.alltypes limit 2),
      t2 as (select int_col x, bigint_col y from functional.alltypestiny limit 2),
      t3 as (select int_col x, bigint_col y from functional.alltypessmall limit 2)
      select * from t1, t2, t3 where t1.x = t2.x and t2.x = t3.x """
    # The failure is timing dependent, so repeat the query many times.
    for _ in xrange(1, iterations):
      result = self.execute_query(query_string)
      assert(len(result.data) == 2)
@SkipIfS3.caching
@SkipIfIsilon.caching
class TestHdfsCachingDdl(ImpalaTestSuite):
  """Tests that DDL statements (create/drop/alter) keep HDFS cache
  directives in sync, and that directive changes made outside Impala are
  picked up on reload."""
  @classmethod
  def get_workload(self):
    return 'functional-query'
  @classmethod
  def add_test_dimensions(cls):
    super(TestHdfsCachingDdl, cls).add_test_dimensions()
    # NOTE(review): create_single_exec_option_dimension is not imported by
    # name in this file's import list; presumably it arrives via one of the
    # wildcard imports -- confirm.
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and \
        v.get_value('table_format').compression_codec == 'none')
  def setup_method(self, method):
    # Always start from a fresh, empty 'cachedb' database.
    self.cleanup_db("cachedb")
    self.client.execute("create database cachedb")
  def teardown_method(self, method):
    self.cleanup_db("cachedb")
  @pytest.mark.execute_serially
  def test_caching_ddl(self, vector):
    # Get the number of cache requests before starting the test
    num_entries_pre = get_num_cache_requests()
    self.run_test_case('QueryTest/hdfs-caching', vector)
    # After running this test case we should be left with 8 cache requests.
    # In this case, 1 for each table + 7 more for each cached partition.
    assert num_entries_pre == get_num_cache_requests() - 8
    self.client.execute("drop table cachedb.cached_tbl_part")
    self.client.execute("drop table cachedb.cached_tbl_nopart")
    # Dropping the tables should cleanup cache entries leaving us with the same
    # total number of entries
    assert num_entries_pre == get_num_cache_requests()
  @pytest.mark.execute_serially
  def test_cache_reload_validation(self, vector):
    """This is a set of tests asserting that cache directives modified
    outside of Impala are picked up after reload, cf IMPALA-1645"""
    num_entries_pre = get_num_cache_requests()
    create_table = ("create table cachedb.cached_tbl_reload "
        "(id int) cached in 'testPool' with replication = 8")
    self.client.execute(create_table)
    # Access the table once to load the metadata
    self.client.execute("select count(*) from cachedb.cached_tbl_reload")
    create_table = ("create table cachedb.cached_tbl_reload_part (i int) "
        "partitioned by (j int) cached in 'testPool' with replication = 8")
    self.client.execute(create_table)
    # Add two partitions
    self.client.execute("alter table cachedb.cached_tbl_reload_part add partition (j=1)")
    self.client.execute("alter table cachedb.cached_tbl_reload_part add partition (j=2)")
    # One directive per table plus one per partition.
    assert num_entries_pre + 4 == get_num_cache_requests(), \
      "Adding the tables should be reflected by the number of cache directives."
    # Modify the cache directive outside of Impala and reload the table to verify
    # that changes are visible
    drop_cache_directives_for_path("/test-warehouse/cachedb.db/cached_tbl_reload")
    drop_cache_directives_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part")
    drop_cache_directives_for_path(
        "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=1")
    change_cache_directive_repl_for_path(
        "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2", 3)
    # Create a bogus cached table abusing an existing cache directive ID, IMPALA-1750
    dirid = get_cache_directive_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2")
    self.client.execute(("create table cachedb.no_replication_factor (id int) " \
                         "tblproperties(\"cache_directive_id\"=\"%s\")" % dirid))
    self.run_test_case('QueryTest/hdfs-caching-validation', vector)
def drop_cache_directives_for_path(path):
  """Remove every HDFS cache directive covering the given path."""
  cmd = "hdfs cacheadmin -removeDirectives -path %s" % path
  returncode, out, err = exec_process(cmd)
  assert returncode == 0, \
      "Error removing cache directive for path %s (%s, %s)" % (path, out, err)
def get_cache_directive_for_path(path):
  """Return the id (as a string) of the cache directive in 'testPool' that
  covers 'path'.

  Parses the output of 'hdfs cacheadmin -listDirectives'; the first
  whitespace-delimited column of a listing row is the directive id.
  Raises an AttributeError if no matching row is found (re.search returns
  None in that case).
  """
  rc, stdout, stderr = exec_process("hdfs cacheadmin -listDirectives -path %s" % path)
  assert rc == 0
  # Raw string: the pattern contains \s and \d escapes that are not valid
  # string escapes; 're' is imported at the top of the file.
  dirid = re.search(r'^\s+?(\d+)\s+?testPool\s+?.*?$', stdout, re.MULTILINE).group(1)
  return dirid
def change_cache_directive_repl_for_path(path, repl):
  """Change the replication factor of the cache directive for a given path"""
  # Look up the directive id first; -modifyDirective addresses by id.
  dirid = get_cache_directive_for_path(path)
  rc, stdout, stderr = exec_process(
    "hdfs cacheadmin -modifyDirective -id %s -replication %s" % (dirid, repl))
  assert rc == 0, \
      "Error modifying cache directive for path %s (%s, %s)" % (path, stdout, stderr)
def get_num_cache_requests():
  """Return the number of outstanding cache requests.

  Counts the lines of 'hdfs cacheadmin -listDirectives -stats' output;
  s.count('\n') + 1 is equivalent to len(s.split('\n')).
  """
  returncode, out, err = exec_process("hdfs cacheadmin -listDirectives -stats")
  assert returncode == 0, 'Error executing hdfs cacheadmin: %s %s' % (out, err)
  return out.count('\n') + 1
| apache-2.0 |
jeremycline/pulp | repoauth/pulp/repoauth/auth_enabled_validation.py | 16 | 1202 | '''
Logic for checking if the Pulp server is configured to apply *any* repo
authentication. This is meant to be used as a short-circuit validation
to prevent the more costly tests from being run in the case the Pulp server
doesn't care at all about repo authentication.
'''
from ConfigParser import SafeConfigParser
# This needs to be accessible on both Pulp and the CDS instances, so a
# separate config file for repo auth purposes is used.
CONFIG_FILENAME = '/etc/pulp/repo_auth.conf'
# -- framework------------------------------------------------------------------
def authenticate(environ):
    '''
    Framework hook method.

    Returns True when repo authentication is globally disabled so that the
    framework assumes a valid user and skips the more expensive checks.
    '''
    cfg = _config()
    enabled = cfg.getboolean('main', 'enabled')
    verbose = cfg.getboolean('main', 'log_failed_cert_verbose')
    if verbose and not enabled:
        environ["wsgi.errors"].write("Repo authentication is not enabled. Skipping all checks.")
    # Disabled auth means "pretend a valid user has been found" and
    # short-circuit any other validation checks.
    return not enabled
def _config():
    '''Load and return the shared repo-auth configuration file.'''
    parser = SafeConfigParser()
    parser.read(CONFIG_FILENAME)
    return parser
| gpl-2.0 |
nvoron23/hue | desktop/core/ext-py/Django-1.6.10/django/core/mail/backends/base.py | 660 | 1164 | """Base email backend class."""
class BaseEmailBackend(object):
    """
    Base class for email backend implementations.

    Subclasses must at least overwrite send_messages().
    """
    def __init__(self, fail_silently=False, **kwargs):
        self.fail_silently = fail_silently

    def open(self):
        """Open a network connection.

        Backends that maintain a network connection may override this; it
        exists so applications can force a single connection to be reused
        when sending several mails (see the SMTP backend's send_messages()
        for a reference implementation). Tracking the connection's status
        is entirely the backend's responsibility.

        The default implementation is a no-op.
        """
        pass

    def close(self):
        """Close a network connection."""
        pass

    def send_messages(self, email_messages):
        """
        Sends one or more EmailMessage objects and returns the number of email
        messages sent.
        """
        raise NotImplementedError
broadbent/scootplayer | scootplayer/watchdog.py | 1 | 3451 | #!/usr/bin/env python2.7
"""Inspects player behaviour to ensure playback is occuring."""
class Watchdog(object):
    """Aids in debugging issues caused by stalled playback."""
    # Playback position seen at the previous check; 0 doubles as
    # "not yet initialised".
    watch_value = 0
    # True if the previous check saw no progress; a second consecutive
    # stalled check triggers a dump and exits the player.
    watch_count = False
    # Longest segment duration in the MPD; used as the polling interval.
    max_seg_duration = 0
    # Gate for the periodic check (toggled by pause()/resume()/stop()).
    run = False
    def __init__(self, player):
        """Start thread to wait for max duration to become available."""
        self.player = player
        self.player.start_thread(self.wait_for_max_seg_duration)
    def wait_for_max_seg_duration(self):
        """
        Get maximum segment duration of current MPD. If not available, wait and
        try again.

        When this value is available, start the watchdog thread proper.
        """
        try:
            self.max_seg_duration = self.player.max_seg_duration()
            self.player.start_thread(self.watchdog)
        except AttributeError:
            # MPD not parsed yet: retry in one second.
            self.player.start_timed_thread(1, self.wait_for_max_seg_duration)
    def stop(self):
        """Stop watching."""
        self.run = False
        self.player.event('stop', 'watchdog')
    def pause(self):
        """Pause watching."""
        self.run = False
    def resume(self):
        """Resume watching."""
        self.run = True
    def watchdog(self):
        """
        Periodically monitor playback to ensure that it is progressing.

        If playback stops for any reason, dump the current set of objects to
        file for analysis and debug.
        """
        # Re-arm the next check before performing this one.
        self.player.start_timed_thread(self.max_seg_duration, self.watchdog)
        if self.run:
            report = self.player.retrieve_metric('report')
            if self.watch_value == 0:
                try:
                    # NOTE(review): this overwrites the metric fetched above
                    # and reads a flat 'playback_time_position' key, while the
                    # comparison below reads the nested
                    # report['playback']['time_position'] -- confirm the two
                    # report shapes are both intentional.
                    report = self.player.report()
                    self.watch_value = report['playback_time_position']
                except AttributeError:
                    pass
            if self.watch_value == report['playback']['time_position']:
                if self.watch_count:
                    # Second consecutive check with no progress: give up.
                    self.player.event('error',
                                      'detected stalled playback')
                    self._dump()
                    self.player.exit()
                self.watch_count = True
            else:
                self.watch_count = False
            self.watch_value = report['playback']['time_position']
    def _dump(self):
        """
        Dump each object to file.

        Also dumps a list of threads with their statuses.
        """
        self.player.create_directory('/dump')
        self._dump_object('player', self.player)
        self._dump_threads()
    def _dump_object(self, title, object_):
        """
        Recursively dump each object to file.

        Starts with the main player and recurses downwards.
        """
        file_ = self.player.open_file('/dump/' + title + '.txt')
        for key, value in object_.__dict__.items():
            # Recurse into known composite members before writing the pair.
            if key in ['representations', 'bandwidth', 'playback_queue',
                       'download_queue', 'reporter', 'remote_control',
                       'queue']:
                self._dump_object(key, value)
            file_.write(key + ',' + str(value) + '\n')
    def _dump_threads(self):
        """Dump the name and status of each thread registered in the player."""
        file_ = self.player.open_file('/dump/threads.txt')
        for thread in self.player.threads:
            if thread.isAlive():
                file_.write(str(thread) + '\n')
| apache-2.0 |
VirtueSecurity/aws-extender | BappModules/docutils/parsers/rst/languages/__init__.py | 170 | 1085 | # $Id: __init__.py 7648 2013-04-18 07:36:22Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# Internationalization details are documented in
# <http://docutils.sf.net/docs/howto/i18n.html>.
"""
This package contains modules for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils.utils import normalize_language_tag
if sys.version_info < (2,5):
from docutils._compat import __import__
_languages = {}
def get_language(language_code):
    """Return the module implementing reStructuredText mappings for
    `language_code`, caching it in `_languages`; None if no candidate tag
    resolves to a module."""
    for candidate in normalize_language_tag(language_code):
        candidate = candidate.replace('-', '_') # '-' not valid in module names
        if candidate in _languages:
            return _languages[candidate]
        module = None
        # Prefer a language module shipped inside this package (level=1);
        # fall back to an absolute import (level=0).
        for level in (1, 0):
            try:
                module = __import__(candidate, globals(), locals(), level=level)
                break
            except ImportError:
                pass
        if module is None:
            continue
        _languages[candidate] = module
        return module
    return None
| mit |
mikeckennedy/python-for-entrepreneurs-course-demos | 17_logging_and_monitoring/final_17_blue_yellow_app_monitoring/blue_yellow_app/controllers/base_controller.py | 1 | 1667 | import logbook
import blue_yellow_app.infrastructure.static_cache as static_cache
import pyramid.renderers
import pyramid.httpexceptions as exc
from blue_yellow_app.infrastructure.supressor import suppress
import blue_yellow_app.infrastructure.cookie_auth as cookie_auth
from blue_yellow_app.services.account_service import AccountService
class BaseController:
    """Common base for view controllers: shared page layout, per-controller
    logging and cookie-based authentication helpers."""
    def __init__(self, request):
        self.request = request
        self.build_cache_id = static_cache.build_cache_id
        # Expose the shared layout macro for subclass templates.
        renderer = pyramid.renderers.get_renderer(
            'blue_yellow_app:templates/shared/_layout.pt')
        self.layout = renderer.implementation().macros['layout']
        # e.g. HomeController -> logger named 'Ctrls/Home'.
        self.log = logbook.Logger(
            'Ctrls/' + type(self).__name__.replace("Controller", ""))
    @property
    def is_logged_in(self):
        """True when the auth cookie resolves to a user id."""
        return cookie_auth.get_user_id_via_auth_cookie(self.request) is not None
    # noinspection PyMethodMayBeStatic
    @suppress()
    def redirect(self, to_url, permanent=False):
        """Abort the current view with an HTTP redirect."""
        if permanent:
            raise exc.HTTPMovedPermanently(to_url)
        raise exc.HTTPFound(to_url)
    @property
    def data_dict(self):
        """Query-string, form and URL-match data merged into one dict;
        later sources win on key collisions."""
        merged = dict()
        for source in (self.request.GET, self.request.POST,
                       self.request.matchdict):
            merged.update(source)
        return merged
    @property
    def logged_in_user_id(self):
        """User id stored in the auth cookie, or None."""
        return cookie_auth.get_user_id_via_auth_cookie(self.request)
    @property
    def logged_in_user(self):
        """Account of the logged-in user, or None when anonymous."""
        uid = self.logged_in_user_id
        return AccountService.find_account_by_id(uid) if uid else None
| mit |
onceuponatimeforever/oh-mainline | vendor/packages/twill/twill/other_packages/_mechanize_dist/_http.py | 20 | 25772 | """HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import copy, time, tempfile, htmlentitydefs, re, logging, socket, \
urllib2, urllib, httplib, sgmllib
from urllib2 import URLError, HTTPError, BaseHandler
from cStringIO import StringIO
from _request import Request
from _util import isstringlike
from _response import closeable_response, response_seek_wrapper
from _html import unescape, unescape_charref
from _headersutil import is_html
from _clientcookie import CookieJar, request_host
import _rfc3986
debug = logging.getLogger("mechanize").debug
# monkeypatch urllib2.HTTPError to show URL
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
# This adds "refresh" to the list of redirectables and provides a redirection
# algorithm that doesn't go into a loop in the presence of cookies
# (Python 2.4 has this new algorithm, 2.3 doesn't).
class HTTPRedirectHandler(BaseHandler):
    """Handle 30x (and "refresh") redirections with cookie-safe loop
    detection, per the module comment above."""
    # maximum number of redirections to any single URL
    # this is needed because of the state that cookies introduce
    max_repeats = 4
    # maximum total number of redirections (regardless of URL) before
    # assuming we're in a loop
    max_redirections = 10
    # Implementation notes:
    # To avoid the server sending us into an infinite loop, the request
    # object needs to track what URLs we have already seen.  Do this by
    # adding a handler-specific attribute to the Request object.  The value
    # of the dict is used to count the number of times the same URL has
    # been visited.  This is needed because visiting the same URL twice
    # does not necessarily imply a loop, thanks to state introduced by
    # cookies.
    # Always unhandled redirection codes:
    # 300 Multiple Choices: should not handle this here.
    # 304 Not Modified: no need to handle here: only of interest to caches
    #     that do conditional GETs
    # 305 Use Proxy: probably not worth dealing with here
    # 306 Unused: what was this for in the previous versions of protocol??
    def redirect_request(self, newurl, req, fp, code, msg, headers):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a redirection
        response is received.  If a redirection should take place, return a
        new Request to allow http_error_30x to perform the redirect;
        otherwise, return None to indicate that an HTTPError should be
        raised.
        """
        if code in (301, 302, 303, "refresh") or \
               (code == 307 and not req.has_data()):
            # Strictly (according to RFC 2616), 301 or 302 in response to
            # a POST MUST NOT cause a redirection without confirmation
            # from the user (of urllib2, in this case).  In practice,
            # essentially all clients do redirect in this case, so we do
            # the same.
            # XXX really refresh redirections should be visiting; tricky to
            #  fix, so this will wait until post-stable release
            new = Request(newurl,
                          headers=req.headers,
                          origin_req_host=req.get_origin_req_host(),
                          unverifiable=True,
                          visit=False,
                          )
            # Remember the request that started the redirect chain.
            new._origin_req = getattr(req, "_origin_req", req)
            return new
        else:
            raise HTTPError(req.get_full_url(), code, msg, headers, fp)
    def http_error_302(self, req, fp, code, msg, headers):
        """Follow a redirect, enforcing the per-URL and total limits."""
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI).  Use first header.
        if headers.has_key('location'):
            newurl = headers.getheaders('location')[0]
        elif headers.has_key('uri'):
            newurl = headers.getheaders('uri')[0]
        else:
            # No redirect target supplied; nothing to do.
            return
        newurl = _rfc3986.clean_url(newurl, "latin-1")
        # Target may be relative; resolve against the original URL.
        newurl = _rfc3986.urljoin(req.get_full_url(), newurl)
        # XXX Probably want to forget about the state of the current
        # request, although that might interact poorly with other
        # handlers that also use handler-specific request attributes
        new = self.redirect_request(newurl, req, fp, code, msg, headers)
        if new is None:
            return
        # loop detection
        # .redirect_dict has a key url if url was previously visited.
        if hasattr(req, 'redirect_dict'):
            visited = new.redirect_dict = req.redirect_dict
            if (visited.get(newurl, 0) >= self.max_repeats or
                len(visited) >= self.max_redirections):
                raise HTTPError(req.get_full_url(), code,
                                self.inf_msg + msg, headers, fp)
        else:
            visited = new.redirect_dict = req.redirect_dict = {}
        visited[newurl] = visited.get(newurl, 0) + 1
        # Don't close the fp until we are sure that we won't use it
        # with HTTPError.
        fp.read()
        fp.close()
        return self.parent.open(new)
    # All other redirect codes share the 302 implementation.
    http_error_301 = http_error_303 = http_error_307 = http_error_302
    http_error_refresh = http_error_302
    inf_msg = "The HTTP server returned a redirect error that would " \
              "lead to an infinite loop.\n" \
              "The last 30x error message was:\n"
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception):
    """Signals that the end of the HTML HEAD section was reached."""
class AbstractHeadParser:
    """Shared logic for the HEAD parsers below: collects META HTTP-EQUIV
    (name, content) pairs into .http_equiv and handles entity/charref
    unescaping."""
    # only these elements are allowed in or before HEAD of document
    head_elems = ("html", "head",
                  "title", "base",
                  "script", "style", "meta", "link", "object")
    _entitydefs = htmlentitydefs.name2codepoint
    _encoding = DEFAULT_ENCODING
    def __init__(self):
        # Accumulated (http-equiv, content) pairs, in document order.
        self.http_equiv = []
    def start_meta(self, attrs):
        # Record the pair only when both attributes are present.
        http_equiv = content = None
        for key, value in attrs:
            if key == "http-equiv":
                http_equiv = self.unescape_attr_if_required(value)
            elif key == "content":
                content = self.unescape_attr_if_required(value)
        if http_equiv is not None and content is not None:
            self.http_equiv.append((http_equiv, content))
    def end_head(self):
        # End of HEAD: abort parsing via the sentinel exception.
        raise EndOfHeadError()
    def handle_entityref(self, name):
        #debug("%s", name)
        self.handle_data(unescape(
            '&%s;' % name, self._entitydefs, self._encoding))
    def handle_charref(self, name):
        #debug("%s", name)
        self.handle_data(unescape_charref(name, self._encoding))
    def unescape_attr(self, name):
        #debug("%s", name)
        return unescape(name, self._entitydefs, self._encoding)
    def unescape_attrs(self, attrs):
        #debug("%s", attrs)
        escaped_attrs = {}
        for key, val in attrs.items():
            escaped_attrs[key] = self.unescape_attr(val)
        return escaped_attrs
    def unknown_entityref(self, ref):
        # Pass unknown references through verbatim.
        self.handle_data("&%s;" % ref)
    def unknown_charref(self, ref):
        self.handle_data("&#%s;" % ref)
# XHTML-capable HEAD parser, only defined when the stdlib HTMLParser
# module is available.
try:
    import HTMLParser
except ImportError:
    pass
else:
    class XHTMLCompatibleHeadParser(AbstractHeadParser,
                                    HTMLParser.HTMLParser):
        """HEAD parser built on HTMLParser, for (broken) XHTML support."""
        def __init__(self):
            HTMLParser.HTMLParser.__init__(self)
            AbstractHeadParser.__init__(self)
        def handle_starttag(self, tag, attrs):
            # Any element not allowed in HEAD means HEAD has ended.
            if tag not in self.head_elems:
                raise EndOfHeadError()
            # Dispatch to start_<tag> or do_<tag>, ignoring unknown tags.
            try:
                method = getattr(self, 'start_' + tag)
            except AttributeError:
                try:
                    method = getattr(self, 'do_' + tag)
                except AttributeError:
                    pass # unknown tag
                else:
                    method(attrs)
            else:
                method(attrs)
        def handle_endtag(self, tag):
            if tag not in self.head_elems:
                raise EndOfHeadError()
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                pass # unknown tag
            else:
                method()
        def unescape(self, name):
            # Use the entitydefs passed into constructor, not
            # HTMLParser.HTMLParser's entitydefs.
            return self.unescape_attr(name)
        def unescape_attr_if_required(self, name):
            return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
    """Default HEAD parser built on sgmllib's SGMLParser."""
    def _not_called(self):
        # Placeholder handler for unknown start tags; must never run.
        assert False
    def __init__(self):
        sgmllib.SGMLParser.__init__(self)
        AbstractHeadParser.__init__(self)
    def handle_starttag(self, tag, method, attrs):
        # Any element not allowed in HEAD means HEAD has ended.
        if tag not in self.head_elems:
            raise EndOfHeadError()
        # Only META carries the http-equiv data we collect.
        if tag == "meta":
            method(attrs)
    def unknown_starttag(self, tag, attrs):
        self.handle_starttag(tag, self._not_called, attrs)
    def handle_endtag(self, tag, method):
        if tag in self.head_elems:
            method()
        else:
            raise EndOfHeadError()
    def unescape_attr_if_required(self, name):
        # SGMLParser does not unescape attributes itself.
        return self.unescape_attr(name)
def parse_head(fileobj, parser):
    """Feed fileobj to parser in CHUNK-sized pieces until the end of the
    HEAD section (or EOF) and return the collected (key, value) pairs."""
    while True:
        chunk = fileobj.read(CHUNK)
        try:
            parser.feed(chunk)
        except EndOfHeadError:
            # Parser saw the end of HEAD; stop reading.
            break
        if len(chunk) != CHUNK:
            # Short read: EOF reached (no HTML body, or CHUNK is big).
            break
    return parser.http_equiv
class HTTPEquivProcessor(BaseHandler):
    """Append META HTTP-EQUIV headers to regular HTTP headers."""
    handler_order = 300 # before handlers that look at HTTP headers
    def __init__(self, head_parser_class=HeadParser,
                 i_want_broken_xhtml_support=False,
                 ):
        self.head_parser_class = head_parser_class
        self._allow_xhtml = i_want_broken_xhtml_support
    def http_response(self, request, response):
        # Need a seekable response: the body is read to parse HEAD, then
        # rewound so callers still see the full body.
        if not hasattr(response, "seek"):
            response = response_seek_wrapper(response)
        http_message = response.info()
        url = response.geturl()
        ct_hdrs = http_message.getheaders("content-type")
        if is_html(ct_hdrs, url, self._allow_xhtml):
            try:
                try:
                    html_headers = parse_head(response, self.head_parser_class())
                finally:
                    response.seek(0)
            # NOTE(review): if the guarded 'import HTMLParser' above failed,
            # the name HTMLParser is unbound here and this except clause
            # would itself raise NameError -- confirm intended.
            except (HTMLParser.HTMLParseError,
                    sgmllib.SGMLParseError):
                pass
            else:
                for hdr, val in html_headers:
                    # add a header
                    http_message.dict[hdr.lower()] = val
                    text = hdr + ": " + val
                    for line in text.split("\n"):
                        http_message.headers.append(line + "\n")
        return response
    https_response = http_response
class HTTPCookieProcessor(BaseHandler):
    """Handle HTTP cookies.

    Public attributes:

    cookiejar: CookieJar instance
    """
    def __init__(self, cookiejar=None):
        # Fall back to a fresh, empty jar when none is supplied.
        if cookiejar is None:
            cookiejar = CookieJar()
        self.cookiejar = cookiejar
    def http_request(self, request):
        """Attach any matching stored cookies to the outgoing request."""
        self.cookiejar.add_cookie_header(request)
        return request
    def http_response(self, request, response):
        """Store cookies set by the server's response."""
        self.cookiejar.extract_cookies(response, request)
        return response
    https_request = http_request
    https_response = http_response
# robots.txt support, only defined when the stdlib robotparser module
# is available.
try:
    import robotparser
except ImportError:
    pass
else:
    class MechanizeRobotFileParser(robotparser.RobotFileParser):
        """RobotFileParser that fetches robots.txt through a mechanize
        opener instead of plain urllib."""
        def __init__(self, url='', opener=None):
            # Imported lazily to avoid a circular import at module load.
            import _opener
            robotparser.RobotFileParser.__init__(self, url)
            self._opener = opener
        def set_opener(self, opener=None):
            if opener is None:
                opener = _opener.OpenerDirector()
            self._opener = opener
        def read(self):
            """Reads the robots.txt URL and feeds it to the parser."""
            if self._opener is None:
                self.set_opener()
            # unverifiable/visit=False: this is an automatic side-fetch.
            req = Request(self.url, unverifiable=True, visit=False)
            try:
                f = self._opener.open(req)
            except HTTPError, f:
                # HTTP errors still carry a status code; handled below.
                pass
            except (IOError, socket.error, OSError), exc:
                robotparser._debug("ignoring error opening %r: %s" %
                                   (self.url, exc))
                return
            lines = []
            line = f.readline()
            while line:
                lines.append(line.strip())
                line = f.readline()
            status = f.code
            if status == 401 or status == 403:
                # Access denied to robots.txt itself: disallow everything.
                self.disallow_all = True
                robotparser._debug("disallow all")
            elif status >= 400:
                # Missing/broken robots.txt: allow everything.
                self.allow_all = True
                robotparser._debug("allow all")
            elif status == 200 and lines:
                robotparser._debug("parse lines")
                self.parse(lines)
class RobotExclusionError(urllib2.HTTPError):
    """HTTPError raised when robots.txt disallows fetching a URL.

    Carries the offending request in .request in addition to the usual
    HTTPError constructor arguments (url, code, msg, hdrs, fp).
    """
    def __init__(self, request, *args):
        # Direct call replaces the long-deprecated apply() builtin;
        # behaviour is identical.
        urllib2.HTTPError.__init__(self, *args)
        self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
    """Refuse requests that the target host's robots.txt disallows,
    raising RobotExclusionError."""
    # before redirections, after everything else
    handler_order = 800
    # Pick a message class for the synthetic 403 response body.
    try:
        from httplib import HTTPMessage
    except:
        from mimetools import Message
        http_response_class = Message
    else:
        http_response_class = HTTPMessage
    def __init__(self, rfp_class=MechanizeRobotFileParser):
        self.rfp_class = rfp_class
        # Parsed robots.txt rules and the host they were fetched for.
        self.rfp = None
        self._host = None
    def http_request(self, request):
        scheme = request.get_type()
        if scheme not in ["http", "https"]:
            # robots exclusion only applies to HTTP
            return request
        if request.get_selector() == "/robots.txt":
            # /robots.txt is always OK to fetch
            return request
        host = request.get_host()
        # robots.txt requests don't need to be allowed by robots.txt :-)
        origin_req = getattr(request, "_origin_req", None)
        if (origin_req is not None and
            origin_req.get_selector() == "/robots.txt" and
            origin_req.get_host() == host
            ):
            return request
        # Fetch and cache robots.txt when the host changes.
        if host != self._host:
            self.rfp = self.rfp_class()
            try:
                self.rfp.set_opener(self.parent)
            except AttributeError:
                debug("%r instance does not support set_opener" %
                      self.rfp.__class__)
            self.rfp.set_url(scheme+"://"+host+"/robots.txt")
            self.rfp.read()
            self._host = host
        ua = request.get_header("User-agent", "")
        if self.rfp.can_fetch(ua, request.get_full_url()):
            return request
        else:
            # XXX This should really have raised URLError.  Too late now...
            msg = "request disallowed by robots.txt"
            raise RobotExclusionError(
                request,
                request.get_full_url(),
                403, msg,
                self.http_response_class(StringIO()), StringIO(msg))
    https_request = http_request
class HTTPRefererProcessor(BaseHandler):
    """Add Referer header to requests.

    Only correct when one processor instance serves a single chain of
    requests (so, for example, fetching a series of URLs extracted from a
    single page through one instance will break it).

    There's a proper implementation of this in mechanize.Browser.
    """
    def __init__(self):
        self.referer = None
    def http_request(self, request):
        # Only add Referer once a response has been seen and the request
        # does not already carry the header.
        if self.referer is not None and not request.has_header("Referer"):
            request.add_unredirected_header("Referer", self.referer)
        return request
    def http_response(self, request, response):
        # Remember this response's URL as the referer for the next request.
        self.referer = response.geturl()
        return response
    https_request = http_request
    https_response = http_response
def clean_refresh_url(url):
    """Strip one pair of matching surrounding quotes (as emitted by e.g.
    Firefox 1.5) and canonicalise the URL."""
    quote = url[:1]
    if quote in ('"', "'") and url.endswith(quote):
        url = url[1:-1]
    return _rfc3986.clean_url(url, "latin-1") # XXX encoding
def parse_refresh_header(refresh):
    """
    >>> parse_refresh_header("1; url=http://example.com/")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1; url='http://example.com/'")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1")
    (1.0, None)
    >>> parse_refresh_header("blah")
    Traceback (most recent call last):
    ValueError: invalid literal for float(): blah
    """
    pause_text, sep, spec = refresh.partition(";")
    if not sep:
        # No URL part at all: the whole header is the pause.
        return float(refresh), None
    pause = float(pause_text)
    key, eq, value = spec.partition("=")
    newurl = None
    if eq:
        newurl = clean_refresh_url(value)
    # Anything other than a 'url=...' spec after the pause is malformed.
    if not eq or key.strip().lower() != "url":
        raise ValueError()
    return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
    """Perform HTTP Refresh redirections.

    Note that if a non-200 HTTP code has occurred (for example, a 30x
    redirect), this processor will do nothing.

    By default, only zero-time Refresh headers are redirected.  Use the
    max_time attribute / constructor argument to allow Refresh with longer
    pauses.  Use the honor_time attribute / constructor argument to control
    whether the requested pause is honoured (with a time.sleep()) or
    skipped in favour of immediate redirection.

    Public attributes:

    max_time: see above
    honor_time: see above
    """
    handler_order = 1000
    def __init__(self, max_time=0, honor_time=True):
        self.max_time = max_time
        self.honor_time = honor_time
    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()
        if code == 200 and hdrs.has_key("refresh"):
            refresh = hdrs.getheaders("refresh")[0]
            try:
                pause, newurl = parse_refresh_header(refresh)
            except ValueError:
                # Malformed header: ignore it entirely.
                debug("bad Refresh header: %r" % refresh)
                return response
            if newurl is None:
                # A bare pause with no URL means refresh the current URL.
                newurl = response.geturl()
            if (self.max_time is None) or (pause <= self.max_time):
                if pause > 1E-3 and self.honor_time:
                    time.sleep(pause)
                hdrs["location"] = newurl
                # hardcoded http is NOT a bug
                response = self.parent.error(
                    "http", request, response,
                    "refresh", msg, hdrs)
        return response
    https_response = http_response
class HTTPErrorProcessor(BaseHandler):
    """Process HTTP error responses.

    Removes the call to parent.error() from AbstractHTTPHandler so other
    response processors get a look-in first.  Non-200 codes are passed on
    to the Handler.<proto>_error_<code> methods via OpenerDirector.error;
    eventually urllib2.HTTPDefaultErrorHandler raises an HTTPError if no
    other handler deals with the error.
    """
    handler_order = 1000  # after all other processors
    def http_response(self, request, response):
        status, reason, headers = response.code, response.msg, response.info()
        if status == 200:
            return response
        # hardcoded http is NOT a bug
        return self.parent.error(
            "http", request, response, status, reason, headers)
    https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
    """Last-resort handler: raise any unhandled error response as an
    HTTPError (reusing fp if it already is one, to avoid double wrapping)."""
    def http_error_default(self, req, fp, code, msg, hdrs):
        # why these error methods took the code, msg, headers args in the first
        # place rather than a response object, I don't know, but to avoid
        # multiple wrapping, we're discarding them
        if isinstance(fp, urllib2.HTTPError):
            response = fp
        else:
            response = urllib2.HTTPError(
                req.get_full_url(), code, msg, hdrs, fp)
        # Sanity check: discarded args must match the response object.
        assert code == response.code
        assert msg == response.msg
        assert hdrs == response.hdrs
        raise response
class AbstractHTTPHandler(BaseHandler):
    # Shared machinery for the concrete HTTP and HTTPS handlers below.

    def __init__(self, debuglevel=0):
        self._debuglevel = debuglevel

    def set_http_debuglevel(self, level):
        # Forwarded to the httplib connection in do_open().
        self._debuglevel = level

    def do_request_(self, request):
        """Normalise a request before it is sent.

        Ensures Host and (for POSTs) Content-type headers are present and
        applies the opener's default headers, all as *unredirected* headers
        so they are recomputed after redirects.
        """
        host = request.get_host()
        if not host:
            raise URLError('no host given')

        if request.has_data():  # POST
            data = request.get_data()
            if not request.has_header('Content-type'):
                # Default form encoding when the caller didn't specify one.
                request.add_unredirected_header(
                    'Content-type',
                    'application/x-www-form-urlencoded')

        scheme, sel = urllib.splittype(request.get_selector())
        sel_host, sel_path = urllib.splithost(sel)
        if not request.has_header('Host'):
            request.add_unredirected_header('Host', sel_host or host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if not request.has_header(name):
                request.add_unredirected_header(name, value)

        return request

    def do_open(self, http_class, req):
        """Return an addinfourl object for the request, using http_class.

        http_class must implement the HTTPConnection API from httplib.
        The addinfourl return value is a file-like object.  It also
        has methods and attributes including:
            - info(): return a mimetools.Message object for the headers
            - geturl(): return the original request URL
            - code: HTTP status code
        """
        host = req.get_host()
        if not host:
            raise URLError('no host given')

        h = http_class(host)  # will parse host:port
        h.set_debuglevel(self._debuglevel)

        headers = dict(req.headers)
        headers.update(req.unredirected_hdrs)
        # We want to make an HTTP/1.1 request, but the addinfourl
        # class isn't prepared to deal with a persistent connection.
        # It will try to read all remaining data from the socket,
        # which will block while the server waits for the next request.
        # So make sure the connection gets closed after the (only)
        # request.
        headers["Connection"] = "close"
        # Title-case header names so duplicates differing only in case merge.
        headers = dict(
            [(name.title(), val) for name, val in headers.items()])
        try:
            h.request(req.get_method(), req.get_selector(), req.data, headers)
            r = h.getresponse()
        except socket.error, err: # XXX what error?
            raise URLError(err)

        # Pick apart the HTTPResponse object to get the addinfourl
        # object initialized properly.

        # Wrap the HTTPResponse object in socket's file object adapter
        # for Windows.  That adapter calls recv(), so delegate recv()
        # to read().  This weird wrapping allows the returned object to
        # have readline() and readlines() methods.

        # XXX It might be better to extract the read buffering code
        # out of socket._fileobject() and into a base class.

        r.recv = r.read
        fp = socket._fileobject(r)

        resp = closeable_response(fp, r.msg, req.get_full_url(),
                                  r.status, r.reason)
        return resp
class HTTPHandler(AbstractHTTPHandler):
    # Opens plain-HTTP requests through httplib.HTTPConnection.
    def http_open(self, req):
        return self.do_open(httplib.HTTPConnection, req)
    # Reuse the shared request fix-up (Host/Content-type/default headers).
    http_request = AbstractHTTPHandler.do_request_
# HTTPS support is only defined when this Python build was compiled with SSL.
if hasattr(httplib, 'HTTPS'):
    class HTTPSConnectionFactory:
        # Callable that builds HTTPSConnections carrying a fixed client
        # key/certificate pair; mimics the httplib.HTTPSConnection signature.
        def __init__(self, key_file, cert_file):
            self._key_file = key_file
            self._cert_file = cert_file
        def __call__(self, hostport):
            return httplib.HTTPSConnection(
                hostport,
                key_file=self._key_file, cert_file=self._cert_file)

    class HTTPSHandler(AbstractHTTPHandler):
        # Like HTTPHandler, but optionally presents a client certificate
        # chosen per-URL by a client_cert_manager.
        def __init__(self, client_cert_manager=None):
            AbstractHTTPHandler.__init__(self)
            self.client_cert_manager = client_cert_manager
        def https_open(self, req):
            if self.client_cert_manager is not None:
                # Look up the key/cert configured for this URL, if any.
                key_file, cert_file = self.client_cert_manager.find_key_cert(
                    req.get_full_url())
                conn_factory = HTTPSConnectionFactory(key_file, cert_file)
            else:
                conn_factory = httplib.HTTPSConnection
            return self.do_open(conn_factory, req)
        https_request = AbstractHTTPHandler.do_request_
| agpl-3.0 |
Mj258/weiboapi | srapyDemo/envs/Lib/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | 1786 | 2504 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    detector = UniversalDetector()
    for chunk in lines:
        detector.feed(chunk)
    detector.close()

    verdict = detector.result
    if not verdict['encoding']:
        return '{0}: no result'.format(name)
    return '{0}: {1} with confidence {2}'.format(name, verdict['encoding'],
                                                 verdict['confidence'])
def main(argv=None):
    '''
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    '''
    # Build the command-line interface.
    arg_parser = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected \
                     encodings",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve')
    arg_parser.add_argument('input',
                            help='File whose encoding we would like to determine.',
                            type=argparse.FileType('rb'), nargs='*',
                            default=[sys.stdin])
    arg_parser.add_argument('--version', action='version',
                            version='%(prog)s {0}'.format(__version__))
    opts = arg_parser.parse_args(argv)

    for fobj in opts.input:
        if fobj.isatty():
            # Warn interactive users who forgot to pipe or pass a file.
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)
        print(description_of(fobj, fobj.name))
# Allow use both as a module and as a standalone script.
if __name__ == '__main__':
    main()
| mit |
mjirayu/sit_academy | common/djangoapps/student/roles.py | 12 | 11449 | """
Classes used to model the roles used in the courseware. Each role is responsible for checking membership,
adding users, removing users, and listing members
"""
from abc import ABCMeta, abstractmethod
from django.contrib.auth.models import User
import logging
from student.models import CourseAccessRole
from xmodule_django.models import CourseKeyField
log = logging.getLogger(__name__)

# A list of registered access roles.
REGISTERED_ACCESS_ROLES = {}


def register_access_role(cls):
    """
    Class decorator: record ``cls`` in ``REGISTERED_ACCESS_ROLES`` keyed by
    its ``ROLE`` attribute, so roles can be referenced by their string values.

    Assumes that the decorated class has a "ROLE" attribute, defining its type.
    Classes without one are logged and returned unregistered.
    """
    try:
        REGISTERED_ACCESS_ROLES[cls.ROLE] = cls
    except AttributeError:
        log.exception(u"Unable to register Access Role with attribute 'ROLE'.")
    return cls
class RoleCache(object):
    """
    A cache of the CourseAccessRoles held by a particular user
    """
    def __init__(self, user):
        # One DB round-trip: pull every access-role row for this user.
        self._roles = set(
            CourseAccessRole.objects.filter(user=user).all()
        )

    def has_role(self, role, course_id, org):
        """
        Return whether this RoleCache contains a role with the specified role, course_id, and org
        """
        for cached_role in self._roles:
            if (cached_role.role == role and
                    cached_role.course_id == course_id and
                    cached_role.org == org):
                return True
        return False
class AccessRole(object):
    """
    Object representing a role with particular access to a resource
    """
    # NOTE: Python 2-style abstract base class declaration; under Python 3
    # this attribute would be ignored (metaclass=ABCMeta would be required).
    __metaclass__ = ABCMeta

    @abstractmethod
    def has_user(self, user):  # pylint: disable=unused-argument
        """
        Return whether the supplied django user has access to this role.
        """
        return False

    @abstractmethod
    def add_users(self, *users):
        """
        Add the role to the supplied django users.
        """
        pass

    @abstractmethod
    def remove_users(self, *users):
        """
        Remove the role from the supplied django users.
        """
        pass

    @abstractmethod
    def users_with_role(self):
        """
        Return a django QuerySet for all of the users with this role
        """
        return User.objects.none()
class GlobalStaff(AccessRole):
    """
    Site-wide staff access, tracked directly on Django's ``User.is_staff`` flag
    rather than through CourseAccessRole rows.
    """
    def has_user(self, user):
        return user.is_staff

    def add_users(self, *users):
        # Only real, active accounts can be promoted.
        for candidate in users:
            if candidate.is_authenticated() and candidate.is_active:
                candidate.is_staff = True
                candidate.save()

    def remove_users(self, *users):
        # don't check is_authenticated nor is_active on purpose
        for candidate in users:
            candidate.is_staff = False
            candidate.save()

    def users_with_role(self):
        raise Exception("This operation is un-indexed, and shouldn't be used")
class RoleBase(AccessRole):
    """
    Roles by type (e.g., instructor, beta_user) and optionally org, course_key
    """
    def __init__(self, role_name, org='', course_key=None):
        """
        Create role from required role_name w/ optional org and course_key. You may just provide a role
        name if it's a global role (not constrained to an org or course). Provide org if constrained to
        an org. Provide org and course if constrained to a course. Although, you should use the subclasses
        for all of these.
        """
        super(RoleBase, self).__init__()
        self.org = org
        self.course_key = course_key
        self._role_name = role_name

    def has_user(self, user):
        """
        Return whether the supplied django user has access to this role.
        """
        if not (user.is_authenticated() and user.is_active):
            return False

        # pylint: disable=protected-access
        if not hasattr(user, '_roles'):
            # Cache a list of tuples identifying the particular roles that a user has
            # Stored as tuples, rather than django models, to make it cheaper to construct objects for comparison
            user._roles = RoleCache(user)

        return user._roles.has_role(self._role_name, self.course_key, self.org)

    def add_users(self, *users):
        """
        Add the supplied django users to this role.
        """
        # silently ignores anonymous and inactive users so that any that are
        # legit get updated.
        from student.models import CourseAccessRole
        for user in users:
            # BUG FIX: is_authenticated is a *method* in this codebase (see
            # has_user above); the bare attribute was always truthy, so
            # anonymous users were never actually filtered out here.
            if user.is_authenticated() and user.is_active and not self.has_user(user):
                entry = CourseAccessRole(user=user, role=self._role_name, course_id=self.course_key, org=self.org)
                entry.save()
                if hasattr(user, '_roles'):
                    # Invalidate the user's cached roles.
                    del user._roles

    def remove_users(self, *users):
        """
        Remove the supplied django users from this role.
        """
        entries = CourseAccessRole.objects.filter(
            user__in=users, role=self._role_name, org=self.org, course_id=self.course_key
        )
        entries.delete()
        for user in users:
            if hasattr(user, '_roles'):
                del user._roles

    def users_with_role(self):
        """
        Return a django QuerySet for all of the users with this role
        """
        # Org roles don't query by CourseKey, so use CourseKeyField.Empty for that query
        # NOTE(review): this permanently rewrites self.course_key as a side
        # effect; callers appear to tolerate it, so it is preserved here.
        if self.course_key is None:
            self.course_key = CourseKeyField.Empty
        entries = User.objects.filter(
            courseaccessrole__role=self._role_name,
            courseaccessrole__org=self.org,
            courseaccessrole__course_id=self.course_key
        )
        return entries
class CourseRole(RoleBase):
    """
    A named role in a particular course
    """
    def __init__(self, role, course_key):
        """
        Args:
            course_key (CourseKey)
        """
        # The role is automatically scoped to the course's org as well.
        super(CourseRole, self).__init__(role, course_key.org, course_key)

    @classmethod
    def course_group_already_exists(cls, course_key):
        """
        Return True if any access-role row already exists for this course/org.

        FIX: the first parameter of a classmethod is the class; it was
        previously misnamed ``self``.
        """
        return CourseAccessRole.objects.filter(org=course_key.org, course_id=course_key).exists()
class OrgRole(RoleBase):
    """
    A named role in a particular org independent of course
    """
    def __init__(self, role, org):
        # course_key is left at its default (None), making the role org-wide.
        super(OrgRole, self).__init__(role, org)
@register_access_role
class CourseStaffRole(CourseRole):
    """A Staff member of a course"""
    # Registered under the string 'staff' in REGISTERED_ACCESS_ROLES.
    ROLE = 'staff'

    def __init__(self, *args, **kwargs):
        super(CourseStaffRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseInstructorRole(CourseRole):
    """A course Instructor"""
    ROLE = 'instructor'

    def __init__(self, *args, **kwargs):
        super(CourseInstructorRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseFinanceAdminRole(CourseRole):
    """A course staff member with privileges to review financial data."""
    ROLE = 'finance_admin'

    def __init__(self, *args, **kwargs):
        super(CourseFinanceAdminRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseSalesAdminRole(CourseRole):
    """A course staff member with privileges to perform sales operations. """
    ROLE = 'sales_admin'

    def __init__(self, *args, **kwargs):
        super(CourseSalesAdminRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseBetaTesterRole(CourseRole):
    """A course Beta Tester"""
    ROLE = 'beta_testers'

    def __init__(self, *args, **kwargs):
        super(CourseBetaTesterRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class LibraryUserRole(CourseRole):
    """
    A user who can view a library and import content from it, but not edit it.
    Used in Studio only.
    """
    ROLE = 'library_user'

    def __init__(self, *args, **kwargs):
        super(LibraryUserRole, self).__init__(self.ROLE, *args, **kwargs)
class CourseCcxCoachRole(CourseRole):
    """A CCX Coach"""
    # NOTE(review): unlike the neighbouring roles, this class is not decorated
    # with @register_access_role — confirm whether that is intentional.
    ROLE = 'ccx_coach'

    def __init__(self, *args, **kwargs):
        super(CourseCcxCoachRole, self).__init__(self.ROLE, *args, **kwargs)
class OrgStaffRole(OrgRole):
    """An organization staff member"""
    def __init__(self, *args, **kwargs):
        # Same 'staff' role string as CourseStaffRole, but scoped to an org.
        super(OrgStaffRole, self).__init__('staff', *args, **kwargs)
class OrgInstructorRole(OrgRole):
    """An organization instructor"""
    def __init__(self, *args, **kwargs):
        # Same 'instructor' role string as CourseInstructorRole, org-scoped.
        super(OrgInstructorRole, self).__init__('instructor', *args, **kwargs)
class OrgLibraryUserRole(OrgRole):
    """
    A user who can view any libraries in an org and import content from them, but not edit them.
    Used in Studio only.
    """
    # Shares the role string with the course-scoped LibraryUserRole.
    ROLE = LibraryUserRole.ROLE

    def __init__(self, *args, **kwargs):
        super(OrgLibraryUserRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseCreatorRole(RoleBase):
    """
    This is the group of people who have permission to create new courses (we may want to eventually
    make this an org based role).
    """
    # Global role: derives from RoleBase directly, so no org/course scoping.
    ROLE = "course_creator_group"

    def __init__(self, *args, **kwargs):
        super(CourseCreatorRole, self).__init__(self.ROLE, *args, **kwargs)
class UserBasedRole(object):
    """
    Backward mapping: given a user, manipulate the courses and roles
    """
    def __init__(self, user, role):
        """
        Create a UserBasedRole accessor: for a given user and role (e.g., "instructor")
        """
        self.user = user
        self.role = role

    def has_course(self, course_key):
        """
        Return whether the role's user has the configured role access to the passed course
        """
        if not (self.user.is_authenticated() and self.user.is_active):
            return False

        # pylint: disable=protected-access
        if not hasattr(self.user, '_roles'):
            self.user._roles = RoleCache(self.user)

        return self.user._roles.has_role(self.role, course_key, course_key.org)

    def add_course(self, *course_keys):
        """
        Grant this object's user the object's role for the supplied courses

        Raises:
            ValueError: if the user is anonymous or inactive.
        """
        # BUG FIX: is_authenticated is a *method* in this codebase (see
        # has_course above); the bare attribute was always truthy, so
        # anonymous users never hit the ValueError branch.
        if self.user.is_authenticated() and self.user.is_active:
            for course_key in course_keys:
                entry = CourseAccessRole(user=self.user, role=self.role, course_id=course_key, org=course_key.org)
                entry.save()
                if hasattr(self.user, '_roles'):
                    # Invalidate the cached role set.
                    del self.user._roles
        else:
            raise ValueError("user is not active. Cannot grant access to courses")

    def remove_courses(self, *course_keys):
        """
        Remove the supplied courses from this user's configured role.
        """
        entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)
        entries.delete()
        if hasattr(self.user, '_roles'):
            del self.user._roles

    def courses_with_role(self):
        """
        Return a django QuerySet for all of the courses with this user x role. You can access
        any of these properties on each result record:
        * user (will be self.user--thus uninteresting)
        * org
        * course_id
        * role (will be self.role--thus uninteresting)
        """
        return CourseAccessRole.objects.filter(role=self.role, user=self.user)
| agpl-3.0 |
MinimalOS/android_external_chromium_org_third_party_skia | tools/roll_deps.py | 21 | 9554 | #!/usr/bin/python2
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Skia's Chromium DEPS roll script.
This script:
- searches through the last N Skia git commits to find out the hash that is
associated with the SVN revision number.
- creates a new branch in the Chromium tree, modifies the DEPS file to
point at the given Skia commit, commits, uploads to Rietveld, and
deletes the local copy of the branch.
- creates a whitespace-only commit and uploads that to to Rietveld.
- returns the Chromium tree to its previous state.
To specify the location of the git executable, set the GIT_EXECUTABLE
environment variable.
Usage:
%prog -c CHROMIUM_PATH -r REVISION [OPTIONAL_OPTIONS]
"""
import optparse
import os
import re
import shutil
import sys
import tempfile
import fix_pythonpath # pylint: disable=W0611
from common.py.utils import git_utils
from common.py.utils import misc
from common.py.utils import shell_utils
# Trybots requested by default via `git cl try` (overridable with --bots).
DEFAULT_BOTS_LIST = [
    'android_clang_dbg',
    'android_dbg',
    'android_rel',
    'cros_daisy',
    'linux',
    'linux_asan',
    'linux_chromeos',
    'linux_chromeos_asan',
    'linux_chromium_gn_dbg',
    'linux_gpu',
    'linux_layout',
    'linux_layout_rel',
    'mac',
    'mac_asan',
    'mac_gpu',
    'mac_layout',
    'mac_layout_rel',
    'win',
    'win_gpu',
    'win_layout',
    'win_layout_rel',
]

# Matches the skia_revision line in Chromium's DEPS; the commit hash is
# captured in the named group 'revision'.
REGEXP_SKIA_REVISION = (
    r'^ "skia_revision": "(?P<revision>[0-9a-fA-F]{2,40})",$')
class DepsRollConfig(object):
    """Contains configuration options for this module.

    Attributes:
        chromium_path: (string) path to a local chromium git repository.
        save_branches: (boolean) iff false, delete temporary branches.
        verbose: (boolean) iff false, suppress the output from git-cl.
        skip_cl_upload: (boolean)
        cl_bot_list: (list of strings)
    """
    # pylint: disable=I0011,R0903,R0902
    def __init__(self, options=None):
        # Fall back to option defaults when no parsed options are supplied.
        if not options:
            options = DepsRollConfig.GetOptionParser()
        # pylint: disable=I0011,E1103
        self.verbose = options.verbose
        self.save_branches = not options.delete_branches
        self.chromium_path = options.chromium_path
        self.skip_cl_upload = options.skip_cl_upload
        # Split and remove empty strings from the bot list.
        self.cl_bot_list = [bot for bot in options.bots.split(',') if bot]
        self.default_branch_name = 'autogenerated_deps_roll_branch'
        # Reviewer/CC lists are intentionally empty (entries kept commented).
        self.reviewers_list = ','.join([
            # 'rmistry@google.com',
            # 'reed@google.com',
            # 'bsalomon@google.com',
            # 'robertphillips@google.com',
        ])
        self.cc_list = ','.join([
            # 'skia-team@google.com',
        ])

    @staticmethod
    def GetOptionParser():
        # pylint: disable=I0011,C0103
        """Returns an optparse.OptionParser object.

        Returns:
            An optparse.OptionParser object.

        Called by the main() function.
        """
        option_parser = optparse.OptionParser(usage=__doc__)
        # Anyone using this script on a regular basis should set the
        # CHROMIUM_CHECKOUT_PATH environment variable.
        option_parser.add_option(
            '-c', '--chromium_path', help='Path to local Chromium Git'
            ' repository checkout, defaults to CHROMIUM_CHECKOUT_PATH'
            ' if that environment variable is set.',
            default=os.environ.get('CHROMIUM_CHECKOUT_PATH'))
        option_parser.add_option(
            '-r', '--revision', default=None,
            help='The Skia Git commit hash.')
        option_parser.add_option(
            '', '--delete_branches', help='Delete the temporary branches',
            action='store_true', dest='delete_branches', default=False)
        option_parser.add_option(
            '', '--verbose', help='Do not suppress the output from `git cl`.',
            action='store_true', dest='verbose', default=False)
        option_parser.add_option(
            '', '--skip_cl_upload', help='Skip the cl upload step; useful'
            ' for testing.',
            action='store_true', default=False)

        default_bots_help = (
            'Comma-separated list of bots, defaults to a list of %d bots.'
            ' To skip `git cl try`, set this to an empty string.'
            % len(DEFAULT_BOTS_LIST))
        default_bots = ','.join(DEFAULT_BOTS_LIST)
        option_parser.add_option(
            '', '--bots', help=default_bots_help, default=default_bots)

        return option_parser
class DepsRollError(Exception):
    """Exceptions specific to this module."""
def change_skia_deps(revision, depspath):
    """Update the DEPS file.

    Modify the skia_revision entry in the given DEPS file.  The new content
    is written to a temporary file first and only moved over ``depspath`` on
    success.

    Args:
        revision: (string) Skia commit hash.
        depspath: (string) path to DEPS file.

    BUG FIX: the original moved the temp file over DEPS inside a ``finally``
    block, so a failure mid-write replaced DEPS with a truncated file.  Now
    the partial temp file is deleted and the exception re-raised instead.
    """
    temp_file = tempfile.NamedTemporaryFile(delete=False,
                                            prefix='skia_DEPS_ROLL_tmp_')
    try:
        deps_regex_rev = re.compile(REGEXP_SKIA_REVISION)
        deps_regex_rev_repl = ' "skia_revision": "%s",' % revision
        with open(depspath, 'r') as input_stream:
            for line in input_stream:
                temp_file.write(deps_regex_rev.sub(deps_regex_rev_repl, line))
    except Exception:
        # Discard the partial output rather than clobbering DEPS with it.
        temp_file.close()
        os.remove(temp_file.name)
        raise
    temp_file.close()
    shutil.move(temp_file.name, depspath)
def submit_tries(bots_to_run, dry_run=False):
    """Submit try requests for the current branch on the given bots.

    Args:
        bots_to_run: (list of strings) bots to request.
        dry_run: (bool) whether to actually submit the try request.
    """
    git_try = [
        git_utils.GIT, 'cl', 'try', '-m', 'tryserver.chromium']
    # Each requested bot needs its own '-b' flag.
    git_try.extend([arg for bot in bots_to_run for arg in ('-b', bot)])

    if dry_run:
        # Print the command that would have been run instead of running it.
        space = ' '
        print 'You should call:'
        print space, git_try
        print
    else:
        shell_utils.run(git_try)
def roll_deps(config, revision):
    """Upload changed DEPS and a whitespace change.

    Given the correct git_hash, create two Reitveld issues.

    Args:
        config: (roll_deps.DepsRollConfig) object containing options.
        revision: (string) Skia Git hash.

    Returns:
        a tuple containing textual description of the two issues.

    Raises:
        OSError: failed to execute git or git-cl.
        subprocess.CalledProcessError: git returned unexpected status.
    """
    with misc.ChDir(config.chromium_path, verbose=config.verbose):
        git_utils.Fetch()
        # Extract the currently-pinned Skia revision from origin/master DEPS.
        output = shell_utils.run([git_utils.GIT, 'show', 'origin/master:DEPS'],
                                 log_in_real_time=False).rstrip()
        match = re.search(REGEXP_SKIA_REVISION, output, flags=re.MULTILINE)
        old_revision = None
        if match:
            old_revision = match.group('revision')
        assert old_revision

        master_hash = git_utils.FullHash('origin/master').rstrip()

        # master_hash[8] gives each whitespace CL a unique name.
        branch = 'control_%s' % master_hash[:8]
        message = ('whitespace change %s\n\n'
                   'Chromium base revision: %s\n\n'
                   'This CL was created by Skia\'s roll_deps.py script.\n'
                   ) % (master_hash[:8], master_hash[:8])
        # First CL: a whitespace-only "control" change used to compare trybot
        # results against the actual DEPS roll.
        with git_utils.GitBranch(branch, message,
                                 delete_when_finished=not config.save_branches,
                                 upload=not config.skip_cl_upload
                                 ) as whitespace_branch:
            branch = git_utils.GetCurrentBranch()
            with open(os.path.join('build', 'whitespace_file.txt'), 'a') as f:
                f.write('\nCONTROL\n')

            control_url = whitespace_branch.commit_and_upload()
            if config.cl_bot_list:
                submit_tries(config.cl_bot_list, dry_run=config.skip_cl_upload)
        whitespace_cl = control_url
        if config.save_branches:
            whitespace_cl += '\n branch: %s' % branch

        # Second CL: the actual DEPS roll referencing the control CL above.
        branch = 'roll_%s_%s' % (revision, master_hash[:8])
        message = (
            'roll skia DEPS to %s\n\n'
            'Chromium base revision: %s\n'
            'Old Skia revision: %s\n'
            'New Skia revision: %s\n'
            'Control CL: %s\n\n'
            'This CL was created by Skia\'s roll_deps.py script.\n\n'
            'Bypassing commit queue trybots:\n'
            'NOTRY=true\n'
            % (revision, master_hash[:8],
               old_revision[:8], revision[:8], control_url))
        with git_utils.GitBranch(branch, message,
                                 delete_when_finished=not config.save_branches,
                                 upload=not config.skip_cl_upload
                                 ) as roll_branch:
            change_skia_deps(revision, 'DEPS')
            deps_url = roll_branch.commit_and_upload()
            if config.cl_bot_list:
                submit_tries(config.cl_bot_list, dry_run=config.skip_cl_upload)
        deps_cl = deps_url
        if config.save_branches:
            deps_cl += '\n branch: %s' % branch

        return deps_cl, whitespace_cl
def main(args):
    """main function; see module-level docstring and GetOptionParser help.

    Args:
        args: sys.argv[1:]-type argument list.
    """
    option_parser = DepsRollConfig.GetOptionParser()
    options = option_parser.parse_args(args)[0]

    # Both a Skia revision and a Chromium checkout are mandatory.
    if not options.revision:
        option_parser.error('Must specify a revision.')
    if not options.chromium_path:
        option_parser.error('Must specify chromium_path.')
    if not os.path.isdir(options.chromium_path):
        option_parser.error('chromium_path must be a directory.')

    config = DepsRollConfig(options)
    shell_utils.VERBOSE = options.verbose
    deps_issue, whitespace_issue = roll_deps(config, options.revision)

    if deps_issue and whitespace_issue:
        print 'DEPS roll:\n %s\n' % deps_issue
        print 'Whitespace change:\n %s\n' % whitespace_issue
    else:
        print >> sys.stderr, 'No issues created.'

if __name__ == '__main__':
    main(sys.argv[1:])
| bsd-3-clause |
CWatM/CWatM | cwatm/hydrological_modules/lakes_res_small.py | 1 | 16937 | # -------------------------------------------------------------------------
# Name: Small Lakes and reservoirs module
# (watershed provided < 5000 km2 or lakearea < 100km2)
# Purpose:
#
# Author: PB
#
# Created: 30/08/2017
# Copyright: (c) PB 2017
# -------------------------------------------------------------------------
from cwatm.management_modules.data_handling import *
from cwatm.hydrological_modules.routing_reservoirs.routing_sub import *
from cwatm.management_modules.globals import *
class lakes_res_small(object):
"""
Small LAKES AND RESERVOIRS
Note:
Calculate water retention in lakes and reservoirs
Using the **Modified Puls approach** to calculate retention of a lake
See also: LISFLOOD manual Annex 3 (Burek et al. 2013)
**Global variables**
==================== ================================================================================ =========
Variable [self.var] Description Unit
==================== ================================================================================ =========
load_initial
smallpart
smalllakeArea
smalllakeDis0
smalllakeA
smalllakeFactor
smalllakeFactorSqr
smalllakeInflowOld
smalllakeVolumeM3
smalllakeOutflow
smalllakeLevel
smalllakeStorage
minsmalllakeVolumeM3
preSmalllakeStorage
smallLakedaycorrect
smallLakeIn
smallevapWaterBody
smallLakeout
smallrunoffDiff
DtSec number of seconds per timestep (default = 86400) s
InvDtSec
cellArea Cell area [m²] of each simulated mesh
EWRef potential evaporation rate from water surface m
lakeEvaFactor a factor which increases evaporation from lake because of wind --
runoff
==================== ================================================================================ =========
**Functions**
"""
    def __init__(self, model):
        """Keep references to the shared model and its variable container."""
        self.var = model.var
        self.model = model
    def initial(self):
        """
        Initialize small lakes and reservoirs

        Read parameters from maps e.g
        area, location, initial average discharge, type: reservoir or lake) etc.
        """

        if checkOption('includeWaterBodies') and returnBool('useSmallLakes'):

            # Choose the reference year for the lake/reservoir maps: either
            # the current simulation year or a fixed configured year.
            if returnBool('useResAndLakes') and returnBool('dynamicLakesRes'):
                year = datetime.datetime(dateVar['currDate'].year, 1, 1)
            else:
                year = datetime.datetime(int(binding['fixLakesResYear']), 1, 1)

            # read which part of the cellarea is a lake/res catchment (sumed up for all lakes/res in a cell)
            self.var.smallpart = readnetcdf2('smallLakesRes', year, useDaily="yearly", value= 'watershedarea') *1000 * 1000
            self.var.smallpart = self.var.smallpart / self.var.cellArea
            # The catchment fraction of a cell cannot exceed 1.
            self.var.smallpart = np.minimum(1., self.var.smallpart)

            self.var.smalllakeArea = readnetcdf2('smallLakesRes', year, useDaily="yearly", value= 'area') * 1000 * 1000

            # lake discharge at outlet to calculate alpha: parameter of channel width, gravity and weir coefficient
            # Lake parameter A (suggested value equal to outflow width in [m])
            # multiplied with the calibration parameter LakeMultiplier
            testRunoff = "averageRunoff" in binding
            if testRunoff:
                self.var.smalllakeDis0 = loadmap('averageRunoff') * self.var.smallpart * self.var.cellArea * self.var.InvDtSec
            else:
                self.var.smalllakeDis0 = loadmap('smallwaterBodyDis')
            # Guard against zero discharge (would break the weir formula).
            self.var.smalllakeDis0 = np.maximum(self.var.smalllakeDis0, 0.01)
            # Empirical channel-width relation, then weir outflow parameter A.
            chanwidth = 7.1 * np.power(self.var.smalllakeDis0, 0.539)
            self.var.smalllakeA = loadmap('lakeAFactor') * 0.612 * 2 / 3 * chanwidth * (2 * 9.81) ** 0.5
            self.var.smalllakeFactor = self.var.smalllakeArea / (self.var.DtSec * np.sqrt(self.var.smalllakeA))
            self.var.smalllakeFactorSqr = np.square(self.var.smalllakeFactor)
            # for faster calculation inside dynamic section

            self.var.smalllakeInflowOld = self.var.load_initial("smalllakeInflow",self.var.smalllakeDis0)  # inflow in m3/s estimate
            old = self.var.smalllakeArea * np.sqrt(self.var.smalllakeInflowOld / self.var.smalllakeA)
            self.var.smalllakeVolumeM3 = self.var.load_initial("smalllakeStorage",old)

            smalllakeStorageIndicator = np.maximum(0.0,self.var.smalllakeVolumeM3 / self.var.DtSec + self.var.smalllakeInflowOld / 2)
            out = np.square(-self.var.smalllakeFactor + np.sqrt(self.var.smalllakeFactorSqr + 2 * smalllakeStorageIndicator))
            # SI = S/dt + Q/2
            # solution of quadratic equation
            # 1. storage volume is increase proportional to elevation
            # 2. Q= a *H **2.0 (if you choose Q= a *H **1.5 you have to solve the formula of Cardano)
            self.var.smalllakeOutflow = self.var.load_initial("smalllakeOutflow", out)

            # lake storage ini
            self.var.smalllakeLevel = divideValues(self.var.smalllakeVolumeM3, self.var.smalllakeArea)

            self.var.smalllakeStorage = self.var.smalllakeVolumeM3.copy()

            # Optional lower bound on lake storage; effectively disabled
            # (huge sentinel) when 'minStorage' is not configured.
            testStorage = "minStorage" in binding
            if testStorage:
                self.var.minsmalllakeVolumeM3 = loadmap('minStorage')
            else:
                self.var.minsmalllakeVolumeM3 = 9.e99
# ----------------------------------------------------------------------------------------------------------------
def dynamic(self):
    """
    Dynamic part to calculate outflow from small lakes and reservoirs.

    * lakes with modified Puls approach
    * reservoirs with special filling levels

    **Flow out of lake:**

    :return: outflow in m3 to the network

    NOTE(review): all ``self.var.*`` fields used here appear to be numpy
    arrays over the model grid (elementwise np.* ops throughout) — confirm
    against the variable initialisation module.
    """

    def dynamic_smalllakes(inflow):
        """
        Lake routine to calculate lake outflow (modified Puls method).

        :param inflow: inflow to lakes and reservoirs in [m3] per time step
        :return: QLakeOutM3DtC - lake outflow in [m3] per subtime step
        """

        # ************************************************************
        # ***** LAKE
        # ************************************************************
        if checkOption('calcWaterBalance'):
            # snapshot storage before this step for the balance checks below
            self.var.preSmalllakeStorage = self.var.smalllakeStorage.copy()
        #if (dateVar['curr'] == 998):
        #    ii = 1

        # convert inflow volume [m3/step] to a rate [m3/s]
        inflowM3S = inflow / self.var.DtSec
        # just for day to day waterbalance -> get X as difference
        # lakeIn = in + X -> (in + old) * 0.5 = in + X -> in + old = 2in + 2X -> in - 2in +old = 2x
        # -> (old - in) * 0.5 = X
        self.var.smallLakedaycorrect = 0.5 * (self.var.smalllakeInflowOld * self.var.DtSec - inflow) / self.var.cellArea

        # Lake inflow in [m3/s]: average of previous and current inflow
        lakeIn = (inflowM3S + self.var.smalllakeInflowOld) * 0.5
        # for Modified Puls Method: (S2/dtime + Qout2/2) = (S1/dtime + Qout1/2) - Qout1 + (Qin1 + Qin2)/2
        # here: (Qin1 + Qin2)/2
        self.var.smallLakeIn = lakeIn * self.var.DtSec / self.var.cellArea  # in [m]

        # open-water evaporation, capped so storage can never go negative
        self.var.smallevapWaterBody = self.var.lakeEvaFactor * self.var.EWRef * self.var.smalllakeArea
        self.var.smallevapWaterBody = np.where((self.var.smalllakeVolumeM3 - self.var.smallevapWaterBody) > 0., self.var.smallevapWaterBody, self.var.smalllakeVolumeM3)
        self.var.smalllakeVolumeM3 = self.var.smalllakeVolumeM3 - self.var.smallevapWaterBody
        # lakestorage - evaporation from lakes

        self.var.smalllakeInflowOld = inflowM3S.copy()
        # Qin2 becomes Qin1 for the next time step

        lakeStorageIndicator = np.maximum(0.0,self.var.smalllakeVolumeM3 / self.var.DtSec - 0.5 * self.var.smalllakeOutflow + lakeIn)
        # here S1/dtime - Qout1/2 + lakeIn , so that is the right part
        # of the equation above

        # solution of the quadratic outflow equation Q = a * H**2
        self.var.smalllakeOutflow = np.square(-self.var.smalllakeFactor + np.sqrt(self.var.smalllakeFactorSqr + 2 * lakeStorageIndicator))
        QsmallLakeOut = self.var.smalllakeOutflow * self.var.DtSec

        self.var.smalllakeVolumeM3 = (lakeStorageIndicator - self.var.smalllakeOutflow * 0.5) * self.var.DtSec
        # Lake storage

        self.var.smalllakeStorage = self.var.smalllakeStorage + lakeIn * self.var.DtSec - QsmallLakeOut - self.var.smallevapWaterBody
        # for mass balance, the lake storage is calculated every time step

        ### if dateVar['curr'] >= dateVar['intSpin']:
        ###   self.var.minsmalllakeStorageM3 = np.where(self.var.smalllakeStorageM3 < self.var.minsmalllakeStorageM3,self.var.smalllakeStorageM3,self.var.minsmalllakeStorageM3)

        self.var.smallevapWaterBody = self.var.smallevapWaterBody / self.var.cellArea  # back to [m]
        self.var.smalllakeLevel = divideValues(self.var.smalllakeVolumeM3, self.var.smalllakeArea)

        # --- three alternative mass-balance closure checks (all in [m]) ---
        if checkOption('calcWaterBalance'):
            self.model.waterbalance_module.waterBalanceCheck(
                [inflow/self.var.cellArea ],  # In [m3]
                [QsmallLakeOut / self.var.cellArea ,self.var.smallevapWaterBody ] ,  # Out
                [self.var.preSmalllakeStorage / self.var.cellArea, self.var.smallLakedaycorrect],  # prev storage
                [self.var.smalllakeStorage / self.var.cellArea],
                "smalllake1", False)

        if checkOption('calcWaterBalance'):
            self.model.waterbalance_module.waterBalanceCheck(
                [self.var.smallLakeIn],  # In [m]
                [QsmallLakeOut / self.var.cellArea ,self.var.smallevapWaterBody ] ,  # Out
                [self.var.preSmalllakeStorage / self.var.cellArea],  # prev storage
                [self.var.smalllakeStorage / self.var.cellArea],
                "smalllake2", False)

        if checkOption('calcWaterBalance'):
            self.model.waterbalance_module.waterBalanceCheck(
                [inflow],  # In [m3]
                [lakeIn * self.var.DtSec] ,  # Out
                [self.var.smallLakedaycorrect * self.var.cellArea],  # prev storage
                [],
                "smalllake3", False)

        return QsmallLakeOut

    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------

    # Small lake and reservoirs
    if checkOption('includeWaterBodies') and returnBool('useSmallLakes'):

        if checkOption('calcWaterBalance'):
            runoffold = self.var.runoff.copy()

        # check years: reload lake geometry at start / new year
        if dateVar['newStart'] or dateVar['newYear']:
            if returnBool('useResAndLakes') and returnBool('dynamicLakesRes'):
                year = datetime.datetime(dateVar['currDate'].year, 1, 1)
            else:
                year = datetime.datetime(int(binding['fixLakesResYear']), 1, 1)
            self.var.smallpart = readnetcdf2('smallLakesRes', year, useDaily="yearly", value= 'watershedarea') *1000 * 1000
            self.var.smallpart = self.var.smallpart / self.var.cellArea
            self.var.smallpart = np.minimum(1., self.var.smallpart)
            self.var.smalllakeArea = readnetcdf2('smallLakesRes', year, useDaily="yearly", value= 'area') *1000 * 1000
            # mult with 1,000,000 to convert from km2 to m2

        # ----------
        # inflow lakes
        # 1.  dis = upstream1(self.var.downstruct_LR, self.var.discharge)   # from river upstream
        # 2.  runoff = npareatotal(self.var.waterBodyID, self.var.waterBodyID)  # from cell itself
        # 3.  # outflow from upstream lakes

        # ----------
        # runoff to the lake as a part of the cell basin
        inflow = self.var.smallpart * self.var.runoff * self.var.cellArea  # inflow in m3
        self.var.smallLakeout = dynamic_smalllakes(inflow) / self.var.cellArea  # back to [m]

        self.var.runoff = self.var.smallLakeout + (1-self.var.smallpart) * self.var.runoff  # back to [m] # with and without in m3

        # ------------------------------------------------------------
        #report(decompress(runoff_LR), "C:\work\output3/run.map")

        if checkOption('calcWaterBalance'):
            self.model.waterbalance_module.waterBalanceCheck(
                [self.var.smallLakeIn],  # In [m]
                [self.var.smallLakeout, self.var.smallevapWaterBody],  # Out
                [self.var.preSmalllakeStorage / self.var.cellArea],  # prev storage
                [self.var.smalllakeStorage / self.var.cellArea],
                "smalllake1", False)

        if checkOption('calcWaterBalance'):
            self.model.waterbalance_module.waterBalanceCheck(
                [inflow/self.var.cellArea,self.var.smallLakedaycorrect ],  # In [m]
                [self.var.smallLakeout ,self.var.smallevapWaterBody ] ,  # Out
                [self.var.preSmalllakeStorage / self.var.cellArea],  # prev storage
                [self.var.smalllakeStorage / self.var.cellArea],
                "smalllake7", False)

        if checkOption('calcWaterBalance'):
            self.model.waterbalance_module.waterBalanceCheck(
                [runoffold, self.var.smallLakedaycorrect ],  # In [m]
                [self.var.runoff ,self.var.smallevapWaterBody ] ,  # Out
                [self.var.preSmalllakeStorage / self.var.cellArea],  # prev storage
                [self.var.smalllakeStorage / self.var.cellArea],
                "smalllake8", False)

        return
    else:
        self.var.smallrunoffDiff = 0
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
| gpl-3.0 |
frozencemetery/mariadb-server | storage/tokudb/ft-index/src/tests/hotindexer-undo-do-tests/prov-2.py | 92 | 1492 | #!/usr/bin/env python
# generate hotindexing undo provisional tests with 2 nested transactions
import sys
def print_tr(fp, tr, trstack):
    """Write one transaction record line to fp (Python 2 print syntax).

    tr encodes the record as a one-letter type code followed by the
    transaction id: 'i' = insert (value "v"+xid), 'd' = delete,
    'p' = placeholder.  trstack labels the record's state, e.g.
    "committed" or "provisional".
    """
    trtype = tr[0]
    xid = tr[1:]
    # NOTE(review): the three ifs are mutually exclusive; elif would be
    # clearer but behavior is identical.
    if trtype == 'i':
        print >>fp, "insert", trstack, xid, "v"+xid
    if trtype == 'd':
        print >>fp, "delete", trstack, xid
    if trtype == 'p':
        print >>fp, "placeholder", trstack, xid
def print_test(fp, live, commit, prov0, prov1):
    """Write a single hot-indexer undo-do test case to fp.

    live is a comma-separated list of live transaction ids (may be "");
    commit, prov0 and prov1 are transaction records in the encoding
    accepted by print_tr (committed, provisional level 0, level 1).
    """
    if live != "":
        for xid in live.split(","):
            print >>fp, "live", xid
    print >>fp, "key k1"
    print_tr(fp, commit, "committed")
    print_tr(fp, prov0, "provisional")
    print_tr(fp, prov1, "provisional")
def main():
    """Generate one .test file per combination of live-transaction set,
    committed record, and provisional records at nesting levels 0 and 1.

    Files are written into the current directory; each generated file
    name is printed.  Always returns 0.
    """
    # live transactions
    for live in ["", "200", "200,201"]:
        # committed transaction records
        for commit in ["i0", "d0"]:
            # provisional level 0 transaction records
            for prov0 in ["i200", "d200", "p200"]:
                # provisional level 1 transaction records
                for prov1 in ["i201", "d201"]:
                    if live == "":
                        fname = "prov.%s.%s.%s.test" % (commit, prov0, prov1)
                    else:
                        fname = "prov.live%s.%s.%s.%s.test" % (live, commit, prov0, prov1)
                    print fname
                    fp = open(fname, "w")
                    # NOTE(review): open() raises on failure rather than
                    # returning a falsy value, so this check is vestigial.
                    if fp:
                        print_test(fp, live, commit, prov0, prov1)
                        fp.close()
    return 0
sys.exit(main())
| gpl-2.0 |
cfarquhar/openstack-ansible | scripts/fastest-infra-wheel-mirror.py | 3 | 6034 | #!/usr/bin/env python
#
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2016, Jesse Pretorius <jesse.pretorius@rackspace.co.uk>
#
# Based on the mirror test script posted at
# http://code.activestate.com/recipes/284631-a-python-script-to-test-download-mirrors/
import platform
import Queue
import re
import threading
import time
import urllib
# Tunables for the mirror race.
HTTP_TIMEOUT = 10.0  # Max. seconds to wait for a response
HTTP_TITLE = "Wheel Index"  # HTTP Title to look for to validate the page
MAX_THREADS = 10  # upper bound on concurrent worker threads

# Candidate OpenStack-infra wheel mirrors; the fastest responder wins.
MIRROR_LIST = [
    "http://mirror.dfw.rax.openstack.org/wheel/",
    "http://mirror.ord.rax.openstack.org/wheel/",
    "http://mirror.iad.rax.openstack.org/wheel/",
    "http://mirror.gra1.ovh.openstack.org/wheel/",
    "http://mirror.bhs1.ovh.openstack.org/wheel/",
    "http://mirror.sjc1.bluebox.openstack.org/wheel/",
    "http://mirror.nyj01.internap.openstack.org/wheel/",
    "http://mirror.regionone.infracloud-chocolate.openstack.org/wheel/",
    "http://mirror.regionone.infracloud-vanilla.openstack.org/wheel/",
    "http://mirror.kna1.citycloud.openstack.org/wheel/",
    "http://mirror.la1.citycloud.openstack.org/wheel/",
    "http://mirror.lon1.citycloud.openstack.org/wheel/",
    "http://mirror.sto2.citycloud.openstack.org/wheel/"
]
def TestUrl(workQueue, resultQueue):
    '''Worker thread procedure.

    Pull URLs off workQueue until it is empty; for each URL measure how
    long the mirror index page takes to load and put a
    (url, milliseconds) tuple — or (url, "TIMEOUT"/"FAILED") — into
    resultQueue.
    '''
    def SubthreadProc(url, result):
        '''Subthread procedure.

        Actually get the mirror index page in a subthread, so that we can time
        out using join rather than wait for a very slow server. Passing in a
        list for result lets us simulate pass-by-reference, since callers
        cannot get the return code from a Python thread.

        Appends -1 on failure, otherwise the elapsed time in ms.
        '''
        startTime = time.time()
        try:
            data = urllib.urlopen(url).read()
        except Exception:
            # Could be a socket error or an HTTP error--either way, we
            # don't care--it's a failure to us.
            result.append(-1)
        else:
            if not CheckTitle(data):
                result.append(-1)
            else:
                elapsed = int((time.time() - startTime) * 1000)
                result.append(elapsed)

    def CheckTitle(html):
        '''Return True iff the HTML title is the expected value.

        This caters for a situation where a service provider may be
        redirecting DNS resolution failures to a web search page, or where
        the returned data is invalid in some other way.
        '''
        titleRegex = re.compile("<title>(.+?)</title>")
        try:
            title = titleRegex.search(html).group(1)
        except Exception:
            # BUG FIX: this branch used to append -1 to the *enclosing
            # loop's* ``result`` list via a leaky closure and then fall
            # through returning None, so the caller recorded the failure a
            # second time.  Simply report failure to the caller instead.
            return False
        return title == HTTP_TITLE

    while 1:
        # Continue pulling data from the work queue until it's empty
        try:
            url = workQueue.get(0)
        except Queue.Empty:
            # work queue is empty--exit the thread proc.
            return

        # Create a single subthread to do the actual work
        result = []
        subThread = threading.Thread(target=SubthreadProc, args=(url, result))

        # Daemonize the subthread so that even if a few are hanging
        # around when the process is done, the process will exit.
        subThread.setDaemon(True)

        # Run the subthread and wait for it to finish, or time out
        subThread.start()
        subThread.join(HTTP_TIMEOUT)

        if [] == result:
            # Subthread hasn't given a result yet. Consider it timed out.
            resultQueue.put((url, "TIMEOUT"))
        elif -1 == result[0]:
            # Subthread returned an error from geturl.
            resultQueue.put((url, "FAILED"))
        else:
            # Subthread returned a time. Store it.
            resultQueue.put((url, result[0]))
# Set the number of threads to use
numThreads = min(MAX_THREADS, len(MIRROR_LIST))

# Build a queue to feed the worker threads
workQueue = Queue.Queue()
for url in MIRROR_LIST:
    # Build the complete URL
    # NOTE(review): distro/version/architecture are loop-invariant and
    # could be computed once before this loop.
    distro = platform.linux_distribution()[0].split(' ')[0].lower()
    if distro == 'centos':
        # CentOS wheels are published per major version only
        version = platform.linux_distribution()[1].split('.')[0]
    else:
        version = platform.linux_distribution()[1]
    architecture = platform.machine()
    fullUrl = url + distro + "-" + version + "-" + architecture + "/"
    workQueue.put(fullUrl)

workers = []
resultQueue = Queue.Queue()

# Create worker threads to load-balance the retrieval
for threadNum in range(0, numThreads):
    workers.append(threading.Thread(target=TestUrl,
                                    args=(workQueue, resultQueue)))
    workers[-1].start()

# Wait for all the workers to finish
for w in workers:
    w.join()

# Separate the successes from failures
timings = []
failures = []
while not resultQueue.empty():
    url, result = resultQueue.get(0)
    if isinstance(result, str):
        failures.append((result, url))
    else:
        timings.append((result, url))

# Sort by increasing time or result string
timings.sort()
failures.sort()

# If all results are failed, then exit silently
if len(timings) > 0:
    # Print out the fastest mirror URL
    print(timings[0][1])
| apache-2.0 |
frangucc/gamify | www/sandbox/pals/node_modules/cordova/node_modules/cordova-lib/node_modules/npm/node_modules/node-gyp/gyp/gyptest.py | 80 | 7792 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import optparse
import os
import shlex
import subprocess
import sys
class CommandRunner:
    """
    Executor class for commands, including "commands" implemented by
    Python functions.

    A command may be:
      * a string      -- run via subprocess (after %-substitution),
      * a list        -- run via subprocess as-is,
      * a tuple       -- (callable, arg, ...) executed in-process.

    Class attributes ``verbose`` (echo commands) and ``active`` (actually
    execute them) act as global switches shared by all instances.
    """
    verbose = True
    active = True

    def __init__(self, dictionary={}):
        # The default dict is only read, never mutated, so sharing it
        # across instances is safe.
        self.subst_dictionary(dictionary)

    def subst_dictionary(self, dictionary):
        """Set the dictionary used for %-substitution in subst()."""
        self._subst_dictionary = dictionary

    def subst(self, string, dictionary=None):
        """
        Substitutes (via the format operator) the values in the specified
        dictionary into the specified command.

        The command can be an (action, string) tuple.  In all cases, we
        perform substitution on strings and don't worry if something isn't
        a string.  (It's probably a Python function to be executed.)
        """
        if dictionary is None:
            dictionary = self._subst_dictionary
        if dictionary:
            try:
                string = string % dictionary
            except TypeError:
                # Non-string (e.g. a function tuple): pass through untouched.
                pass
        return string

    def display(self, command, stdout=None, stderr=None):
        """Echo the command about to be run (no-op unless verbose)."""
        if not self.verbose:
            return
        if type(command) == type(()):
            func = command[0]
            args = command[1:]
            s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
        # BUG FIX: this was a second independent `if`, so a tuple command
        # also fell into the `else` branch below, clobbered `s` with the
        # tuple itself and crashed on s.endswith().
        elif type(command) == type([]):
            # TODO:  quote arguments containing spaces
            # TODO:  handle meta characters?
            s = ' '.join(command)
        else:
            s = self.subst(command)
        if not s.endswith('\n'):
            s += '\n'
        sys.stdout.write(s)
        sys.stdout.flush()

    def execute(self, command, stdout=None, stderr=None):
        """
        Executes a single command.

        Returns the callable's result for tuple commands, otherwise the
        subprocess return code.  Requires ``import shlex`` at module scope
        (missing in the original file).
        """
        if not self.active:
            return 0
        if type(command) == type(''):
            command = self.subst(command)
            cmdargs = shlex.split(command)
            if cmdargs[0] == 'cd':
                # 'cd' must be emulated in-process; a subprocess chdir
                # would not affect us.
                command = (os.chdir,) + tuple(cmdargs[1:])
        if type(command) == type(()):
            func = command[0]
            args = command[1:]
            return func(*args)
        else:
            if stdout is sys.stdout:
                # Same as passing sys.stdout, except python2.4 doesn't fail on it.
                subout = None
            else:
                # Open pipe for anything else so Popen works on python2.4.
                subout = subprocess.PIPE
            if stderr is sys.stderr:
                # Same as passing sys.stderr, except python2.4 doesn't fail on it.
                suberr = None
            elif stderr is None:
                # Merge with stdout if stderr isn't specified.
                suberr = subprocess.STDOUT
            else:
                # Open pipe for anything else so Popen works on python2.4.
                suberr = subprocess.PIPE
            p = subprocess.Popen(command,
                                 shell=(sys.platform == 'win32'),
                                 stdout=subout,
                                 stderr=suberr)
            p.wait()
            if stdout is None:
                self.stdout = p.stdout.read()
            elif stdout is not sys.stdout:
                stdout.write(p.stdout.read())
            if stderr not in (None, sys.stderr):
                stderr.write(p.stderr.read())
            return p.returncode

    def run(self, command, display=None, stdout=None, stderr=None):
        """
        Runs a single command, displaying it first.
        """
        if display is None:
            display = command
        self.display(display)
        return self.execute(command, stdout, stderr)
class Unbuffered:
    """Proxy for a file object that flushes the stream after every write.

    All other attribute lookups are delegated to the wrapped object.
    """

    def __init__(self, fp):
        self.fp = fp

    def write(self, data):
        stream = self.fp
        stream.write(data)
        stream.flush()

    def __getattr__(self, name):
        return getattr(self.fp, name)
# Make test output appear immediately, even when piped.
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def find_all_gyptest_files(directory):
    """Return the sorted paths of all gyptest*.py files under *directory*."""
    matches = []
    for dirpath, dirnames, filenames in os.walk(directory):
        # Prune Subversion metadata directories from the walk in place.
        if '.svn' in dirnames:
            dirnames.remove('.svn')
        matches += [os.path.join(dirpath, name)
                    for name in filenames
                    if name.startswith('gyptest') and name.endswith('.py')]
    matches.sort()
    return matches
def main(argv=None):
    """Run the requested gyptest files for each applicable format.

    Returns 1 if any test failed, else 0 (exit status 2 from a test means
    "no result").  Python 2 code (`print test` below).
    """
    if argv is None:
        argv = sys.argv

    usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-a", "--all", action="store_true",
                      help="run all tests")
    parser.add_option("-C", "--chdir", action="store", default=None,
                      help="chdir to the specified directory")
    parser.add_option("-f", "--format", action="store", default='',
                      help="run tests with the specified formats")
    parser.add_option("-G", '--gyp_option', action="append", default=[],
                      help="Add -G options to the gyp command line")
    parser.add_option("-l", "--list", action="store_true",
                      help="list available tests and exit")
    parser.add_option("-n", "--no-exec", action="store_true",
                      help="no execute, just print the command line")
    parser.add_option("--passed", action="store_true",
                      help="report passed tests")
    parser.add_option("--path", action="append", default=[],
                      help="additional $PATH directory")
    parser.add_option("-q", "--quiet", action="store_true",
                      help="quiet, don't print test command lines")
    opts, args = parser.parse_args(argv[1:])

    if opts.chdir:
        os.chdir(opts.chdir)

    if opts.path:
        extra_path = [os.path.abspath(p) for p in opts.path]
        extra_path = os.pathsep.join(extra_path)
        os.environ['PATH'] += os.pathsep + extra_path

    if not args:
        if not opts.all:
            sys.stderr.write('Specify -a to get all tests.\n')
            return 1
        args = ['test']

    # Expand directory arguments into the gyptest files they contain.
    tests = []
    for arg in args:
        if os.path.isdir(arg):
            tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
        else:
            tests.append(arg)

    if opts.list:
        for test in tests:
            print test
        sys.exit(0)

    CommandRunner.verbose = not opts.quiet
    CommandRunner.active = not opts.no_exec
    cr = CommandRunner()

    os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
    if not opts.quiet:
        sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])

    passed = []
    failed = []
    no_result = []

    if opts.format:
        format_list = opts.format.split(',')
    else:
        # TODO:  not duplicate this mapping from pylib/gyp/__init__.py
        # NOTE(review): raises KeyError on platforms missing from this map.
        format_list = {
            'freebsd7': ['make'],
            'freebsd8': ['make'],
            'openbsd5': ['make'],
            'cygwin': ['msvs'],
            'win32': ['msvs', 'ninja'],
            'linux2': ['make', 'ninja'],
            'linux3': ['make', 'ninja'],
            'darwin': ['make', 'ninja', 'xcode'],
        }[sys.platform]

    for format in format_list:
        os.environ['TESTGYP_FORMAT'] = format
        if not opts.quiet:
            sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)

        gyp_options = []
        for option in opts.gyp_option:
            gyp_options += ['-G', option]
        if gyp_options and not opts.quiet:
            sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)

        for test in tests:
            status = cr.run([sys.executable, test] + gyp_options,
                            stdout=sys.stdout,
                            stderr=sys.stderr)
            if status == 2:
                no_result.append(test)
            elif status:
                failed.append(test)
            else:
                passed.append(test)

    if not opts.quiet:
        def report(description, tests):
            # Print a one-line summary plus the list of affected tests.
            if tests:
                if len(tests) == 1:
                    sys.stdout.write("\n%s the following test:\n" % description)
                else:
                    fmt = "\n%s the following %d tests:\n"
                    sys.stdout.write(fmt % (description, len(tests)))
                sys.stdout.write("\t" + "\n\t".join(tests) + "\n")

        if opts.passed:
            report("Passed", passed)
        report("Failed", failed)
        report("No result from", no_result)

    if failed:
        return 1
    else:
        return 0
# Script entry point: exit status 1 if any test failed, else 0.
if __name__ == "__main__":
    sys.exit(main())
| mit |
MakeHer/edx-platform | openedx/core/djangoapps/bookmarks/models.py | 8 | 7953 | """
Models for Bookmarks.
"""
import logging
from django.contrib.auth.models import User
from django.db import models
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from opaque_keys.edx.keys import UsageKey
from xmodule.modulestore import search
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule_django.models import CourseKeyField, LocationKeyField
from . import PathItem
log = logging.getLogger(__name__)
def prepare_path_for_serialization(path):
    """
    Return the data from a list of PathItems ready for serialization to json.

    Each PathItem becomes a (usage-key string, display name) pair, so the
    result is a JSON-serializable list of 2-item sequences.
    """
    return [(unicode(path_item.usage_key), path_item.display_name) for path_item in path]
def parse_path_data(path_data):
    """
    Return a list of PathItems constructed from parsing path_data.

    path_data is the serialized form produced by
    prepare_path_for_serialization: a sequence whose entries hold a
    usage-key string at index 0 and a display name at index 1.
    """
    items = []
    for entry in path_data:
        key = UsageKey.from_string(entry[0])
        # Fill in the course run so the key is fully qualified.
        key = key.replace(course_key=modulestore().fill_in_run(key.course_key))
        items.append(PathItem(key, entry[1]))
    return items
class Bookmark(TimeStampedModel):
    """
    Bookmarks model.

    One row per (user, block).  ``xblock_cache`` holds the block's display
    name and candidate paths so the (expensive) path computation can be
    refreshed lazily in the ``path`` property.
    """
    user = models.ForeignKey(User, db_index=True)
    course_key = CourseKeyField(max_length=255, db_index=True)
    usage_key = LocationKeyField(max_length=255, db_index=True)
    # Serialized path (see prepare_path_for_serialization); exposed via
    # the ``path`` property.
    _path = JSONField(db_column='path', help_text='Path in course tree to the block')

    xblock_cache = models.ForeignKey('bookmarks.XBlockCache')

    class Meta(object):
        """
        Bookmark metadata.
        """
        unique_together = ('user', 'usage_key')

    def __unicode__(self):
        return self.resource_id

    @classmethod
    def create(cls, data):
        """
        Create a Bookmark object.

        Arguments:
            data (dict): The data to create the object with.

        Returns:
            A (Bookmark, created) tuple, as from get_or_create().

        Raises:
            ItemNotFoundError: If no block exists for the usage_key.
        """
        data = dict(data)
        usage_key = data.pop('usage_key')

        with modulestore().bulk_operations(usage_key.course_key):
            block = modulestore().get_item(usage_key)

            xblock_cache = XBlockCache.create({
                'usage_key': usage_key,
                'display_name': block.display_name_with_default,
            })
            data['_path'] = prepare_path_for_serialization(Bookmark.updated_path(usage_key, xblock_cache))

        data['course_key'] = usage_key.course_key
        data['xblock_cache'] = xblock_cache

        user = data.pop('user')

        bookmark, created = cls.objects.get_or_create(usage_key=usage_key, user=user, defaults=data)
        return bookmark, created

    @property
    def resource_id(self):
        """
        Return the resource id: {username,usage_id}.
        """
        return "{0},{1}".format(self.user.username, self.usage_key)  # pylint: disable=no-member

    @property
    def display_name(self):
        """
        Return the display_name from self.xblock_cache.

        Returns:
            String.
        """
        return self.xblock_cache.display_name  # pylint: disable=no-member

    @property
    def path(self):
        """
        Return the path to the bookmark's block after checking self.xblock_cache.

        If the cache entry is newer than this bookmark, the stored path is
        recomputed and persisted before being returned.

        Returns:
            List of PathItems.
        """
        if self.modified < self.xblock_cache.modified:  # pylint: disable=no-member
            path = Bookmark.updated_path(self.usage_key, self.xblock_cache)
            self._path = prepare_path_for_serialization(path)
            self.save()  # Always save so that self.modified is updated.
            return path

        return parse_path_data(self._path)

    @staticmethod
    def updated_path(usage_key, xblock_cache):
        """
        Return the up-to-date path.

        xblock_cache.paths is the list of all possible paths to a block
        constructed by doing a DFS of the tree. However, in case of DAGS,
        which section jump_to_id() takes the user to depends on the
        modulestore. If xblock_cache.paths has only one item, we can
        just use it. Otherwise, we use path_to_location() to get the path
        jump_to_id() will take the user to.
        """
        if xblock_cache.paths and len(xblock_cache.paths) == 1:
            return xblock_cache.paths[0]
        return Bookmark.get_path(usage_key)

    @staticmethod
    def get_path(usage_key):
        """
        Returns data for the path to the block in the course graph.

        Note: In case of multiple paths to the block from the course
        root, this function returns a path arbitrarily but consistently,
        depending on the modulestore. In the future, we may want to
        extend it to check which of the paths, the user has access to
        and return its data.

        Arguments:
            usage_key: The usage key of the block whose path is required.

        Returns:
            list of PathItems (empty on any lookup failure).
        """
        with modulestore().bulk_operations(usage_key.course_key):
            try:
                path = search.path_to_location(modulestore(), usage_key, full_path=True)
            except ItemNotFoundError:
                log.error(u'Block with usage_key: %s not found.', usage_key)
                return []
            except NoPathToItem:
                log.error(u'No path to block with usage_key: %s.', usage_key)
                return []

            path_data = []
            for ancestor_usage_key in path:
                # Skip the block itself and the course root.
                if ancestor_usage_key != usage_key and ancestor_usage_key.block_type != 'course':  # pylint: disable=no-member
                    try:
                        block = modulestore().get_item(ancestor_usage_key)
                    except ItemNotFoundError:
                        return []  # No valid path can be found.
                    path_data.append(
                        PathItem(usage_key=block.location, display_name=block.display_name)
                    )

        return path_data
class XBlockCache(TimeStampedModel):
    """
    XBlockCache model to store info about xblocks.

    Caches an xblock's display name and the candidate paths from the
    course root so Bookmark rows can be refreshed without walking the
    course tree each time.
    """
    course_key = CourseKeyField(max_length=255, db_index=True)
    usage_key = LocationKeyField(max_length=255, db_index=True, unique=True)
    display_name = models.CharField(max_length=255, default='')
    # NOTE(review): the mutable ``default=[]`` is shared across field
    # instances; Django convention prefers a callable default.
    _paths = JSONField(
        db_column='paths', default=[], help_text='All paths in course tree to the corresponding block.'
    )

    def __unicode__(self):
        return unicode(self.usage_key)

    @property
    def paths(self):
        """
        Return paths.

        Returns:
            list of list of PathItems (or the stored falsy value as-is).
        """
        return [parse_path_data(path) for path in self._paths] if self._paths else self._paths

    @paths.setter
    def paths(self, value):
        """
        Set paths.

        Arguments:
            value (list of list of PathItems): The list of paths to cache.
        """
        self._paths = [prepare_path_for_serialization(path) for path in value] if value else value

    @classmethod
    def create(cls, data):
        """
        Create or update an XBlockCache object.

        Arguments:
            data (dict): The data to create the object with.

        Returns:
            An XBlockCache object.
        """
        data = dict(data)
        usage_key = data.pop('usage_key')
        # Fully qualify the key with the course run before lookup.
        usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))

        data['course_key'] = usage_key.course_key
        xblock_cache, created = cls.objects.get_or_create(usage_key=usage_key, defaults=data)

        if not created:
            # Keep the cached display name in sync on repeat calls.
            new_display_name = data.get('display_name', xblock_cache.display_name)
            if xblock_cache.display_name != new_display_name:
                xblock_cache.display_name = new_display_name
                xblock_cache.save()

        return xblock_cache
| agpl-3.0 |
40223149/2015springfinal | static/Brython3.1.1-20150328-091302/Lib/site-packages/turtle.py | 619 | 105984 | import math
from javascript import console
from browser import document, html
import _svg
# Default configuration, mirroring CPython turtle's _CFG dictionary.
_CFG = {"width" : 0.5,               # Screen
        "height" : 0.75,
        "canvwidth" : 400,
        "canvheight": 300,
        "leftright": None,
        "topbottom": None,
        "mode": "standard",          # TurtleScreen
        "colormode": 1.0,
        "delay": 10,
        "undobuffersize": 1000,      # RawTurtle
        "shape": "classic",
        "pencolor" : "black",
        "fillcolor" : "black",
        "resizemode" : "noresize",
        "visible" : True,
        "language": "english",       # docstrings
        "exampleturtle": "turtle",
        "examplescreen": "screen",
        "title": "Python Turtle Graphics",
        "using_IDLE": False
       }
class Vec2D(tuple):
    """A two-dimensional vector, used as a helper class for implementing
    turtle graphics.  May be useful for turtle graphics programs also.

    Derived from tuple, so every vector is also a tuple!

    Supported operations (a, b vectors, k a number):
        a + b           vector addition
        a - b           vector subtraction
        a * b           inner (dot) product
        k * a and a * k multiplication with scalar
        |a|             absolute value (length) of a
        a.rotate(angle) counterclockwise rotation by angle degrees
    """

    def __new__(cls, x, y):
        return tuple.__new__(cls, (x, y))

    def __add__(self, other):
        ox, oy = other[0], other[1]
        return Vec2D(self[0] + ox, self[1] + oy)

    def __sub__(self, other):
        ox, oy = other[0], other[1]
        return Vec2D(self[0] - ox, self[1] - oy)

    def __mul__(self, other):
        if isinstance(other, Vec2D):
            # vector * vector -> dot product (a plain number)
            return self[0] * other[0] + self[1] * other[1]
        # vector * scalar -> scaled vector
        return Vec2D(self[0] * other, self[1] * other)

    def __rmul__(self, other):
        # scalar * vector; other operand types fall through (-> None),
        # matching the stdlib turtle implementation.
        if isinstance(other, (int, float)):
            return Vec2D(self[0] * other, self[1] * other)

    def __neg__(self):
        return Vec2D(-self[0], -self[1])

    def __abs__(self):
        x, y = self
        return (x ** 2 + y ** 2) ** 0.5

    def rotate(self, angle):
        """Rotate self counterclockwise by angle (degrees)."""
        rad = angle * math.pi / 180.0
        c, s = math.cos(rad), math.sin(rad)
        x, y = self
        # (x, y) rotated is (x*c - y*s, y*c + x*s)
        return Vec2D(x * c + (-y) * s, y * c + x * s)

    def __getnewargs__(self):
        # Support copy/pickle of this tuple subclass.
        return (self[0], self[1])

    def __repr__(self):
        return "(%.2f,%.2f)" % self
##############################################################################
### From here up to line : Tkinter - Interface for turtle.py ###
### May be replaced by an interface to some different graphics toolkit ###
##############################################################################
class _Root:
    """Root class for Screen based on Tkinter."""
    # NOTE(review): despite the inherited docstring, this Brython port
    # renders onto an SVG element in the browser instead of a Tk window.

    def setupcanvas(self, width, height, cwidth, cheight):
        # Build the SVG surface; the inner <g> is translated so (0, 0)
        # sits at the canvas centre, as turtle coordinates expect.
        # NOTE(review): width/height are accepted but unused here.
        self._svg=_svg.svg(Id="mycanvas", width=cwidth, height=cheight)
        self._canvas=_svg.g(transform="translate(%d,%d)" % (cwidth//2, cheight//2))
        self._svg <= self._canvas

    def end(self):
        # Attach the SVG to the page and force a re-parse shortly after,
        # which is required for SMIL animations to start in some browsers.
        def set_svg():
            #have to do this to get animate to work...
            document['container'].html=document['container'].html
        if "mycanvas" not in document:
            document["container"] <= self._svg
        from browser import timer
        #need this for chrome so that first few draw commands are viewed properly.
        timer.set_timeout(set_svg, 1)

    def _getcanvas(self):
        # Return the translated <g> group that drawing code targets.
        return self._canvas

    def win_width(self):
        return self._canvas.width

    def win_height(self):
        return self._canvas.height
class TurtleScreenBase:
"""Provide the basic graphics functionality.
Interface between Tkinter and turtle.py.
To port turtle.py to some different graphics toolkit
a corresponding TurtleScreenBase class has to be implemented.
"""
#@staticmethod
#def _blankimage():
# """return a blank image object
# """
# pass
#@staticmethod
#def _image(filename):
# """return an image object containing the
# imagedata from a gif-file named filename.
# """
# pass
def __init__(self, cv):
self.cv = cv
self._previous_turtle_attributes={}
self._draw_pos=0
self.canvwidth = cv.width
self.canvheight = cv.height
self.xscale = self.yscale = 1.0
def _createpoly(self):
"""Create an invisible polygon item on canvas self.cv)
"""
#console.log("_createpoly")
pass
def _drawpoly(self, polyitem, coordlist, fill=None,
outline=None, width=None, top=False):
"""Configure polygonitem polyitem according to provided
arguments:
coordlist is sequence of coordinates
fill is filling color
outline is outline color
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
#console.log("_drawpoly")
pass
def _drawline(self, lineitem, coordlist=None,
fill=None, width=None, top=False):
"""Configure lineitem according to provided arguments:
coordlist is sequence of coordinates
fill is drawing color
width is width of drawn line.
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
#console.log("_drawline")
#if not isinstance(lineitem, Turtle):
# return
if coordlist is not None:
_x0, _y0=coordlist[0]
_x1, _y1=coordlist[1]
_dist=math.sqrt( (_x0-_x1)*(_x0-_x1) + (_y0-_y1)*(_y0-_y1) )
_dur="%4.2fs" % (0.01*_dist)
if _dur == '0.00s':
_dur='0.1s'
#_dur="%ss" % 1
self._draw_pos+=1
_shape=["%s,%s" % (_x, _y) for _x,_y in lineitem.get_shapepoly()]
if 0:
#if lineitem.isvisible():
if lineitem in self._previous_turtle_attributes:
_previous=self._previous_turtle_attributes[lineitem]
if _previous.heading() != lineitem.heading():
#if self._turtle_heading[lineitem] != lineitem.heading():
_rotate=_previous.heading()
_turtle=_svg.polygon(points=" ".join(_shape),
transform="rotate(%s)" % (_rotate-90),
style={'stroke': fill, 'fill': fill,
'stroke-width': width, 'display': 'none'})
# we need to rotate our turtle..
_turtle <= _svg.animateTransform(
Id="animateLine%s" % self._draw_pos,
attributeName="transform",
type="rotate",
attributeType="XML",
From=_rotate - 90,
to=lineitem.heading() -90,
dur=_dur,
begin="animateLine%s.end" % (self._draw_pos-1))
_turtle <= _svg.set(attributeName="display",
attributeType="CSS", to="block",
begin="animateLine%s.begin" % self._draw_pos,
end="animateLine%s.end" % self._draw_pos)
#_turtle <= _svg.animateMotion(From="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
# to="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
# begin="animateLine%s.begin" % self._draw_pos,
# end="animateLine%s.end" % self._draw_pos)
#_turtle <= _svg.animate(attributeName="fill",
# From=_previous.fill, to=fill, dur=_dur,
# begin="animateLine%s.begin" % self._draw_pos)
self._draw_pos+=1
self._canvas <= _turtle
_line= _svg.line(x1=_x0*self.xscale, y1=_y0*self.yscale,
x2=_x0*self.xscale, y2=_y0*self.yscale,
style={'stroke': fill, 'stroke-width': width})
_an1=_svg.animate(Id="animateLine%s" % self._draw_pos,
attributeName="x2", attributeType="XML",
From=_x0*self.xscale, to=_x1*self.xscale,
dur=_dur, fill='freeze')
_an2=_svg.animate(attributeName="y2", attributeType="XML",
begin="animateLine%s.begin" % self._draw_pos,
From=_y0*self.xscale, to=_y1*self.xscale,
dur=_dur, fill='freeze')
# draw turtle
if lineitem.isvisible():
_turtle=_svg.polygon(points=" ".join(_shape),
transform="rotate(%s)" % (lineitem.heading() - 90),
style={'stroke': fill, 'fill': fill,
'stroke-width': width, 'display': 'none'})
_turtle <= _svg.animateMotion(From="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
to="%s,%s" % (_x1*self.xscale, _y1*self.yscale),
dur=_dur, begin="animateLine%s.begin" % self._draw_pos)
_turtle <= _svg.set(attributeName="display", attributeType="CSS",
to="block",
begin="animateLine%s.begin" % self._draw_pos,
end="animateLine%s.end" % self._draw_pos)
self._canvas <= _turtle
self._previous_turtle_attributes[lineitem]=lineitem
if self._draw_pos == 1:
_an1.setAttribute('begin', "0s")
else:
_an1.setAttribute('begin', "animateLine%s.end" % (self._draw_pos-1))
_line <= _an1
_line <= _an2
self._canvas <= _line
def _delete(self, item):
"""Delete graphics item from canvas.
If item is"all" delete all graphics items.
"""
pass
def _update(self):
"""Redraw graphics items on canvas
"""
pass
def _delay(self, delay):
"""Delay subsequent canvas actions for delay ms."""
pass
def _iscolorstring(self, color):
"""Check if the string color is a legal Tkinter color string.
"""
return True #fix me
#try:
# rgb = self.cv.winfo_rgb(color)
# ok = True
#except TK.TclError:
# ok = False
#return ok
def _bgcolor(self, color=None):
"""Set canvas' backgroundcolor if color is not None,
else return backgroundcolor."""
if color is not None:
self.cv.style.backgroundColor=color
else:
return self.cv.style.backgroundColor
def _write(self, pos, txt, align, font, pencolor):
"""Write txt at pos in canvas with specified font
and color.
Return text item and x-coord of right bottom corner
of text's bounding box."""
self._draw_pos+=1
_text= _svg.text(txt, x=pos[0], y=pos[1], fill=pencolor,
style={'display': 'none'})
_text <= _svg.animate(Id="animateLine%s" % self._draw_pos,
attributeName="display", attributeType="CSS",
From="block", to="block", dur="1s", fill='freeze',
begin="animateLine%s.end" % (self._draw_pos-1))
self._canvas <= _text
return Vec2D(pos[0]+50, pos[1]+50) #fix me
## def _dot(self, pos, size, color):
## """may be implemented for some other graphics toolkit"""
def _createimage(self, image):
"""Create and return image item on canvas.
"""
pass
def _drawimage(self, item, pos, image):
"""Configure image item as to draw image object
at position (x,y) on canvas)
"""
pass
def _setbgpic(self, item, image):
"""Configure image item as to draw image object
at center of canvas. Set item to the first item
in the displaylist, so it will be drawn below
any other item ."""
pass
def _type(self, item):
"""Return 'line' or 'polygon' or 'image' depending on
type of item.
"""
pass
def _resize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on. Does
not alter the drawing window.
"""
self.cv.style.width=canvwidth
self.cv.style.height=canvheight
if bg is not None:
self.cv.style.backgroundColor=bg
def _window_size(self):
""" Return the width and height of the turtle window.
"""
#for now just return canvas width/height
return self.cv.width, self.cv.height
def mainloop(self):
"""Starts event loop - calling Tkinter's mainloop function.
No argument.
Must be last statement in a turtle graphics program.
Must NOT be used if a script is run from within IDLE in -n mode
(No subprocess) - for interactive use of turtle graphics.
Example (for a TurtleScreen instance named screen):
>>> screen.mainloop()
"""
pass
def textinput(self, title, prompt):
"""Pop up a dialog window for input of a string.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what information to input.
Return the string input
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.textinput("NIM", "Name of first player:")
"""
pass
def numinput(self, title, prompt, default=None, minval=None, maxval=None):
"""Pop up a dialog window for input of a number.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what numerical information to input.
default: default value
minval: minimum value for imput
maxval: maximum value for input
The number input must be in the range minval .. maxval if these are
given. If not, a hint is issued and the dialog remains open for
correction. Return the number input.
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000)
"""
pass
##############################################################################
### End of Tkinter - interface ###
##############################################################################
class Terminator(Exception):
    """Raised in TurtleScreen.update when _RUNNING becomes False,
    stopping execution of a turtle graphics script.  Main purpose: use
    in the Demo-Viewer turtle.Demo.py.
    """
class TurtleGraphicsError(Exception):
    """Exception raised for errors in the turtle graphics module."""
class Shape:
    """Data structure modeling turtle shapes.

    Attribute _type is one of "polygon", "image", "compound";
    attribute _data is, depending on _type, a polygon tuple, an image,
    or a list built up with addcomponent().
    """

    def __init__(self, type_, data=None):
        self._type = type_
        if type_ == "compound":
            # Compound shapes always start empty and grow via addcomponent().
            data = []
        elif type_ == "polygon":
            data = tuple(data) if isinstance(data, list) else data
        elif type_ == "image":
            if isinstance(data, str) and data.lower().endswith(".gif") \
                    and isfile(data):
                data = TurtleScreen._image(data)
            # otherwise data is assumed to already be a Photoimage
        else:
            raise TurtleGraphicsError("There is no shape type %s" % type_)
        self._data = data

    def addcomponent(self, poly, fill, outline=None):
        """Append a component to a shape of type compound.

        poly    -- a polygon, i.e. a tuple of number pairs
        fill    -- fill color of the component
        outline -- outline color of the component (defaults to fill)

        Example:
        >>> poly = ((0,0),(10,-5),(0,10),(-10,-5))
        >>> s = Shape("compound")
        >>> s.addcomponent(poly, "red", "blue")
        >>> # .. add more components and then use register_shape()
        """
        if self._type != "compound":
            raise TurtleGraphicsError("Cannot add component to %s Shape"
                                      % self._type)
        self._data.append([poly, fill,
                           outline if outline is not None else fill])
class TurtleScreen(TurtleScreenBase):
    """Provides screen oriented methods like bgcolor etc.

    Only relies upon the methods of TurtleScreenBase and NOT upon
    components of the underlying graphics toolkit.
    """
    # Class-wide flag; _incrementudc raises Terminator when it is False.
    _RUNNING = True

    def __init__(self, cv, mode=_CFG["mode"],
                 colormode=_CFG["colormode"], delay=_CFG["delay"]):
        """Set up the screen on canvas cv with the given mode and delay.

        NOTE: the colormode parameter is accepted for API compatibility
        but the configured default _CFG["colormode"] is used instead,
        matching the reference implementation.
        """
        # Built-in shapes, keyed by name; "blank" has no polygon data.
        self._shapes = {
           "arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
           "turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
                      (-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6),
                      (-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6),
                      (5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10),
                      (2,14))),
           "circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88),
                      (5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51),
                      (-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0),
                      (-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09),
                      (-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51),
                      (5.88,-8.09), (8.09,-5.88), (9.51,-3.09))),
           "square" : Shape("polygon", ((10,-10), (10,10), (-10,10),
                      (-10,-10))),
           "triangle" : Shape("polygon", ((10,-5.77), (0,11.55),
                      (-10,-5.77))),
           "classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))),
           "blank" : Shape("image", None)
        }
        self._bgpics = {"nopic" : ""}
        TurtleScreenBase.__init__(self, cv)
        self._mode = mode
        self._delayvalue = delay
        self._colormode = _CFG["colormode"]
        self._keys = []
        self.clear()

    def clear(self):
        """Delete all drawings and all turtles from the TurtleScreen.

        Reset the empty TurtleScreen to its initial state: white
        background, no background image, no event bindings, tracing on.

        Example (for a TurtleScreen instance named screen):
        >>> screen.clear()

        Note: this method is not available as a function.
        """
        self._delayvalue = _CFG["delay"]
        self._colormode = _CFG["colormode"]
        self._delete("all")
        self._bgpic = self._createimage("")
        self._bgpicname = "nopic"
        self._tracing = 1
        self._updatecounter = 0
        self._turtles = []
        self.bgcolor("white")
        # Event unbinding (onclick/onkey) is not implemented in this
        # backend; only the global pen is dropped.
        Turtle._pen = None

    def mode(self, mode=None):
        """Set turtle mode and perform reset, or return the current mode.

        mode -- one of the strings 'standard', 'logo' or 'world'

        'standard': initial heading to the right (east), angles grow
                    counterclockwise (compatible with turtle.py).
        'logo':     initial heading upward (north), angles clockwise
                    (compatible with most Logo turtle graphics).
        'world':    user-defined world coordinates.  *Attention*: angles
                    appear distorted if the x/y unit ratio is not 1.

        Examples:
        >>> mode('logo')   # resets turtle heading to north
        >>> mode()
        'logo'
        """
        if mode is None:
            return self._mode
        mode = mode.lower()
        if mode not in ["standard", "logo", "world"]:
            raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode)
        self._mode = mode
        if mode in ["standard", "logo"]:
            self._setscrollregion(-self.canvwidth//2, -self.canvheight//2,
                                   self.canvwidth//2, self.canvheight//2)
            self.xscale = self.yscale = 1.0
        self.reset()

    def setworldcoordinates(self, llx, lly, urx, ury):
        """Set up a user defined coordinate system.

        llx, lly -- coordinates of the lower left corner of the canvas
        urx, ury -- coordinates of the upper right corner of the canvas

        Switches to mode 'world' if necessary; this performs a reset.
        If mode 'world' is already active, all drawings are redrawn
        according to the new coordinates.  ATTENTION: in user-defined
        coordinate systems angles may appear distorted (see mode()).

        Example (for a TurtleScreen instance named screen):
        >>> screen.setworldcoordinates(-10,-0.5,50,1.5)
        """
        if self.mode() != "world":
            self.mode("world")
        xspan = float(urx - llx)
        yspan = float(ury - lly)
        wx, wy = self._window_size()
        self.screensize(wx-20, wy-20)
        oldxscale, oldyscale = self.xscale, self.yscale
        self.xscale = self.canvwidth / xspan
        self.yscale = self.canvheight / yspan
        srx1 = llx * self.xscale
        sry1 = -ury * self.yscale
        srx2 = self.canvwidth + srx1
        sry2 = self.canvheight + sry1
        self._setscrollregion(srx1, sry1, srx2, sry2)
        self._rescale(self.xscale/oldxscale, self.yscale/oldyscale)

    def register_shape(self, name, shape=None):
        """Add a turtle shape to TurtleScreen's shapelist.

        Three call forms:
        (1) name is the name of a gif-file, shape is None: install the
            corresponding image shape.  Image shapes do NOT rotate when
            the turtle turns, so they do not display its heading!
        (2) name is arbitrary, shape is a tuple of coordinate pairs:
            install the corresponding polygon shape.
        (3) name is arbitrary, shape is a (compound) Shape object:
            install the corresponding compound shape.

        To use a shape, issue the command shape(shapename).

        Example (for a TurtleScreen instance named screen):
        >>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3)))
        """
        if shape is None:
            # image
            if name.lower().endswith(".gif"):
                shape = Shape("image", self._image(name))
            else:
                raise TurtleGraphicsError("Bad arguments for register_shape.\n"
                                          + "Use help(register_shape)" )
        elif isinstance(shape, tuple):
            shape = Shape("polygon", shape)
        ## else shape assumed to be Shape-instance
        self._shapes[name] = shape

    def _colorstr(self, color):
        """Return color string corresponding to args.

        Argument may be a string, or a tuple of three numbers in the
        range 0 <= n <= colormode.  Raises TurtleGraphicsError if the
        argument does not represent a color.
        """
        if len(color) == 1:
            color = color[0]
        if isinstance(color, str):
            if self._iscolorstring(color) or color == "":
                return color
            else:
                raise TurtleGraphicsError("bad color string: %s" % str(color))
        try:
            r, g, b = color
        # Narrowed from a bare except: only unpacking failures mean the
        # argument was not a color triple.
        except (TypeError, ValueError):
            raise TurtleGraphicsError("bad color arguments: %s" % str(color))
        if self._colormode == 1.0:
            r, g, b = [round(255.0*x) for x in (r, g, b)]
        if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
            raise TurtleGraphicsError("bad color sequence: %s" % str(color))
        return "#%02x%02x%02x" % (r, g, b)

    def _color(self, cstr):
        """Convert a "#rrggbb" / "#rgb" string to an (r, g, b) tuple
        scaled to the current colormode; other strings pass through."""
        if not cstr.startswith("#"):
            return cstr
        if len(cstr) == 7:
            cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)]
        elif len(cstr) == 4:
            # short form: each hex digit is scaled by 16 (0..240)
            cl = [16*int(cstr[h], 16) for h in cstr[1:]]
        else:
            raise TurtleGraphicsError("bad colorstring: %s" % cstr)
        return tuple([c * self._colormode/255 for c in cl])

    def colormode(self, cmode=None):
        """Return the colormode or set it to 1.0 or 255.

        cmode -- one of the values 1.0 or 255; r, g, b values of color
        triples have to be in range 0..cmode.  Other values are
        silently ignored (matching the reference implementation).

        Example (for a TurtleScreen instance named screen):
        >>> screen.colormode(255)
        >>> pencolor(240,160,80)
        """
        if cmode is None:
            return self._colormode
        if cmode == 1.0:
            self._colormode = float(cmode)
        elif cmode == 255:
            self._colormode = int(cmode)

    def reset(self):
        """Reset all Turtles on the Screen to their initial state.

        Example (for a TurtleScreen instance named screen):
        >>> screen.reset()
        """
        for turtle in self._turtles:
            turtle._setmode(self._mode)
            turtle.reset()

    def turtles(self):
        """Return the list of turtles on the screen.

        Example (for a TurtleScreen instance named screen):
        >>> screen.turtles()
        [<turtle.Turtle object at 0x00E11FB0>]
        """
        return self._turtles

    def bgcolor(self, *args):
        """Set or return the background color of the TurtleScreen.

        Arguments (if given): a color string, three numbers in the
        range 0..colormode, or a 3-tuple of such numbers.

        Example (for a TurtleScreen instance named screen):
        >>> screen.bgcolor("orange")
        >>> screen.bgcolor()
        'orange'
        >>> screen.bgcolor(0.5,0,0.5)
        """
        if args:
            color = self._colorstr(args)
        else:
            color = None
        color = self._bgcolor(color)
        if color is not None:
            color = self._color(color)
        return color

    def tracer(self, n=None, delay=None):
        """Turn turtle animation on/off and set delay for update drawings.

        n     -- nonnegative integer: only each n-th regular screen
                 update is really performed (can be used to accelerate
                 the drawing of complex graphics)
        delay -- nonnegative integer, sets the delay value (see delay())

        With no argument, return the current tracing value.

        Example (for a TurtleScreen instance named screen):
        >>> screen.tracer(8, 25)
        """
        if n is None:
            return self._tracing
        self._tracing = int(n)
        self._updatecounter = 0
        if delay is not None:
            self._delayvalue = int(delay)
        if self._tracing:
            self.update()

    def delay(self, delay=None):
        """Return or set the drawing delay in milliseconds.

        delay -- positive integer

        Example (for a TurtleScreen instance named screen):
        >>> screen.delay(15)
        >>> screen.delay()
        15
        """
        if delay is None:
            return self._delayvalue
        self._delayvalue = int(delay)

    def _incrementudc(self):
        """Increment the update counter; raise Terminator when the
        screen has been stopped."""
        if not TurtleScreen._RUNNING:
            # BUGFIX: was misspelled "_RUNNNING", which silently created
            # a new attribute instead of resetting the running flag.
            TurtleScreen._RUNNING = True
            raise Terminator
        if self._tracing > 0:
            self._updatecounter += 1
            self._updatecounter %= self._tracing

    def update(self):
        """Perform a TurtleScreen update.

        In this SVG backend the browser drives the animations itself,
        so an explicit redraw is unnecessary and this is a no-op.
        """
        return

    def window_width(self):
        """Return the width of the turtle window.

        Example (for a TurtleScreen instance named screen):
        >>> screen.window_width()
        640
        """
        return self._window_size()[0]

    def window_height(self):
        """Return the height of the turtle window.

        Example (for a TurtleScreen instance named screen):
        >>> screen.window_height()
        480
        """
        return self._window_size()[1]

    def getcanvas(self):
        """Return the Canvas of this TurtleScreen.

        Example (for a Screen instance named screen):
        >>> cv = screen.getcanvas()
        """
        return self.cv

    def getshapes(self):
        """Return a sorted list of names of all currently available
        turtle shapes.

        Example (for a TurtleScreen instance named screen):
        >>> screen.getshapes()
        ['arrow', 'blank', 'circle', ... , 'turtle']
        """
        return sorted(self._shapes.keys())

    def onclick(self, fun, btn=1, add=None):
        """Bind fun to a mouse-click event on the canvas.

        fun -- a function with two arguments, the coordinates of the
               clicked point on the canvas (None removes the binding)
        btn -- the number of the mouse-button, defaults to 1

        Example (for a TurtleScreen instance named screen):
        >>> screen.onclick(goto)
        >>> screen.onclick(None)
        """
        self._onscreenclick(fun, btn, add)

    def onkey(self, fun, key):
        """Bind fun to the key-release event of key.

        fun -- a function with no arguments (None removes the binding)
        key -- a string: key (e.g. "a") or key-symbol (e.g. "space")

        In order to be able to register key-events, the TurtleScreen
        must have focus (see listen()).

        Example (for a TurtleScreen instance named screen):
        >>> screen.onkey(f, "Up")
        >>> screen.listen()
        """
        if fun is None:
            if key in self._keys:
                self._keys.remove(key)
        elif key not in self._keys:
            self._keys.append(key)
        self._onkeyrelease(fun, key)

    def onkeypress(self, fun, key=None):
        """Bind fun to the key-press event of key, or to any key-press
        event if no key is given.

        fun -- a function with no arguments (None removes the binding)
        key -- a string: key (e.g. "a") or key-symbol (e.g. "space")

        In order to be able to register key-events, the TurtleScreen
        must have focus (see listen()).

        Example (for a TurtleScreen instance named screen):
        >>> screen.onkeypress(f, "Up")
        >>> screen.listen()
        """
        if fun is None:
            if key in self._keys:
                self._keys.remove(key)
        elif key is not None and key not in self._keys:
            self._keys.append(key)
        self._onkeypress(fun, key)

    def listen(self, xdummy=None, ydummy=None):
        """Set focus on the TurtleScreen in order to collect key-events.

        The dummy arguments are provided so listen can be passed to the
        onclick method.

        Example (for a TurtleScreen instance named screen):
        >>> screen.listen()
        """
        self._listen()

    def ontimer(self, fun, t=0):
        """Install a timer which calls fun after t milliseconds.

        fun -- a function with no arguments
        t   -- a number >= 0

        Example (for a TurtleScreen instance named screen):
        >>> screen.ontimer(f, 250)
        """
        self._ontimer(fun, t)

    def bgpic(self, picname=None):
        """Set background image or return the name of the current one.

        picname -- a string, name of a gif-file or "nopic"; if None,
        return the filename of the current background image.

        Example (for a TurtleScreen instance named screen):
        >>> screen.bgpic("landscape.gif")
        >>> screen.bgpic()
        'landscape.gif'
        """
        if picname is None:
            return self._bgpicname
        if picname not in self._bgpics:
            # Cache loaded images by name so each file is read only once.
            self._bgpics[picname] = self._image(picname)
        self._setbgpic(self._bgpic, self._bgpics[picname])
        self._bgpicname = picname

    def screensize(self, canvwidth=None, canvheight=None, bg=None):
        """Resize the canvas the turtles are drawing on.

        canvwidth  -- positive integer, new width of canvas in pixels
        canvheight -- positive integer, new height of canvas in pixels
        bg         -- colorstring or color-tuple, new background color

        If no arguments are given, return the current
        (canvaswidth, canvasheight).  Does not alter the drawing window.

        Example (for a Turtle instance named turtle):
        >>> turtle.screensize(2000,1500)
        """
        return self._resize(canvwidth, canvheight, bg)

    # Tkinter-compatible aliases
    onscreenclick = onclick
    resetscreen = reset
    clearscreen = clear
    addshape = register_shape
    onkeyrelease = onkey
class TNavigator:
"""Navigation part of the RawTurtle.
Implements methods for turtle movement.
"""
START_ORIENTATION = {
"standard": Vec2D(1.0, 0.0),
"world" : Vec2D(1.0, 0.0),
"logo" : Vec2D(0.0, 1.0) }
DEFAULT_MODE = "standard"
DEFAULT_ANGLEOFFSET = 0
DEFAULT_ANGLEORIENT = 1
    def __init__(self, mode=DEFAULT_MODE):
        # Angle bookkeeping; both are overwritten by _setmode() below.
        self._angleOffset = self.DEFAULT_ANGLEOFFSET
        self._angleOrient = self.DEFAULT_ANGLEORIENT
        self._mode = mode
        self.undobuffer = None
        # degrees() reads self._mode (via _setDegreesPerAU), so it must
        # run while _mode is set; _mode is then cleared so that the
        # following _setmode() performs a full mode switch.
        self.degrees()
        self._mode = None
        self._setmode(mode)
        TNavigator.reset(self)
def reset(self):
"""reset turtle to its initial values
Will be overwritten by parent class
"""
self._position = Vec2D(0.0, 0.0)
self._orient = TNavigator.START_ORIENTATION[self._mode]
def _setmode(self, mode=None):
"""Set turtle-mode to 'standard', 'world' or 'logo'.
"""
if mode is None:
return self._mode
if mode not in ["standard", "logo", "world"]:
return
self._mode = mode
if mode in ["standard", "world"]:
self._angleOffset = 0
self._angleOrient = 1
else: # mode == "logo":
self._angleOffset = self._fullcircle/4.
self._angleOrient = -1
def _setDegreesPerAU(self, fullcircle):
"""Helper function for degrees() and radians()"""
self._fullcircle = fullcircle
self._degreesPerAU = 360/fullcircle
if self._mode == "standard":
self._angleOffset = 0
else:
self._angleOffset = fullcircle/4.
def degrees(self, fullcircle=360.0):
""" Set angle measurement units to degrees.
Optional argument:
fullcircle - a number
Set angle measurement units, i. e. set number
of 'degrees' for a full circle. Dafault value is
360 degrees.
Example (for a Turtle instance named turtle):
>>> turtle.left(90)
>>> turtle.heading()
90
Change angle measurement unit to grad (also known as gon,
grade, or gradian and equals 1/100-th of the right angle.)
>>> turtle.degrees(400.0)
>>> turtle.heading()
100
"""
self._setDegreesPerAU(fullcircle)
def radians(self):
""" Set the angle measurement units to radians.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.heading()
90
>>> turtle.radians()
>>> turtle.heading()
1.5707963267948966
"""
self._setDegreesPerAU(2*math.pi)
def _go(self, distance):
"""move turtle forward by specified distance"""
#console.log('_go')
ende = self._position + self._orient * distance
self._goto(ende)
def _rotate(self, angle):
"""Turn turtle counterclockwise by specified angle if angle > 0."""
#console.log('_rotate')
angle *= self._degreesPerAU
self._orient = self._orient.rotate(angle)
def _goto(self, end):
"""move turtle to position end."""
#console.log('_goto')
self._position = end
def forward(self, distance):
"""Move the turtle forward by the specified distance.
Aliases: forward | fd
Argument:
distance -- a number (integer or float)
Move the turtle forward by the specified distance, in the direction
the turtle is headed.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.forward(25)
>>> turtle.position()
(25.00,0.00)
>>> turtle.forward(-75)
>>> turtle.position()
(-50.00,0.00)
"""
self._go(distance)
def back(self, distance):
"""Move the turtle backward by distance.
Aliases: back | backward | bk
Argument:
distance -- a number
Move the turtle backward by distance ,opposite to the direction the
turtle is headed. Do not change the turtle's heading.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.backward(30)
>>> turtle.position()
(-30.00, 0.00)
"""
self._go(-distance)
def right(self, angle):
"""Turn turtle right by angle units.
Aliases: right | rt
Argument:
angle -- a number (integer or float)
Turn turtle right by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.right(45)
>>> turtle.heading()
337.0
"""
self._rotate(-angle)
def left(self, angle):
"""Turn turtle left by angle units.
Aliases: left | lt
Argument:
angle -- a number (integer or float)
Turn turtle left by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.left(45)
>>> turtle.heading()
67.0
"""
self._rotate(angle)
def pos(self):
"""Return the turtle's current location (x,y), as a Vec2D-vector.
Aliases: pos | position
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 240.00)
"""
return self._position
def xcor(self):
""" Return the turtle's x coordinate.
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print turtle.xcor()
50.0
"""
return self._position[0]
def ycor(self):
""" Return the turtle's y coordinate
---
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print turtle.ycor()
86.6025403784
"""
return self._position[1]
def goto(self, x, y=None):
"""Move turtle to an absolute position.
Aliases: setpos | setposition | goto:
Arguments:
x -- a number or a pair/vector of numbers
y -- a number None
call: goto(x, y) # two coordinates
--or: goto((x, y)) # a pair (tuple) of coordinates
--or: goto(vec) # e.g. as returned by pos()
Move turtle to an absolute position. If the pen is down,
a line will be drawn. The turtle's orientation does not change.
Example (for a Turtle instance named turtle):
>>> tp = turtle.pos()
>>> tp
(0.00, 0.00)
>>> turtle.setpos(60,30)
>>> turtle.pos()
(60.00,30.00)
>>> turtle.setpos((20,80))
>>> turtle.pos()
(20.00,80.00)
>>> turtle.setpos(tp)
>>> turtle.pos()
(0.00,0.00)
"""
if y is None:
self._goto(Vec2D(*x))
else:
self._goto(Vec2D(x, y))
def home(self):
"""Move turtle to the origin - coordinates (0,0).
No arguments.
Move turtle to the origin - coordinates (0,0) and set its
heading to its start-orientation (which depends on mode).
Example (for a Turtle instance named turtle):
>>> turtle.home()
"""
self.goto(0, 0)
self.setheading(0)
def setx(self, x):
"""Set the turtle's first coordinate to x
Argument:
x -- a number (integer or float)
Set the turtle's first coordinate to x, leave second coordinate
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 240.00)
>>> turtle.setx(10)
>>> turtle.position()
(10.00, 240.00)
"""
self._goto(Vec2D(x, self._position[1]))
def sety(self, y):
"""Set the turtle's second coordinate to y
Argument:
y -- a number (integer or float)
Set the turtle's first coordinate to x, second coordinate remains
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 40.00)
>>> turtle.sety(-10)
>>> turtle.position()
(0.00, -10.00)
"""
self._goto(Vec2D(self._position[0], y))
def distance(self, x, y=None):
"""Return the distance from the turtle to (x,y) in turtle step units.
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number None None
call: distance(x, y) # two coordinates
--or: distance((x, y)) # a pair (tuple) of coordinates
--or: distance(vec) # e.g. as returned by pos()
--or: distance(mypen) # where mypen is another turtle
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 0.00)
>>> turtle.distance(30,40)
50.0
>>> pen = Turtle()
>>> pen.forward(77)
>>> turtle.distance(pen)
77.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
return abs(pos - self._position)
def towards(self, x, y=None):
"""Return the angle of the line from the turtle's position to (x, y).
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number None None
call: distance(x, y) # two coordinates
--or: distance((x, y)) # a pair (tuple) of coordinates
--or: distance(vec) # e.g. as returned by pos()
--or: distance(mypen) # where mypen is another turtle
Return the angle, between the line from turtle-position to position
specified by x, y and the turtle's start orientation. (Depends on
modes - "standard" or "logo")
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(10.00, 10.00)
>>> turtle.towards(0,0)
225.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
x, y = pos - self._position
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def heading(self):
""" Return the turtle's current heading.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.left(67)
>>> turtle.heading()
67.0
"""
x, y = self._orient
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def setheading(self, to_angle):
"""Set the orientation of the turtle to to_angle.
Aliases: setheading | seth
Argument:
to_angle -- a number (integer or float)
Set the orientation of the turtle to to_angle.
Here are some common directions in degrees:
standard - mode: logo-mode:
-------------------|--------------------
0 - east 0 - north
90 - north 90 - east
180 - west 180 - south
270 - south 270 - west
Example (for a Turtle instance named turtle):
>>> turtle.setheading(90)
>>> turtle.heading()
90
"""
angle = (to_angle - self.heading())*self._angleOrient
full = self._fullcircle
angle = (angle+full/2.)%full - full/2.
self._rotate(angle)
    def circle(self, radius, extent = None, steps = None):
        """ Draw a circle with given radius.
        Arguments:
        radius -- a number
        extent (optional) -- a number
        steps (optional) -- an integer
        Draw a circle with given radius. The center is radius units left
        of the turtle; extent - an angle - determines which part of the
        circle is drawn. If extent is not given, draw the entire circle.
        If extent is not a full circle, one endpoint of the arc is the
        current pen position. Draw the arc in counterclockwise direction
        if radius is positive, otherwise in clockwise direction. Finally
        the direction of the turtle is changed by the amount of extent.
        As the circle is approximated by an inscribed regular polygon,
        steps determines the number of steps to use. If not given,
        it will be calculated automatically. Maybe used to draw regular
        polygons.
        call: circle(radius)                  # full circle
        --or: circle(radius, extent)          # arc
        --or: circle(radius, extent, steps)
        --or: circle(radius, steps=6)         # 6-sided polygon
        Example (for a Turtle instance named turtle):
        >>> turtle.circle(50)
        >>> turtle.circle(120, 180)  # semicircle
        """
        if self.undobuffer:
            # Group every segment of the circle into one undo entry.
            self.undobuffer.push(["seq"])
            self.undobuffer.cumulate = True
        speed = self.speed()
        if extent is None:
            extent = self._fullcircle
        if steps is None:
            # Heuristic: more polygon steps for larger radii / longer arcs.
            frac = abs(extent)/self._fullcircle
            steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
        w = 1.0 * extent / steps          # turn per step (angle units)
        w2 = 0.5 * w                      # half-turn at arc start/end
        # Chord length of one inscribed-polygon segment.
        l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
        if radius < 0:
            # Negative radius: mirror everything to draw clockwise.
            l, w, w2 = -l, -w, -w2
        tr = self._tracer()
        dl = self._delay()
        if speed == 0:
            # No animation: suppress tracing while drawing the segments.
            self._tracer(0, 0)
        else:
            self.speed(0)
        self._rotate(w2)
        for i in range(steps):
            self.speed(speed)
            self._go(l)
            self.speed(0)
            self._rotate(w)
        self._rotate(-w2)
        if speed == 0:
            # Restore tracing/delay settings changed above.
            self._tracer(tr, dl)
        self.speed(speed)
        if self.undobuffer:
            self.undobuffer.cumulate = False
    ## three dummy methods to be implemented by child class:
    def speed(self, s=0):
        """dummy method - to be overwritten by child class"""
        # No-op here; TPen provides the real speed() implementation.
    def _tracer(self, a=None, b=None):
        """dummy method - to be overwritten by child class"""
        # No-op placeholder; RawTurtle delegates this to screen.tracer().
    def _delay(self, n=None):
        """dummy method - to be overwritten by child class"""
        # No-op placeholder for the screen delay accessor.
fd = forward
bk = back
backward = back
rt = right
lt = left
position = pos
setpos = goto
setposition = goto
seth = setheading
class TPen:
    """Drawing part of the RawTurtle.
    Implements drawing properties: pen state, colors, speed, visibility
    and the shape transformation attributes.
    """
    def __init__(self, resizemode=_CFG["resizemode"]):
        self._resizemode = resizemode # or "user" or "noresize"
        self.undobuffer = None
        # Call _reset through the class so subclass overrides of _reset
        # cannot change TPen's initialization.
        TPen._reset(self)
    def _reset(self, pencolor=_CFG["pencolor"],
               fillcolor=_CFG["fillcolor"]):
        # Restore all drawing attributes to their defaults.
        self._pensize = 1
        self._shown = True
        self._pencolor = pencolor
        self._fillcolor = fillcolor
        self._drawing = True
        self._speed = 3
        self._stretchfactor = (1., 1.)
        self._shearfactor = 0.
        self._tilt = 0.
        self._shapetrafo = (1., 0., 0., 1.)
        self._outlinewidth = 1
    def resizemode(self, rmode=None):
        """Set resizemode to one of the values: "auto", "user", "noresize".
        (Optional) Argument:
        rmode -- one of the strings "auto", "user", "noresize"
        Different resizemodes have the following effects:
          - "auto" adapts the appearance of the turtle
            corresponding to the value of pensize.
          - "user" adapts the appearance of the turtle according to the
            values of stretchfactor and outlinewidth (outline),
            which are set by shapesize()
          - "noresize" no adaption of the turtle's appearance takes place.
        If no argument is given, return current resizemode.
        resizemode("user") is called by a call of shapesize with arguments.
        Examples (for a Turtle instance named turtle):
        >>> turtle.resizemode("noresize")
        >>> turtle.resizemode()
        'noresize'
        """
        if rmode is None:
            return self._resizemode
        rmode = rmode.lower()
        if rmode in ["auto", "user", "noresize"]:
            # Route through pen() so the change is recorded consistently
            # (undo support, derived updates). Unknown modes are ignored.
            self.pen(resizemode=rmode)
    def pensize(self, width=None):
        """Set or return the line thickness.
        Aliases: pensize | width
        Argument:
        width -- positive number
        Set the line thickness to width or return it. If resizemode is set
        to "auto" and turtleshape is a polygon, that polygon is drawn with
        the same line thickness. If no argument is given, current pensize
        is returned.
        Example (for a Turtle instance named turtle):
        >>> turtle.pensize()
        1
        >>> turtle.pensize(10)   # from here on lines of width 10 are drawn
        """
        if width is None:
            return self._pensize
        self.pen(pensize=width)
    def penup(self):
        """Pull the pen up -- no drawing when moving.
        Aliases: penup | pu | up
        No argument
        Example (for a Turtle instance named turtle):
        >>> turtle.penup()
        """
        if not self._drawing:
            return
        self.pen(pendown=False)
    def pendown(self):
        """Pull the pen down -- drawing when moving.
        Aliases: pendown | pd | down
        No argument.
        Example (for a Turtle instance named turtle):
        >>> turtle.pendown()
        """
        if self._drawing:
            return
        self.pen(pendown=True)
    def isdown(self):
        """Return True if pen is down, False if it's up.
        No argument.
        Example (for a Turtle instance named turtle):
        >>> turtle.penup()
        >>> turtle.isdown()
        False
        >>> turtle.pendown()
        >>> turtle.isdown()
        True
        """
        return self._drawing
    def speed(self, speed=None):
        """ Return or set the turtle's speed.
        Optional argument:
        speed -- an integer in the range 0..10 or a speedstring (see below)
        Set the turtle's speed to an integer value in the range 0 .. 10.
        If no argument is given: return current speed.
        If input is a number greater than 10 or smaller than 0.5,
        speed is set to 0.
        Speedstrings are mapped to speedvalues in the following way:
            'fastest' :  0
            'fast'    :  10
            'normal'  :  6
            'slow'    :  3
            'slowest' :  1
        speeds from 1 to 10 enforce increasingly faster animation of
        line drawing and turtle turning.
        Attention:
        speed = 0 : *no* animation takes place. forward/back makes turtle jump
        and likewise left/right make the turtle turn instantly.
        Example (for a Turtle instance named turtle):
        >>> turtle.speed(3)
        """
        speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }
        if speed is None:
            return self._speed
        if speed in speeds:
            speed = speeds[speed]
        elif 0.5 < speed < 10.5:
            # Numeric speeds are rounded to the nearest integer step.
            speed = int(round(speed))
        else:
            # Out-of-range values mean "no animation".
            speed = 0
        self.pen(speed=speed)
    def color(self, *args):
        """Return or set the pencolor and fillcolor.
        Arguments:
        Several input formats are allowed.
        They use 0, 1, 2, or 3 arguments as follows:
        color()
            Return the current pencolor and the current fillcolor
            as a pair of color specification strings as are returned
            by pencolor and fillcolor.
        color(colorstring), color((r,g,b)), color(r,g,b)
            inputs as in pencolor, set both, fillcolor and pencolor,
            to the given value.
        color(colorstring1, colorstring2),
        color((r1,g1,b1), (r2,g2,b2))
            equivalent to pencolor(colorstring1) and fillcolor(colorstring2)
            and analogously, if the other input format is used.
        If turtleshape is a polygon, outline and interior of that polygon
        is drawn with the newly set colors.
        For more info see: pencolor, fillcolor
        Example (for a Turtle instance named turtle):
        >>> turtle.color('red', 'green')
        >>> turtle.color()
        ('red', 'green')
        >>> colormode(255)
        >>> color((40, 80, 120), (160, 200, 240))
        >>> color()
        ('#285078', '#a0c8f0')
        """
        if args:
            l = len(args)
            if l == 1:
                pcolor = fcolor = args[0]
            elif l == 2:
                pcolor, fcolor = args
            elif l == 3:
                # three numbers form one (r, g, b) triple
                pcolor = fcolor = args
            pcolor = self._colorstr(pcolor)
            fcolor = self._colorstr(fcolor)
            self.pen(pencolor=pcolor, fillcolor=fcolor)
        else:
            return self._color(self._pencolor), self._color(self._fillcolor)
    def pencolor(self, *args):
        """ Return or set the pencolor.
        Arguments:
        Four input formats are allowed:
          - pencolor()
            Return the current pencolor as color specification string,
            possibly in hex-number format (see example).
            May be used as input to another color/pencolor/fillcolor call.
          - pencolor(colorstring)
            s is a Tk color specification string, such as "red" or "yellow"
          - pencolor((r, g, b))
            *a tuple* of r, g, and b, which represent, an RGB color,
            and each of r, g, and b are in the range 0..colormode,
            where colormode is either 1.0 or 255
          - pencolor(r, g, b)
            r, g, and b represent an RGB color, and each of r, g, and b
            are in the range 0..colormode
        If turtleshape is a polygon, the outline of that polygon is drawn
        with the newly set pencolor.
        Example (for a Turtle instance named turtle):
        >>> turtle.pencolor('brown')
        >>> tup = (0.2, 0.8, 0.55)
        >>> turtle.pencolor(tup)
        >>> turtle.pencolor()
        '#33cc8c'
        """
        if args:
            color = self._colorstr(args)
            if color == self._pencolor:
                # unchanged -- avoid starting a new line item via pen()
                return
            self.pen(pencolor=color)
        else:
            return self._color(self._pencolor)
    def fillcolor(self, *args):
        """ Return or set the fillcolor.
        Arguments:
        Four input formats are allowed:
          - fillcolor()
            Return the current fillcolor as color specification string,
            possibly in hex-number format (see example).
            May be used as input to another color/pencolor/fillcolor call.
          - fillcolor(colorstring)
            s is a Tk color specification string, such as "red" or "yellow"
          - fillcolor((r, g, b))
            *a tuple* of r, g, and b, which represent, an RGB color,
            and each of r, g, and b are in the range 0..colormode,
            where colormode is either 1.0 or 255
          - fillcolor(r, g, b)
            r, g, and b represent an RGB color, and each of r, g, and b
            are in the range 0..colormode
        If turtleshape is a polygon, the interior of that polygon is drawn
        with the newly set fillcolor.
        Example (for a Turtle instance named turtle):
        >>> turtle.fillcolor('violet')
        >>> col = turtle.pencolor()
        >>> turtle.fillcolor(col)
        >>> turtle.fillcolor(0, .5, 0)
        """
        if args:
            color = self._colorstr(args)
            if color == self._fillcolor:
                return
            self.pen(fillcolor=color)
        else:
            return self._color(self._fillcolor)
    def showturtle(self):
        """Makes the turtle visible.
        Aliases: showturtle | st
        No argument.
        Example (for a Turtle instance named turtle):
        >>> turtle.hideturtle()
        >>> turtle.showturtle()
        """
        self.pen(shown=True)
    def hideturtle(self):
        """Makes the turtle invisible.
        Aliases: hideturtle | ht
        No argument.
        It's a good idea to do this while you're in the
        middle of a complicated drawing, because hiding
        the turtle speeds up the drawing observably.
        Example (for a Turtle instance named turtle):
        >>> turtle.hideturtle()
        """
        self.pen(shown=False)
    def isvisible(self):
        """Return True if the Turtle is shown, False if it's hidden.
        No argument.
        Example (for a Turtle instance named turtle):
        >>> turtle.hideturtle()
        >>> turtle.isvisible()
        False
        """
        return self._shown
    def pen(self, pen=None, **pendict):
        """Return or set the pen's attributes.
        Arguments:
        pen -- a dictionary with some or all of the below listed keys.
        **pendict -- one or more keyword-arguments with the below
                     listed keys as keywords.
        Return or set the pen's attributes in a 'pen-dictionary'
        with the following key/value pairs:
           "shown"      :   True/False
           "pendown"    :   True/False
           "pencolor"   :   color-string or color-tuple
           "fillcolor"  :   color-string or color-tuple
           "pensize"    :   positive number
           "speed"      :   number in range 0..10
           "resizemode" :   "auto" or "user" or "noresize"
           "stretchfactor": (positive number, positive number)
           "shearfactor":   number
           "outline"    :   positive number
           "tilt"       :   number
        This dictionary can be used as argument for a subsequent
        pen()-call to restore the former pen-state. Moreover one
        or more of these attributes can be provided as keyword-arguments.
        This can be used to set several pen attributes in one statement.
        Examples (for a Turtle instance named turtle):
        >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10)
        >>> turtle.pen()
        {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
        'pencolor': 'red', 'pendown': True, 'fillcolor': 'black',
        'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
        >>> penstate=turtle.pen()
        >>> turtle.color("yellow","")
        >>> turtle.penup()
        >>> turtle.pen()
        {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
        'pencolor': 'yellow', 'pendown': False, 'fillcolor': '',
        'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
        >>> p.pen(penstate, fillcolor="green")
        >>> p.pen()
        {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
        'pencolor': 'red', 'pendown': True, 'fillcolor': 'green',
        'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
        """
        _pd =  {"shown"         : self._shown,
                "pendown"       : self._drawing,
                "pencolor"      : self._pencolor,
                "fillcolor"     : self._fillcolor,
                "pensize"       : self._pensize,
                "speed"         : self._speed,
                "resizemode"    : self._resizemode,
                "stretchfactor" : self._stretchfactor,
                "shearfactor"   : self._shearfactor,
                "outline"       : self._outlinewidth,
                "tilt"          : self._tilt
               }
        if not (pen or pendict):
            return _pd
        if isinstance(pen, dict):
            p = pen
        else:
            p = {}
        p.update(pendict)
        # Remember the previous values of every key we are about to
        # change, so the change can be undone as one unit.
        _p_buf = {}
        for key in p:
            _p_buf[key] = _pd[key]
        if self.undobuffer:
            self.undobuffer.push(("pen", _p_buf))
        # A new line item is needed whenever an attribute that affects the
        # current polyline (pendown/pencolor/pensize) actually changes.
        newLine = False
        if "pendown" in p:
            if self._drawing != p["pendown"]:
                newLine = True
        if "pencolor" in p:
            if isinstance(p["pencolor"], tuple):
                p["pencolor"] = self._colorstr((p["pencolor"],))
            if self._pencolor != p["pencolor"]:
                newLine = True
        if "pensize" in p:
            if self._pensize != p["pensize"]:
                newLine = True
        if newLine:
            self._newLine()
        if "pendown" in p:
            self._drawing = p["pendown"]
        if "pencolor" in p:
            self._pencolor = p["pencolor"]
        if "pensize" in p:
            self._pensize = p["pensize"]
        if "fillcolor" in p:
            if isinstance(p["fillcolor"], tuple):
                p["fillcolor"] = self._colorstr((p["fillcolor"],))
            self._fillcolor = p["fillcolor"]
        if "speed" in p:
            self._speed = p["speed"]
        if "resizemode" in p:
            self._resizemode = p["resizemode"]
        if "stretchfactor" in p:
            sf = p["stretchfactor"]
            if isinstance(sf, (int, float)):
                # a single number stretches both axes equally
                sf = (sf, sf)
            self._stretchfactor = sf
        if "shearfactor" in p:
            self._shearfactor = p["shearfactor"]
        if "outline" in p:
            self._outlinewidth = p["outline"]
        if "shown" in p:
            self._shown = p["shown"]
        if "tilt" in p:
            self._tilt = p["tilt"]
        if "stretchfactor" in p or "tilt" in p or "shearfactor" in p:
            # Rebuild the 2x2 shape transformation matrix from the
            # stretch, shear and tilt components.
            scx, scy = self._stretchfactor
            shf = self._shearfactor
            sa, ca = math.sin(self._tilt), math.cos(self._tilt)
            self._shapetrafo = ( scx*ca, scy*(shf*ca + sa),
                                -scx*sa, scy*(ca - shf*sa))
        self._update()
    ## three dummy methods to be implemented by child class:
    def _newLine(self, usePos = True):
        """dummy method - to be overwritten by child class"""
    def _update(self, count=True, forced=False):
        """dummy method - to be overwritten by child class"""
    def _color(self, args):
        """dummy method - to be overwritten by child class"""
    def _colorstr(self, args):
        """dummy method - to be overwritten by child class"""
    # Short aliases, kept for compatibility with the classic turtle API.
    width = pensize
    up = penup
    pu = penup
    pd = pendown
    down = pendown
    st = showturtle
    ht = hideturtle
class _TurtleImage:
    """Helper class: Datatype to store Turtle attributes
    (the screen it lives on, its shape name and its canvas item(s)).
    """
    def __init__(self, screen, shapeIndex):
        self.screen = screen
        # _type is one of None, "polygon", "image", "compound"
        self._type = None
        self._setshape(shapeIndex)
    def _setshape(self, shapeIndex):
        screen = self.screen
        self.shapeIndex = shapeIndex
        # If the new shape has the same item type as the current one,
        # the existing canvas item can be reused -- nothing to do.
        if self._type == "polygon" == screen._shapes[shapeIndex]._type:
            return
        if self._type == "image" == screen._shapes[shapeIndex]._type:
            return
        if self._type in ["image", "polygon"]:
            screen._delete(self._item)
        elif self._type == "compound":
            # compound shapes own one canvas item per component
            for item in self._item:
                screen._delete(item)
        self._type = screen._shapes[shapeIndex]._type
        # NOTE(review): this early return makes the item-creation code
        # below unreachable, so self._item is not recreated here. Looks
        # like a deliberate stub in this port (cf. _update/_drawturtle) --
        # confirm before changing.
        return
        if self._type == "polygon":
            self._item = screen._createpoly()
        elif self._type == "image":
            self._item = screen._createimage(screen._shapes["blank"]._data)
        elif self._type == "compound":
            self._item = [screen._createpoly() for item in
                          screen._shapes[shapeIndex]._data]
class RawTurtle(TPen, TNavigator):
"""Animation part of the RawTurtle.
Puts RawTurtle upon a TurtleScreen and provides tools for
its animation.
"""
screens = []
    def __init__(self, canvas=None,
                 shape=_CFG["shape"],
                 undobuffersize=_CFG["undobuffersize"],
                 visible=_CFG["visible"]):
        """Create a turtle and attach it to a screen.

        canvas must be a _Screen or a TurtleScreen instance; anything
        else raises TurtleGraphicsError. shape, undobuffersize and
        visible default to the corresponding _CFG entries.
        """
        if isinstance(canvas, _Screen):
            self.screen = canvas
        elif isinstance(canvas, TurtleScreen):
            # keep track of every TurtleScreen ever used
            if canvas not in RawTurtle.screens:
                RawTurtle.screens.append(canvas)
            self.screen = canvas
        #elif isinstance(canvas, (ScrolledCanvas, Canvas)):
        #    for screen in RawTurtle.screens:
        #        if screen.cv == canvas:
        #            self.screen = screen
        #            break
        #    else:
        #        self.screen = TurtleScreen(canvas)
        #        RawTurtle.screens.append(self.screen)
        else:
            raise TurtleGraphicsError("bad canvas argument %s" % canvas)
        screen = self.screen
        # Initialize both base classes explicitly (navigation + pen state).
        TNavigator.__init__(self, screen.mode())
        TPen.__init__(self)
        screen._turtles.append(self)
        #self.drawingLineItem = screen._createline()
        self.turtle = _TurtleImage(screen, shape)
        self._poly = None
        self._creatingPoly = False
        self._fillitem = self._fillpath = None
        self._shown = visible
        self._hidden_from_screen = False
        #self.currentLineItem = screen._createline()
        # vertices of the polyline currently being drawn
        self.currentLine = [self._position]
        #self.items = [] #[self.currentLineItem]
        self.stampItems = []
        self._undobuffersize = undobuffersize
        # NOTE(review): undo is disabled by default in this port
        # (undobuffer stays None unless setundobuffer() is called).
        self.undobuffer = None #Tbuffer(undobuffersize)
        #self._update()
    def reset(self):
        """Delete the turtle's drawings and restore its default values.
        No argument.
        Delete the turtle's drawings from the screen, re-center the turtle
        and set variables to the default values.
        Example (for a Turtle instance named turtle):
        >>> turtle.position()
        (0.00,-22.00)
        >>> turtle.heading()
        100.0
        >>> turtle.reset()
        >>> turtle.position()
        (0.00,0.00)
        >>> turtle.heading()
        0.0
        """
        # Reset both base-class states, then erase this turtle's drawings.
        TNavigator.reset(self)
        TPen._reset(self)
        self._clear()
        self._drawturtle()
        #self._update()
def setundobuffer(self, size):
"""Set or disable undobuffer.
Argument:
size -- an integer or None
If size is an integer an empty undobuffer of given size is installed.
Size gives the maximum number of turtle-actions that can be undone
by the undo() function.
If size is None, no undobuffer is present.
Example (for a Turtle instance named turtle):
>>> turtle.setundobuffer(42)
"""
if size is None:
self.undobuffer = None
else:
self.undobuffer = Tbuffer(size)
def undobufferentries(self):
"""Return count of entries in the undobuffer.
No argument.
Example (for a Turtle instance named turtle):
>>> while undobufferentries():
... undo()
"""
if self.undobuffer is None:
return 0
return self.undobuffer.nr_of_items()
def _clear(self):
"""Delete all of pen's drawings"""
self._fillitem = self._fillpath = None
#for item in self.items:
# self.screen._delete(item)
#self.currentLineItem = #self.screen._createline()
self.currentLine = []
if self._drawing:
self.currentLine.append(self._position)
#self.items = [self.currentLineItem]
self.clearstamps()
#self.setundobuffer(self._undobuffersize)
    def clear(self):
        """Delete the turtle's drawings from the screen. Do not move turtle.
        No arguments.
        Delete the turtle's drawings from the screen. Do not move turtle.
        State and position of the turtle as well as drawings of other
        turtles are not affected.
        Examples (for a Turtle instance named turtle):
        >>> turtle.clear()
        """
        # All the work happens in _clear(); screen updates are handled
        # elsewhere in this port.
        self._clear()
        #self._update()
#def _update_data(self):
# self.screen._incrementudc()
# if self.screen._updatecounter != 0:
# return
# if len(self.currentLine)>1:
# self.screen._drawline(self.currentLineItem, self.currentLine,
# self._pencolor, self._pensize)
    def _update(self):
        """Perform a Turtle-data update.

        NOTE(review): the immediate return below disables the original
        tracing-dependent redraw logic (kept underneath for reference);
        in this port screen redrawing is handled elsewhere.
        """
        return
        screen = self.screen
        if screen._tracing == 0:
            return
        elif screen._tracing == 1:
            #self._update_data()
            self._drawturtle()
            #screen._update()                  # TurtleScreenBase
            #screen._delay(screen._delayvalue) # TurtleScreenBase
        else:
            #self._update_data()
            if screen._updatecounter == 0:
                for t in screen.turtles():
                    t._drawturtle()
                #screen._update()
    def _tracer(self, flag=None, delay=None):
        """Turns turtle animation on/off and set delay for update drawings.
        Optional arguments:
        flag -- nonnegative integer (n)
        delay -- nonnegative integer
        If flag (n) is given, only each n-th regular screen update is really
        performed. (Can be used to accelerate the drawing of complex
        graphics.) The second argument sets the delay value (see
        RawTurtle.delay()).
        Example (for a Turtle instance named turtle):
        >>> turtle.tracer(8, 25)
        >>> dist = 2
        >>> for i in range(200):
        ...     turtle.fd(dist)
        ...     turtle.rt(90)
        ...     dist += 2
        """
        # Simple delegate: tracing state is owned by the screen.
        return self.screen.tracer(flag, delay)
    def _color(self, args):
        # Delegate color conversion to the screen (overrides TPen stub).
        return self.screen._color(args)
    def _colorstr(self, args):
        # Delegate color-string conversion to the screen (overrides TPen stub).
        return self.screen._colorstr(args)
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b)
def shape(self, name=None):
"""Set turtle shape to shape with given name / return current shapename.
Optional argument:
name -- a string, which is a valid shapename
Set turtle shape to shape with given name or, if name is not given,
return name of current shape.
Shape with name must exist in the TurtleScreen's shape dictionary.
Initially there are the following polygon shapes:
'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'.
To learn about how to deal with shapes see Screen-method register_shape.
Example (for a Turtle instance named turtle):
>>> turtle.shape()
'arrow'
>>> turtle.shape("turtle")
>>> turtle.shape()
'turtle'
"""
if name is None:
return self.turtle.shapeIndex
if not name in self.screen.getshapes():
raise TurtleGraphicsError("There is no shape named %s" % name)
self.turtle._setshape(name)
#self._update()
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
"""Set/return turtle's stretchfactors/outline. Set resizemode to "user".
Optional arguments:
stretch_wid : positive number
stretch_len : positive number
outline : positive number
Return or set the pen's attributes x/y-stretchfactors and/or outline.
Set resizemode to "user".
If and only if resizemode is set to "user", the turtle will be displayed
stretched according to its stretchfactors:
stretch_wid is stretchfactor perpendicular to orientation
stretch_len is stretchfactor in direction of turtles orientation.
outline determines the width of the shapes's outline.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("user")
>>> turtle.shapesize(5, 5, 12)
>>> turtle.shapesize(outline=8)
"""
if stretch_wid is stretch_len is outline is None:
stretch_wid, stretch_len = self._stretchfactor
return stretch_wid, stretch_len, self._outlinewidth
if stretch_wid == 0 or stretch_len == 0:
raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero")
if stretch_wid is not None:
if stretch_len is None:
stretchfactor = stretch_wid, stretch_wid
else:
stretchfactor = stretch_wid, stretch_len
elif stretch_len is not None:
stretchfactor = self._stretchfactor[0], stretch_len
else:
stretchfactor = self._stretchfactor
if outline is None:
outline = self._outlinewidth
self.pen(resizemode="user",
stretchfactor=stretchfactor, outline=outline)
def shearfactor(self, shear=None):
"""Set or return the current shearfactor.
Optional argument: shear -- number, tangent of the shear angle
Shear the turtleshape according to the given shearfactor shear,
which is the tangent of the shear angle. DO NOT change the
turtle's heading (direction of movement).
If shear is not given: return the current shearfactor, i. e. the
tangent of the shear angle, by which lines parallel to the
heading of the turtle are sheared.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.shearfactor(0.5)
>>> turtle.shearfactor()
>>> 0.5
"""
if shear is None:
return self._shearfactor
self.pen(resizemode="user", shearfactor=shear)
def settiltangle(self, angle):
"""Rotate the turtleshape to point in the specified direction
Argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.settiltangle(45)
>>> stamp()
>>> turtle.fd(50)
>>> turtle.settiltangle(-45)
>>> stamp()
>>> turtle.fd(50)
"""
tilt = -angle * self._degreesPerAU * self._angleOrient
tilt = (tilt * math.pi / 180.0) % (2*math.pi)
self.pen(resizemode="user", tilt=tilt)
def tiltangle(self, angle=None):
"""Set or return the current tilt-angle.
Optional argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
If angle is not given: return the current tilt-angle, i. e. the angle
between the orientation of the turtleshape and the heading of the
turtle (its direction of movement).
Deprecated since Python 3.1
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(45)
>>> turtle.tiltangle()
"""
if angle is None:
tilt = -self._tilt * (180.0/math.pi) * self._angleOrient
return (tilt / self._degreesPerAU) % self._fullcircle
else:
self.settiltangle(angle)
    def tilt(self, angle):
        """Rotate the turtleshape by angle.
        Argument:
        angle - a number
        Rotate the turtleshape by angle from its current tilt-angle,
        but do NOT change the turtle's heading (direction of movement).
        Examples (for a Turtle instance named turtle):
        >>> turtle.shape("circle")
        >>> turtle.shapesize(5,2)
        >>> turtle.tilt(30)
        >>> turtle.fd(50)
        >>> turtle.tilt(30)
        >>> turtle.fd(50)
        """
        # Relative tilt: add to the current tilt-angle and delegate.
        self.settiltangle(angle + self.tiltangle())
def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
"""Set or return the current transformation matrix of the turtle shape.
Optional arguments: t11, t12, t21, t22 -- numbers.
If none of the matrix elements are given, return the transformation
matrix.
Otherwise set the given elements and transform the turtleshape
according to the matrix consisting of first row t11, t12 and
second row t21, 22.
Modify stretchfactor, shearfactor and tiltangle according to the
given matrix.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapesize(4,2)
>>> turtle.shearfactor(-0.5)
>>> turtle.shapetransform()
(4.0, -1.0, -0.0, 2.0)
"""
#console.log("shapetransform")
if t11 is t12 is t21 is t22 is None:
return self._shapetrafo
m11, m12, m21, m22 = self._shapetrafo
if t11 is not None: m11 = t11
if t12 is not None: m12 = t12
if t21 is not None: m21 = t21
if t22 is not None: m22 = t22
if t11 * t22 - t12 * t21 == 0:
raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
self._shapetrafo = (m11, m12, m21, m22)
alfa = math.atan2(-m21, m11) % (2 * math.pi)
sa, ca = math.sin(alfa), math.cos(alfa)
a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
sa*m11 + ca*m21, sa*m12 + ca*m22)
self._stretchfactor = a11, a22
self._shearfactor = a12/a22
self._tilt = alfa
self._update()
    def _polytrafo(self, poly):
        """Computes transformed polygon shapes from a shape
        according to current position and heading.

        poly is a sequence of (x, y) pairs in shape coordinates; the
        result is a list of (x, y) pairs in screen coordinates.
        """
        screen = self.screen
        p0, p1 = self._position
        e0, e1 = self._orient
        # Normalize the orientation vector, compensating for anisotropic
        # screen scaling (yscale vs. xscale).
        e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
        e0, e1 = (1.0 / abs(e)) * e
        # Rotate each vertex into the heading frame and translate it to
        # the current position, converting to screen units.
        return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
                for (x, y) in poly]
def get_shapepoly(self):
"""Return the current shape polygon as tuple of coordinate pairs.
No argument.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapetransform(4, -1, 0, 2)
>>> turtle.get_shapepoly()
((50, -20), (30, 20), (-50, 20), (-30, -20))
"""
shape = self.screen._shapes[self.turtle.shapeIndex]
if shape._type == "polygon":
return self._getshapepoly(shape._data, shape._type == "compound")
# else return None
def _getshapepoly(self, polygon, compound=False):
"""Calculate transformed shape polygon according to resizemode
and shapetransform.
"""
if self._resizemode == "user" or compound:
t11, t12, t21, t22 = self._shapetrafo
elif self._resizemode == "auto":
l = max(1, self._pensize/5.0)
t11, t12, t21, t22 = l, 0, 0, l
elif self._resizemode == "noresize":
return polygon
return tuple([(t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon])
    def _drawturtle(self):
        """Manages the correct rendering of the turtle with respect to
        its shape, resizemode, stretch and tilt etc.

        NOTE(review): stubbed out in this port -- rendering is a no-op here.
        """
        return
############################## stamp stuff ###############################
def stamp(self):
"""Stamp a copy of the turtleshape onto the canvas and return its id.
No argument.
Stamp a copy of the turtle shape onto the canvas at the current
turtle position. Return a stamp_id for that stamp, which can be
used to delete it by calling clearstamp(stamp_id).
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> turtle.stamp()
13
>>> turtle.fd(50)
"""
screen = self.screen
shape = screen._shapes[self.turtle.shapeIndex]
ttype = shape._type
tshape = shape._data
if ttype == "polygon":
stitem = screen._createpoly()
if self._resizemode == "noresize": w = 1
elif self._resizemode == "auto": w = self._pensize
else: w =self._outlinewidth
shape = self._polytrafo(self._getshapepoly(tshape))
fc, oc = self._fillcolor, self._pencolor
screen._drawpoly(stitem, shape, fill=fc, outline=oc,
width=w, top=True)
elif ttype == "image":
stitem = screen._createimage("")
screen._drawimage(stitem, self._position, tshape)
elif ttype == "compound":
stitem = []
for element in tshape:
item = screen._createpoly()
stitem.append(item)
stitem = tuple(stitem)
for item, (poly, fc, oc) in zip(stitem, tshape):
poly = self._polytrafo(self._getshapepoly(poly, True))
screen._drawpoly(item, poly, fill=self._cc(fc),
outline=self._cc(oc), width=self._outlinewidth, top=True)
self.stampItems.append(stitem)
self.undobuffer.push(("stamp", stitem))
return stitem
def _clearstamp(self, stampid):
"""does the work for clearstamp() and clearstamps()
"""
if stampid in self.stampItems:
if isinstance(stampid, tuple):
for subitem in stampid:
self.screen._delete(subitem)
else:
self.screen._delete(stampid)
self.stampItems.remove(stampid)
# Delete stampitem from undobuffer if necessary
# if clearstamp is called directly.
item = ("stamp", stampid)
buf = self.undobuffer
if item not in buf.buffer:
return
index = buf.buffer.index(item)
buf.buffer.remove(item)
if index <= buf.ptr:
buf.ptr = (buf.ptr - 1) % buf.bufsize
buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None])
    def clearstamp(self, stampid):
        """Delete stamp with given stampid
        Argument:
        stampid - an integer, must be return value of previous stamp() call.
        Example (for a Turtle instance named turtle):
        >>> turtle.color("blue")
        >>> astamp = turtle.stamp()
        >>> turtle.fd(50)
        >>> turtle.clearstamp(astamp)
        """
        # _clearstamp does the real work; _update refreshes the screen.
        self._clearstamp(stampid)
        self._update()
def clearstamps(self, n=None):
"""Delete all or first/last n of turtle's stamps.
Optional argument:
n -- an integer
If n is None, delete all of pen's stamps,
else if n > 0 delete first n stamps
else if n < 0 delete last n stamps.
Example (for a Turtle instance named turtle):
>>> for i in range(8):
... turtle.stamp(); turtle.fd(30)
...
>>> turtle.clearstamps(2)
>>> turtle.clearstamps(-2)
>>> turtle.clearstamps()
"""
if n is None:
toDelete = self.stampItems[:]
elif n >= 0:
toDelete = self.stampItems[:n]
else:
toDelete = self.stampItems[n:]
for item in toDelete:
self._clearstamp(item)
self._update()
    def _goto(self, end):
        """Move the pen to the point end, thereby drawing a line
        if pen is down. All other methods for turtle movement depend
        on this one.
        """
        # Draw only when animation is enabled (speed > 0 and tracing == 1);
        # otherwise the screen is redrawn elsewhere.
        if self._speed and self.screen._tracing == 1:
            if self._drawing:
                self.screen._drawline(self, #please remove me eventually
                                      (self._position, end),
                                      self._pencolor, self._pensize, False)
        # While filling, record every visited vertex for the fill polygon.
        if isinstance(self._fillpath, list):
            self._fillpath.append(end)
        ###### inheritance!!!!!!!!!!!!!!!!!!!!!!
        self._position = end
def _rotate(self, angle):
"""Turns pen clockwise by angle.
"""
#console.log('_rotate')
if self.undobuffer:
self.undobuffer.push(("rot", angle, self._degreesPerAU))
angle *= self._degreesPerAU
neworient = self._orient.rotate(angle)
tracing = self.screen._tracing
self._orient = neworient
#self._update()
    def _newLine(self, usePos=True):
        """Closes current line item and starts a new one.
        Remark: if current line became too long, animation
        performance (via _drawline) slowed down considerably.

        NOTE(review): stubbed out in this port -- returns immediately.
        """
        return
    def filling(self):
        """Return fillstate (True if filling, False else).
        No argument.
        Example (for a Turtle instance named turtle):
        >>> turtle.begin_fill()
        >>> if turtle.filling():
        ...     turtle.pensize(5)
        ... else:
        ...     turtle.pensize(3)
        """
        # _fillpath is a list only between begin_fill() and end_fill().
        return isinstance(self._fillpath, list)
def begin_fill(self):
"""Called just before drawing a shape to be filled.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if not self.filling():
self._fillitem = self.screen._createpoly()
#self.items.append(self._fillitem)
self._fillpath = [self._position]
#self._newLine()
if self.undobuffer:
self.undobuffer.push(("beginfill", self._fillitem))
#self._update()
def end_fill(self):
"""Fill the shape drawn after the call begin_fill().
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if self.filling():
if len(self._fillpath) > 2:
self.screen._drawpoly(self._fillitem, self._fillpath,
fill=self._fillcolor)
if self.undobuffer:
self.undobuffer.push(("dofill", self._fillitem))
self._fillitem = self._fillpath = None
self._update()
    def dot(self, size=None, *color):
        """Draw a dot with diameter size, using color.
        Optional arguments:
        size -- an integer >= 1 (if given)
        color -- a colorstring or a numeric color tuple
        Draw a circular dot with diameter size, using color.
        If size is not given, the maximum of pensize+4 and 2*pensize is used.
        Example (for a Turtle instance named turtle):
        >>> turtle.dot()
        >>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50)
        """
        # Argument juggling: dot() may be called as dot(), dot(size),
        # dot(color), dot(size, color) or dot(size, r, g, b).
        if not color:
            if isinstance(size, (str, tuple)):
                # First positional arg was actually a color, not a size.
                color = self._colorstr(size)
                size = self._pensize + max(self._pensize, 4)
            else:
                color = self._pencolor
                if not size:
                    size = self._pensize + max(self._pensize, 4)
        else:
            if size is None:
                size = self._pensize + max(self._pensize, 4)
            color = self._colorstr(color)
        if hasattr(self.screen, "_dot"):
            # Fast path: the screen can draw a dot primitive directly.
            item = self.screen._dot(self._position, size, color)
            #self.items.append(item)
            if self.undobuffer:
                self.undobuffer.push(("dot", item))
        else:
            # Fallback: emulate a dot with a zero-length pen stroke, then
            # restore the pen state; the undo buffer cumulates the sequence
            # so a single undo() removes the whole dot.
            pen = self.pen()
            if self.undobuffer:
                self.undobuffer.push(["seq"])
                self.undobuffer.cumulate = True
            try:
                if self.resizemode() == 'auto':
                    self.ht()
                self.pendown()
                self.pensize(size)
                self.pencolor(color)
                self.forward(0)
            finally:
                self.pen(pen)
            if self.undobuffer:
                self.undobuffer.cumulate = False
    def _write(self, txt, align, font):
        """Performs the writing for write().

        Renders *txt* at the current position with the current pen color
        and returns the x-coordinate just past the written text (used by
        write() to optionally move the pen).
        """
        item, end = self.screen._write(self._position, txt, align, font,
                                                        self._pencolor)
        #self.items.append(item)
        if self.undobuffer:
            self.undobuffer.push(("wri", item))
        return end
def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
"""Write text at the current turtle position.
Arguments:
arg -- info, which is to be written to the TurtleScreen
move (optional) -- True/False
align (optional) -- one of the strings "left", "center" or right"
font (optional) -- a triple (fontname, fontsize, fonttype)
Write text - the string representation of arg - at the current
turtle position according to align ("left", "center" or right")
and with the given font.
If move is True, the pen is moved to the bottom-right corner
of the text. By default, move is False.
Example (for a Turtle instance named turtle):
>>> turtle.write('Home = ', True, align="center")
>>> turtle.write((0,0), True)
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
end = self._write(str(arg), align.lower(), font)
if move:
x, y = self.pos()
self.setpos(end, y)
if self.undobuffer:
self.undobuffer.cumulate = False
    def begin_poly(self):
        """Start recording the vertices of a polygon.
        No argument.

        Start recording the vertices of a polygon. Current turtle position
        is first point of polygon.

        Example (for a Turtle instance named turtle):
        >>> turtle.begin_poly()
        """
        # Seed the vertex list with the current position; _goto() appends
        # further vertices while _creatingPoly is True.
        self._poly = [self._position]
        self._creatingPoly = True
    def end_poly(self):
        """Stop recording the vertices of a polygon.
        No argument.

        Stop recording the vertices of a polygon. Current turtle position is
        last point of polygon. This will be connected with the first point.

        Example (for a Turtle instance named turtle):
        >>> turtle.end_poly()
        """
        # The recorded vertices stay in self._poly until the next begin_poly().
        self._creatingPoly = False
def get_poly(self):
"""Return the lastly recorded polygon.
No argument.
Example (for a Turtle instance named turtle):
>>> p = turtle.get_poly()
>>> turtle.register_shape("myFavouriteShape", p)
"""
## check if there is any poly?
if self._poly is not None:
return tuple(self._poly)
    def getscreen(self):
        """Return the TurtleScreen object, the turtle is drawing on.
        No argument.

        Return the TurtleScreen object, the turtle is drawing on.
        So TurtleScreen-methods can be called for that object.

        Example (for a Turtle instance named turtle):
        >>> ts = turtle.getscreen()
        >>> ts
        <turtle.TurtleScreen object at 0x0106B770>
        >>> ts.bgcolor("pink")
        """
        return self.screen
    def getturtle(self):
        """Return the Turtleobject itself.
        No argument.

        Only reasonable use: as a function to return the 'anonymous turtle':

        Example:
        >>> pet = getturtle()
        >>> pet.fd(50)
        >>> pet
        <turtle.Turtle object at 0x0187D810>
        >>> turtles()
        [<turtle.Turtle object at 0x0187D810>]
        """
        return self
    # Backward-compatible alias kept from the old turtle module.
    getpen = getturtle
    ################################################################
    ### screen oriented methods recurring to methods of TurtleScreen
    ################################################################
    def _delay(self, delay=None):
        """Set delay value which determines speed of turtle animation.

        Simply delegates to the screen's delay(); with delay=None the
        current delay value is returned instead of set.
        """
        return self.screen.delay(delay)
turtlesize = shapesize
RawPen = RawTurtle
### Screen - Singleton ########################
def Screen():
    """Return the singleton screen object.

    If none exists at the moment, create a new one and return it,
    else return the existing one.
    """
    screen = Turtle._screen
    if screen is None:
        screen = Turtle._screen = _Screen()
    return screen
class _Screen(TurtleScreen):
    """Concrete screen backing the module-level Screen() singleton.

    The root window and canvas are stored as class attributes so that
    every instance (there should only ever be one) shares them.
    """
    _root = None
    _canvas = None
    _title = _CFG["title"]
    def __init__(self):
        # XXX there is no need for this code to be conditional,
        # as there will be only a single _Screen instance, anyway
        # XXX actually, the turtle demo is injecting root window,
        # so perhaps the conditional creation of a root should be
        # preserved (perhaps by passing it as an optional parameter)
        if _Screen._root is None:
            _Screen._root = self._root = _Root()
            #self._root.title(_Screen._title)
            #self._root.ondestroy(self._destroy)
        if _Screen._canvas is None:
            width = _CFG["width"]
            height = _CFG["height"]
            canvwidth = _CFG["canvwidth"]
            canvheight = _CFG["canvheight"]
            leftright = _CFG["leftright"]
            topbottom = _CFG["topbottom"]
            self._root.setupcanvas(width, height, canvwidth, canvheight)
            _Screen._canvas = self._root._getcanvas()
        # NOTE(review): width/height/leftright/topbottom are only bound inside
        # the branch above, so a second _Screen() (canvas already cached)
        # would raise NameError here — confirm the Screen() singleton is the
        # only construction path.
        TurtleScreen.__init__(self, _Screen._canvas)
        self.setup(width, height, leftright, topbottom)
    def end(self):
        # Tear down the underlying root window / event loop.
        self._root.end()
    def setup(self, width=_CFG["width"], height=_CFG["height"],
              startx=_CFG["leftright"], starty=_CFG["topbottom"]):
        """ Set the size and position of the main window.
        Arguments:
        width: as integer a size in pixels, as float a fraction of the screen.
          Default is 50% of screen.
        height: as integer the height in pixels, as float a fraction of the
          screen. Default is 75% of screen.
        startx: if positive, starting position in pixels from the left
          edge of the screen, if negative from the right edge
          Default, startx=None is to center window horizontally.
        starty: if positive, starting position in pixels from the top
          edge of the screen, if negative from the bottom edge
          Default, starty=None is to center window vertically.
        Examples (for a Screen instance named screen):
        >>> screen.setup (width=200, height=200, startx=0, starty=0)
        sets window to 200x200 pixels, in upper left of screen
        >>> screen.setup(width=.75, height=0.5, startx=None, starty=None)
        sets window to 75% of screen by 50% of screen and centers
        """
        # Roots without set_geometry (e.g. headless backends) are left as-is.
        if not hasattr(self._root, "set_geometry"):
            return
        sw = self._root.win_width()
        sh = self._root.win_height()
        # Floats in [0, 1] are interpreted as fractions of the screen size.
        if isinstance(width, float) and 0 <= width <= 1:
            width = sw*width
        if startx is None:
            startx = (sw - width) / 2
        if isinstance(height, float) and 0 <= height <= 1:
            height = sh*height
        if starty is None:
            starty = (sh - height) / 2
        self._root.set_geometry(width, height, startx, starty)
        self.update()
class Turtle(RawTurtle):
    """RawTurtle auto-creating (scrolled) canvas.

    When a Turtle object is created or a function derived from some
    Turtle method is called a TurtleScreen object is automatically created.
    """
    # Shared 'anonymous turtle' used by the module-level function API.
    _pen = None
    # Shared singleton screen all Turtle instances draw on.
    _screen = None
    def __init__(self,
                 shape=_CFG["shape"],
                 undobuffersize=_CFG["undobuffersize"],
                 visible=_CFG["visible"]):
        # Lazily create the singleton screen on first construction.
        if Turtle._screen is None:
            Turtle._screen = Screen()
        RawTurtle.__init__(self, Turtle._screen,
                           shape=shape,
                           undobuffersize=undobuffersize,
                           visible=visible)
Pen = Turtle
def _getpen():
    """Return the 'anonymous' turtle, creating it on first use."""
    pen = Turtle._pen
    if pen is None:
        pen = Turtle._pen = Turtle()
    return pen
def _getscreen():
    """Return the shared TurtleScreen, creating it on first use."""
    screen = Turtle._screen
    if screen is None:
        screen = Turtle._screen = Screen()
    return screen
# Demo code, exercised only when the module is run as a script.
if __name__ == "__main__":
    def switchpen():
        # Toggle the anonymous turtle's pen between up and down.
        if isdown():
            pu()
        else:
            pd()
    def demo1():
        """Demo of old turtle.py - module"""
        reset()
        tracer(True)
        up()
        backward(100)
        down()
        # draw 3 squares; the last filled
        width(3)
        for i in range(3):
            if i == 2:
                begin_fill()
            for _ in range(4):
                forward(20)
                left(90)
            if i == 2:
                color("maroon")
                end_fill()
            up()
            forward(30)
            down()
        width(1)
        color("black")
        # move out of the way
        tracer(False)
        up()
        right(90)
        forward(100)
        right(90)
        forward(100)
        right(180)
        down()
        # some text
        write("startstart", 1)
        write("start", 1)
        color("red")
        # staircase
        for i in range(5):
            forward(20)
            left(90)
            forward(20)
            right(90)
        # filled staircase
        tracer(True)
        begin_fill()
        for i in range(5):
            forward(20)
            left(90)
            forward(20)
            right(90)
        end_fill()
        # more text
    def demo2():
        """Demo of some new features."""
        speed(1)
        st()
        pensize(3)
        setheading(towards(0, 0))
        radius = distance(0, 0)/2.0
        rt(90)
        for _ in range(18):
            switchpen()
            circle(radius, 10)
        write("wait a moment...")
        while undobufferentries():
            undo()
        reset()
        lt(90)
        colormode(255)
        laenge = 10
        pencolor("green")
        pensize(3)
        lt(180)
        # spiral of filled triangles with a color gradient
        for i in range(-2, 16):
            if i > 0:
                begin_fill()
                fillcolor(255-15*i, 0, 15*i)
            for _ in range(3):
                fd(laenge)
                lt(120)
            end_fill()
            laenge += 10
            lt(15)
            speed((speed()+1)%12)
        #end_fill()
        lt(120)
        pu()
        fd(70)
        rt(30)
        pd()
        color("red","yellow")
        speed(0)
        begin_fill()
        for _ in range(4):
            circle(50, 90)
            rt(90)
            fd(30)
            rt(90)
        end_fill()
        lt(90)
        pu()
        fd(30)
        pd()
        shape("turtle")
        tri = getturtle()
        tri.resizemode("auto")
        turtle = Turtle()
        turtle.resizemode("auto")
        turtle.shape("turtle")
        turtle.reset()
        turtle.left(90)
        turtle.speed(0)
        turtle.up()
        turtle.goto(280, 40)
        turtle.lt(30)
        turtle.down()
        turtle.speed(6)
        turtle.color("blue","orange")
        turtle.pensize(2)
        tri.speed(6)
        setheading(towards(turtle))
        # the anonymous turtle chases the named one until it catches up
        count = 1
        while tri.distance(turtle) > 4:
            turtle.fd(3.5)
            turtle.lt(0.6)
            tri.setheading(tri.towards(turtle))
            tri.fd(4)
            if count % 20 == 0:
                turtle.stamp()
                tri.stamp()
                switchpen()
            count += 1
        tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
        tri.pencolor("black")
        tri.pencolor("red")
        def baba(xdummy, ydummy):
            # click handler: clear everything and quit
            clearscreen()
            bye()
        time.sleep(2)
        while undobufferentries():
            tri.undo()
            turtle.undo()
        tri.fd(50)
        tri.write(" Click me!", font = ("Courier", 12, "bold") )
        tri.onclick(baba, 1)
    demo1()
    demo2()
    exitonclick()
| gpl-3.0 |
ojengwa/osmc | package/mediacenter-addon-osmc/src/service.osmc.settings/resources/lib/walkthru.py | 2 | 15442 |
# XBMC modules
import xbmc
import xbmcgui
import xbmcaddon
# STANDARD library modules
import os
import sys
import requests
import subprocess
import threading
sys.path.append(xbmc.translatePath(os.path.join(xbmcaddon.Addon().getAddonInfo('path'), 'resources','lib')))
# Custom Modules
import timezones
import LICENSE
import WARRANTY
# Full license and warranty texts shown in the walkthrough GUI.
EULA = LICENSE.license
WARR = WARRANTY.warranty
# Shared Kodi dialog helper used for confirmation pop-ups.
DIALOG = xbmcgui.Dialog()
def log(message):
    """Write *message* (stringified) to the Kodi debug log."""
    xbmc.log(str(message), level=xbmc.LOGDEBUG)
def lang(id):
    """Return the addon's localized string for *id*, UTF-8 encoded.

    Bug fix: the previous version referenced a module-level ``__addon__``
    name that is never defined in this module (it exists only as a local
    variable inside ``open_gui``), so every call raised ``NameError``.
    Fetch the addon handle explicitly instead.
    """
    san = xbmcaddon.Addon().getLocalizedString(id).encode('utf-8', 'ignore')
    return san
class Networking_caller(threading.Thread):
    """Daemon thread that checks for an internet connection in the
    background and stores the result on the parent GUI as
    ``parent.internet_connected``."""
    def __init__(self, parent, net_call):
        super(Networking_caller, self).__init__()
        self.daemon = True
        self.cancelled = False
        # The walkthru_gui instance the result is written back to.
        self.parent = parent
        # Networking interface providing check_internet().
        self.net_call = net_call
        # instantiate Barkers interface class
        # self.networking_interface = NETWORKING.Barkersinterface()
    def run(self):
        """Calls Barkers method to check for internet connection"""
        log('checking internet connection')
        self.parent.internet_connected = self.net_call.check_internet()
        log('internet connection is %s' % self.parent.internet_connected)
class walkthru_gui(xbmcgui.WindowXMLDialog):
    """OSMC first-run walkthrough dialog.

    Guides the user through language selection, terms & conditions,
    (optionally) warranty and networking setup, and newsletter sign-up.
    Panel/button visibility is driven by numeric Kodi control IDs; the
    ``9x000`` controls gate panel visibility and the ``100x`` controls are
    the menu items.
    """
    def __init__(self, strXMLname, strFallbackPath, strDefaultName, networking_instance, lang_rerun, selected_language):
        # show timezone switch
        self.showtimezone = False
        # switch that identifies whether the internet is connected
        self.internet_connected = False
        #start a new thread that begins checking for an internet connection
        self.net_call = networking_instance
        self.internet_check = False
        self.internet_checker = Networking_caller(self, self.net_call)
        self.internet_checker.setDaemon(True)
        self.internet_checker.start()
        # this flag tells us whether the GUI has been reloaded due to language selection
        self.lang_rerun = lang_rerun
        # edit the timezone in /etc/timezone
        if self.showtimezone:
            self.timezones = timezones.get_timezones()
        # this attribute denotes the skin the user wants to have applied when the walkthru closes
        self.selected_skin = 'OSMC'
        # newsletter email address
        self.email = ''
        # get the languages
        self.languages = [folder for folder in os.listdir('/usr/share/kodi/language/')]
        self.languages.sort()
        # maps a timezone region to the list control that holds its countries
        self.tz_control_map = {
                        'Africa'    : 30010,
                        'America'   : 30020,
                        'Asia'      : 30030,
                        'Atlantic'  : 30040,
                        'Australia' : 30050,
                        'Europe'    : 30060,
                        'Indian'    : 30070,
                        'Pacific'   : 30080,
                        'UTC'       : 30090,
                        }
        self.selected_language = selected_language
        self.selected_region = None
        self.selected_country = None
        # textures for the skin image
        media_path = xbmc.translatePath(os.path.join(xbmcaddon.Addon().getAddonInfo('path'), 'resources', 'skins', 'Default', 'media'))
        self.osmc_skin_image = os.path.join(media_path, 'osmc_preview.png')
        self.conf_skin_image = os.path.join(media_path, 'conf_preview.jpg')
        self.vero = self.check_hardware()
        # this attribute is used to determine when the user is allowed to exit the walkthru using the Esc or Back buttons
        self.prevent_escape = True
    def onInit(self):
        """Populate the dialog's controls once the window is created."""
        global EULA
        global WARR
        #hide all timezone, TandC and Apply buttons
        for hide_this in [1003, 1004, 1005, 1006, 1007, 1008, 1009]:
            self.getControl(hide_this).setVisible(False)
        if self.showtimezone:
            # populate the timezone controls
            for region, countries in self.timezones.iteritems():
                for country in countries:
                    ctl_id = self.tz_control_map.get(region, False)
                    if not ctl_id: continue
                    self.tmp = xbmcgui.ListItem(label=country, label2='', thumbnailImage='')
                    self.getControl(ctl_id).addItem(self.tmp)
        # hide the controls that determine panel visibility
        for visibility_control in [93000,94000,95000, 96000, 97000, 98000, 99000]:
            self.getControl(visibility_control).setVisible(False)
        # hide the language sub menus
        for tz in [3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009]:
            self.getControl(tz*10).setVisible(False)
        # populate the language control
        for language in self.languages:
            self.tmp = xbmcgui.ListItem(label=language, label2='', thumbnailImage='')
            self.getControl(20010).addItem(self.tmp)
        # populate the terms and conditions
        self.getControl(555).setText(EULA)
        # populate the warranty
        self.getControl(777).setText(WARR)
        # set the image for the skin preview control
        self.set_skin_image('OSMC')
        # this will only be True, if the language has been selected and the GUI has reloaded
        if self.lang_rerun:
            # set the flag to False so the GUI doesnt reload on exit
            self.lang_rerun = False
            self.bypass_language()
    def bypass_language(self):
        ''' Bypasses the language setting, sets the language as selected so the window doesnt reopen '''
        if self.showtimezone:
            # this part is being kept just in case we want to reimplement the timezone selection
            self.getControl(92000).setVisible(False)
            self.getControl(93000).setVisible(True)
            self.getControl(1003).setVisible(True)
            self.setFocusId(1003)
        else:
            # make the language panel invisible
            self.getControl(92000).setVisible(False)
            # make the terms panel visible
            self.getControl(94000).setVisible(True)
            # make the Term menu item visible
            self.getControl(1004).setVisible(True)
            # jump to the Terms menu item
            self.setFocusId(40010)
            # change the up and down controls for the language and terms menu items
            self.getControl(1004).controlUp(self.getControl(1002))
            self.getControl(1002).controlDown(self.getControl(1004))
    def set_skin_image(self, skin):
        ''' Sets the image for the skin preview '''
        if skin == 'CONF':
            self.getControl(88888).setImage(self.conf_skin_image)
        else:
            self.getControl(88888).setImage(self.osmc_skin_image)
    def check_hardware(self):
        '''
        Checks whether this is a Vero and whether the warranty info should be shown
        '''
        # generate the URL
        with open('/proc/cmdline', 'r') as f:
            line = f.readline()
            settings = line.split(' ')
            for setting in settings:
                if setting.startswith('osmcdev='):
                    if setting[len('osmcdev='):] == 'vero':
                        log('Hardware is Vero')
                        return True
        log('Hardware not Vero')
        return False
    def exit_proceedure(self):
        # Persist the chosen timezone (if any) and close the dialog.
        if self.selected_country != None:
            # set timezone
            for reg, cnt in self.timezones.iteritems():
                if self.selected_country in cnt:
                    self.selected_region = reg
                    break
        if self.selected_country != None and self.selected_region != None:
            users_timezone = "%s/%s" % (self.selected_region, self.selected_country)
            log('users timezone: %s' % users_timezone)
            os.system('echo %s | sudo tee /etc/timezone' % users_timezone)
        # delete skin update block file
        subprocess.call(['sudo', 'rm', '/tmp/NO_UPDATE'])
        self.close()
    def onClick(self, controlID):
        # Dispatch on the numeric ID of the control the user activated.
        if controlID == 1005:            # Exit control
            self.exit_proceedure()
        elif controlID == 20010:         # language container
            self.previous_language = self.selected_language
            self.selected_language = self.getControl(controlID).getSelectedItem().getLabel()
            # display confirmation dialog
            user_confirmation = DIALOG.yesno('Confirm', self.selected_language, autoclose=10000)
            if user_confirmation == True:
                # if user CONFIRMS, check whether a skin reload is required
                if 'english' not in self.selected_language.lower():
                    # if skin reload required, then close the window, change system language, reload the window and jump to TERMS
                    # when lang_rerun set is True, the walkthru is reloaded and skips to the setting after language
                    self.lang_rerun = True
                    self.close()
                else:
                    # jump to the setting AFTER language
                    self.bypass_language()
            else:
                # if not, then revert to the previous_language
                self.selected_language = self.previous_language
        elif controlID in [30010, 30020, 30030, 30040, 30050, 30060, 30070, 30080, 30090]:    # timezone containers
            # user has clicked on a timezone
            self.selected_country = self.getControl(controlID).getSelectedItem().getLabel()
            self.getControl(93000).setVisible(False)
            self.getControl(94000).setVisible(True)
            self.getControl(1004).setVisible(True)
            self.setFocusId(40010)
        elif controlID == 40010:         # terms and conditions I Agree button
            if self.vero:    # show the warranty panel
                self.getControl(94000).setVisible(False)
                self.getControl(97000).setVisible(True)
                self.getControl(1007).setVisible(True)
                self.setFocusId(70010)
            else:
                # check if internet is connected
                if self.internet_connected:
                    log('internet is connected, jumping to exit')
                    # skip the Networking setup menu item and go to the skin panel
                    # -- SUPPRESSED WHILE THE SKIN CHANGE METHOD IS WORKED ON --
                    # self.getControl(94000).setVisible(False)
                    # self.getControl(98000).setVisible(True)
                    # self.getControl(1008).setVisible(True)
                    # self.setFocusId(80010)
                    # display the sign-up panel
                    # -- INCLUDED ONLY WHILE THE SKIN CHANGE METHOD IS WORKED ON --
                    self.getControl(94000).setVisible(False)
                    self.getControl(99000).setVisible(True)
                    self.getControl(1009).setVisible(True)
                    self.setFocusId(90010)
                else:
                    log('internet is not connected, jumping to networking')
                    # display the Networking panel
                    self.getControl(94000).setVisible(False)
                    self.getControl(96000).setVisible(True)
                    self.getControl(1006).setVisible(True)
                    self.setFocusId(60010)
        elif controlID == 70010:         # warranty I Agree button
            if self.vero:
                # check if internet is connected
                if self.internet_connected:
                    log('internet is connected, jumping to exit')
                    # skip the Networking setup menu item and go to skin selection
                    # -- SUPPRESSED WHILE THE SKIN CHANGE METHOD IS WORKED ON --
                    # self.getControl(97000).setVisible(False)
                    # self.getControl(98000).setVisible(True)
                    # self.getControl(1008).setVisible(True)
                    # self.setFocusId(80010)
                    # display the sign-up panel
                    # -- INCLUDED ONLY WHILE THE SKIN CHANGE METHOD IS WORKED ON --
                    self.getControl(97000).setVisible(False)
                    self.getControl(99000).setVisible(True)
                    self.getControl(1009).setVisible(True)
                    self.setFocusId(90010)
                else:
                    log('internet is not connected, jumping to networking')
                    # display the Networking panel
                    self.getControl(97000).setVisible(False)
                    self.getControl(96000).setVisible(True)
                    self.getControl(1006).setVisible(True)
                    self.setFocusId(60010)
            else:
                pass
        elif controlID == 40020:         # unused scroll bar for TandC
            self.getControl(555).scroll(10)
        elif controlID == 40030:         # unused scroll bar for TandC
            self.getControl(555).scroll(-10)
        elif controlID == 60090:         # skip networking button
            # display the skin panel
            # -- SUPPRESSED WHILE THE SKIN CHANGE METHOD IS WORKED ON --
            # self.getControl(96000).setVisible(False)
            # self.getControl(98000).setVisible(True)
            # self.getControl(1008).setVisible(True)
            # self.setFocusId(80010)
            # display the sign-up panel
            # -- INCLUDED ONLY WHILE THE SKIN CHANGE METHOD IS WORKED ON --
            self.getControl(96000).setVisible(False)
            self.getControl(99000).setVisible(True)
            self.getControl(1009).setVisible(True)
            self.setFocusId(90010)
        elif controlID == 60010:         # open networking gui
            self.net_call.run(False)
            # display the skin panel
            # -- SUPPRESSED WHILE THE SKIN CHANGE METHOD IS WORKED ON --
            # self.getControl(96000).setVisible(False)
            # self.getControl(98000).setVisible(True)
            # self.getControl(1008).setVisible(True)
            # self.setFocusId(80010)
            # display the sign-up panel
            # -- INCLUDED ONLY WHILE THE SKIN CHANGE METHOD IS WORKED ON --
            self.getControl(96000).setVisible(False)
            self.getControl(99000).setVisible(True)
            self.getControl(1009).setVisible(True)
            self.setFocusId(90010)
        elif controlID in [80010, 80020]:    # user has selected a skin
            # -- THIS SECTION IS SUPPRESSED WHILE THE SKIN CHANGE METHOD IS WORKED ON --
            # -- IT CANNOT BE REACHED AS THE PANEL WITH THE BUTTONS IS NEVER DISPLAYED --
            if controlID == 80010:
                self.selected_skin = 'OSMC'
            else:
                self.selected_skin = 'Confluence'
            # display the sign-up panel
            self.getControl(98000).setVisible(False)
            self.getControl(99000).setVisible(True)
            self.getControl(1009).setVisible(True)
            self.setFocusId(90010)
        elif controlID in [90010, 90020]:    # newsletter sign up
            if controlID == 90010:
                # show keyboard
                kb = xbmc.Keyboard(self.email, 'Please enter your email')
                kb.doModal()
                if kb.isConfirmed():
                    self.email = kb.getText()
                    requests.post('https://osmc.tv/wp-content/plugins/newsletter/do/subscribe.php', data={'ne': self.email})
            # display the sign-up panel
            self.getControl(99000).setVisible(False)
            self.getControl(95000).setVisible(True)
            self.getControl(1005).setVisible(True)
            self.setFocusId(1005)
            # allow the user to exit
            self.prevent_escape = False
    def onAction(self, action):
        # Esc/Back are ignored until the walkthrough reaches its final panel.
        if self.prevent_escape:
            return
        if action == 10 or action == 92:
            # delete skin update block file
            subprocess.call(['sudo', 'rm', '/tmp/NO_UPDATE'])
            self.close()
    def onFocus(self, controlID):
        # Keep the visible panel in sync with whichever menu item has focus.
        main_controls = [1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009]
        tz_controls = [3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009]
        skin_controls = [80010, 80020]
        if controlID in main_controls:
            for main in main_controls:
                sub_id = main % 1000
                ctl = self.getControl((sub_id * 1000) + 90000)
                if main == controlID:
                    ctl.setVisible(True)
                else:
                    ctl.setVisible(False)
        elif controlID in tz_controls:
            for tz in tz_controls:
                ctl = self.getControl(tz * 10)
                if tz == controlID:
                    ctl.setVisible(True)
                else:
                    ctl.setVisible(False)
        elif controlID in skin_controls:
            # -- THIS SECTION IS SUPPRESSED WHILE THE SKIN CHANGE METHOD IS WORKED ON --
            # -- IT CANNOT BE REACHED AS THE PANEL WITH THE SKIN CONTROLS IS NEVER MADE VISIBLE --
            if controlID == 80010:
                # display the OSMC skin image
                self.set_skin_image('OSMC')
            elif controlID == 80020:
                # display the confluence skin image
                self.set_skin_image('CONF')
def open_gui(networking_instance):
    """Show the first-run walkthrough, re-opening it once if the user
    picks a non-English language (the dialog must be reloaded for the
    language change to take effect)."""
    __addon__ = xbmcaddon.Addon()
    scriptPath = __addon__.getAddonInfo('path')
    # pick the 720p layout when the active skin reports a 720 height
    xml = "walkthru_720.xml" if xbmcgui.Window(10000).getProperty("SkinHeight") == '720' else "walkthru.xml"
    lang_rerun = False
    first_run = True
    selected_language = None
    while first_run or lang_rerun:
        first_run = False
        GUI = walkthru_gui(xml, scriptPath, 'Default', networking_instance=networking_instance, lang_rerun=lang_rerun, selected_language=selected_language)
        GUI.doModal()
        selected_language = GUI.selected_language
        skin_choice = GUI.selected_skin
        lang_rerun = GUI.lang_rerun
        # set language
        xbmc.executebuiltin('xbmc.SetGUILanguage(%s)' % selected_language)
        xbmc.sleep(1000)
        log('users language: %s' % selected_language)
        log('lang_rerun: %s' % lang_rerun)
        log('skin_choice: %s' % skin_choice)
        # -- THIS SECTION IS SUPPRESSED WHILE THE SKIN CHANGE METHOD IS WORKED ON --
        # if skin_choice != 'OSMC':
        #     log('Loading Confluence')
        #     try:
        #         xbmc.setskin('skin.confluence')
        #     except:
        #         log('Loading Confluence failed.')
    log('Exiting GUI')
| gpl-2.0 |
quantifiedcode-bot/pyshop | pyshop/tests/test_models.py | 5 | 6633 | from .case import ModelTestCase
from . import setUpModule, tearDownModule
class GroupTestCase(ModelTestCase):
    """Exercises Group.by_name against the fixture data."""

    def test_by_name(self):
        from pyshop.models import Group
        # Look up the well-known 'admin' group and verify its identity.
        group = Group.by_name(self.session, u'admin')
        self.assertIsInstance(group, Group)
        self.assertEqual(group.name, u'admin')
class UserTestCase(ModelTestCase):
    """Exercises User.by_login / User.by_credentials lookups."""

    def test_by_login_ok_mirrored(self):
        from pyshop.models import User
        # Mirrored accounts are only visible with local=False.
        account = User.by_login(self.session, u'johndo', local=False)
        self.assertIsInstance(account, User)
        self.assertEqual(account.login, u'johndo')

    def test_by_login_ko_mirrored(self):
        from pyshop.models import User
        # Default (local) lookup must not expose mirrored accounts.
        account = User.by_login(self.session, u'johndo')
        self.assertEqual(account, None)

    def test_by_login_ok_local(self):
        from pyshop.models import User
        account = User.by_login(self.session, u'local_user')
        self.assertIsInstance(account, User)

    def test_by_credentials_ko_unexists(self):
        from pyshop.models import User
        # Unknown login with an SQL-injection-flavoured password.
        account = User.by_credentials(self.session, u'u404', u"' OR 1 = 1 #")
        self.assertEqual(account, None)

    def test_by_credentials_ko_mirrored(self):
        from pyshop.models import User
        account = User.by_credentials(self.session, u'johndo', '')
        self.assertEqual(account, None)

    def test_by_credentials_ko_password(self):
        from pyshop.models import User
        account = User.by_credentials(self.session, u'admin', 'CHANGEME')
        self.assertIsNone(account)

    def test_by_credentials_ok(self):
        from pyshop.models import User
        account = User.by_credentials(self.session, u'local_user', 'secret')
        self.assertIsInstance(account, User)
        self.assertEqual(account.login, u'local_user')
        self.assertEqual(account.name, u'Local User')

    def test_hash_password(self):
        from pyshop.models import User
        candidate = User(login=u'test_password', password=u'secret')
        self.assertNotEqual(candidate.password, u'secret',
                            'password must be hashed')
class ClassifierTestCase(ModelTestCase):
    """Exercises Classifier.by_name and the parent/child hierarchy."""

    def test_by_name(self):
        from pyshop.models import Classifier
        classifier = Classifier.by_name(self.session,
                                        u'Topic :: Software Development')
        self.assertIsInstance(classifier, Classifier)
        self.assertEqual(classifier.category, u'Topic')
        self.assertEqual(classifier.name, u'Topic :: Software Development')
        # The classifier must be linked to its 'Topic' parent ...
        parent = Classifier.by_name(self.session, u'Topic')
        self.assertEqual(classifier.parent_id, parent.id)
        # ... and the parent must list the expected children.
        child_names = sorted(c.shortname for c in parent.childs)
        self.assertEqual(child_names,
                         [u'Software Development', u'System'])
class PackageTestCase(ModelTestCase):
    """Exercises the Package lookup helpers against the fixture data."""
    def test_versions(self):
        from pyshop.models import Package
        pkg = Package.by_name(self.session, u'mirrored_package1')
        self.assertIsInstance(pkg, Package)
        self.assertEqual(pkg.id, 1)
        # versions are expected newest-first
        self.assertEqual(pkg.versions, [u'0.2', u'0.1'])
    def test_by_name(self):
        from pyshop.models import Package
        pkg = Package.by_name(self.session, u'mirrored_package1')
        self.assertIsInstance(pkg, Package)
        self.assertEqual(pkg.id, 1)
        self.assertEqual(pkg.name, u'mirrored_package1')
    def test_by_owner(self):
        from pyshop.models import Package
        pkges = Package.by_owner(self.session, u'johndo')
        self.assertIsInstance(pkges, list)
        pkges = [pkg.name for pkg in pkges]
        self.assertEqual(pkges, [u'mirrored_package1', u'mirrored_package2'])
    def test_by_maintainer(self):
        from pyshop.models import Package
        pkges = Package.by_maintainer(self.session, u'janedoe')
        self.assertIsInstance(pkges, list)
        pkges = [pkg.name for pkg in pkges]
        self.assertEqual(pkges, [u'mirrored_package2'])
    def test_get_locals(self):
        from pyshop.models import Package
        pkges = Package.get_locals(self.session)
        self.assertIsInstance(pkges, list)
        pkges = [pkg.name for pkg in pkges]
        self.assertEqual(pkges, [u'local_package1'])
    def test_get_mirrored(self):
        from pyshop.models import Package
        pkges = Package.get_mirrored(self.session)
        self.assertIsInstance(pkges, list)
        pkges = [pkg.name for pkg in pkges]
        self.assertEqual(pkges, [u'mirrored_package1', u'mirrored_package2'])
class ReleaseTestCase(ModelTestCase):
    """Exercises Release lookups, search and ordering."""
    def test_by_version(self):
        from pyshop.models import Release
        release = Release.by_version(self.session, u'mirrored_package2', u'1.0')
        self.assertIsInstance(release, Release)
        self.assertEqual(release.package.name, u'mirrored_package2')
        self.assertEqual(release.version, u'1.0')
    def test_by_classifiers(self):
        from pyshop.models import Release
        releases = Release.by_classifiers(self.session,
                                          [u'Intended Audience :: Developers'])
        self.assertIsInstance(releases, list)
        releases = [(r.package.name, r.version) for r in releases]
        self.assertEqual(releases, [(u'local_package1', u'0.1')])
    def test_search_by_author(self):
        from pyshop.models import Release
        releases = Release.search(self.session, {'author': 'janedoe'}, 'and')
        self.assertIsInstance(releases, list)
        releases = [(r.package.name, r.version) for r in releases]
        self.assertEqual(releases, [(u'mirrored_package1', u'0.1')])
    def test_sorted_releases(self):
        from pyshop.models import Package
        pkg = Package.by_name(self.session, u'mirrored_package1')
        # sorted_releases must yield versions newest-first
        self.assertEqual([release.version for release in pkg.sorted_releases],
                         ['0.2', '0.1'])
class ReleaseFileTestCase(ModelTestCase):
    """Exercises ReleaseFile lookups against the fixture data."""
    def test_by_release(self):
        from pyshop.models import ReleaseFile
        files = ReleaseFile.by_release(self.session, u'mirrored_package2',
                                       u'1.0')
        self.assertIsInstance(files, list)
        files = [f.filename for f in files]
        self.assertEqual(files, [u'mirrored_package2-1.0.tar.gz'])
    def test_by_filename(self):
        # BUG FIX: this method was named ``by_filename`` (missing the
        # ``test_`` prefix), so the unittest runner never collected or ran
        # it.  Renamed so it actually executes.  Local ``file`` also renamed
        # to avoid shadowing the builtin.
        from pyshop.models import ReleaseFile
        release_file = ReleaseFile.by_filename(self.session,
                                               u'mirrored_package1',
                                               u'mirrored_package1-0.2.egg')
        self.assertIsInstance(release_file, ReleaseFile)
        self.assertEqual(release_file.release.package.name,
                         u'mirrored_package1')
        self.assertEqual(release_file.release.version, u'0.2')
        self.assertEqual(release_file.package_type, u'bdist_egg')
| bsd-3-clause |
dckc/PyCap | docs/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments syntax-highlighting style for the Flask docs theme.

    A tango-derived palette; each token type maps to a Pygments style
    string, with the emitted CSS class noted alongside.
    """
    background_color = "#f8f8f8"
    default_style = ""
    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class:  ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'
        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Preproc:           "noitalic",               # class: 'cp'
        Keyword:                   "bold #004461",           # class: 'k'
        Keyword.Constant:          "bold #004461",           # class: 'kc'
        Keyword.Declaration:       "bold #004461",           # class: 'kd'
        Keyword.Namespace:         "bold #004461",           # class: 'kn'
        Keyword.Pseudo:            "bold #004461",           # class: 'kp'
        Keyword.Reserved:          "bold #004461",           # class: 'kr'
        Keyword.Type:              "bold #004461",           # class: 'kt'
        Operator:                  "#582800",                # class: 'o'
        Operator.Word:             "bold #004461",           # class: 'ow' - like keywords
        Punctuation:               "bold #000000",           # class: 'p'
        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#004461",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "#888",                   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #004461",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised
        Number:                    "#990000",                # class: 'm'
        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'
        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'
        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "#888",                   # class: 'go'
        Generic.Prompt:            "#745334",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
| mit |
rghe/ansible | lib/ansible/modules/network/f5/bigip_profile_persistence_src_addr.py | 3 | 18384 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_profile_persistence_src_addr
short_description: Manage source address persistence profiles
description:
- Manages source address persistence profiles.
version_added: 2.7
options:
name:
description:
- Specifies the name of the profile.
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(source_addr) profile.
match_across_services:
description:
- When C(yes), specifies that all persistent connections from a client IP address that go
to the same virtual IP address also go to the same node.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: bool
match_across_virtuals:
description:
- When C(yes), specifies that all persistent connections from the same client IP address
go to the same node.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: bool
match_across_pools:
description:
- When C(yes), specifies that the system can use any pool that contains this persistence
record.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: bool
hash_algorithm:
description:
- Specifies the algorithm the system uses for hash persistence load balancing. The hash
result is the input for the algorithm.
- When C(default), specifies that the system uses the index of pool members to obtain the
hash result for the input to the algorithm.
- When C(carp), specifies that the system uses the Cache Array Routing Protocol (CARP)
to obtain the hash result for the input to the algorithm.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
choices:
- default
- carp
entry_timeout:
description:
- Specifies the duration of the persistence entries.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
- To specify an indefinite timeout, use the value C(indefinite).
- If specifying a numeric timeout, the value must be between C(1) and C(4294967295).
override_connection_limit:
description:
- When C(yes), specifies that the system allows you to specify that pool member connection
limits will be overridden for persisted clients.
- Per-virtual connection limits remain hard limits and are not overridden.
type: bool
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a profile
bigip_profile_persistence_src_addr:
name: foo
password: secret
server: lb.mydomain.com
state: present
user: admin
hash_algorithm: carp
match_across_services: yes
match_across_virtuals: yes
delegate_to: localhost
'''
RETURN = r'''
param1:
description: The new param1 value of the resource.
returned: changed
type: bool
sample: true
param2:
description: The new param2 value of the resource.
returned: changed
type: string
sample: Foo is bar
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
class Parameters(AnsibleF5Parameters):
    """Base parameter mapping between module options and BIG-IP API fields."""
    api_map = {
        'defaultsFrom': 'parent',
        'hashAlgorithm': 'hash_algorithm',
        'matchAcrossPools': 'match_across_pools',
        'matchAcrossServices': 'match_across_services',
        'matchAcrossVirtuals': 'match_across_virtuals',
        'overrideConnectionLimit': 'override_connection_limit',
        # This timeout name needs to be overridden because 'timeout' is a connection
        # parameter and we don't want that to be the value that is always set here.
        'timeout': 'entry_timeout'
    }
    # Attribute names (API vocabulary) that may be sent to the device.
    api_attributes = [
        'defaultsFrom',
        'hashAlgorithm',
        'matchAcrossPools',
        'matchAcrossServices',
        'matchAcrossVirtuals',
        'overrideConnectionLimit',
        'timeout',
    ]
    # Values reported back to the user in the module result.
    returnables = [
        'parent',
        'hash_algorithm',
        'match_across_pools',
        'match_across_services',
        'match_across_virtuals',
        'override_connection_limit',
        'entry_timeout',
    ]
    # Values that are diffed against the device to decide whether to update.
    updatables = [
        'hash_algorithm',
        'match_across_pools',
        'match_across_services',
        'match_across_virtuals',
        'override_connection_limit',
        'entry_timeout',
    ]

    @property
    def entry_timeout(self):
        """Validate and return the persistence entry timeout.

        ``None`` and the literal ``'indefinite'`` pass through unchanged;
        anything else is coerced to int and must fall in 1..4294967295.

        :raises F5ModuleError: if the numeric timeout is out of range.
        """
        if self._values['entry_timeout'] in [None, 'indefinite']:
            return self._values['entry_timeout']
        timeout = int(self._values['entry_timeout'])
        # BUG FIX: the original check was ``if 1 > timeout > 4294967295:``,
        # a chained comparison requiring timeout to be simultaneously < 1
        # and > 4294967295 -- impossible, so invalid values were silently
        # accepted. Check each bound explicitly instead.
        if timeout < 1 or timeout > 4294967295:
            raise F5ModuleError(
                "'timeout' value must be between 1 and 4294967295, or the value 'indefinite'."
            )
        return timeout
class ApiParameters(Parameters):
    # Parameters in the form returned by the BIG-IP REST API; no re-mapping
    # needed beyond what the base class's api_map already provides.
    pass
class ModuleParameters(Parameters):
    """Parameters as supplied by the user, normalized to API vocabulary."""

    def _enabled_disabled(self, key):
        # The API expects 'enabled'/'disabled' where the module accepts
        # booleans; flatten_boolean first normalizes to 'yes'/'no'/None.
        flag = flatten_boolean(self._values[key])
        if flag is None:
            return None
        return 'enabled' if flag == 'yes' else 'disabled'

    @property
    def match_across_pools(self):
        return self._enabled_disabled('match_across_pools')

    @property
    def match_across_services(self):
        return self._enabled_disabled('match_across_services')

    @property
    def match_across_virtuals(self):
        return self._enabled_disabled('match_across_virtuals')

    @property
    def override_connection_limit(self):
        return self._enabled_disabled('override_connection_limit')

    @property
    def parent(self):
        """Fully-qualified (partition-prefixed) name of the parent profile."""
        if self._values['parent'] is None:
            return None
        return fq_name(self.partition, self._values['parent'])
class Changes(Parameters):
    def to_return(self):
        """Collect the returnable values into a plain dict for module output.

        Unset values are removed by ``_filter_params``.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Deliberate best-effort: reporting changes must never abort the
            # module run, so any failure yields whatever was collected so far.
            pass
        return result
class UsableChanges(Changes):
    # Changed values in API vocabulary, ready to be sent to the device.
    pass
class ReportableChanges(Changes):
    """Changes translated back from API vocabulary for user-facing output."""

    def _yes_no(self, key):
        # Map the API's 'enabled'/'disabled' back to the module's 'yes'/'no'.
        value = self._values[key]
        if value is None:
            return None
        return 'yes' if value == 'enabled' else 'no'

    @property
    def match_across_services(self):
        return self._yes_no('match_across_services')

    @property
    def match_across_virtuals(self):
        return self._yes_no('match_across_virtuals')

    @property
    def match_across_pools(self):
        return self._yes_no('match_across_pools')

    @property
    def override_connection_limit(self):
        return self._yes_no('override_connection_limit')
class Difference(object):
    """Computes which desired (*want*) values differ from current (*have*) ones."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # A property on this class wins for parameters needing custom
        # comparison; everything else falls back to simple inequality.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
            if wanted != current:
                return wanted
        except AttributeError:
            # 'have' lacks the attribute entirely: the wanted value applies.
            return wanted

    @property
    def parent(self):
        if self.want.parent != self.have.parent:
            raise F5ModuleError(
                "The parent monitor cannot be changed"
            )
class ModuleManager(object):
    """Drives the module: diffs desired vs. device state and applies changes
    to the source-addr persistence profile via the BIG-IP REST API."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        """On create, every user-supplied value counts as a change."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        """Diff want vs. have; record differences and report whether any exist."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # A dict signals a composite change; merge it wholesale.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Entry point: dispatch on state and build the result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Surface any queued deprecation warnings through Ansible's channel.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def exists(self):
        """Return True if the profile exists on the device.

        NOTE(review): any response other than a 404 (including other HTTP
        errors with a JSON body) is treated as "exists" -- confirm this is
        the intended behavior.
        """
        uri = "https://{0}:{1}/mgmt/tm/ltm/persistence/source-addr/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update(self):
        """Sync device state; returns True if anything changed (or would)."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Check mode: report the change without touching the device.
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def create_on_device(self):
        """POST the new profile; returns the created resource's selfLink."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/persistence/source-addr/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']
    def update_on_device(self):
        """PATCH only the changed attributes onto the existing profile."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/persistence/source-addr/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def remove_from_device(self):
        """DELETE the profile; raises on any non-200 response."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/persistence/source-addr/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True
        raise F5ModuleError(resp.content)
    def read_current_from_device(self):
        """GET the profile's current settings as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/persistence/source-addr/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Declares this module's argument spec, layered over the common F5 spec."""

    def __init__(self):
        self.supports_check_mode = True
        own_spec = dict(
            name=dict(required=True),
            parent=dict(),
            match_across_services=dict(type='bool'),
            match_across_virtuals=dict(type='bool'),
            match_across_pools=dict(type='bool'),
            hash_algorithm=dict(choices=['default', 'carp']),
            entry_timeout=dict(),
            override_connection_limit=dict(type='bool'),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Module-specific options override the shared F5 defaults.
        merged = {}
        merged.update(f5_argument_spec)
        merged.update(own_spec)
        self.argument_spec = merged
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        supports_check_mode=spec.supports_check_mode,
        argument_spec=spec.argument_spec,
    )
    try:
        client = F5RestClient(**module.params)
        manager = ModuleManager(module=module, client=client)
        results = manager.exec_module()
        exit_json(module, results, client)
    except F5ModuleError as error:
        fail_json(module, error, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
caiocsalvador/whats_the_craic | lib/python3.4/site-packages/django/db/models/sql/where.py | 439 | 8054 | """
Code to manage the creation and SQL rendering of 'where' constraints.
"""
from django.db.models.sql.datastructures import EmptyResultSet
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = 'AND'
OR = 'OR'
class WhereNode(tree.Node):
    """
    Used to represent the SQL where-clause.
    The class is tied to the Query class that created it (in order to create
    the correct SQL).
    A child is usually an expression producing boolean values. Most likely the
    expression is a Lookup instance.
    However, a child could also be any class with as_sql() and either
    relabeled_clone() method or relabel_aliases() and clone() methods and
    contains_aggregate attribute.
    """
    # Children are joined with AND unless a connector is given explicitly.
    default = AND
    def split_having(self, negated=False):
        """
        Returns two possibly None nodes: one for those parts of self that
        should be included in the WHERE clause and one for those parts of
        self that must be included in the HAVING clause.
        """
        if not self.contains_aggregate:
            return self, None
        # Track negation parity down the tree: NOT flips the effective
        # connector semantics (De Morgan).
        in_negated = negated ^ self.negated
        # If the effective connector is OR and this node contains an aggregate,
        # then we need to push the whole branch to HAVING clause.
        may_need_split = (
            (in_negated and self.connector == AND) or
            (not in_negated and self.connector == OR))
        if may_need_split and self.contains_aggregate:
            return None, self
        where_parts = []
        having_parts = []
        for c in self.children:
            if hasattr(c, 'split_having'):
                where_part, having_part = c.split_having(in_negated)
                if where_part is not None:
                    where_parts.append(where_part)
                if having_part is not None:
                    having_parts.append(having_part)
            elif c.contains_aggregate:
                having_parts.append(c)
            else:
                where_parts.append(c)
        having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None
        where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None
        return where_node, having_node
    def as_sql(self, compiler, connection):
        """
        Returns the SQL version of the where clause and the value to be
        substituted in. Returns '', [] if this node matches everything,
        None, [] if this node is empty, and raises EmptyResultSet if this
        node can't match anything.
        """
        result = []
        result_params = []
        # full_needed/empty_needed count how many children must be full/empty
        # before this whole node is known to match everything/nothing:
        # for AND, one empty child empties the node; for OR, one full child
        # fills it.
        if self.connector == AND:
            full_needed, empty_needed = len(self.children), 1
        else:
            full_needed, empty_needed = 1, len(self.children)
        for child in self.children:
            try:
                sql, params = compiler.compile(child)
            except EmptyResultSet:
                empty_needed -= 1
            else:
                if sql:
                    result.append(sql)
                    result_params.extend(params)
                else:
                    # Empty SQL means the child matches everything.
                    full_needed -= 1
            # Check if this node matches nothing or everything.
            # First check the amount of full nodes and empty nodes
            # to make this node empty/full.
            # Now, check if this node is full/empty using the
            # counts.
            if empty_needed == 0:
                if self.negated:
                    return '', []
                else:
                    raise EmptyResultSet
            if full_needed == 0:
                if self.negated:
                    raise EmptyResultSet
                else:
                    return '', []
        conn = ' %s ' % self.connector
        sql_string = conn.join(result)
        if sql_string:
            if self.negated:
                # Some backends (Oracle at least) need parentheses
                # around the inner SQL in the negated case, even if the
                # inner SQL contains just a single expression.
                sql_string = 'NOT (%s)' % sql_string
            elif len(result) > 1:
                sql_string = '(%s)' % sql_string
        return sql_string, result_params
    def get_group_by_cols(self):
        # Union of the GROUP BY columns required by all children.
        cols = []
        for child in self.children:
            cols.extend(child.get_group_by_cols())
        return cols
    def relabel_aliases(self, change_map):
        """
        Relabels the alias values of any children. 'change_map' is a dictionary
        mapping old (current) alias values to the new values.
        """
        for pos, child in enumerate(self.children):
            if hasattr(child, 'relabel_aliases'):
                # For example another WhereNode
                child.relabel_aliases(change_map)
            elif hasattr(child, 'relabeled_clone'):
                self.children[pos] = child.relabeled_clone(change_map)
    def clone(self):
        """
        Creates a clone of the tree. Must only be called on root nodes (nodes
        with empty subtree_parents). Children must be either (Constraint,
        lookup, value) tuples, or objects supporting .clone().
        """
        clone = self.__class__._new_instance(
            children=[], connector=self.connector, negated=self.negated)
        for child in self.children:
            if hasattr(child, 'clone'):
                clone.children.append(child.clone())
            else:
                clone.children.append(child)
        return clone
    def relabeled_clone(self, change_map):
        clone = self.clone()
        clone.relabel_aliases(change_map)
        return clone
    @classmethod
    def _contains_aggregate(cls, obj):
        # Recursive check: a tree node aggregates if any child does.
        if isinstance(obj, tree.Node):
            return any(cls._contains_aggregate(c) for c in obj.children)
        return obj.contains_aggregate
    @cached_property
    def contains_aggregate(self):
        return self._contains_aggregate(self)
class NothingNode(object):
    """Leaf node whose SQL can never match any row."""
    # There are no expressions here, hence nothing aggregate-related.
    contains_aggregate = False

    def as_sql(self, compiler=None, connection=None):
        raise EmptyResultSet
class ExtraWhere(object):
    """Raw SQL snippets (from QuerySet.extra), ANDed together verbatim."""
    # The contents are a black box - assume no aggregates are used.
    contains_aggregate = False

    def __init__(self, sqls, params):
        self.sqls = sqls
        self.params = params

    def as_sql(self, compiler=None, connection=None):
        # Each snippet is parenthesized so operator precedence is preserved.
        clause = " AND ".join("(%s)" % chunk for chunk in self.sqls)
        return clause, list(self.params or ())
class SubqueryConstraint(object):
    """WHERE constraint of the form ``(columns) IN (SELECT targets ...)``."""
    # Even if aggregates would be used in a subquery, the outer query isn't
    # interested about those.
    contains_aggregate = False

    def __init__(self, alias, columns, targets, query_object):
        self.alias = alias
        self.columns = columns
        self.targets = targets
        self.query_object = query_object

    def as_sql(self, compiler, connection):
        subquery = self.query_object
        if hasattr(subquery, 'values'):
            # A QuerySet was sent rather than a bare Query.
            if subquery._db and connection.alias != subquery._db:
                raise ValueError("Can't do subqueries with queries on different DBs.")
            if subquery._fields is None:
                # No explicit values() yet: select only the target columns.
                subquery = subquery.values(*self.targets)
            else:
                # Do not override already existing values.
                subquery = subquery._clone()
            subquery = subquery.query
            if subquery.can_filter():
                # No slicing in use, so ordering is irrelevant inside IN ().
                subquery.clear_ordering(True)
        inner_compiler = subquery.get_compiler(connection=connection)
        return inner_compiler.as_subquery_condition(self.alias, self.columns, compiler)

    def relabel_aliases(self, change_map):
        self.alias = change_map.get(self.alias, self.alias)

    def clone(self):
        return self.__class__(
            self.alias, self.columns, self.targets, self.query_object)
| mit |
marteinn/django-csvexport | tests/tests.py | 1 | 2370 | import csv
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from .models import Record
from .exporters import (
Record1FieldExporter,
RecordExcludeIdExporter,
RecordFieldAndExcludeExporter,
RecordDynamicExporter
)
class FieldAccessibility(TestCase):
    """Checks which model fields each exporter exposes in its CSV output."""
    record = None

    def setUp(self):
        self.record = Record(title="Hello").save()

    def _export_rows(self, exporter_cls):
        # Run the exporter over all records and parse its CSV back to dicts.
        exporter = exporter_cls(queryset=Record.objects.all())
        return csv_to_dict(exporter.to_file(StringIO()))

    def test_fields(self):
        rows = self._export_rows(Record1FieldExporter)
        self.assertEquals(len(rows[0]), 1)

    def test_exclude(self):
        rows = self._export_rows(RecordExcludeIdExporter)
        self.assertEquals(len(rows[0]), 4)

    def test_fields_and_exclude(self):
        rows = self._export_rows(RecordFieldAndExcludeExporter)
        self.assertEquals(len(rows[0]), 2)

    def test_dynamic_exporter(self):
        rows = self._export_rows(RecordDynamicExporter)
        self.assertTrue("alt_title" in rows[0])
class ViewTestCase(TestCase):
    """Exercises the CSV export view end-to-end through the test client."""

    def setUp(self):
        self.record = Record(title="Hello").save()

    def test_render(self):
        response = Client().get(reverse('csv_view'))
        self.assertEquals(response.status_code, 200)
        body = response.content.decode("utf-8")
        rows = csv_to_dict(StringIO(body))
        self.assertFalse("id" in rows[0])
def csv_to_dict(csvfile):
    """Parse a StringIO holding CSV text into a list of row dicts."""
    # Trim surrounding CR/LF so a trailing newline doesn't yield a blank row.
    text = csvfile.getvalue().strip("\r\n")
    reader = csv.DictReader(StringIO(text))
    return list(reader)
| mit |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.0/Lib/test/test_re.py | 6 | 12182 | import sys
sys.path = ['.'] + sys.path
from test_support import verbose, TestFailed
import re
import sys, os, string, traceback
# Misc tests from Tim Peters' re.doc
if verbose:
print 'Running tests on re.search and re.match'
try:
assert re.search('x*', 'axx').span(0) == (0, 0)
assert re.search('x*', 'axx').span() == (0, 0)
assert re.search('x+', 'axx').span(0) == (1, 3)
assert re.search('x+', 'axx').span() == (1, 3)
assert re.search('x', 'aaa') == None
except:
raise TestFailed, "re.search"
try:
assert re.match('a*', 'xxx').span(0) == (0, 0)
assert re.match('a*', 'xxx').span() == (0, 0)
assert re.match('x*', 'xxxa').span(0) == (0, 3)
assert re.match('x*', 'xxxa').span() == (0, 3)
assert re.match('a+', 'xxx') == None
except:
raise TestFailed, "re.search"
if verbose:
print 'Running tests on re.sub'
try:
assert re.sub("(?i)b+", "x", "bbbb BBBB") == 'x x'
def bump_num(matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
assert re.sub(r'\d+', bump_num, '08.2 -2 23x99y') == '9.3 -3 24x100y'
assert re.sub(r'\d+', bump_num, '08.2 -2 23x99y', 3) == '9.3 -3 23x99y'
assert re.sub('.', lambda m: r"\n", 'x') == '\\n'
assert re.sub('.', r"\n", 'x') == '\n'
s = r"\1\1"
assert re.sub('(.)', s, 'x') == 'xx'
assert re.sub('(.)', re.escape(s), 'x') == s
assert re.sub('(.)', lambda m: s, 'x') == s
assert re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx') == 'xxxx'
assert re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx') == 'xxxx'
assert re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx') == 'xxxx'
assert re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx') == 'xxxx'
assert re.sub('a', r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D', 'a') == '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D'
assert re.sub('a', '\t\n\v\r\f\a', 'a') == '\t\n\v\r\f\a'
assert re.sub('a', '\t\n\v\r\f\a', 'a') == (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7))
assert re.sub('^\s*', 'X', 'test') == 'Xtest'
except AssertionError:
raise TestFailed, "re.sub"
try:
assert re.sub('a', 'b', 'aaaaa') == 'bbbbb'
assert re.sub('a', 'b', 'aaaaa', 1) == 'baaaa'
except AssertionError:
raise TestFailed, "qualified re.sub"
if verbose:
print 'Running tests on symbolic references'
try:
re.sub('(?P<a>x)', '\g<a', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g<', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g<a a>', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g<1a1>', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)', '\g<ab>', 'xx')
except IndexError, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
try:
re.sub('(?P<a>x)|(?P<b>y)', '\\2', 'xx')
except re.error, reason:
pass
else:
raise TestFailed, "symbolic reference"
if verbose:
print 'Running tests on re.subn'
try:
assert re.subn("(?i)b+", "x", "bbbb BBBB") == ('x x', 2)
assert re.subn("b+", "x", "bbbb BBBB") == ('x BBBB', 1)
assert re.subn("b+", "x", "xyz") == ('xyz', 0)
assert re.subn("b*", "x", "xyz") == ('xxxyxzx', 4)
assert re.subn("b*", "x", "xyz", 2) == ('xxxyz', 2)
except AssertionError:
raise TestFailed, "re.subn"
if verbose:
print 'Running tests on re.split'
try:
assert re.split(":", ":a:b::c") == ['', 'a', 'b', '', 'c']
assert re.split(":*", ":a:b::c") == ['', 'a', 'b', 'c']
assert re.split("(:*)", ":a:b::c") == ['', ':', 'a', ':', 'b', '::', 'c']
assert re.split("(?::*)", ":a:b::c") == ['', 'a', 'b', 'c']
assert re.split("(:)*", ":a:b::c") == ['', ':', 'a', ':', 'b', ':', 'c']
assert re.split("([b:]+)", ":a:b::c") == ['', ':', 'a', ':b::', 'c']
assert re.split("(b)|(:+)", ":a:b::c") == \
['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c']
assert re.split("(?:b)|(?::+)", ":a:b::c") == ['', 'a', '', '', 'c']
except AssertionError:
raise TestFailed, "re.split"
try:
assert re.split(":", ":a:b::c", 2) == ['', 'a', 'b::c']
assert re.split(':', 'a:b:c:d', 2) == ['a', 'b', 'c:d']
assert re.split("(:)", ":a:b::c", 2) == ['', ':', 'a', ':', 'b::c']
assert re.split("(:*)", ":a:b::c", 2) == ['', ':', 'a', ':', 'b::c']
except AssertionError:
raise TestFailed, "qualified re.split"
if verbose:
print "Running tests on re.findall"
try:
assert re.findall(":+", "abc") == []
assert re.findall(":+", "a:b::c:::d") == [":", "::", ":::"]
assert re.findall("(:+)", "a:b::c:::d") == [":", "::", ":::"]
assert re.findall("(:)(:*)", "a:b::c:::d") == [(":", ""),
(":", ":"),
(":", "::")]
except AssertionError:
raise TestFailed, "re.findall"
if verbose:
print "Running tests on re.match"
try:
# No groups at all
m = re.match('a', 'a') ; assert m.groups() == ()
# A single group
m = re.match('(a)', 'a') ; assert m.groups() == ('a',)
pat = re.compile('((a)|(b))(c)?')
assert pat.match('a').groups() == ('a', 'a', None, None)
assert pat.match('b').groups() == ('b', None, 'b', None)
assert pat.match('ac').groups() == ('a', 'a', None, 'c')
assert pat.match('bc').groups() == ('b', None, 'b', 'c')
assert pat.match('bc').groups("") == ('b', "", 'b', 'c')
except AssertionError:
raise TestFailed, "match .groups() method"
try:
# A single group
m = re.match('(a)', 'a')
assert m.group(0) == 'a' ; assert m.group(0) == 'a'
assert m.group(1) == 'a' ; assert m.group(1, 1) == ('a', 'a')
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
assert pat.match('a').group(1, 2, 3) == ('a', None, None)
assert pat.match('b').group('a1', 'b2', 'c3') == (None, 'b', None)
assert pat.match('ac').group(1, 'b2', 3) == ('a', None, 'c')
except AssertionError:
raise TestFailed, "match .group() method"
if verbose:
print "Running tests on re.escape"
try:
p=""
for i in range(0, 256):
p = p + chr(i)
assert re.match(re.escape(chr(i)), chr(i)) != None
assert re.match(re.escape(chr(i)), chr(i)).span() == (0,1)
pat=re.compile( re.escape(p) )
assert pat.match(p) != None
assert pat.match(p).span() == (0,256)
except AssertionError:
raise TestFailed, "re.escape"
if verbose:
    print 'Pickling a RegexObject instance'
# Round-trip a compiled pattern through pickle; there is no assert here,
# the check is simply that dumps/loads do not raise.
import pickle
pat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(pat)
pat = pickle.loads(s)
# The one-letter flag aliases must equal their long-form constants.
try:
    assert re.I == re.IGNORECASE
    assert re.L == re.LOCALE
    assert re.M == re.MULTILINE
    assert re.S == re.DOTALL
    assert re.X == re.VERBOSE
except AssertionError:
    raise TestFailed, 're module constants'
# Compiling with each individual flag must not raise.
for flags in [re.I, re.M, re.X, re.S, re.L]:
    try:
        r = re.compile('^pattern$', flags)
    except:
        print 'Exception raised on flag', flags
if verbose:
    print 'Test engine limitations'
# Try nasty case that overflows the straightforward recursive
# implementation of repeated groups.  A RuntimeError here means the
# engine hit its recursion limit; it is reported but not fatal.
try:
    assert re.match('(x)*', 50000*'x').span() == (0, 50000)
except RuntimeError, v:
    print v
# Data-driven suite: re_tests supplies `tests`, a list of tuples that are
# either (pattern, string, outcome) or
# (pattern, string, outcome, repl-expression, expected-repl-value),
# plus the outcome constants SYNTAX_ERROR / FAIL / SUCCEED.
from re_tests import *
if verbose:
    print 'Running re_tests test suite'
else:
    # To save time, only run the first and last 10 tests
    #tests = tests[:10] + tests[-10:]
    pass
for t in tests:
    sys.stdout.flush()
    pattern = s = outcome = repl = expected = None
    if len(t) == 5:
        pattern, s, outcome, repl, expected = t
    elif len(t) == 3:
        pattern, s, outcome = t
    else:
        raise ValueError, ('Test tuples should have 3 or 5 fields', t)
    try:
        obj = re.compile(pattern)
    except re.error:
        if outcome == SYNTAX_ERROR: pass # Expected a syntax error
        else:
            print '=== Syntax error:', t
    except KeyboardInterrupt: raise KeyboardInterrupt
    except:
        print '*** Unexpected error ***', t
        if verbose:
            traceback.print_exc(file=sys.stdout)
    else:
        try:
            result = obj.search(s)
        except re.error, msg:
            print '=== Unexpected exception', t, repr(msg)
        if outcome == SYNTAX_ERROR:
            # This should have been a syntax error; forget it.
            pass
        elif outcome == FAIL:
            if result is None: pass # No match, as expected
            else: print '=== Succeeded incorrectly', t
        elif outcome == SUCCEED:
            if result is not None:
                # Matched, as expected, so now we compute the
                # result string and compare it to our expected result.
                # vardict maps the names usable inside `repl` expressions
                # (found, groups, flags, g1..g99, and named groups).
                start, end = result.span(0)
                vardict={'found': result.group(0),
                         'groups': result.group(),
                         'flags': result.re.flags}
                for i in range(1, 100):
                    try:
                        gi = result.group(i)
                        # Special hack because else the string concat fails:
                        if gi is None:
                            gi = "None"
                    except IndexError:
                        gi = "Error"
                    vardict['g%d' % i] = gi
                for i in result.re.groupindex.keys():
                    try:
                        gi = result.group(i)
                        if gi is None:
                            gi = "None"
                    except IndexError:
                        gi = "Error"
                    vardict[i] = gi
                repl = eval(repl, vardict)
                if repl != expected:
                    print '=== grouping error', t,
                    print repr(repl) + ' should be ' + repr(expected)
            else:
                print '=== Failed incorrectly', t
            # Try the match on a unicode string, and check that it
            # still succeeds.
            result = obj.search(unicode(s, "latin-1"))
            if result == None:
                print '=== Fails on unicode match', t
            # Try the match on a unicode pattern, and check that it
            # still succeeds.
            obj=re.compile(unicode(pattern, "latin-1"))
            result = obj.search(s)
            if result == None:
                print '=== Fails on unicode pattern match', t
            # Try the match with the search area limited to the extent
            # of the match and see if it still succeeds.  \B will
            # break (because it won't match at the end or start of a
            # string), so we'll ignore patterns that feature it.
            if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
                           and result != None:
                obj = re.compile(pattern)
                result = obj.search(s, result.start(0), result.end(0) + 1)
                if result == None:
                    print '=== Failed on range-limited match', t
            # Try the match with IGNORECASE enabled, and check that it
            # still succeeds.
            obj = re.compile(pattern, re.IGNORECASE)
            result = obj.search(s)
            if result == None:
                print '=== Fails on case-insensitive match', t
            # Try the match with LOCALE enabled, and check that it
            # still succeeds.
            obj = re.compile(pattern, re.LOCALE)
            result = obj.search(s)
            if result == None:
                print '=== Fails on locale-sensitive match', t
            # Try the match with UNICODE locale enabled, and check
            # that it still succeeds.
            obj = re.compile(pattern, re.UNICODE)
            result = obj.search(s)
            if result == None:
                print '=== Fails on unicode-sensitive match', t
| mit |
99cloud/keystone_register | openstack_dashboard/dashboards/project/instances/tests.py | 6 | 55321 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from django import http
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.utils.datastructures import SortedDict
from mox import IsA, IgnoreArg
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from .tables import LaunchLink
from .tabs import InstanceDetailTabs
from .workflows import LaunchInstance
INDEX_URL = reverse('horizon:project:instances:index')
class InstanceTests(test.TestCase):
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'tenant_absolute_limits')})
    def test_index(self):
        """Index view renders the instances table with every Nova server."""
        # mox record phase: declare the exact Nova calls the view will make.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(
            reverse('horizon:project:instances:index'))
        self.assertTemplateUsed(res,
                                'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('server_list',
                                   'tenant_absolute_limits')})
    def test_index_server_list_exception(self):
        """A Nova failure while listing servers shows an empty table and one error."""
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:instances:index'))
        self.assertTemplateUsed(res, 'project/instances/index.html')
        self.assertEqual(len(res.context['instances_table'].data), 0)
        self.assertMessageCount(res, error=1)
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'flavor_get',
                                   'tenant_absolute_limits')})
    def test_index_flavor_list_exception(self):
        """When bulk flavor_list fails, the view falls back to per-server flavor_get."""
        servers = self.servers.list()
        flavors = self.flavors.list()
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn(servers)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        # Fallback path: one flavor_get call is expected for each server.
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndReturn(full_flavors[server.flavor["id"]])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:instances:index'))
        self.assertTemplateUsed(res, 'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'flavor_get',
                                   'tenant_absolute_limits')})
    def test_index_flavor_get_exception(self):
        """Per-server flavor_get failures still render the table, one error each."""
        servers = self.servers.list()
        flavors = self.flavors.list()
        # UUIDs generated using indexes are unlikely to match
        # any of existing flavor ids and are guaranteed to be deterministic.
        for i, server in enumerate(servers):
            server.flavor['id'] = str(uuid.UUID(int=i))
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn(servers)
        api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndRaise(self.exceptions.nova)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:instances:index'))
        instances = res.context['instances_table'].data
        self.assertTemplateUsed(res, 'project/instances/index.html')
        # One error message per server whose flavor lookup failed.
        self.assertMessageCount(res, error=len(servers))
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'server_delete',)})
    def test_terminate_instance(self):
        """Posting the terminate row action calls server_delete and redirects."""
        server = self.servers.first()
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.nova.server_delete(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'server_delete',)})
    def test_terminate_instance_exception(self):
        """A server_delete failure still redirects back to the index."""
        server = self.servers.first()
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.nova.server_delete(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_pause',
                                   'server_list',
                                   'flavor_list',)})
    def test_pause_instance(self):
        """Posting the pause row action calls server_pause and redirects."""
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_pause(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_pause',
                                   'server_list',
                                   'flavor_list',)})
    def test_pause_instance_exception(self):
        """A server_pause failure still redirects back to the index."""
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_pause(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_unpause',
                                   'server_list',
                                   'flavor_list',)})
    def test_unpause_instance(self):
        """The pause action on a PAUSED server triggers server_unpause."""
        server = self.servers.first()
        # NOTE(review): the posted action name stays 'pause'; presumably the
        # table action toggles to unpause based on status — verify in tables.py.
        server.status = "PAUSED"
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_unpause(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_unpause',
                                   'server_list',
                                   'flavor_list',)})
    def test_unpause_instance_exception(self):
        """A server_unpause failure still redirects back to the index."""
        server = self.servers.first()
        server.status = "PAUSED"
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_unpause(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',)})
    def test_reboot_instance(self):
        """The reboot row action issues a HARD reboot and redirects."""
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               api.nova.REBOOT_HARD)
        self.mox.ReplayAll()
        formData = {'action': 'instances__reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',)})
    def test_reboot_instance_exception(self):
        """A server_reboot failure still redirects back to the index."""
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               api.nova.REBOOT_HARD) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',)})
    def test_soft_reboot_instance(self):
        """The soft_reboot row action issues a SOFT reboot and redirects."""
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               api.nova.REBOOT_SOFT)
        self.mox.ReplayAll()
        formData = {'action': 'instances__soft_reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_suspend',
                                   'server_list',
                                   'flavor_list',)})
    def test_suspend_instance(self):
        """The suspend row action calls server_suspend (id coerced to unicode)."""
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_suspend(IsA(http.HttpRequest), unicode(server.id))
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_suspend',
                                   'server_list',
                                   'flavor_list',)})
    def test_suspend_instance_exception(self):
        """A server_suspend failure still redirects back to the index."""
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_suspend(IsA(http.HttpRequest), unicode(server.id)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_resume',
                                   'server_list',
                                   'flavor_list',)})
    def test_resume_instance(self):
        """The suspend action on a SUSPENDED server triggers server_resume."""
        server = self.servers.first()
        # NOTE(review): the posted action name stays 'suspend'; presumably the
        # table action toggles to resume based on status — verify in tables.py.
        server.status = "SUSPENDED"
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_resume(IsA(http.HttpRequest), unicode(server.id))
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_resume',
                                   'server_list',
                                   'flavor_list',)})
    def test_resume_instance_exception(self):
        """A server_resume failure still redirects back to the index."""
        server = self.servers.first()
        server.status = "SUSPENDED"
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.server_resume(IsA(http.HttpRequest),
                               unicode(server.id)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ("server_get",
                                   "instance_volumes_list",
                                   "flavor_get",
                                   "server_security_groups")})
    def test_instance_details_volumes(self):
        """Detail view exposes the instance's attached volumes in the context."""
        server = self.servers.first()
        volumes = [self.volumes.list()[1]]
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.instance_volumes_list(IsA(http.HttpRequest),
                                       server.id).AndReturn(volumes)
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
            .AndReturn(self.flavors.first())
        api.nova.server_security_groups(IsA(http.HttpRequest), server.id) \
            .AndReturn(self.security_groups.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:detail',
                      args=[server.id])
        res = self.client.get(url)
        self.assertItemsEqual(res.context['instance'].volumes, volumes)
    @test.create_stubs({api.nova: ("server_get",
                                   "instance_volumes_list",
                                   "flavor_get",
                                   "server_security_groups")})
    def test_instance_details_volume_sorting(self):
        """Volumes on the detail page are ordered by device name (hda before hdk)."""
        server = self.servers.first()
        volumes = self.volumes.list()[1:3]
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.instance_volumes_list(IsA(http.HttpRequest),
                                       server.id).AndReturn(volumes)
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
            .AndReturn(self.flavors.first())
        api.nova.server_security_groups(IsA(http.HttpRequest), server.id) \
            .AndReturn(self.security_groups.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:detail',
                      args=[server.id])
        res = self.client.get(url)
        self.assertItemsEqual(res.context['instance'].volumes, volumes)
        self.assertEquals(res.context['instance'].volumes[0].device,
                          "/dev/hda")
        self.assertEquals(res.context['instance'].volumes[1].device,
                          "/dev/hdk")
    @test.create_stubs({api.nova: ("server_get",
                                   "instance_volumes_list",
                                   "flavor_get",
                                   "server_security_groups",)})
    def test_instance_details_metadata(self):
        """Overview tab renders server metadata with HTML-escaped keys/values."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.instance_volumes_list(IsA(http.HttpRequest),
                                       server.id).AndReturn([])
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
            .AndReturn(self.flavors.first())
        api.nova.server_security_groups(IsA(http.HttpRequest), server.id) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:detail',
                      args=[server.id])
        tg = InstanceDetailTabs(self.request, instance=server)
        qs = "?%s=%s" % (tg.param_name, tg.get_tab("overview").get_id())
        res = self.client.get(url + qs)
        self.assertContains(res, "<dd>keyName</dd>", 1)
        self.assertContains(res, "<dt>someMetaLabel</dt>", 1)
        self.assertContains(res, "<dd>someMetaData</dd>", 1)
        # Markup inside metadata must be escaped, never rendered.
        self.assertContains(res, "<dt>some&lt;b&gt;html&lt;/b&gt;label</dt>",
                            1)
        self.assertContains(res, "<dd>&lt;!--</dd>", 1)
        self.assertContains(res, "<dt>empty</dt>", 1)
        self.assertContains(res, "<dd><em>N/A</em></dd>", 1)
    @test.create_stubs({api.nova: ('server_console_output',)})
    def test_instance_log(self):
        """Log tab returns the raw console output from Nova."""
        server = self.servers.first()
        CONSOLE_OUTPUT = 'output'
        api.nova.server_console_output(IsA(http.HttpRequest),
                                       server.id, tail_length=None) \
            .AndReturn(CONSOLE_OUTPUT)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:console',
                      args=[server.id])
        tg = InstanceDetailTabs(self.request, instance=server)
        qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
        res = self.client.get(url + qs)
        self.assertNoMessages()
        self.assertIsInstance(res, http.HttpResponse)
        self.assertContains(res, CONSOLE_OUTPUT)
    @test.create_stubs({api.nova: ('server_console_output',)})
    def test_instance_log_exception(self):
        """A console-output failure renders an inline error message."""
        server = self.servers.first()
        api.nova.server_console_output(IsA(http.HttpRequest),
                                       server.id, tail_length=None) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:console',
                      args=[server.id])
        tg = InstanceDetailTabs(self.request, instance=server)
        qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
        res = self.client.get(url + qs)
        self.assertContains(res, "Unable to get log for")
    def test_instance_vnc(self):
        """VNC view redirects to the console URL with the instance name appended."""
        server = self.servers.first()
        CONSOLE_OUTPUT = '/vncserver'
        # Stubs are set up manually here (no decorator) so a mock VNCConsole
        # object with a .url attribute can be returned.
        console_mock = self.mox.CreateMock(api.nova.VNCConsole)
        console_mock.url = CONSOLE_OUTPUT
        self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
        self.mox.StubOutWithMock(api.nova, 'server_get')
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        api.nova.server_vnc_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:vnc',
                      args=[server.id])
        res = self.client.get(url)
        redirect = CONSOLE_OUTPUT + '&title=%s(1)' % server.name
        self.assertRedirectsNoFollow(res, redirect)
    @test.create_stubs({api.nova: ('server_vnc_console',)})
    def test_instance_vnc_exception(self):
        """A VNC console failure redirects back to the index."""
        server = self.servers.first()
        api.nova.server_vnc_console(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:vnc',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    def test_instance_spice(self):
        """SPICE view redirects to the console URL with the instance name appended."""
        server = self.servers.first()
        CONSOLE_OUTPUT = '/spiceserver'
        # Manual stubbing mirrors test_instance_vnc, but for SPICEConsole.
        console_mock = self.mox.CreateMock(api.nova.SPICEConsole)
        console_mock.url = CONSOLE_OUTPUT
        self.mox.StubOutWithMock(api.nova, 'server_spice_console')
        self.mox.StubOutWithMock(api.nova, 'server_get')
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        api.nova.server_spice_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:spice',
                      args=[server.id])
        res = self.client.get(url)
        redirect = CONSOLE_OUTPUT + '&title=%s(1)' % server.name
        self.assertRedirectsNoFollow(res, redirect)
    @test.create_stubs({api.nova: ('server_spice_console',)})
    def test_instance_spice_exception(self):
        """A SPICE console failure redirects back to the index."""
        server = self.servers.first()
        api.nova.server_spice_console(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:spice',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_get',
                                   'snapshot_create',
                                   'server_list',
                                   'flavor_list',
                                   'server_delete'),
                        cinder: ('volume_snapshot_list',),
                        api.glance: ('snapshot_list_detailed',
                                     'image_list_detailed')})
    def test_create_instance_snapshot(self):
        """Snapshot form posts to snapshot_create and redirects to images index."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.snapshot_create(IsA(http.HttpRequest),
                                 server.id,
                                 "snapshot1").AndReturn(self.snapshots.first())
        # The redirect target re-lists images/snapshots, hence these stubs.
        api.glance.snapshot_list_detailed(IsA(http.HttpRequest),
                                          marker=None).AndReturn([[], False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([[], False])
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        self.mox.ReplayAll()
        formData = {'instance_id': server.id,
                    'method': 'CreateSnapshot',
                    'name': 'snapshot1'}
        url = reverse('horizon:project:images_and_snapshots:snapshots:create',
                      args=[server.id])
        redir_url = reverse('horizon:project:images_and_snapshots:index')
        res = self.client.post(url, formData)
        self.assertRedirects(res, redir_url)
instance_update_get_stubs = {
api.nova: ('server_get',
'security_group_list',
'server_security_groups',)}
    @test.create_stubs(instance_update_get_stubs)
    def test_instance_update_get(self):
        """Update view GET renders the update form template."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id).AndReturn([])
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:update', args=[server.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/instances/update.html')
    @test.create_stubs(instance_update_get_stubs)
    def test_instance_update_get_server_get_exception(self):
        """If server_get fails, the update view redirects to the index."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:update',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def _instance_update_post(self, server_id, server_name, secgroups):
formData = {'name': server_name,
'default_role': 'member',
'role_member': secgroups}
url = reverse('horizon:project:instances:update',
args=[server_id])
return self.client.post(url, formData)
instance_update_post_stubs = {
api.nova: ('server_get', 'server_update',
'security_group_list',
'server_security_groups',
'server_add_security_group',
'server_remove_security_group')}
    @test.create_stubs(instance_update_post_stubs)
    def test_instance_update_post(self):
        """Renaming an instance calls server_update and redirects."""
        server = self.servers.first()
        secgroups = self.security_groups.list()[:3]
        new_name = 'manuel'
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(secgroups)
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id).AndReturn([])
        api.nova.server_update(IsA(http.HttpRequest),
                               server.id,
                               new_name).AndReturn(server)
        # Queried a second time after the update to diff security groups.
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id).AndReturn([])
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, new_name, [])
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_update_post_stubs)
    def test_instance_update_secgroup_post(self):
        """Changing group membership adds/removes exactly the diffed groups."""
        server = self.servers.first()
        secgroups = self.security_groups.list()[:3]
        # Currently attached: 0 and 1; requested: 1 and 2.
        # So group 2 must be added and group 0 removed.
        server_groups = [secgroups[0], secgroups[1]]
        wanted_groups = [secgroups[1].name, secgroups[2].name]
        expect_add = secgroups[2].name
        expect_rm = secgroups[0].name
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(secgroups)
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id).AndReturn(server_groups)
        api.nova.server_update(IsA(http.HttpRequest),
                               server.id,
                               server.name).AndReturn(server)
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id).AndReturn(server_groups)
        api.nova.server_add_security_group(IsA(http.HttpRequest),
                                           server.id,
                                           expect_add).AndReturn(server)
        api.nova.server_remove_security_group(IsA(http.HttpRequest),
                                              server.id,
                                              expect_rm).AndReturn(server)
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, wanted_groups)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_update_post_stubs)
    def test_instance_update_post_api_exception(self):
        """Failures in both server_update and the group re-query still redirect."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id).AndReturn([])
        api.nova.server_update(IsA(http.HttpRequest), server.id, server.name) \
            .AndRaise(self.exceptions.nova)
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, [])
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_update_post_stubs)
    def test_instance_update_post_secgroup_api_exception(self):
        """A failure only in the post-update group query still redirects."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id).AndReturn([])
        api.nova.server_update(IsA(http.HttpRequest),
                               server.id,
                               server.name).AndReturn(server)
        api.nova.server_security_groups(IsA(http.HttpRequest),
                                        server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, [])
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('flavor_list',
                                   'keypair_list',
                                   'security_group_list',),
                        cinder: ('volume_snapshot_list',
                                 'volume_list',),
                        quotas: ('tenant_quota_usages',),
                        api.quantum: ('network_list',),
                        api.glance: ('image_list_detailed',)})
    def test_launch_instance_get(self):
        """Launch workflow GET pre-selects the image passed in the query string
        and assembles the expected workflow steps."""
        quota_usages = self.quota_usages.first()
        image = self.images.first()
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        # Two glance queries: public images, then the tenant's own images.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                            filters={'property-owner_id': self.tenant.id,
                                     'status': 'active'}) \
            .AndReturn([[], False])
        # Two quantum queries: tenant networks, then shared networks.
        api.quantum.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        # flavor_list is recorded twice: the workflow consults it from two
        # places during a single GET.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        params = urlencode({"source_type": "image_id",
                            "source_id": image.id})
        res = self.client.get("%s?%s" % (url, params))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res,
                        'project/instances/launch.html')
        self.assertEqual(res.context['workflow'].name, LaunchInstance.name)
        step = workflow.get_step("setinstancedetailsaction")
        self.assertEqual(step.action.initial['image_id'], image.id)
        self.assertQuerysetEqual(workflow.steps,
                            ['<SetInstanceDetails: setinstancedetailsaction>',
                             '<SetAccessControls: setaccesscontrolsaction>',
                             '<SetNetwork: setnetworkaction>',
                             '<VolumeOptions: volumeoptionsaction>',
                             '<PostCreationStep: customizeaction>'])
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.quantum: ('network_list',),
                        quotas: ('tenant_quota_usages',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'security_group_list',
                                   'server_create',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',)})
    def test_launch_instance_post(self):
        """Submitting the launch form calls server_create with the volume
        mapping, NIC list, and all selected options, then redirects."""
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        # Expected block-device mapping: device -> "<vol id>:vol::0".
        block_device_mapping = {device_name: u"%s::0" % volume_choice}
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                            filters={'property-owner_id': self.tenant.id,
                                     'status': 'active'}) \
            .AndReturn([[], False])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               image.id,
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping,
                               nics=nics,
                               instance_count=IsA(int))
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'network': self.networks.first().id,
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.quantum: ('network_list',),
                        quotas: ('tenant_quota_usages',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'security_group_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',)})
    def test_launch_instance_post_no_images_available(self):
        """POST to the launch form must fail when no image source exists.

        Both glance listings (public and project-owned images) are stubbed
        to return empty result sets, so the workflow has no image choices
        and should re-render the launch template with a single form error.
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        # Recorded mox expectations for the data the workflow fetches.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        quotas.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn({})
        # Neither public nor project-owned images are available.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([[], False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        self.mox.ReplayAll()
        # An empty 'image_id' mirrors the form state when no images exist.
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': '',
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertFormErrors(res, 1, 'There are no image sources available; '
                                      'you must first create an image before '
                                      'attempting to launch an instance.')
        self.assertTemplateUsed(res,
                                'project/instances/launch.html')
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.quantum: ('network_list',),
                        quotas: ('tenant_quota_usages',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'security_group_list',)})
    def test_launch_flavorlist_error(self):
        """GET of the launch form still renders when flavor_list fails.

        Both recorded flavor_list calls raise the canned nova exception;
        the view is expected to handle the failure and render the launch
        template anyway instead of erroring out.
        """
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(self.quota_usages.first())
        # The flavor listing is attempted twice and fails both times.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'project/instances/launch.html')
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.quantum: ('network_list',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'security_group_list',
                                   'server_create',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',)})
    def test_launch_form_keystone_exception(self):
        """A keystone failure from server_create redirects back to the index.

        server_create is stubbed to raise the canned keystone exception;
        the view should trap it and redirect to INDEX_URL rather than
        surface a traceback to the user.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        customization_script = 'userData'
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.nova.keypair_list(IgnoreArg()).AndReturn(self.keypairs.list())
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        cinder.volume_list(IgnoreArg()).AndReturn(self.volumes.list())
        # The create call itself fails with the stubbed keystone error.
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               image.id,
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               None,
                               nics=nics,
                               instance_count=IsA(int)) \
            .AndRaise(self.exceptions.keystone)
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': '',
                     'network': self.networks.first().id,
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.quantum: ('network_list',),
                        quotas: ('tenant_quota_usages',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'security_group_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',)})
    def test_launch_form_instance_count_error(self):
        """An instance count below 1 must be rejected by form validation."""
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        # Recorded mox expectations for the workflow's data fetches.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(self.quota_usages.first())
        self.mox.ReplayAll()
        # 'count': 0 violates the form's minimum-value validator.
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 0}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertContains(res, "greater than or equal to 1")
    @test.create_stubs({api.nova: ('flavor_list', 'server_list',
                                   'tenant_absolute_limits',)})
    def test_launch_button_disabled_when_quota_exceeded(self):
        """The index page renders a disabled launch link at instance quota."""
        limits = self.limits['absolute']
        # Consume the whole instance quota so LaunchLink disables itself.
        limits['totalInstancesUsed'] = limits['maxTotalInstances']
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(limits)
        self.mox.ReplayAll()
        # Rebuild the anchor markup the table action is expected to emit,
        # including the "(Quota exceeded)" suffix and 'disabled' CSS class.
        launch = LaunchLink()
        url = launch.get_link_url()
        classes = list(launch.get_default_classes()) + list(launch.classes)
        link_name = "%s (%s)" % (unicode(launch.verbose_name),
                                 "Quota exceeded")
        expected_string = "<a href='%s' id='instances__action_launch' " \
                          "title='%s' class='%s disabled'>%s</a>" \
                          % (url, link_name, " ".join(classes), link_name)
        res = self.client.get(INDEX_URL)
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The launch button is not disabled")
    @test.create_stubs({api.nova: ('flavor_list', 'server_list',
                                   'tenant_absolute_limits')})
    def test_index_options_after_migrate(self):
        """A server in VERIFY_RESIZE shows confirm/revert row actions."""
        server = self.servers.first()
        # Put the first server into the post-migrate verification state.
        server.status = "VERIFY_RESIZE"
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertContains(res, "instances__confirm")
        self.assertContains(res, "instances__revert")
    @test.create_stubs({api.nova: ('flavor_list',
                                   'keypair_list',
                                   'security_group_list',),
                        cinder: ('volume_snapshot_list',
                                 'volume_list',),
                        quotas: ('tenant_quota_usages',),
                        api.quantum: ('network_list',),
                        api.glance: ('image_list_detailed',)})
    def test_select_default_keypair_if_only_one(self):
        """With exactly one keypair, the launch form pre-selects it."""
        keypair = self.keypairs.first()
        quota_usages = self.quota_usages.first()
        # NOTE(review): 'image' appears unused in this test.
        image = self.images.first()
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'property-owner_id': self.tenant.id,
                                                'status': 'active'}) \
            .AndReturn([[], False])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.quantum.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        # Only one keypair is returned, so it should come back selected.
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn([keypair])
        api.nova.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        res = self.client.get(url)
        self.assertContains(res, "<option selected='selected' value='%(key)s'>"
                                 "%(key)s</option>" % {'key': keypair.name},
                            html=True,
                            msg_prefix="The default keypair was not selected.")
| apache-2.0 |
sxjscience/mxnet | python/mxnet/io/__init__.py | 11 | 1296 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
""" Data iterators for common data formats and utility functions."""
from . import io
from .io import CSVIter, DataBatch, DataDesc, DataIter, ImageDetRecordIter, ImageRecordInt8Iter, ImageRecordIter,\
ImageRecordIter_v1, ImageRecordUInt8Iter, ImageRecordUInt8Iter_v1, LibSVMIter, MNISTIter, MXDataIter, NDArrayIter,\
PrefetchingIter, ResizeIter
from . import utils
from .utils import _init_data, _getdata_by_idx, _has_instance
| apache-2.0 |
GitHublong/hue | desktop/core/ext-py/lxml/src/lxml/tests/test_relaxng.py | 29 | 5062 | # -*- coding: utf-8 -*-
"""
Test cases related to RelaxNG parsing and validation
"""
import unittest, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, BytesIO, _bytes, HelperTestCase, fileInTestDir
from common_imports import doctest, make_doctest
class ETreeRelaxNGTestCase(HelperTestCase):
    """Tests for RelaxNG schema parsing and validation via ``etree.RelaxNG``.

    Covers schema construction from parsed trees, file objects and files,
    validation results and error logs, rejection of malformed schemas,
    and the ``ElementTree.relaxng()`` shortcut.

    Note: the deprecated ``assert_`` alias (removed in Python 3.12) has been
    replaced with ``assertTrue``/``assertFalse`` throughout.
    """
    def test_relaxng(self):
        """A compiled schema accepts matching and rejects mismatching docs."""
        tree_valid = self.parse('<a><b></b></a>')
        tree_invalid = self.parse('<a><c></c></a>')
        schema = self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
  <zeroOrMore>
    <element name="b">
      <text />
    </element>
  </zeroOrMore>
</element>
''')
        schema = etree.RelaxNG(schema)
        self.assertTrue(schema.validate(tree_valid))
        self.assertFalse(schema.validate(tree_invalid))
    def test_relaxng_stringio(self):
        """A schema can be loaded from an in-memory file object."""
        tree_valid = self.parse('<a><b></b></a>')
        tree_invalid = self.parse('<a><c></c></a>')
        schema_file = BytesIO('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
  <zeroOrMore>
    <element name="b">
      <text />
    </element>
  </zeroOrMore>
</element>
''')
        schema = etree.RelaxNG(file=schema_file)
        self.assertTrue(schema.validate(tree_valid))
        self.assertFalse(schema.validate(tree_invalid))
    def test_relaxng_elementtree_error(self):
        """An empty ElementTree is not a usable schema source."""
        self.assertRaises(ValueError, etree.RelaxNG, etree.ElementTree())
    def test_relaxng_error(self):
        """Failed validation populates the schema's error log."""
        tree_invalid = self.parse('<a><c></c></a>')
        schema = self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
  <zeroOrMore>
    <element name="b">
      <text />
    </element>
  </zeroOrMore>
</element>
''')
        schema = etree.RelaxNG(schema)
        self.assertFalse(schema.validate(tree_invalid))
        errors = schema.error_log
        # At least one entry must be an ERROR mentioning the unexpected tag.
        self.assertTrue([ log for log in errors
                          if log.level_name == "ERROR" ])
        self.assertTrue([ log for log in errors
                          if "not expect" in log.message ])
    def test_relaxng_invalid_schema(self):
        """A 'b' element with no content model is rejected at parse time."""
        schema = self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
  <zeroOrMore>
    <element name="b" />
  </zeroOrMore>
</element>
''')
        self.assertRaises(etree.RelaxNGParseError,
                          etree.RelaxNG, schema)
    def test_relaxng_invalid_schema2(self):
        """An empty grammar element is rejected at parse time."""
        schema = self.parse('''\
<grammar xmlns="http://relaxng.org/ns/structure/1.0" />
''')
        self.assertRaises(etree.RelaxNGParseError,
                          etree.RelaxNG, schema)
    def test_relaxng_invalid_schema3(self):
        """A grammar without a start element is rejected at parse time."""
        schema = self.parse('''\
<grammar xmlns="http://relaxng.org/ns/structure/1.0">
  <define name="test">
    <element name="test"/>
  </define>
</grammar>
''')
        self.assertRaises(etree.RelaxNGParseError,
                          etree.RelaxNG, schema)
    def test_relaxng_invalid_schema4(self):
        """A wrong schema namespace must raise, not crash.

        This used to segfault, so the test mainly guards against a
        regression in the underlying library.
        """
        schema = self.parse('''\
<element name="a" xmlns="mynamespace" />
''')
        self.assertRaises(etree.RelaxNGParseError,
                          etree.RelaxNG, schema)
    def test_relaxng_include(self):
        """A schema that includes other files loads from a real file object.

        This will only work if we access the file through a path or file
        object so relative includes can be resolved.
        """
        f = open(fileInTestDir('test1.rng'), 'rb')
        try:
            schema = etree.RelaxNG(file=f)
        finally:
            # Close the handle explicitly; it was previously leaked.
            f.close()
    def test_relaxng_shortcut(self):
        """ElementTree.relaxng() validates directly against a schema tree."""
        tree_valid = self.parse('<a><b></b></a>')
        tree_invalid = self.parse('<a><c></c></a>')
        schema = self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
  <zeroOrMore>
    <element name="b">
      <text />
    </element>
  </zeroOrMore>
</element>
''')
        self.assertTrue(tree_valid.relaxng(schema))
        self.assertFalse(tree_invalid.relaxng(schema))
    def test_multiple_elementrees(self):
        """Schemas can be reused and applied to subtree ElementTrees."""
        tree = self.parse('<a><b>B</b><c>C</c></a>')
        schema = etree.RelaxNG( self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
  <element name="b">
    <text />
  </element>
  <element name="c">
    <text />
  </element>
</element>
''') )
        # Validating twice must give the same result (no state carried over).
        self.assertTrue(schema.validate(tree))
        self.assertTrue(schema.validate(tree))
        schema = etree.RelaxNG( self.parse('''\
<element name="b" xmlns="http://relaxng.org/ns/structure/1.0">
  <text />
</element>
''') )
        c_tree = etree.ElementTree(tree.getroot()[1])
        self.assertEqual(self._rootstring(c_tree), _bytes('<c>C</c>'))
        self.assertFalse(schema.validate(c_tree))
        b_tree = etree.ElementTree(tree.getroot()[0])
        self.assertEqual(self._rootstring(b_tree), _bytes('<b>B</b>'))
        self.assertTrue(schema.validate(b_tree))
def test_suite():
    """Collect the RelaxNG unit tests plus the validation doctest."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(ETreeRelaxNGTestCase))
    tests.addTest(make_doctest('../../../doc/validation.txt'))
    return tests
if __name__ == '__main__':
    # This module is normally run through lxml's test.py runner.
    print('to test use test.py %s' % __file__)
| apache-2.0 |
docusign/docusign-python-client | docusign_esign/models/bulk_recipients_response.py | 1 | 9575 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BulkRecipientsResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'bulk_recipients': 'list[BulkRecipient]',
        'end_position': 'str',
        'next_uri': 'str',
        'previous_uri': 'str',
        'result_set_size': 'str',
        'start_position': 'str',
        'total_set_size': 'str'
    }

    attribute_map = {
        'bulk_recipients': 'bulkRecipients',
        'end_position': 'endPosition',
        'next_uri': 'nextUri',
        'previous_uri': 'previousUri',
        'result_set_size': 'resultSetSize',
        'start_position': 'startPosition',
        'total_set_size': 'totalSetSize'
    }

    def __init__(self, bulk_recipients=None, end_position=None, next_uri=None, previous_uri=None, result_set_size=None, start_position=None, total_set_size=None):  # noqa: E501
        """BulkRecipientsResponse - a model defined in Swagger"""  # noqa: E501
        self._bulk_recipients = None
        self._end_position = None
        self._next_uri = None
        self._previous_uri = None
        self._result_set_size = None
        self._start_position = None
        self._total_set_size = None
        self.discriminator = None
        if bulk_recipients is not None:
            self.bulk_recipients = bulk_recipients
        if end_position is not None:
            self.end_position = end_position
        if next_uri is not None:
            self.next_uri = next_uri
        if previous_uri is not None:
            self.previous_uri = previous_uri
        if result_set_size is not None:
            self.result_set_size = result_set_size
        if start_position is not None:
            self.start_position = start_position
        if total_set_size is not None:
            self.total_set_size = total_set_size

    @property
    def bulk_recipients(self):
        """Gets the bulk_recipients of this BulkRecipientsResponse.  # noqa: E501

        A complex type containing information about the bulk recipients in the response.  # noqa: E501

        :return: The bulk_recipients of this BulkRecipientsResponse.  # noqa: E501
        :rtype: list[BulkRecipient]
        """
        return self._bulk_recipients

    @bulk_recipients.setter
    def bulk_recipients(self, bulk_recipients):
        """Sets the bulk_recipients of this BulkRecipientsResponse.

        A complex type containing information about the bulk recipients in the response.  # noqa: E501

        :param bulk_recipients: The bulk_recipients of this BulkRecipientsResponse.  # noqa: E501
        :type: list[BulkRecipient]
        """
        self._bulk_recipients = bulk_recipients

    @property
    def end_position(self):
        """Gets the end_position of this BulkRecipientsResponse.  # noqa: E501

        The last position in the result set.  # noqa: E501

        :return: The end_position of this BulkRecipientsResponse.  # noqa: E501
        :rtype: str
        """
        return self._end_position

    @end_position.setter
    def end_position(self, end_position):
        """Sets the end_position of this BulkRecipientsResponse.

        The last position in the result set.  # noqa: E501

        :param end_position: The end_position of this BulkRecipientsResponse.  # noqa: E501
        :type: str
        """
        self._end_position = end_position

    @property
    def next_uri(self):
        """Gets the next_uri of this BulkRecipientsResponse.  # noqa: E501

        The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null.  # noqa: E501

        :return: The next_uri of this BulkRecipientsResponse.  # noqa: E501
        :rtype: str
        """
        return self._next_uri

    @next_uri.setter
    def next_uri(self, next_uri):
        """Sets the next_uri of this BulkRecipientsResponse.

        The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null.  # noqa: E501

        :param next_uri: The next_uri of this BulkRecipientsResponse.  # noqa: E501
        :type: str
        """
        self._next_uri = next_uri

    @property
    def previous_uri(self):
        """Gets the previous_uri of this BulkRecipientsResponse.  # noqa: E501

        The URI to the previous chunk of records based on the search request, if any; otherwise null. (The generated docstring here previously read "The postal code for the billing address", a generator copy-paste error.)  # noqa: E501

        :return: The previous_uri of this BulkRecipientsResponse.  # noqa: E501
        :rtype: str
        """
        return self._previous_uri

    @previous_uri.setter
    def previous_uri(self, previous_uri):
        """Sets the previous_uri of this BulkRecipientsResponse.

        The URI to the previous chunk of records based on the search request, if any; otherwise null.  # noqa: E501

        :param previous_uri: The previous_uri of this BulkRecipientsResponse.  # noqa: E501
        :type: str
        """
        self._previous_uri = previous_uri

    @property
    def result_set_size(self):
        """Gets the result_set_size of this BulkRecipientsResponse.  # noqa: E501

        The number of results returned in this response.  # noqa: E501

        :return: The result_set_size of this BulkRecipientsResponse.  # noqa: E501
        :rtype: str
        """
        return self._result_set_size

    @result_set_size.setter
    def result_set_size(self, result_set_size):
        """Sets the result_set_size of this BulkRecipientsResponse.

        The number of results returned in this response.  # noqa: E501

        :param result_set_size: The result_set_size of this BulkRecipientsResponse.  # noqa: E501
        :type: str
        """
        self._result_set_size = result_set_size

    @property
    def start_position(self):
        """Gets the start_position of this BulkRecipientsResponse.  # noqa: E501

        Starting position of the current result set.  # noqa: E501

        :return: The start_position of this BulkRecipientsResponse.  # noqa: E501
        :rtype: str
        """
        return self._start_position

    @start_position.setter
    def start_position(self, start_position):
        """Sets the start_position of this BulkRecipientsResponse.

        Starting position of the current result set.  # noqa: E501

        :param start_position: The start_position of this BulkRecipientsResponse.  # noqa: E501
        :type: str
        """
        self._start_position = start_position

    @property
    def total_set_size(self):
        """Gets the total_set_size of this BulkRecipientsResponse.  # noqa: E501

        The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the in the response.  # noqa: E501

        :return: The total_set_size of this BulkRecipientsResponse.  # noqa: E501
        :rtype: str
        """
        return self._total_set_size

    @total_set_size.setter
    def total_set_size(self, total_set_size):
        """Sets the total_set_size of this BulkRecipientsResponse.

        The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the in the response.  # noqa: E501

        :param total_set_size: The total_set_size of this BulkRecipientsResponse.  # noqa: E501
        :type: str
        """
        self._total_set_size = total_set_size

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(BulkRecipientsResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BulkRecipientsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| mit |
kotsios5/openclassifieds2 | oc/vendor/bitpay/vendor/bitpay/php-client/docs/conf.py | 12 | 8254 | # -*- coding: utf-8 -*-
#
# BitPay PHP Library documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 22 13:29:09 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BitPay PHP Library'
copyright = u'2014, BitPay Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either you set today to some
# non-false value, in which case it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico), 16x16 or 32x32
# pixels in size.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BitPayPHPLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'BitPayPHPLibrary.tex', u'BitPay PHP Library Documentation',
u'BitPay Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bitpayphplibrary', u'BitPay PHP Library Documentation',
[u'BitPay Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'BitPayPHPLibrary', u'BitPay PHP Library Documentation',
u'BitPay Developers', 'BitPayPHPLibrary', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
antsmc2/mics | survey/tests/views/test_aggregates_view.py | 2 | 1066 | from django.test.client import Client
from django.contrib.auth.models import User
from survey.views.aggregates import *
from survey.tests.base_test import BaseTest
class AggregatesPageTest(BaseTest):
    """Parameter-validation checks for the aggregates view's is_valid()."""

    def setUp(self):
        self.client = Client()
        # One user without the permission, one with it; log in as the latter.
        unprivileged = User.objects.create_user(username='useless', email='rajni@kant.com', password='I_Suck')
        privileged = self.assign_permission_to(
            User.objects.create_user('Rajni', 'rajni@kant.com', 'I_Rock'),
            'can_view_aggregates')
        self.client.login(username='Rajni', password='I_Rock')

    def test_is_valid_params(self):
        params = {'location': '1', 'batch': '2'}
        self.assertTrue(is_valid(params))

    def test_empty_location_is_also_valid(self):
        params = {'location': '', 'batch': '2'}
        self.assertTrue(is_valid(params))

    def test_invalid(self):
        # A missing key or a non-numeric value must all be rejected.
        for params in ({'batch': '2'},
                       {'location': '2', 'batch': 'NOT_A_DIGIT'},
                       {'location': 'NOT_A_DIGIT', 'batch': '1'},
                       {'location': '1'}):
            self.assertFalse(is_valid(params))
| bsd-3-clause |
disruptek/boto | tests/unit/rds/test_snapshot.py | 121 | 14445 | from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.rds import RDSConnection
from boto.rds.dbsnapshot import DBSnapshot
from boto.rds import DBInstance
class TestDescribeDBSnapshots(AWSMockServiceTestCase):
    """Verify DescribeDBSnapshots request parameters and response parsing."""

    connection_class = RDSConnection

    def default_body(self):
        # Canned API response: three snapshots (two manual, one automated)
        # across two DB instances; the first snapshot carries the optional
        # PercentProgress / SourceRegion / VpcId fields asserted below.
        return """
        <DescribeDBSnapshotsResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
          <DescribeDBSnapshotsResult>
            <DBSnapshots>
              <DBSnapshot>
                <Port>3306</Port>
                <SnapshotCreateTime>2011-05-23T06:29:03.483Z</SnapshotCreateTime>
                <Engine>mysql</Engine>
                <Status>available</Status>
                <AvailabilityZone>us-east-1a</AvailabilityZone>
                <LicenseModel>general-public-license</LicenseModel>
                <InstanceCreateTime>2011-05-23T06:06:43.110Z</InstanceCreateTime>
                <AllocatedStorage>10</AllocatedStorage>
                <DBInstanceIdentifier>simcoprod01</DBInstanceIdentifier>
                <EngineVersion>5.1.50</EngineVersion>
                <DBSnapshotIdentifier>mydbsnapshot</DBSnapshotIdentifier>
                <SnapshotType>manual</SnapshotType>
                <MasterUsername>master</MasterUsername>
                <OptionGroupName>myoptiongroupname</OptionGroupName>
                <Iops>1000</Iops>
                <PercentProgress>100</PercentProgress>
                <SourceRegion>eu-west-1</SourceRegion>
                <VpcId>myvpc</VpcId>
              </DBSnapshot>
              <DBSnapshot>
                <Port>3306</Port>
                <SnapshotCreateTime>2011-03-11T07:20:24.082Z</SnapshotCreateTime>
                <Engine>mysql</Engine>
                <Status>available</Status>
                <AvailabilityZone>us-east-1a</AvailabilityZone>
                <LicenseModel>general-public-license</LicenseModel>
                <InstanceCreateTime>2010-08-04T23:27:36.420Z</InstanceCreateTime>
                <AllocatedStorage>50</AllocatedStorage>
                <DBInstanceIdentifier>mydbinstance</DBInstanceIdentifier>
                <EngineVersion>5.1.49</EngineVersion>
                <DBSnapshotIdentifier>mysnapshot1</DBSnapshotIdentifier>
                <SnapshotType>manual</SnapshotType>
                <MasterUsername>sa</MasterUsername>
                <OptionGroupName>myoptiongroupname</OptionGroupName>
                <Iops>1000</Iops>
              </DBSnapshot>
              <DBSnapshot>
                <Port>3306</Port>
                <SnapshotCreateTime>2012-04-02T00:01:24.082Z</SnapshotCreateTime>
                <Engine>mysql</Engine>
                <Status>available</Status>
                <AvailabilityZone>us-east-1d</AvailabilityZone>
                <LicenseModel>general-public-license</LicenseModel>
                <InstanceCreateTime>2010-07-16T00:06:59.107Z</InstanceCreateTime>
                <AllocatedStorage>60</AllocatedStorage>
                <DBInstanceIdentifier>simcoprod01</DBInstanceIdentifier>
                <EngineVersion>5.1.47</EngineVersion>
                <DBSnapshotIdentifier>rds:simcoprod01-2012-04-02-00-01</DBSnapshotIdentifier>
                <SnapshotType>automated</SnapshotType>
                <MasterUsername>master</MasterUsername>
                <OptionGroupName>myoptiongroupname</OptionGroupName>
                <Iops>1000</Iops>
              </DBSnapshot>
            </DBSnapshots>
          </DescribeDBSnapshotsResult>
          <ResponseMetadata>
            <RequestId>c4191173-8506-11e0-90aa-eb648410240d</RequestId>
          </ResponseMetadata>
        </DescribeDBSnapshotsResponse>
        """

    def test_describe_dbinstances_by_instance(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_dbsnapshots(instance_id='simcoprod01')
        self.assert_request_parameters({
            'Action': 'DescribeDBSnapshots',
            'DBInstanceIdentifier': 'simcoprod01'
        }, ignore_params_values=['Version'])
        # All three snapshots from the fixture are returned; spot-check the
        # first one field-by-field.
        self.assertEqual(len(response), 3)
        self.assertIsInstance(response[0], DBSnapshot)
        self.assertEqual(response[0].id, 'mydbsnapshot')
        self.assertEqual(response[0].status, 'available')
        self.assertEqual(response[0].instance_id, 'simcoprod01')
        self.assertEqual(response[0].engine_version, '5.1.50')
        self.assertEqual(response[0].license_model, 'general-public-license')
        self.assertEqual(response[0].iops, 1000)
        self.assertEqual(response[0].option_group_name, 'myoptiongroupname')
        self.assertEqual(response[0].percent_progress, 100)
        self.assertEqual(response[0].snapshot_type, 'manual')
        self.assertEqual(response[0].source_region, 'eu-west-1')
        self.assertEqual(response[0].vpc_id, 'myvpc')
class TestCreateDBSnapshot(AWSMockServiceTestCase):
    """Verify CreateDBSnapshot request parameters and response parsing."""

    connection_class = RDSConnection

    def default_body(self):
        # Canned response for a snapshot that is still being created.
        return """
        <CreateDBSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
          <CreateDBSnapshotResult>
            <DBSnapshot>
              <Port>3306</Port>
              <Engine>mysql</Engine>
              <Status>creating</Status>
              <AvailabilityZone>us-east-1a</AvailabilityZone>
              <LicenseModel>general-public-license</LicenseModel>
              <InstanceCreateTime>2011-05-23T06:06:43.110Z</InstanceCreateTime>
              <AllocatedStorage>10</AllocatedStorage>
              <DBInstanceIdentifier>simcoprod01</DBInstanceIdentifier>
              <EngineVersion>5.1.50</EngineVersion>
              <DBSnapshotIdentifier>mydbsnapshot</DBSnapshotIdentifier>
              <SnapshotType>manual</SnapshotType>
              <MasterUsername>master</MasterUsername>
            </DBSnapshot>
          </CreateDBSnapshotResult>
          <ResponseMetadata>
            <RequestId>c4181d1d-8505-11e0-90aa-eb648410240d</RequestId>
          </ResponseMetadata>
        </CreateDBSnapshotResponse>
        """

    def test_create_dbinstance(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.create_dbsnapshot('mydbsnapshot', 'simcoprod01')
        self.assert_request_parameters({
            'Action': 'CreateDBSnapshot',
            'DBSnapshotIdentifier': 'mydbsnapshot',
            'DBInstanceIdentifier': 'simcoprod01'
        }, ignore_params_values=['Version'])
        self.assertIsInstance(response, DBSnapshot)
        self.assertEqual(response.id, 'mydbsnapshot')
        self.assertEqual(response.instance_id, 'simcoprod01')
        self.assertEqual(response.status, 'creating')
class TestCopyDBSnapshot(AWSMockServiceTestCase):
    """Verify CopyDBSnapshot request parameters and response parsing."""

    connection_class = RDSConnection

    def default_body(self):
        # Canned response for a completed snapshot copy.
        return """
        <CopyDBSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
          <CopyDBSnapshotResult>
            <DBSnapshot>
              <Port>3306</Port>
              <Engine>mysql</Engine>
              <Status>available</Status>
              <AvailabilityZone>us-east-1a</AvailabilityZone>
              <LicenseModel>general-public-license</LicenseModel>
              <InstanceCreateTime>2011-05-23T06:06:43.110Z</InstanceCreateTime>
              <AllocatedStorage>10</AllocatedStorage>
              <DBInstanceIdentifier>simcoprod01</DBInstanceIdentifier>
              <EngineVersion>5.1.50</EngineVersion>
              <DBSnapshotIdentifier>mycopieddbsnapshot</DBSnapshotIdentifier>
              <SnapshotType>manual</SnapshotType>
              <MasterUsername>master</MasterUsername>
            </DBSnapshot>
          </CopyDBSnapshotResult>
          <ResponseMetadata>
            <RequestId>c4181d1d-8505-11e0-90aa-eb648410240d</RequestId>
          </ResponseMetadata>
        </CopyDBSnapshotResponse>
        """

    def test_copy_dbinstance(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.copy_dbsnapshot('myautomaticdbsnapshot', 'mycopieddbsnapshot')
        self.assert_request_parameters({
            'Action': 'CopyDBSnapshot',
            'SourceDBSnapshotIdentifier': 'myautomaticdbsnapshot',
            'TargetDBSnapshotIdentifier': 'mycopieddbsnapshot'
        }, ignore_params_values=['Version'])
        self.assertIsInstance(response, DBSnapshot)
        self.assertEqual(response.id, 'mycopieddbsnapshot')
        self.assertEqual(response.status, 'available')
class TestDeleteDBSnapshot(AWSMockServiceTestCase):
    """Verify DeleteDBSnapshot request parameters and response parsing."""

    connection_class = RDSConnection

    def default_body(self):
        # Canned response describing the snapshot that was just deleted.
        return """
        <DeleteDBSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
          <DeleteDBSnapshotResult>
            <DBSnapshot>
              <Port>3306</Port>
              <SnapshotCreateTime>2011-03-11T07:20:24.082Z</SnapshotCreateTime>
              <Engine>mysql</Engine>
              <Status>deleted</Status>
              <AvailabilityZone>us-east-1d</AvailabilityZone>
              <LicenseModel>general-public-license</LicenseModel>
              <InstanceCreateTime>2010-07-16T00:06:59.107Z</InstanceCreateTime>
              <AllocatedStorage>60</AllocatedStorage>
              <DBInstanceIdentifier>simcoprod01</DBInstanceIdentifier>
              <EngineVersion>5.1.47</EngineVersion>
              <DBSnapshotIdentifier>mysnapshot2</DBSnapshotIdentifier>
              <SnapshotType>manual</SnapshotType>
              <MasterUsername>master</MasterUsername>
            </DBSnapshot>
          </DeleteDBSnapshotResult>
          <ResponseMetadata>
            <RequestId>627a43a1-8507-11e0-bd9b-a7b1ece36d51</RequestId>
          </ResponseMetadata>
        </DeleteDBSnapshotResponse>
        """

    def test_delete_dbinstance(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.delete_dbsnapshot('mysnapshot2')
        self.assert_request_parameters({
            'Action': 'DeleteDBSnapshot',
            'DBSnapshotIdentifier': 'mysnapshot2'
        }, ignore_params_values=['Version'])
        self.assertIsInstance(response, DBSnapshot)
        self.assertEqual(response.id, 'mysnapshot2')
        self.assertEqual(response.status, 'deleted')
class TestRestoreDBInstanceFromDBSnapshot(AWSMockServiceTestCase):
    """Verify RestoreDBInstanceFromDBSnapshot parameters and parsing."""

    connection_class = RDSConnection

    def default_body(self):
        # Canned response for a DB instance being restored from a snapshot;
        # note the restore returns a DBInstance, not a DBSnapshot.
        return """
        <RestoreDBInstanceFromDBSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
          <RestoreDBInstanceFromDBSnapshotResult>
            <DBInstance>
              <ReadReplicaDBInstanceIdentifiers/>
              <Engine>mysql</Engine>
              <PendingModifiedValues/>
              <BackupRetentionPeriod>1</BackupRetentionPeriod>
              <MultiAZ>false</MultiAZ>
              <LicenseModel>general-public-license</LicenseModel>
              <DBInstanceStatus>creating</DBInstanceStatus>
              <EngineVersion>5.1.50</EngineVersion>
              <DBInstanceIdentifier>myrestoreddbinstance</DBInstanceIdentifier>
              <DBParameterGroups>
                <DBParameterGroup>
                  <ParameterApplyStatus>in-sync</ParameterApplyStatus>
                  <DBParameterGroupName>default.mysql5.1</DBParameterGroupName>
                </DBParameterGroup>
              </DBParameterGroups>
              <DBSecurityGroups>
                <DBSecurityGroup>
                  <Status>active</Status>
                  <DBSecurityGroupName>default</DBSecurityGroupName>
                </DBSecurityGroup>
              </DBSecurityGroups>
              <PreferredBackupWindow>00:00-00:30</PreferredBackupWindow>
              <AutoMinorVersionUpgrade>true</AutoMinorVersionUpgrade>
              <PreferredMaintenanceWindow>sat:07:30-sat:08:00</PreferredMaintenanceWindow>
              <AllocatedStorage>10</AllocatedStorage>
              <DBInstanceClass>db.m1.large</DBInstanceClass>
              <MasterUsername>master</MasterUsername>
            </DBInstance>
          </RestoreDBInstanceFromDBSnapshotResult>
          <ResponseMetadata>
            <RequestId>7ca622e8-8508-11e0-bd9b-a7b1ece36d51</RequestId>
          </ResponseMetadata>
        </RestoreDBInstanceFromDBSnapshotResponse>
        """

    def test_restore_dbinstance_from_dbsnapshot(self):
        self.set_http_response(status_code=200)
        # Port / MultiAZ / AutoMinorVersionUpgrade are passed as strings and
        # forwarded verbatim as request parameters.
        response = self.service_connection.restore_dbinstance_from_dbsnapshot('mydbsnapshot',
                                                                              'myrestoreddbinstance',
                                                                              'db.m1.large',
                                                                              '3306',
                                                                              'us-east-1a',
                                                                              'false',
                                                                              'true')
        self.assert_request_parameters({
            'Action': 'RestoreDBInstanceFromDBSnapshot',
            'DBSnapshotIdentifier': 'mydbsnapshot',
            'DBInstanceIdentifier': 'myrestoreddbinstance',
            'DBInstanceClass': 'db.m1.large',
            'Port': '3306',
            'AvailabilityZone': 'us-east-1a',
            'MultiAZ': 'false',
            'AutoMinorVersionUpgrade': 'true'
        }, ignore_params_values=['Version'])
        self.assertIsInstance(response, DBInstance)
        self.assertEqual(response.id, 'myrestoreddbinstance')
        self.assertEqual(response.status, 'creating')
        self.assertEqual(response.instance_class, 'db.m1.large')
        # The XML 'false' is parsed into a real boolean on the model.
        self.assertEqual(response.multi_az, False)
if __name__ == '__main__':
unittest.main()
| mit |
abhishekgahlot/flexx | flexx/pyscript/tests/test_parser0.py | 21 | 1126 | from pytest import raises
from flexx.util.testing import run_tests_if_main
from flexx.pyscript.parser0 import JSError, unify
from flexx import pyscript
def test_unify():
    """unify() leaves atoms/calls alone and parenthesizes compound expressions."""
    # Simple objects, simple names, simple calls, already-bracketed
    # expressions and single calls with args all pass through unchanged.
    passthrough = ['3', '3.12', '"aa"', "'aa'",
                   'foo', 'foo.bar', 'foo12',
                   'foo()', 'bar.foo()',
                   '(foo)', '(3 + 3)', '[2, 3]',
                   '_truthy(some args bla)', 'foo(3)']
    for expr in passthrough:
        assert unify(expr) == expr

    # Anything else gets one extra pair of braces around it.
    wrapped = [('3+3', '(3+3)'),
               ('(3)+(3)', '((3)+(3))'),
               ('[3]+[3]', '([3]+[3])'),
               ('foo((3))', '(foo((3)))')]
    for expr, expected in wrapped:
        assert unify(expr) == expected
run_tests_if_main()
| bsd-2-clause |
dr0pz0ne/sibble | lib/ansible/plugins/lookup/dig.py | 60 | 8336 | # (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
import socket
try:
import dns.resolver
import dns.reversename
from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC,
MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
import dns.exception
HAVE_DNS = True
except ImportError:
HAVE_DNS = False
def make_rdata_dict(rdata):
    ''' While the 'dig' lookup plugin supports anything which dnspython supports
        out of the box, the following supported_types list describes which
        DNS query types we can convert to a dict.

        Returns an empty dict for rdata types not listed below.

        Note: adding support for RRSIG is hard work. :)
    '''
    # Maps an rdata type constant to the attribute names we extract from it.
    supported_types = {
        A          : ['address'],
        AAAA       : ['address'],
        CNAME      : ['target'],
        DNAME      : ['target'],
        DLV        : ['algorithm', 'digest_type', 'key_tag', 'digest'],
        DNSKEY     : ['flags', 'algorithm', 'protocol', 'key'],
        DS         : ['algorithm', 'digest_type', 'key_tag', 'digest'],
        HINFO      : ['cpu', 'os'],
        LOC        : ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
        MX         : ['preference', 'exchange'],
        NAPTR      : ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
        NS         : ['target'],
        NSEC3PARAM : ['algorithm', 'flags', 'iterations', 'salt'],
        PTR        : ['target'],
        RP         : ['mbox', 'txt'],
        # RRSIG    : ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
        SOA        : ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
        SPF        : ['strings'],
        SRV        : ['priority', 'weight', 'port', 'target'],
        SSHFP      : ['algorithm', 'fp_type', 'fingerprint'],
        TLSA       : ['usage', 'selector', 'mtype', 'cert'],
        TXT        : ['strings'],
    }

    rd = {}
    fields = supported_types.get(rdata.rdtype)
    if fields is None:
        # Unsupported type: same as the original behavior, return an empty dict.
        return rd

    # (rdtype, field) pairs holding raw bytes that are rendered as hex strings;
    # this replaces five duplicated `if rdata.rdtype == X and f == 'y'` branches.
    hex_encoded = {
        (DLV, 'digest'),
        (DS, 'digest'),
        (NSEC3PARAM, 'salt'),
        (SSHFP, 'fingerprint'),
        (TLSA, 'cert'),
    }

    for f in fields:
        # getattr() instead of rdata.__getattribute__(f): same lookup, idiomatic.
        val = getattr(rdata, f)

        # isinstance() instead of `type(val) == dns.name.Name` so that Name
        # subclasses are converted to text as well.
        if isinstance(val, dns.name.Name):
            val = val.to_text()
        elif (rdata.rdtype, f) in hex_encoded:
            val = dns.rdata._hexify(val).replace(' ', '')
        elif rdata.rdtype == DNSKEY and f == 'key':
            # DNSKEY public keys are conventionally displayed base64-encoded.
            val = dns.rdata._base64ify(val).replace(' ', '')

        rd[f] = val

    return rd
# ==============================================================
# dig: Lookup DNS records
#
# --------------------------------------------------------------
class LookupModule(LookupBase):
    """``dig`` lookup plugin: resolve DNS records using dnspython's resolver."""

    def run(self, terms, variables=None, **kwargs):

        '''
        terms contains a string with things to `dig' for. We support the
        following formats:
            example.com                                     # A record
            example.com  qtype=A                            # same
            example.com/TXT                                 # specific qtype
            example.com  qtype=txt                          # same
            192.168.1.2/PTR                                 # reverse PTR
              ^^ shortcut for 2.1.168.192.in-addr.arpa/PTR
            example.net/AAAA  @nameserver                   # query specified server
                               ^^^ can be comma-sep list of names/addresses

            ... flat=0                                      # returns a dict; default is 1 == string
        '''

        if HAVE_DNS == False:
            raise AnsibleError("Can't LOOKUP(dig): module dns.resolver is not installed")

        # Create Resolver object so that we can set NS if necessary
        myres = dns.resolver.Resolver()
        edns_size = 4096
        # Request DNSSEC-capable responses with a 4096-byte EDNS0 payload.
        myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)

        domain = None
        qtype = 'A'
        flat = True

        # Each term is either "@nameservers", "opt=arg", "name/QTYPE" or a name.
        for t in terms:
            if t.startswith('@'): # e.g. "@10.0.1.2,192.168.1.1" is ok.
                nsset = t[1:].split(',')
                nameservers = []
                for ns in nsset:
                    # Check if we have a valid IP address. If so, use that, otherwise
                    # try to resolve name to address using system's resolver. If that
                    # fails we bail out.
                    try:
                        socket.inet_aton(ns)
                        nameservers.append(ns)
                    except:
                        # NOTE(review): bare except also hides KeyboardInterrupt;
                        # socket.error would be the precise exception here.
                        try:
                            nsaddr = dns.resolver.query(ns)[0].address
                            nameservers.append(nsaddr)
                        except Exception as e:
                            # NOTE(review): AnsibleError receives two positional
                            # args here; they surface as a tuple -- confirm intended.
                            raise AnsibleError("dns lookup NS: ", str(e))
                myres.nameservers = nameservers
                continue
            if '=' in t:
                try:
                    opt, arg = t.split('=')
                except:
                    # NOTE(review): silently ignores terms with multiple '='
                    # and leaves opt/arg unbound -- confirm terms are well-formed.
                    pass

                if opt == 'qtype':
                    qtype = arg.upper()
                elif opt == 'flat':
                    flat = int(arg)

                continue
            if '/' in t:
                try:
                    domain, qtype = t.split('/')
                except:
                    # More than one '/': treat the whole term as the domain.
                    domain = t
            else:
                domain = t

        # print "--- domain = {0} qtype={1}".format(domain, qtype)

        ret = []

        if qtype.upper() == 'PTR':
            # Convert a bare address into its reverse-lookup name; a
            # SyntaxError means the term was already a name, so keep it.
            try:
                n = dns.reversename.from_address(domain)
                domain = n.to_text()
            except dns.exception.SyntaxError:
                pass
            except Exception as e:
                raise AnsibleError("dns.reversename unhandled exception", str(e))

        try:
            answers = myres.query(domain, qtype)
            for rdata in answers:
                s = rdata.to_text()
                if qtype.upper() == 'TXT':
                    s = s[1:-1]  # Strip outside quotes on TXT rdata

                if flat:
                    ret.append(s)
                else:
                    try:
                        rd = make_rdata_dict(rdata)
                        rd['owner'] = answers.canonical_name.to_text()
                        rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
                        rd['ttl'] = answers.rrset.ttl

                        ret.append(rd)
                    except Exception as e:
                        # Conversion failures are reported in-band as strings.
                        ret.append(str(e))
        # Lookup misses are reported in-band rather than raised, so a play
        # can test for 'NXDOMAIN' / '' instead of trapping errors.
        except dns.resolver.NXDOMAIN:
            ret.append('NXDOMAIN')
        except dns.resolver.NoAnswer:
            ret.append("")
        except dns.resolver.Timeout:
            ret.append('')
        except dns.exception.DNSException as e:
            raise AnsibleError("dns.resolver unhandled exception", e)

        return ret
| gpl-3.0 |
liangsun/firstwiki | scaffold.py | 8 | 1082 | #!/usr/bin/env python
#fileencoding=utf-8
import time
import logging
from tornado.options import options, parse_command_line
from pymongo import MongoClient
import settings
class Scaffold(object):
    """Base class for small command-line jobs.

    Parses command-line options, connects to MongoDB, and times the
    ``main()`` implementation supplied by a subclass via :meth:`run`.
    """

    def __init__(self):
        self.setup()

    def setup(self):
        """Register and parse application options, then open the database."""
        settings.define_app_options()
        parse_command_line(final=True)
        self.db = self.setup_db()

    def setup_db(self):
        """Connect to MongoDB using the parsed options; return the database."""
        # Fixed typo in log message ("Runing" -> "Running").
        logging.info('Running in %s mode' % ('debug' if options.debug else 'production'))
        logging.debug('DEBUG')
        db = MongoClient(options.mongodb_host, options.mongodb_port)[options.mongodb_name]
        logging.info('Connected to db %s ' % options.mongodb_host)
        return db

    def timeit(self, fn, *args, **kwargs):
        """Call ``fn(*args, **kwargs)`` and return ``(elapsed_seconds, result)``.

        Prefers ``time.perf_counter`` (Python 3.3+); falls back to
        ``time.clock`` on older runtimes (``time.clock`` was deprecated in
        3.3 and removed in 3.8, so it cannot be used unconditionally).
        """
        timer = getattr(time, 'perf_counter', None) or time.clock
        start = timer()
        ret = fn(*args, **kwargs)
        elapsed = timer() - start
        return elapsed, ret

    def run(self, *args, **kwargs):
        """Time :meth:`main`, log the elapsed cost, and return its result."""
        t, r = self.timeit(self.main, *args, **kwargs)
        logging.info('Cost %s seconds.' % t)
        return r

    def main(self, *args, **kwargs):
        """Entry point to be overridden by subclasses.

        Raises:
            NotImplementedError: always, on the base class.  This replaces
            the old ``assert True == False`` placeholder, which is silently
            stripped when Python runs with ``-O``.
        """
        raise NotImplementedError('Scaffold subclasses must override main()')
| bsd-3-clause |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/psrc/large_area/de_population_DDD.py | 2 | 2320 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from urbansim.functions import attribute_label
from variable_functions import my_attribute_label
from opus_core.logger import logger
class de_population_DDD(Variable):
    """Sum of the year-DDD population forecast by Dram/Empal over the FAZes
    belonging to each large area."""

    _return_type = "int32"

    def __init__(self, number):
        self.tnumber = number
        self.variable_name = "de_population_%d" % int(number)
        Variable.__init__(self)

    def dependencies(self):
        # Needs the FAZ-to-large-area mapping plus the per-FAZ forecast.
        return [
            attribute_label("faz", "large_area_id"),
            attribute_label("faz", self.variable_name),
            my_attribute_label("large_area_id"),
        ]

    def compute(self, dataset_pool):
        fazes = dataset_pool.get_dataset('faz')
        large_area_ids = fazes.get_attribute("large_area_id")
        populations = fazes.get_attribute(self.variable_name)
        # Aggregate per-FAZ populations up to their owning large areas.
        return self.get_dataset().sum_over_ids(large_area_ids, populations)

    def post_check(self, values, dataset_pool):
        # No large area may exceed the total population over all FAZes.
        upper_bound = dataset_pool.get_dataset('faz').get_attribute(self.variable_name).sum()
        self.do_check("x >= 0 and x <= %s" % upper_bound, values)
from opus_core.tests import opus_unittest
from urbansim.variable_test_toolbox import VariableTestToolbox
from numpy import array
from numpy import ma
class Tests(opus_unittest.OpusTestCase):
    # Fully-qualified name of the variable under test (the year-2010 forecast).
    variable_name = "psrc.large_area.de_population_2010"

    def test_my_inputs(self):
        # FAZes 1 and 3 belong to large area 1 (21 + 27 = 48), FAZ 2 to
        # large area 2 (22), and FAZ 4 to large area 3 (42).
        de_population_2010 = array([21,22,27,42])
        faz_large_area_ids = array([1,2,1,3])
        faz_id = array([1,2,3,4])
        values = VariableTestToolbox().compute_variable(self.variable_name, \
            {"large_area":{
                "large_area_id":array([1,2, 3])}, \
            "faz":{ \
                "de_population_2010":de_population_2010,\
                "large_area_id":faz_large_area_ids, \
                "faz_id":faz_id}}, \
            dataset = "large_area")
        should_be = array([48, 22, 42])
        self.assertEqual(ma.allclose(values, should_be, rtol=1e-2), \
            True, msg = "Error in " + self.variable_name)
if __name__=='__main__':
opus_unittest.main() | gpl-2.0 |
pasiegel/SickGear | lib/sqlalchemy/sql/dml.py | 78 | 29493 | # sql/dml.py
# Copyright (C) 2009-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`.
"""
from .base import Executable, _generative, _from_objects, DialectKWArgs
from .elements import ClauseElement, _literal_as_text, Null, and_, _clone
from .selectable import _interpret_as_from, _interpret_as_select, HasPrefixes
from .. import util
from .. import exc
class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement):
    """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.

    """

    __visit_name__ = 'update_base'

    # DML statements autocommit by default when executed via
    # connectionless / implicit execution.
    _execution_options = \
        Executable._execution_options.union({'autocommit': True})
    _hints = util.immutabledict()
    _prefixes = ()

    def _process_colparams(self, parameters):
        # Normalize a single parameter set: a tuple/list is paired
        # positionally with the target table's columns into a dict.
        def process_single(p):
            if isinstance(p, (list, tuple)):
                return dict(
                    (c.key, pval)
                    for c, pval in zip(self.table.c, p)
                )
            else:
                return p

        # A non-empty list/tuple whose first element is itself a
        # list/tuple/dict indicates a "multiple parameter sets"
        # (multi-VALUES) construct; the boolean in the return value
        # reports which form was detected.
        if isinstance(parameters, (list, tuple)) and \
                parameters and \
                isinstance(parameters[0], (list, tuple, dict)):

            if not self._supports_multi_parameters:
                raise exc.InvalidRequestError(
                    "This construct does not support "
                    "multiple parameter sets.")

            return [process_single(p) for p in parameters], True
        else:
            return process_single(parameters), False

    def params(self, *arg, **kw):
        """Set the parameters for the statement.

        This method raises ``NotImplementedError`` on the base class,
        and is overridden by :class:`.ValuesBase` to provide the
        SET/VALUES clause of UPDATE and INSERT.

        """
        raise NotImplementedError(
            "params() is not supported for INSERT/UPDATE/DELETE statements."
            " To set the values for an INSERT or UPDATE statement, use"
            " stmt.values(**parameters).")

    def bind(self):
        """Return a 'bind' linked to this :class:`.UpdateBase`
        or a :class:`.Table` associated with it.

        """
        return self._bind or self.table.bind

    def _set_bind(self, bind):
        self._bind = bind
    # Assemble the read/write property from the two functions above.
    bind = property(bind, _set_bind)

    @_generative
    def returning(self, *cols):
        """Add a :term:`RETURNING` or equivalent clause to this statement.

        e.g.::

            stmt = table.update().\\
                      where(table.c.data == 'value').\\
                      values(status='X').\\
                      returning(table.c.server_flag, table.c.updated_timestamp)

            for server_flag, updated_timestamp in connection.execute(stmt):
                print(server_flag, updated_timestamp)

        The given collection of column expressions should be derived from
        the table that is the target of the INSERT, UPDATE, or DELETE.
        While :class:`.Column` objects are typical, the elements can also be
        expressions::

            stmt = table.insert().returning(
                (table.c.first_name + " " + table.c.last_name).label('fullname')
            )

        Upon compilation, a RETURNING clause, or database equivalent,
        will be rendered within the statement.  For INSERT and UPDATE,
        the values are the newly inserted/updated values.  For DELETE,
        the values are those of the rows which were deleted.

        Upon execution, the values of the columns to be returned
        are made available via the result set and can be iterated
        using :meth:`.ResultProxy.fetchone` and similar.  For DBAPIs which do
        not natively support returning values (i.e. cx_oracle),
        SQLAlchemy will approximate this behavior at the result level
        so that a reasonable amount of behavioral neutrality is
        provided.

        Note that not all databases/DBAPIs
        support RETURNING.  For those backends with no support,
        an exception is raised upon compilation and/or execution.
        For those who do support it, the functionality across backends
        varies greatly, including restrictions on executemany()
        and other statements which return multiple rows. Please
        read the documentation notes for the database in use in
        order to determine the availability of RETURNING.

        .. seealso::

          :meth:`.ValuesBase.return_defaults` - an alternative method tailored
          towards efficient fetching of server-side defaults and triggers
          for single-row INSERTs or UPDATEs.

        """
        self._returning = cols

    @_generative
    def with_hint(self, text, selectable=None, dialect_name="*"):
        """Add a table hint for a single table to this
        INSERT/UPDATE/DELETE statement.

        .. note::

         :meth:`.UpdateBase.with_hint` currently applies only to
         Microsoft SQL Server.  For MySQL INSERT/UPDATE/DELETE hints, use
         :meth:`.UpdateBase.prefix_with`.

        The text of the hint is rendered in the appropriate
        location for the database backend in use, relative
        to the :class:`.Table` that is the subject of this
        statement, or optionally to that of the given
        :class:`.Table` passed as the ``selectable`` argument.

        The ``dialect_name`` option will limit the rendering of a particular
        hint to a particular backend. Such as, to add a hint
        that only takes effect for SQL Server::

            mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")

        .. versionadded:: 0.7.6

        :param text: Text of the hint.
        :param selectable: optional :class:`.Table` that specifies
         an element of the FROM clause within an UPDATE or DELETE
         to be the subject of the hint - applies only to certain backends.
        :param dialect_name: defaults to ``*``, if specified as the name
         of a particular dialect, will apply these hints only when
         that dialect is in use.

        """
        # The hint target defaults to the statement's own table.
        if selectable is None:
            selectable = self.table

        self._hints = self._hints.union(
            {(selectable, dialect_name): text})
class ValuesBase(UpdateBase):
"""Supplies support for :meth:`.ValuesBase.values` to
INSERT and UPDATE constructs."""
__visit_name__ = 'values_base'
_supports_multi_parameters = False
_has_multi_parameters = False
select = None
def __init__(self, table, values, prefixes):
self.table = _interpret_as_from(table)
self.parameters, self._has_multi_parameters = \
self._process_colparams(values)
if prefixes:
self._setup_prefixes(prefixes)
@_generative
def values(self, *args, **kwargs):
"""specify a fixed VALUES clause for an INSERT statement, or the SET
clause for an UPDATE.
Note that the :class:`.Insert` and :class:`.Update` constructs support
per-execution time formatting of the VALUES and/or SET clauses,
based on the arguments passed to :meth:`.Connection.execute`. However,
the :meth:`.ValuesBase.values` method can be used to "fix" a particular
set of parameters into the statement.
Multiple calls to :meth:`.ValuesBase.values` will produce a new
construct, each one with the parameter list modified to include
the new parameters sent. In the typical case of a single
dictionary of parameters, the newly passed keys will replace
the same keys in the previous construct. In the case of a list-based
"multiple values" construct, each new list of values is extended
onto the existing list of values.
:param \**kwargs: key value pairs representing the string key
of a :class:`.Column` mapped to the value to be rendered into the
VALUES or SET clause::
users.insert().values(name="some name")
users.update().where(users.c.id==5).values(name="some name")
:param \*args: Alternatively, a dictionary, tuple or list
of dictionaries or tuples can be passed as a single positional
argument in order to form the VALUES or
SET clause of the statement. The single dictionary form
works the same as the kwargs form::
users.insert().values({"name": "some name"})
If a tuple is passed, the tuple should contain the same number
of columns as the target :class:`.Table`::
users.insert().values((5, "some name"))
The :class:`.Insert` construct also supports multiply-rendered VALUES
construct, for those backends which support this SQL syntax
(SQLite, Postgresql, MySQL). This mode is indicated by passing a list
of one or more dictionaries/tuples::
users.insert().values([
{"name": "some name"},
{"name": "some other name"},
{"name": "yet another name"},
])
In the case of an :class:`.Update`
construct, only the single dictionary/tuple form is accepted,
else an exception is raised. It is also an exception case to
attempt to mix the single-/multiple- value styles together,
either through multiple :meth:`.ValuesBase.values` calls
or by sending a list + kwargs at the same time.
.. note::
Passing a multiple values list is *not* the same
as passing a multiple values list to the :meth:`.Connection.execute`
method. Passing a list of parameter sets to :meth:`.ValuesBase.values`
produces a construct of this form::
INSERT INTO table (col1, col2, col3) VALUES
(col1_0, col2_0, col3_0),
(col1_1, col2_1, col3_1),
...
whereas a multiple list passed to :meth:`.Connection.execute`
has the effect of using the DBAPI
`executemany() <http://www.python.org/dev/peps/pep-0249/#id18>`_
method, which provides a high-performance system of invoking
a single-row INSERT statement many times against a series
of parameter sets. The "executemany" style is supported by
all database backends, as it does not depend on a special SQL
syntax.
.. versionadded:: 0.8
Support for multiple-VALUES INSERT statements.
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
:func:`~.expression.insert` - produce an ``INSERT`` statement
:func:`~.expression.update` - produce an ``UPDATE`` statement
"""
if self.select is not None:
raise exc.InvalidRequestError(
"This construct already inserts from a SELECT")
if self._has_multi_parameters and kwargs:
raise exc.InvalidRequestError(
"This construct already has multiple parameter sets.")
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.")
v = args[0]
else:
v = {}
if self.parameters is None:
self.parameters, self._has_multi_parameters = \
self._process_colparams(v)
else:
if self._has_multi_parameters:
self.parameters = list(self.parameters)
p, self._has_multi_parameters = self._process_colparams(v)
if not self._has_multi_parameters:
raise exc.ArgumentError(
"Can't mix single-values and multiple values "
"formats in one statement")
self.parameters.extend(p)
else:
self.parameters = self.parameters.copy()
p, self._has_multi_parameters = self._process_colparams(v)
if self._has_multi_parameters:
raise exc.ArgumentError(
"Can't mix single-values and multiple values "
"formats in one statement")
self.parameters.update(p)
if kwargs:
if self._has_multi_parameters:
raise exc.ArgumentError(
"Can't pass kwargs and multiple parameter sets "
"simultaenously")
else:
self.parameters.update(kwargs)
@_generative
def return_defaults(self, *cols):
"""Make use of a :term:`RETURNING` clause for the purpose
of fetching server-side expressions and defaults.
E.g.::
stmt = table.insert().values(data='newdata').return_defaults()
result = connection.execute(stmt)
server_created_at = result.returned_defaults['created_at']
When used against a backend that supports RETURNING, all column
values generated by SQL expression or server-side-default will be added
to any existing RETURNING clause, provided that
:meth:`.UpdateBase.returning` is not used simultaneously. The column values
will then be available on the result using the
:attr:`.ResultProxy.returned_defaults` accessor as a
dictionary, referring to values keyed to the :class:`.Column` object
as well as its ``.key``.
This method differs from :meth:`.UpdateBase.returning` in these ways:
1. :meth:`.ValuesBase.return_defaults` is only intended for use with
an INSERT or an UPDATE statement that matches exactly one row.
While the RETURNING construct in the general sense supports multiple
rows for a multi-row UPDATE or DELETE statement, or for special
cases of INSERT that return multiple rows (e.g. INSERT from SELECT,
multi-valued VALUES clause), :meth:`.ValuesBase.return_defaults`
is intended only
for an "ORM-style" single-row INSERT/UPDATE statement. The row
returned by the statement is also consumed implcitly when
:meth:`.ValuesBase.return_defaults` is used. By contrast,
:meth:`.UpdateBase.returning` leaves the RETURNING result-set intact
with a collection of any number of rows.
2. It is compatible with the existing logic to fetch auto-generated
primary key values, also known as "implicit returning". Backends that
support RETURNING will automatically make use of RETURNING in order
to fetch the value of newly generated primary keys; while the
:meth:`.UpdateBase.returning` method circumvents this behavior,
:meth:`.ValuesBase.return_defaults` leaves it intact.
3. It can be called against any backend. Backends that don't support
RETURNING will skip the usage of the feature, rather than raising
an exception. The return value of :attr:`.ResultProxy.returned_defaults`
will be ``None``
:meth:`.ValuesBase.return_defaults` is used by the ORM to provide
an efficient implementation for the ``eager_defaults`` feature of
:func:`.mapper`.
:param cols: optional list of column key names or :class:`.Column`
objects. If omitted, all column expressions evaulated on the server
are added to the returning list.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.UpdateBase.returning`
:attr:`.ResultProxy.returned_defaults`
"""
self._return_defaults = cols or True
class Insert(ValuesBase):
"""Represent an INSERT construct.
The :class:`.Insert` object is created using the
:func:`~.expression.insert()` function.
.. seealso::
:ref:`coretutorial_insert_expressions`
"""
__visit_name__ = 'insert'
_supports_multi_parameters = True
def __init__(self,
table,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
**dialect_kw):
"""Construct an :class:`.Insert` object.
Similar functionality is available via the
:meth:`~.TableClause.insert` method on
:class:`~.schema.Table`.
:param table: :class:`.TableClause` which is the subject of the insert.
:param values: collection of values to be inserted; see
:meth:`.Insert.values` for a description of allowed formats here.
Can be omitted entirely; a :class:`.Insert` construct will also
dynamically render the VALUES clause at execution time based on
the parameters passed to :meth:`.Connection.execute`.
:param inline: if True, SQL defaults will be compiled 'inline' into the
statement and not pre-executed.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within `values` on a per-key basis.
The keys within `values` can be either :class:`~sqlalchemy.schema.Column`
objects or their string identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
.. seealso::
:ref:`coretutorial_insert_expressions` - SQL Expression Tutorial
:ref:`inserts_and_updates` - SQL Expression Tutorial
"""
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self.select = self.select_names = None
self.inline = inline
self._returning = returning
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
def get_children(self, **kwargs):
if self.select is not None:
return self.select,
else:
return ()
@_generative
def from_select(self, names, select):
"""Return a new :class:`.Insert` construct which represents
an ``INSERT...FROM SELECT`` statement.
e.g.::
sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
ins = table2.insert().from_select(['a', 'b'], sel)
:param names: a sequence of string column names or :class:`.Column`
objects representing the target columns.
:param select: a :func:`.select` construct, :class:`.FromClause`
or other construct which resolves into a :class:`.FromClause`,
such as an ORM :class:`.Query` object, etc. The order of
columns returned from this FROM clause should correspond to the
order of columns sent as the ``names`` parameter; while this
is not checked before passing along to the database, the database
would normally raise an exception if these column lists don't
correspond.
.. note::
Depending on backend, it may be necessary for the :class:`.Insert`
statement to be constructed using the ``inline=True`` flag; this
flag will prevent the implicit usage of ``RETURNING`` when the
``INSERT`` statement is rendered, which isn't supported on a backend
such as Oracle in conjunction with an ``INSERT..SELECT`` combination::
sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
ins = table2.insert(inline=True).from_select(['a', 'b'], sel)
.. note::
A SELECT..INSERT construct in SQL has no VALUES clause. Therefore
:class:`.Column` objects which utilize Python-side defaults
(e.g. as described at :ref:`metadata_defaults_toplevel`)
will **not** take effect when using :meth:`.Insert.from_select`.
.. versionadded:: 0.8.3
"""
if self.parameters:
raise exc.InvalidRequestError(
"This construct already inserts value expressions")
self.parameters, self._has_multi_parameters = \
self._process_colparams(dict((n, Null()) for n in names))
self.select_names = names
self.select = _interpret_as_select(select)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self.parameters = self.parameters.copy()
if self.select is not None:
self.select = _clone(self.select)
class Update(ValuesBase):
"""Represent an Update construct.
The :class:`.Update` object is created using the :func:`update()` function.
"""
__visit_name__ = 'update'
def __init__(self,
table,
whereclause=None,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
**dialect_kw):
"""Construct an :class:`.Update` object.
E.g.::
from sqlalchemy import update
stmt = update(users).where(users.c.id==5).\\
values(name='user #5')
Similar functionality is available via the
:meth:`~.TableClause.update` method on
:class:`.Table`::
stmt = users.update().\\
where(users.c.id==5).\\
values(name='user #5')
:param table: A :class:`.Table` object representing the database
table to be updated.
:param whereclause: Optional SQL expression describing the ``WHERE``
condition of the ``UPDATE`` statement. Modern applications
may prefer to use the generative :meth:`~Update.where()`
method to specify the ``WHERE`` clause.
The WHERE clause can refer to multiple tables.
For databases which support this, an ``UPDATE FROM`` clause will
be generated, or on MySQL, a multi-table update. The statement
will fail on databases that don't have support for multi-table
update statements. A SQL-standard method of referring to
additional tables in the WHERE clause is to use a correlated
subquery::
users.update().values(name='ed').where(
users.c.name==select([addresses.c.email_address]).\\
where(addresses.c.user_id==users.c.id).\\
as_scalar()
)
.. versionchanged:: 0.7.4
The WHERE clause can refer to multiple tables.
:param values:
Optional dictionary which specifies the ``SET`` conditions of the
``UPDATE``. If left as ``None``, the ``SET``
conditions are determined from those parameters passed to the
statement during the execution and/or compilation of the
statement. When compiled standalone without any parameters,
the ``SET`` clause generates for all columns.
Modern applications may prefer to use the generative
:meth:`.Update.values` method to set the values of the
UPDATE statement.
:param inline:
if True, SQL defaults present on :class:`.Column` objects via
the ``default`` keyword will be compiled 'inline' into the statement
and not pre-executed. This means that their values will not
be available in the dictionary returned from
:meth:`.ResultProxy.last_updated_params`.
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
The keys within ``values`` can be either :class:`.Column`
objects or their string identifiers (specifically the "key" of the
:class:`.Column`, normally but not necessarily equivalent to
its "name"). Normally, the
:class:`.Column` objects used here are expected to be
part of the target :class:`.Table` that is the table
to be updated. However when using MySQL, a multiple-table
UPDATE statement can refer to columns from any of
the tables referred to in the WHERE clause.
The values referred to in ``values`` are typically:
* a literal data value (i.e. string, number, etc.)
* a SQL expression, such as a related :class:`.Column`,
a scalar-returning :func:`.select` construct,
etc.
When combining :func:`.select` constructs within the values
clause of an :func:`.update` construct,
the subquery represented by the :func:`.select` should be
*correlated* to the parent table, that is, providing criterion
which links the table inside the subquery to the outer table
being updated::
users.update().values(
name=select([addresses.c.email_address]).\\
where(addresses.c.user_id==users.c.id).\\
as_scalar()
)
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
"""
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self.inline = inline
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
self.parameters = self.parameters.copy()
@_generative
def where(self, whereclause):
"""return a new update() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
@property
def _extra_froms(self):
# TODO: this could be made memoized
# if the memoization is reset on each generative call.
froms = []
seen = set([self.table])
if self._whereclause is not None:
for item in _from_objects(self._whereclause):
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
return froms
class Delete(UpdateBase):
"""Represent a DELETE construct.
The :class:`.Delete` object is created using the :func:`delete()` function.
"""
__visit_name__ = 'delete'
def __init__(self,
table,
whereclause=None,
bind=None,
returning=None,
prefixes=None,
**dialect_kw):
"""Construct :class:`.Delete` object.
Similar functionality is available via the
:meth:`~.TableClause.delete` method on
:class:`~.schema.Table`.
:param table: The table to be updated.
:param whereclause: A :class:`.ClauseElement` describing the ``WHERE``
condition of the ``UPDATE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
.. seealso::
:ref:`deletes` - SQL Expression Tutorial
"""
self._bind = bind
self.table = _interpret_as_from(table)
self._returning = returning
if prefixes:
self._setup_prefixes(prefixes)
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self._validate_dialect_kwargs(dialect_kw)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
@_generative
def where(self, whereclause):
"""Add the given WHERE clause to a newly returned delete construct."""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
| gpl-3.0 |
daviddupont69/CouchPotatoServer | couchpotato/core/helpers/rss.py | 10 | 1172 | from couchpotato.core.logger import CPLog
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
class RSS(object):
def getTextElements(self, xml, path):
""" Find elements and return tree"""
textelements = []
try:
elements = xml.findall(path)
except:
return
for element in elements:
textelements.append(element.text)
return textelements
def getElements(self, xml, path):
elements = None
try:
elements = xml.findall(path)
except:
pass
return elements
def getElement(self, xml, path):
""" Find element and return text"""
try:
return xml.find(path)
except:
return
def getTextElement(self, xml, path):
""" Find element and return text"""
try:
return xml.find(path).text
except:
return
def getItems(self, data, path = 'channel/item'):
try:
return XMLTree.parse(data).findall(path)
except Exception, e:
log.error('Error parsing RSS. %s', e)
return []
| gpl-3.0 |
Kami/libcloud | libcloud/common/ovh.py | 2 | 5963 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import hashlib
import time
try:
import simplejson as json
except ImportError:
import json # type: ignore
from libcloud.utils.py3 import httplib
from libcloud.utils.connection import get_response_object
from libcloud.common.types import InvalidCredsError
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.http import LibcloudConnection
__all__ = [
'OvhResponse',
'OvhConnection'
]
API_HOST = 'api.ovh.com'
API_ROOT = '/1.0'
LOCATIONS = {
'SBG1': {'id': 'SBG1', 'name': 'Strasbourg 1', 'country': 'FR'},
'BHS1': {'id': 'BHS1', 'name': 'Montreal 1', 'country': 'CA'},
'GRA1': {'id': 'GRA1', 'name': 'Gravelines 1', 'country': 'FR'}
}
DEFAULT_ACCESS_RULES = [
{'method': 'GET', 'path': '/*'},
{'method': 'POST', 'path': '/*'},
{'method': 'PUT', 'path': '/*'},
{'method': 'DELETE', 'path': '/*'},
]
class OvhException(Exception):
pass
class OvhResponse(JsonResponse):
def parse_error(self):
response = super(OvhResponse, self).parse_body()
response = response or {}
if response.get('errorCode', None) == 'INVALID_SIGNATURE':
raise InvalidCredsError('Signature validation failed, probably '
'using invalid credentials')
return self.body
class OvhConnection(ConnectionUserAndKey):
"""
A connection to the Ovh API
Wraps SSL connections to the Ovh API, automagically injecting the
parameters that the API needs for each request.
"""
host = API_HOST
request_path = API_ROOT
responseCls = OvhResponse
timestamp = None
ua = [] # type: List[str]
LOCATIONS = LOCATIONS
_timedelta = None
allow_insecure = True
def __init__(self, user_id, *args, **kwargs):
self.consumer_key = kwargs.pop('ex_consumer_key', None)
if self.consumer_key is None:
consumer_key_json = self.request_consumer_key(user_id)
msg = ("Your consumer key isn't validated, "
"go to '%(validationUrl)s' for valid it. After instantiate "
"your driver with \"ex_consumer_key='%(consumerKey)s'\"." %
consumer_key_json)
raise OvhException(msg)
super(OvhConnection, self).__init__(user_id, *args, **kwargs)
def request_consumer_key(self, user_id):
action = self.request_path + '/auth/credential'
data = json.dumps({
'accessRules': DEFAULT_ACCESS_RULES,
'redirection': 'http://ovh.com',
})
headers = {
'Content-Type': 'application/json',
'X-Ovh-Application': user_id,
}
httpcon = LibcloudConnection(host=self.host, port=443)
httpcon.request(method='POST', url=action, body=data, headers=headers)
response = JsonResponse(httpcon.getresponse(), httpcon)
if response.status == httplib.UNAUTHORIZED:
raise InvalidCredsError()
json_response = response.parse_body()
httpcon.close()
return json_response
def get_timestamp(self):
if not self._timedelta:
url = 'https://%s%s/auth/time' % (API_HOST, API_ROOT)
response = get_response_object(url=url, method='GET', headers={})
if not response or not response.body:
raise Exception('Failed to get current time from Ovh API')
timestamp = int(response.body)
self._timedelta = timestamp - int(time.time())
return int(time.time()) + self._timedelta
def make_signature(self, method, action, params, data, timestamp):
full_url = 'https://%s%s' % (API_HOST, action)
if params:
full_url += '?'
for key, value in params.items():
full_url += '%s=%s&' % (key, value)
full_url = full_url[:-1]
sha1 = hashlib.sha1()
base_signature = "+".join([
self.key,
self.consumer_key,
method.upper(),
full_url,
data if data else '',
str(timestamp),
])
sha1.update(base_signature.encode())
signature = '$1$' + sha1.hexdigest()
return signature
def add_default_params(self, params):
return params
def add_default_headers(self, headers):
headers.update({
'X-Ovh-Application': self.user_id,
'X-Ovh-Consumer': self.consumer_key,
'Content-type': 'application/json',
})
return headers
def request(self, action, params=None, data=None, headers=None,
method='GET', raw=False):
data = json.dumps(data) if data else None
timestamp = self.get_timestamp()
signature = self.make_signature(method, action, params, data,
timestamp)
headers = headers or {}
headers.update({
'X-Ovh-Timestamp': timestamp,
'X-Ovh-Signature': signature
})
return super(OvhConnection, self)\
.request(action, params=params, data=data, headers=headers,
method=method, raw=raw)
| apache-2.0 |
elainexmas/boto | boto/services/result.py | 153 | 5596 | #!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from datetime import datetime, timedelta
from boto.utils import parse_ts
import boto
class ResultProcessor(object):
LogFileName = 'log.csv'
def __init__(self, batch_name, sd, mimetype_files=None):
self.sd = sd
self.batch = batch_name
self.log_fp = None
self.num_files = 0
self.total_time = 0
self.min_time = timedelta.max
self.max_time = timedelta.min
self.earliest_time = datetime.max
self.latest_time = datetime.min
self.queue = self.sd.get_obj('output_queue')
self.domain = self.sd.get_obj('output_domain')
def calculate_stats(self, msg):
start_time = parse_ts(msg['Service-Read'])
end_time = parse_ts(msg['Service-Write'])
elapsed_time = end_time - start_time
if elapsed_time > self.max_time:
self.max_time = elapsed_time
if elapsed_time < self.min_time:
self.min_time = elapsed_time
self.total_time += elapsed_time.seconds
if start_time < self.earliest_time:
self.earliest_time = start_time
if end_time > self.latest_time:
self.latest_time = end_time
def log_message(self, msg, path):
keys = sorted(msg.keys())
if not self.log_fp:
self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
line = ','.join(keys)
self.log_fp.write(line+'\n')
values = []
for key in keys:
value = msg[key]
if value.find(',') > 0:
value = '"%s"' % value
values.append(value)
line = ','.join(values)
self.log_fp.write(line+'\n')
def process_record(self, record, path, get_file=True):
self.log_message(record, path)
self.calculate_stats(record)
outputs = record['OutputKey'].split(',')
if 'OutputBucket' in record:
bucket = boto.lookup('s3', record['OutputBucket'])
else:
bucket = boto.lookup('s3', record['Bucket'])
for output in outputs:
if get_file:
key_name = output.split(';')[0]
key = bucket.lookup(key_name)
file_name = os.path.join(path, key_name)
print('retrieving file: %s to %s' % (key_name, file_name))
key.get_contents_to_filename(file_name)
self.num_files += 1
def get_results_from_queue(self, path, get_file=True, delete_msg=True):
m = self.queue.read()
while m:
if 'Batch' in m and m['Batch'] == self.batch:
self.process_record(m, path, get_file)
if delete_msg:
self.queue.delete_message(m)
m = self.queue.read()
def get_results_from_domain(self, path, get_file=True):
rs = self.domain.query("['Batch'='%s']" % self.batch)
for item in rs:
self.process_record(item, path, get_file)
def get_results_from_bucket(self, path):
bucket = self.sd.get_obj('output_bucket')
if bucket:
print('No output queue or domain, just retrieving files from output_bucket')
for key in bucket:
file_name = os.path.join(path, key)
print('retrieving file: %s to %s' % (key, file_name))
key.get_contents_to_filename(file_name)
self.num_files + 1
def get_results(self, path, get_file=True, delete_msg=True):
if not os.path.isdir(path):
os.mkdir(path)
if self.queue:
self.get_results_from_queue(path, get_file)
elif self.domain:
self.get_results_from_domain(path, get_file)
else:
self.get_results_from_bucket(path)
if self.log_fp:
self.log_fp.close()
print('%d results successfully retrieved.' % self.num_files)
if self.num_files > 0:
self.avg_time = float(self.total_time)/self.num_files
print('Minimum Processing Time: %d' % self.min_time.seconds)
print('Maximum Processing Time: %d' % self.max_time.seconds)
print('Average Processing Time: %f' % self.avg_time)
self.elapsed_time = self.latest_time-self.earliest_time
print('Elapsed Time: %d' % self.elapsed_time.seconds)
tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files)
print('Throughput: %f transactions / minute' % tput)
| mit |
mydearxym/shadowsocks-1 | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
global loaded, libcrypto, buf
libcrypto = util.find_library(('crypto', 'eay32'),
'EVP_get_cipherbyname',
'libcrypto')
if libcrypto is None:
raise Exception('libcrypto(OpenSSL) not found')
libcrypto.EVP_get_cipherbyname.restype = c_void_p
libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
c_char_p, c_char_p, c_int)
libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int)
libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
libcrypto.OpenSSL_add_all_ciphers()
buf = create_string_buffer(buf_size)
loaded = True
def load_cipher(cipher_name):
func_name = 'EVP_' + cipher_name.replace('-', '_')
if bytes != str:
func_name = str(func_name, 'utf-8')
cipher = getattr(libcrypto, func_name, None)
if cipher:
cipher.restype = c_void_p
return cipher()
return None
class OpenSSLCrypto(object):
def __init__(self, cipher_name, key, iv, op):
self._ctx = None
if not loaded:
load_openssl()
cipher_name = common.to_bytes(cipher_name)
cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
if not cipher:
cipher = load_cipher(cipher_name)
if not cipher:
raise Exception('cipher %s not found in libcrypto' % cipher_name)
key_ptr = c_char_p(key)
iv_ptr = c_char_p(iv)
self._ctx = libcrypto.EVP_CIPHER_CTX_new()
if not self._ctx:
raise Exception('can not create cipher context')
r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
key_ptr, iv_ptr, c_int(op))
if not r:
self.clean()
raise Exception('can not initialize cipher context')
def update(self, data):
global buf_size, buf
cipher_out_len = c_long(0)
l = len(data)
if buf_size < l:
buf_size = l * 2
buf = create_string_buffer(buf_size)
libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
byref(cipher_out_len), c_char_p(data), l)
# buf is copied to a str object when we access buf.raw
return buf.raw[:cipher_out_len.value]
def __del__(self):
self.clean()
def clean(self):
if self._ctx:
libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
libcrypto.EVP_CIPHER_CTX_free(self._ctx)
ciphers = {
'aes-128-cfb': (16, 16, OpenSSLCrypto),
'aes-192-cfb': (24, 16, OpenSSLCrypto),
'aes-256-cfb': (32, 16, OpenSSLCrypto),
'aes-128-ofb': (16, 16, OpenSSLCrypto),
'aes-192-ofb': (24, 16, OpenSSLCrypto),
'aes-256-ofb': (32, 16, OpenSSLCrypto),
'aes-128-ctr': (16, 16, OpenSSLCrypto),
'aes-192-ctr': (24, 16, OpenSSLCrypto),
'aes-256-ctr': (32, 16, OpenSSLCrypto),
'aes-128-cfb8': (16, 16, OpenSSLCrypto),
'aes-192-cfb8': (24, 16, OpenSSLCrypto),
'aes-256-cfb8': (32, 16, OpenSSLCrypto),
'aes-128-cfb1': (16, 16, OpenSSLCrypto),
'aes-192-cfb1': (24, 16, OpenSSLCrypto),
'aes-256-cfb1': (32, 16, OpenSSLCrypto),
'bf-cfb': (16, 8, OpenSSLCrypto),
'camellia-128-cfb': (16, 16, OpenSSLCrypto),
'camellia-192-cfb': (24, 16, OpenSSLCrypto),
'camellia-256-cfb': (32, 16, OpenSSLCrypto),
'cast5-cfb': (16, 8, OpenSSLCrypto),
'des-cfb': (8, 8, OpenSSLCrypto),
'idea-cfb': (16, 8, OpenSSLCrypto),
'rc2-cfb': (16, 8, OpenSSLCrypto),
'rc4': (16, 0, OpenSSLCrypto),
'seed-cfb': (16, 16, OpenSSLCrypto),
}
def run_method(method):
cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
run_method('aes-128-cfb')
def test_aes_256_cfb():
run_method('aes-256-cfb')
def test_aes_128_cfb8():
run_method('aes-128-cfb8')
def test_aes_256_ofb():
run_method('aes-256-ofb')
def test_aes_256_ctr():
run_method('aes-256-ctr')
def test_bf_cfb():
run_method('bf-cfb')
def test_rc4():
run_method('rc4')
if __name__ == '__main__':
test_aes_128_cfb()
| apache-2.0 |
dbftdiyoeywga/bards | bards/utils.py | 1 | 3353 | import time
import os
import random
import numpy as np
import pandas as pd
import argparse
import mlflow
from pathlib import Path
from contextlib import contextmanager
@contextmanager
def timer(name):
t0 = time.time()
yield
print(f"[{name}] done in {time.time() - t0:.0f} s")
def reduce_mem_usage(df: pd.DataFrame):
"""iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage after optimization is: {:.2f} MB".format(end_mem))
print("Decreased by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
return df
def seed_everything(seed: int):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
def csv2feather(path: str, target=None):
df = pd.read_csv(path)
p = Path(path)
o_path = p.with_suffix(".ftr")
if Path(o_path).exists():
pass
else:
df.to_feather(o_path)
if target is not None:
o_path = p.parent / "target.ftr"
if Path(o_path).exists():
pass
else:
df_ = pd.DataFrame()
df_["target"] = df[target]
df_.to_feather(o_path)
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--force", "-f", action="store_true", help="Overwrite existing files"
)
return parser.parse_args()
def save_log(score_dict):
mlflow.log_metrics(score_dict)
mlflow.log_artifact("./config/config.yaml")
def load_dataset(features: list):
train = [pd.read_feather(f"./features/{f}_train.ftr") for f in features]
test = [pd.read_feather(f"./features/{f}_test.ftr") for f in features]
return pd.concat(train, axis=1), pd.concat(test, axis=1)
def load_target(feature: str):
target = pd.read_feather("./data/raw/")
| mit |
srager13/ns3_qoi_symptotics | src/applications/bindings/callbacks_list.py | 331 | 1249 | callback_classes = [
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| gpl-2.0 |
nitinitprof/odoo | addons/point_of_sale/__init__.py | 378 | 1189 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_bank_statement
import controllers
import point_of_sale
import report
import res_users
import res_partner
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xtopsoft/grpc | src/python/interop/interop/empty_pb2.py | 22 | 1566 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: test/cpp/interop/empty.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='test/cpp/interop/empty.proto',
package='grpc.testing',
serialized_pb=_b('\n\x1ctest/cpp/interop/empty.proto\x12\x0cgrpc.testing\"\x07\n\x05\x45mpty')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EMPTY = _descriptor.Descriptor(
name='Empty',
full_name='grpc.testing.Empty',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=53,
)
DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict(
DESCRIPTOR = _EMPTY,
__module__ = 'test.cpp.interop.empty_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.Empty)
))
_sym_db.RegisterMessage(Empty)
import abc
from grpc.early_adopter import implementations
from grpc.framework.alpha import utilities
# @@protoc_insertion_point(module_scope)
| bsd-3-clause |
qqqmr/digitalwurlitzer | node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
  """Visual Studio tool.

  Represents one <Tool .../> element of a .vcproj file in the nested-list
  form consumed by easy_xml.
  """
  def __init__(self, name, attrs=None):
    """Initializes the tool.
    Args:
      name: Tool name.
      attrs: Dict of tool attributes; may be None.
    """
    # Copy the caller's dict: the original aliased it and then mutated it
    # by inserting 'Name', silently clobbering the argument the caller
    # passed in.
    self._attrs = dict(attrs) if attrs else {}
    self._attrs['Name'] = name
  def _GetSpecification(self):
    """Creates an element for the tool.
    Returns:
      A nested-list element for the tool: ['Tool', attrs_dict].
    """
    return ['Tool', self._attrs]
class Filter(object):
  """Visual Studio filter - a virtual folder grouping project files."""
  def __init__(self, name, contents=None):
    """Initializes the folder.
    Args:
      name: Filter (folder) name.
      contents: List of filenames and/or Filter objects contained.
    """
    self.name = name
    # Snapshot into a fresh list so later mutation of the caller's
    # sequence does not leak into this filter.
    self.contents = [] if not contents else list(contents)
#------------------------------------------------------------------------------
class Writer(object):
  """Visual Studio XML project writer.

  Accumulates project sections (platforms, tool files, configurations,
  files) as nested lists in the shape consumed by easy_xml, then emits
  the .vcproj via WriteIfChanged().
  """
  def __init__(self, project_path, version, name, guid=None, platforms=None):
    """Initializes the project.
    Args:
      project_path: Path to the project file.
      version: Format version to emit.
      name: Name of the project.
      guid: GUID to use for project, if not None.
      platforms: Array of string, the supported platforms. If null, ['Win32']
    """
    self.project_path = project_path
    self.version = version
    self.name = name
    self.guid = guid
    # Default to Win32 for platforms.
    if not platforms:
      platforms = ['Win32']
    # Initialize the specifications of the various sections.
    # Each section is the nested-list representation easy_xml consumes:
    # [tag, optional_attrs_dict, child_elements...].
    self.platform_section = ['Platforms']
    for platform in platforms:
      self.platform_section.append(['Platform', {'Name': platform}])
    self.tool_files_section = ['ToolFiles']
    self.configurations_section = ['Configurations']
    self.files_section = ['Files']
    # Keep a dict keyed on filename to speed up access.
    self.files_dict = dict()
  def AddToolFile(self, path):
    """Adds a tool file to the project.
    Args:
      path: Relative path from project to tool file.
    """
    self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
  def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
    """Returns the specification for a configuration.
    Args:
      config_type: Type of configuration node.
      config_name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    Returns:
      A nested-list element: [config_type, attrs_with_Name, tool_spec...].
    """
    # Handle defaults
    if not attrs:
      attrs = {}
    if not tools:
      tools = []
    # Add configuration node and its attributes
    node_attrs = attrs.copy()
    node_attrs['Name'] = config_name
    specification = [config_type, node_attrs]
    # Add tool nodes and their attributes
    if tools:
      for t in tools:
        if isinstance(t, Tool):
          specification.append(t._GetSpecification())
        else:
          # Bare strings are promoted to attribute-less Tool nodes.
          specification.append(Tool(t)._GetSpecification())
    return specification
  def AddConfig(self, name, attrs=None, tools=None):
    """Adds a configuration to the project.
    Args:
      name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    """
    spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
    self.configurations_section.append(spec)
  def _AddFilesToNode(self, parent, files):
    """Adds files and/or filters to the parent node.
    Args:
      parent: Destination node
      files: A list of Filter objects and/or relative paths to files.
    Will call itself recursively, if the files list contains Filter objects.
    """
    for f in files:
      if isinstance(f, Filter):
        node = ['Filter', {'Name': f.name}]
        self._AddFilesToNode(node, f.contents)
      else:
        node = ['File', {'RelativePath': f}]
        # Remember the node so AddFileConfig() can find it by path later.
        self.files_dict[f] = node
      parent.append(node)
  def AddFiles(self, files):
    """Adds files to the project.
    Args:
      files: A list of Filter objects and/or relative paths to files.
    This makes a copy of the file/filter tree at the time of this call. If you
    later add files to a Filter object which was passed into a previous call
    to AddFiles(), it will not be reflected in this project.
    """
    self._AddFilesToNode(self.files_section, files)
    # TODO(rspangler) This also doesn't handle adding files to an existing
    # filter. That is, it doesn't merge the trees.
  def AddFileConfig(self, path, config, attrs=None, tools=None):
    """Adds a configuration to a file.
    Args:
      path: Relative path to the file.
      config: Name of configuration to add.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    Raises:
      ValueError: Relative path does not match any file added via AddFiles().
    """
    # Find the file node with the right relative path
    parent = self.files_dict.get(path)
    if not parent:
      raise ValueError('AddFileConfig: file "%s" not in project.' % path)
    # Add the config to the file node
    spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
                                         tools)
    parent.append(spec)
  def WriteIfChanged(self):
    """Writes the project file."""
    # First create XML content definition
    content = [
        'VisualStudioProject',
        {'ProjectType': 'Visual C++',
         'Version': self.version.ProjectVersion(),
         'Name': self.name,
         'ProjectGUID': self.guid,
         'RootNamespace': self.name,
         'Keyword': 'Win32Proj'
        },
        self.platform_section,
        self.tool_files_section,
        self.configurations_section,
        ['References'],  # empty section
        self.files_section,
        ['Globals']  # empty section
    ]
    easy_xml.WriteXmlIfChanged(content, self.project_path,
                               encoding="Windows-1252")
| unlicense |
flotre/Sick-Beard | lib/guessit/patterns.py | 40 | 9342 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
# Copyright (c) 2011 Ricard Marxer <ricardmp@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import re
subtitle_exts = [ 'srt', 'idx', 'sub', 'ssa' ]
video_exts = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'm4v', 'mk2',
'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm',
'ogv', 'qt', 'ra', 'ram', 'rm', 'ts', 'wav', 'webm', 'wma', 'wmv']
group_delimiters = [ '()', '[]', '{}' ]
# separator character regexp
sep = r'[][)(}{+ /\._-]' # regexp art, hehe :D
# character used to represent a deleted char (when matching groups)
deleted = '_'
# format: [ (regexp, confidence, span_adjust) ]
episode_rexps = [ # ... Season 2 ...
(r'season (?P<season>[0-9]+)', 1.0, (0, 0)),
(r'saison (?P<season>[0-9]+)', 1.0, (0, 0)),
# ... s02e13 ...
(r'[Ss](?P<season>[0-9]{1,2}).?(?P<episodeNumber>(?:[Ee-][0-9]{1,2})+)[^0-9]', 1.0, (0, -1)),
# ... s03-x02 ...
(r'[Ss](?P<season>[0-9]{1,2}).?(?P<bonusNumber>(?:[Xx][0-9]{1,2})+)[^0-9]', 1.0, (0, -1)),
# ... 2x13 ...
(r'[^0-9](?P<season>[0-9]{1,2}).?(?P<episodeNumber>(?:[xX][0-9]{1,2})+)[^0-9]', 0.8, (1, -1)),
# ... s02 ...
#(sep + r's(?P<season>[0-9]{1,2})' + sep, 0.6, (1, -1)),
(r's(?P<season>[0-9]{1,2})[^0-9]', 0.6, (0, -1)),
# v2 or v3 for some mangas which have multiples rips
(r'(?P<episodeNumber>[0-9]{1,3})v[23]' + sep, 0.6, (0, 0)),
# ... ep 23 ...
('ep' + sep + r'(?P<episodeNumber>[0-9]{1,2})[^0-9]', 0.7, (0, -1)),
# ... e13 ... for a mini-series without a season number
(sep + r'e(?P<episodeNumber>[0-9]{1,2})' + sep, 0.6, (1, -1))
]
weak_episode_rexps = [ # ... 213 or 0106 ...
(sep + r'(?P<episodeNumber>[0-9]{2,4})' + sep, (1, -1))
]
non_episode_title = [ 'extras', 'rip' ]
video_rexps = [ # cd number
(r'cd ?(?P<cdNumber>[0-9])( ?of ?(?P<cdNumberTotal>[0-9]))?', 1.0, (0, 0)),
(r'(?P<cdNumberTotal>[1-9]) cds?', 0.9, (0, 0)),
# special editions
(r'edition' + sep + r'(?P<edition>collector)', 1.0, (0, 0)),
(r'(?P<edition>collector)' + sep + 'edition', 1.0, (0, 0)),
(r'(?P<edition>special)' + sep + 'edition', 1.0, (0, 0)),
(r'(?P<edition>criterion)' + sep + 'edition', 1.0, (0, 0)),
# director's cut
(r"(?P<edition>director'?s?" + sep + "cut)", 1.0, (0, 0)),
# video size
(r'(?P<width>[0-9]{3,4})x(?P<height>[0-9]{3,4})', 0.9, (0, 0)),
# website
(r'(?P<website>www(\.[a-zA-Z0-9]+){2,3})', 0.8, (0, 0)),
# bonusNumber: ... x01 ...
(r'x(?P<bonusNumber>[0-9]{1,2})', 1.0, (0, 0)),
# filmNumber: ... f01 ...
(r'f(?P<filmNumber>[0-9]{1,2})', 1.0, (0, 0))
]
websites = [ 'tvu.org.ru', 'emule-island.com', 'UsaBit.com', 'www.divx-overnet.com',
'sharethefiles.com' ]
unlikely_series = [ 'series' ]
# prop_multi is a dict of { property_name: { canonical_form: [ pattern ] } }
# pattern is a string considered as a regexp, with the addition that dashes are
# replaced with '([ \.-_])?' which matches more types of separators (or none)
# note: simpler patterns need to be at the end of the list to not shadow more
# complete ones, eg: 'AAC' needs to come after 'He-AAC'
# ie: from most specific to less specific
prop_multi = { 'format': { 'DVD': [ 'DVD', 'DVD-Rip', 'VIDEO-TS', 'DVDivX' ],
'HD-DVD': [ 'HD-(?:DVD)?-Rip', 'HD-DVD' ],
'BluRay': [ 'Blu-ray', 'B[DR]Rip' ],
'HDTV': [ 'HD-TV' ],
'DVB': [ 'DVB-Rip', 'DVB', 'PD-TV' ],
'WEBRip': [ 'WEB-Rip' ],
'Screener': [ 'DVD-SCR', 'Screener' ],
'VHS': [ 'VHS' ],
'WEB-DL': [ 'WEB-DL' ] },
'screenSize': { '480p': [ '480p?' ],
'720p': [ '720p?' ],
'1080p': [ '1080p?' ] },
'videoCodec': { 'XviD': [ 'Xvid' ],
'DivX': [ 'DVDivX', 'DivX' ],
'h264': [ '[hx]-264' ],
'Rv10': [ 'Rv10' ] },
'audioCodec': { 'AC3': [ 'AC3' ],
'DTS': [ 'DTS' ],
'AAC': [ 'He-AAC', 'AAC-He', 'AAC' ] },
'audioChannels': { '5.1': [ r'5\.1', 'DD5\.1', '5ch' ] },
'episodeFormat': { 'Minisode': [ 'Minisodes?' ] }
}
# prop_single dict of { property_name: [ canonical_form ] }
prop_single = { 'releaseGroup': [ 'ESiR', 'WAF', 'SEPTiC', r'\[XCT\]', 'iNT', 'PUKKA',
'CHD', 'ViTE', 'TLF', 'DEiTY', 'FLAiTE',
'MDX', 'GM4F', 'DVL', 'SVD', 'iLUMiNADOS', 'FiNaLe',
'UnSeeN', 'aXXo', 'KLAXXON', 'NoTV', 'ZeaL', 'LOL',
'SiNNERS', 'DiRTY', 'REWARD', 'ECI', 'KiNGS', 'CLUE',
'CtrlHD', 'POD', 'WiKi', 'DIMENSION', 'IMMERSE', 'FQM',
'2HD', 'REPTiLE', 'CTU', 'HALCYON', 'EbP', 'SiTV',
'SAiNTS', 'HDBRiSe', 'AlFleNi-TeaM', 'EVOLVE', '0TV',
'TLA', 'NTB', 'ASAP', 'MOMENTUM', 'FoV', 'D-Z0N3' ],
'other': [ 'PROPER', 'REPACK', 'LIMITED', 'DualAudio', 'Audiofixed', 'R5',
'complete', 'classic', # not so sure about these ones, could appear in a title
'ws' ] # widescreen
}
_dash = '-'
_psep = '[-\. _]?'
def _to_rexp(prop):
    """Compile *prop* case-insensitively, first widening every '-' into
    the optional-separator class ``_psep`` so the pattern matches '.',
    ' ', '_', '-' or nothing between words."""
    widened = prop.replace(_dash, _psep)
    return re.compile(widened, re.IGNORECASE)
# properties_rexps dict of { property_name: { canonical_form: [ rexp ] } }
# containing the rexps compiled from both prop_multi and prop_single
properties_rexps = dict((type, dict((canonical_form,
[ _to_rexp(pattern) for pattern in patterns ])
for canonical_form, patterns in props.items()))
for type, props in prop_multi.items())
properties_rexps.update(dict((type, dict((canonical_form, [ _to_rexp(canonical_form) ])
for canonical_form in props))
for type, props in prop_single.items()))
def find_properties(string):
    """Search *string* with every compiled property pattern.

    Returns a list of (property_name, canonical_form, start, end)
    tuples, one per match whose boundaries fall on separator characters
    (or on the ends of the string).
    """
    found = []
    for prop_name, forms in properties_rexps.items():
        for canonical, patterns in forms.items():
            for pattern in patterns:
                hit = pattern.search(string)
                if not hit:
                    continue
                begin, finish = hit.span()
                # make sure our word is always surrounded by separators
                # note: sep is a regexp, but in this case using it as
                # a char sequence achieves the same goal
                left_ok = begin == 0 or string[begin - 1] in sep
                right_ok = finish == len(string) or string[finish] in sep
                if left_ok and right_ok:
                    found.append((prop_name, canonical, begin, finish))
    return found
property_synonyms = { 'Special Edition': [ 'Special' ],
'Collector Edition': [ 'Collector' ],
'Criterion Edition': [ 'Criterion' ]
}
def revert_synonyms():
    """Build the inverse of ``property_synonyms``: a dict mapping each
    lower-cased synonym to its canonical edition name."""
    return dict((alias.lower(), canonical)
                for canonical, aliases in property_synonyms.items()
                for alias in aliases)
reverse_synonyms = revert_synonyms()
def canonical_form(string):
    """Return the canonical spelling for *string* if it is a known
    synonym (case-insensitive); otherwise return *string* unchanged."""
    try:
        return reverse_synonyms[string.lower()]
    except KeyError:
        return string
def compute_canonical_form(property_name, value):
    """Return the canonical form of *value* for the given property type
    if any of its patterns match, None otherwise."""
    candidates = properties_rexps[property_name]
    for canonical, patterns in candidates.items():
        if any(pattern.match(value) for pattern in patterns):
            return canonical
    return None
| gpl-3.0 |
switchboardOp/ansible | lib/ansible/utils/helpers.py | 38 | 1272 | # (c) 2016, Ansible by Red Hat <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import string_types
def pct_to_int(value, num_items, min_value=1):
    '''
    Convert *value* to an item count.

    A string of the form "x%" is interpreted as that percentage of
    *num_items*, never returning less than *min_value*; any other value
    is simply coerced with int().
    '''
    if not (isinstance(value, string_types) and value.endswith('%')):
        return int(value)
    pct = int(value.replace("%", ""))
    scaled = int((pct / 100.0) * num_items)
    return scaled or min_value
| gpl-3.0 |
vlinhd11/vlinhd11-android-scripting | python-build/python-libs/gdata/tests/gdata_tests/spreadsheet/text_db_test.py | 128 | 7522 | #!/usr/bin/python
#
# Copyright Google 2007-2008, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import getpass
import gdata.spreadsheet.text_db
import gdata.spreadsheet.service
__author__ = 'api.jscudder (Jeffrey Scudder)'
username = ''
password = ''
class FactoryTest(unittest.TestCase):
  # NOTE(review): live integration tests -- they talk to the real Google
  # Spreadsheets service using the module-level username/password, and
  # use Python 2 except syntax ("except Error, e").
  def setUp(self):
    self.client = gdata.spreadsheet.text_db.DatabaseClient()
  def testBadCredentials(self):
    # Logging in with bogus credentials must raise text_db.Error.
    try:
      self.client.SetCredentials('foo', 'bar')
      self.fail()
    except gdata.spreadsheet.text_db.Error, e:
      pass
  def testCreateGetAndDeleteDatabase(self):
    # Round-trip: create a spreadsheet, find it by title and by key,
    # then delete it.  The sleeps give the service time to index it.
    db_title = 'google_spreadsheets_db unit test 1'
    self.client.SetCredentials(username, password)
    db = self.client.CreateDatabase(db_title)
    # Test finding the database using the name
    time.sleep(5)
    db_list = self.client.GetDatabases(name=db_title)
    self.assert_(len(db_list) >= 1)
    if len(db_list) >= 1:
      self.assert_(db_list[0].entry.title.text == db_title)
    # Test finding the database using the spreadsheet key
    db_list = self.client.GetDatabases(spreadsheet_key=db.spreadsheet_key)
    self.assert_(len(db_list) == 1)
    self.assert_(db_list[0].entry.title.text == db_title)
    # Delete the test spreadsheet
    time.sleep(10)
    db.Delete()
class DatabaseTest(unittest.TestCase):
  """Live-service tests for table creation, lookup and deletion.

  Each test runs inside a throwaway spreadsheet created in setUp and
  removed in tearDown.
  """
  def setUp(self):
    client = gdata.spreadsheet.text_db.DatabaseClient(username, password)
    self.db = client.CreateDatabase('google_spreadsheets_db unit test 2')
  def tearDown(self):
    # Let the service settle before deleting the spreadsheet.
    time.sleep(10)
    self.db.Delete()
  def testCreateGetAndDeleteTable(self):
    table = self.db.CreateTable('test1', ['1','2','3'])
    # Try to get the new table using the worksheet id.
    table_list = self.db.GetTables(worksheet_id=table.worksheet_id)
    self.assert_(len(table_list) == 1)
    # Bug fix: the original used self.assert_(x, 'test1'), which passes
    # 'test1' as the failure *message* and therefore can never fail.
    self.assertEqual(table_list[0].entry.title.text, 'test1')
    # Try to get the table using the name
    table_list = self.db.GetTables(name='test1')
    self.assert_(len(table_list) == 1)
    self.assertEqual(table_list[0].entry.title.text, 'test1')
    # Delete the table
    table.Delete()
class TableTest(unittest.TestCase):
  # Live-service record-level tests: add/get/delete, optimistic-
  # concurrency push/pull, structured queries and paging.  Uses Python 2
  # except syntax ("except Exception, error").
  def setUp(self):
    client = gdata.spreadsheet.text_db.DatabaseClient(username, password)
    self.db = client.CreateDatabase('google_spreadsheets_db unit test 3')
    # Duplicate header 'a' becomes 'a_2'; 'c_d' and 'd:e' collapse to
    # 'cd' and 'de' in record content keys (see assertions below).
    self.table = self.db.CreateTable('test1', ['a','b','c_d','a', 'd:e'])
  def tearDown(self):
    time.sleep(10)
    self.db.Delete()
  def testCreateGetAndDeleteRecord(self):
    new_record = self.table.AddRecord({'a':'test1', 'b':'test2', 'cd':'test3', 'a_2':'test4', 'de':'test5'})
    # Test getting record by line number.
    record = self.table.GetRecord(row_number=1)
    self.assert_(record is not None)
    self.assert_(record.content['a'] == 'test1')
    self.assert_(record.content['b'] == 'test2')
    self.assert_(record.content['cd'] == 'test3')
    self.assert_(record.content['a_2'] == 'test4')
    # Test getting record using the id.
    record_list = self.table.GetRecord(row_id=new_record.row_id)
    # NOTE(review): this re-checks `record`, not `record_list` -- the
    # by-id lookup result is never actually asserted on.
    self.assert_(record is not None)
    # Delete the record.
    time.sleep(10)
    new_record.Delete()
  def testPushPullSyncing(self):
    # Get two copies of the same row.
    first_copy = self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'})
    second_copy = self.table.GetRecord(first_copy.row_id)
    # Make changes in the first copy
    first_copy.content['a'] = '7'
    first_copy.content['b'] = '9'
    # Try to get the changes before they've been committed
    second_copy.Pull()
    self.assert_(second_copy.content['a'] == '1')
    self.assert_(second_copy.content['b'] == '2')
    # Commit the changes, the content should now be different
    first_copy.Push()
    second_copy.Pull()
    self.assert_(second_copy.content['a'] == '7')
    self.assert_(second_copy.content['b'] == '9')
    # Make changes to the second copy, push, then try to push changes from
    # the first copy.
    first_copy.content['a'] = '10'
    second_copy.content['a'] = '15'
    first_copy.Push()
    try:
      second_copy.Push()
      # The second update should raise an exception due to a 409 conflict.
      self.fail()
    except gdata.spreadsheet.service.RequestError:
      pass
    except Exception, error:
      #TODO: Why won't the except RequestError catch this?
      pass
  def testFindRecords(self):
    # Add lots of test records:
    self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'})
    self.table.AddRecord({'a':'hi', 'b':'2', 'cd':'20', 'a_2':'4', 'de':'5'})
    self.table.AddRecord({'a':'2', 'b':'2', 'cd':'3'})
    self.table.AddRecord({'a':'2', 'b':'2', 'cd':'15', 'de':'7'})
    self.table.AddRecord({'a':'hi hi hi', 'b':'2', 'cd':'15', 'de':'7'})
    self.table.AddRecord({'a':'"5"', 'b':'5', 'cd':'15', 'de':'7'})
    self.table.AddRecord({'a':'5', 'b':'5', 'cd':'15', 'de':'7'})
    time.sleep(10)
    matches = self.table.FindRecords('a == 1')
    self.assert_(len(matches) == 1)
    self.assert_(matches[0].content['a'] == '1')
    self.assert_(matches[0].content['b'] == '2')
    matches = self.table.FindRecords('a > 1 && cd < 20')
    self.assert_(len(matches) == 4)
    matches = self.table.FindRecords('cd < de')
    self.assert_(len(matches) == 7)
    matches = self.table.FindRecords('a == b')
    self.assert_(len(matches) == 0)
    matches = self.table.FindRecords('a == 5')
    self.assert_(len(matches) == 1)
  def testIterateResultSet(self):
    # Populate the table with test data.
    self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'})
    self.table.AddRecord({'a':'hi', 'b':'2', 'cd':'20', 'a_2':'4', 'de':'5'})
    self.table.AddRecord({'a':'2', 'b':'2', 'cd':'3'})
    self.table.AddRecord({'a':'2', 'b':'2', 'cd':'15', 'de':'7'})
    self.table.AddRecord({'a':'hi hi hi', 'b':'2', 'cd':'15', 'de':'7'})
    self.table.AddRecord({'a':'"5"', 'b':'5', 'cd':'15', 'de':'7'})
    self.table.AddRecord({'a':'5', 'b':'5', 'cd':'15', 'de':'7'})
    # Get the first two rows.
    records = self.table.GetRecords(1, 2)
    self.assert_(len(records) == 2)
    self.assert_(records[0].content['a'] == '1')
    self.assert_(records[1].content['a'] == 'hi')
    # Then get the next two rows.
    next_records = records.GetNext()
    self.assert_(len(next_records) == 2)
    self.assert_(next_records[0].content['a'] == '2')
    self.assert_(next_records[0].content['cd'] == '3')
    self.assert_(next_records[1].content['cd'] == '15')
    self.assert_(next_records[1].content['de'] == '7')
  def testLookupFieldsOnPreexistingTable(self):
    existing_table = self.db.GetTables(name='test1')[0]
    existing_table.LookupFields()
    # NOTE(review): assertEquals is a deprecated alias of assertEqual.
    self.assertEquals(existing_table.fields, ['a', 'b', 'cd', 'a_2', 'de'])
if __name__ == '__main__':
  # Prompt for live-account credentials when not hard-coded above; the
  # suite talks to the real Google Spreadsheets service.  raw_input is
  # Python 2 only.
  if not username:
    username = raw_input('Spreadsheets API | Text DB Tests\n'
                         'Please enter your username: ')
  if not password:
    password = getpass.getpass()
  unittest.main()
| apache-2.0 |
AustinRoy7/Pomodoro-timer | venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 713 | 2320 | from __future__ import absolute_import
import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
    """
    Pick a random multipart boundary: 32 lowercase hex digits from a
    UUID4 (an embarrassingly simple replacement for the old
    mimetools.choose_boundary).
    """
    token = uuid4()
    return token.hex
def iter_field_objects(fields):
    """
    Yield every field as a :class:`~urllib3.fields.RequestField`.

    Accepts a dict, a list of ``(key, value)`` tuples, or a list already
    containing ``RequestField`` instances (mixing tuples and instances
    is fine).
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else iter(fields)
    for item in source:
        if isinstance(item, RequestField):
            yield item
        else:
            yield RequestField.from_tuples(*item)
def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Yield ``(key, value)`` pairs from a dict or a tuple list.

    Superseded by :func:`iter_field_objects`, which yields
    :class:`~urllib3.fields.RequestField` objects instead.
    """
    if isinstance(fields, dict):
        fields = six.iteritems(fields)
    return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode *fields* using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
    :param boundary:
        Explicit boundary string; a random one is generated when omitted.
    :returns:
        Tuple of ``(body_bytes, content_type_header_value)``.
    """
    if boundary is None:
        boundary = choose_boundary()
    buf = BytesIO()
    delimiter = b('--%s\r\n' % (boundary))
    for field in iter_field_objects(fields):
        buf.write(delimiter)
        writer(buf).write(field.render_headers())
        payload = field.data
        if isinstance(payload, int):
            payload = str(payload)  # Backwards compatibility
        if isinstance(payload, six.text_type):
            # Text data goes through the UTF-8 incremental writer...
            writer(buf).write(payload)
        else:
            # ...while bytes are written verbatim.
            buf.write(payload)
        buf.write(b'\r\n')
    buf.write(b('--%s--\r\n' % (boundary)))
    content_type = str('multipart/form-data; boundary=%s' % boundary)
    return buf.getvalue(), content_type
| mit |
apyrgio/ganeti | test/py/cfgupgrade_unittest.py | 2 | 18843 | #!/usr/bin/python
#
# Copyright (C) 2010, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing tools/cfgupgrade"""
import os
import sys
import unittest
import shutil
import tempfile
import operator
import json
from ganeti import constants
from ganeti import utils
from ganeti import serializer
from ganeti import netutils
from ganeti.utils import version
import testutils
def GetMinimalConfig():
  """Return the smallest config dict cfgupgrade accepts: the current
  config version, one master node, default cluster parameters and empty
  collections for every other entity type."""
  return {
    "version": constants.CONFIG_VERSION,
    "cluster": {
      "master_node": "node1-uuid",
      "ipolicy": None,
      "default_iallocator_params": {},
      "diskparams": {},
      "ndparams": {},
      "candidate_certs": {},
      "install_image": "",
      "instance_communication_network": "",
      "zeroing_image": "",
      "compression_tools": constants.IEC_DEFAULT_TOOLS,
      "enabled_user_shutdown": False,
      # Collector intervals are in microseconds (5000000 = 5s).
      "data_collectors": {
        "diskstats": { "active": True, "interval": 5000000 },
        "drbd": { "active": True, "interval": 5000000 },
        "lv": { "active": True, "interval": 5000000 },
        "inst-status-xen": { "active": True, "interval": 5000000 },
        "cpu-avg-load": { "active": True, "interval": 5000000 },
      },
    },
    "instances": {},
    "disks": {},
    "networks": {},
    "filters": {},
    "nodegroups": {},
    "nodes": {
      "node1-uuid": {
        "name": "node1",
        "uuid": "node1-uuid"
      }
    },
  }
def _RunUpgrade(path, dry_run, no_verify, ignore_hostname=True,
                downgrade=False):
  """Invoke tools/cfgupgrade as a subprocess against the config dir at
  *path*, translating the boolean arguments into CLI flags.  Raises a
  plain Exception (with the tool's output) when the run fails."""
  cmd = [sys.executable, "%s/tools/cfgupgrade" % testutils.GetSourceDir(),
         "--debug", "--force", "--path=%s" % path, "--confdir=%s" % path]
  optional_flags = (("--ignore-hostname", ignore_hostname),
                    ("--dry-run", dry_run),
                    ("--no-verify", no_verify),
                    ("--downgrade", downgrade))
  for flag, enabled in optional_flags:
    if enabled:
      cmd.append(flag)
  result = utils.RunCmd(cmd, cwd=os.getcwd())
  if result.failed:
    raise Exception("cfgupgrade failed: %s, output %r" %
                    (result.fail_reason, result.output))
class TestCfgupgrade(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.config_path = utils.PathJoin(self.tmpdir, "config.data")
self.noded_cert_path = utils.PathJoin(self.tmpdir, "server.pem")
self.rapi_cert_path = utils.PathJoin(self.tmpdir, "rapi.pem")
self.rapi_users_path = utils.PathJoin(self.tmpdir, "rapi", "users")
self.rapi_users_path_pre24 = utils.PathJoin(self.tmpdir, "rapi_users")
self.known_hosts_path = utils.PathJoin(self.tmpdir, "known_hosts")
self.confd_hmac_path = utils.PathJoin(self.tmpdir, "hmac.key")
self.cds_path = utils.PathJoin(self.tmpdir, "cluster-domain-secret")
self.ss_master_node_path = utils.PathJoin(self.tmpdir, "ssconf_master_node")
self.file_storage_paths = utils.PathJoin(self.tmpdir, "file-storage-paths")
  def tearDown(self):
    # Remove the scratch directory and everything the test wrote into it.
    shutil.rmtree(self.tmpdir)
  def _LoadConfig(self):
    # Parse the config.data the upgrade tool (re)wrote into a dict.
    return serializer.LoadJson(utils.ReadFile(self.config_path))
  def _LoadTestDataConfig(self, filename):
    # Load a canned config fixture from the test-data directory.
    return serializer.LoadJson(testutils.ReadTestData(filename))
def _CreateValidConfigDir(self):
utils.WriteFile(self.noded_cert_path, data="")
utils.WriteFile(self.known_hosts_path, data="")
utils.WriteFile(self.ss_master_node_path,
data="node.has.another.name.example.net")
def testNoConfigDir(self):
self.assertFalse(utils.ListVisibleFiles(self.tmpdir))
self.assertRaises(Exception, _RunUpgrade, self.tmpdir, False, True)
self.assertRaises(Exception, _RunUpgrade, self.tmpdir, True, True)
def testWrongHostname(self):
self._CreateValidConfigDir()
utils.WriteFile(self.config_path,
data=serializer.DumpJson(GetMinimalConfig()))
hostname = netutils.GetHostname().name
assert hostname != utils.ReadOneLineFile(self.ss_master_node_path)
self.assertRaises(Exception, _RunUpgrade, self.tmpdir, False, True,
ignore_hostname=False)
def testCorrectHostname(self):
self._CreateValidConfigDir()
utils.WriteFile(self.config_path,
data=serializer.DumpJson(GetMinimalConfig()))
utils.WriteFile(self.ss_master_node_path,
data="%s\n" % netutils.GetHostname().name)
_RunUpgrade(self.tmpdir, False, True, ignore_hostname=False)
def testInconsistentConfig(self):
self._CreateValidConfigDir()
# There should be no "config_version"
cfg = GetMinimalConfig()
cfg["version"] = 0
cfg["cluster"]["config_version"] = 0
utils.WriteFile(self.config_path, data=serializer.DumpJson(cfg))
self.assertRaises(Exception, _RunUpgrade, self.tmpdir, False, True)
def testInvalidConfig(self):
self._CreateValidConfigDir()
# Missing version from config
utils.WriteFile(self.config_path, data=serializer.DumpJson({}))
self.assertRaises(Exception, _RunUpgrade, self.tmpdir, False, True)
def _TestUpgradeFromFile(self, filename, dry_run):
cfg = self._LoadTestDataConfig(filename)
self._TestUpgradeFromData(cfg, dry_run)
def _TestSimpleUpgrade(self, from_version, dry_run,
file_storage_dir=None,
shared_file_storage_dir=None):
cfg = GetMinimalConfig()
cfg["version"] = from_version
cluster = cfg["cluster"]
if file_storage_dir:
cluster["file_storage_dir"] = file_storage_dir
if shared_file_storage_dir:
cluster["shared_file_storage_dir"] = shared_file_storage_dir
self._TestUpgradeFromData(cfg, dry_run)
def _TestUpgradeFromData(self, cfg, dry_run):
assert "version" in cfg
from_version = cfg["version"]
self._CreateValidConfigDir()
utils.WriteFile(self.config_path, data=serializer.DumpJson(cfg))
self.assertFalse(os.path.isfile(self.rapi_cert_path))
self.assertFalse(os.path.isfile(self.confd_hmac_path))
self.assertFalse(os.path.isfile(self.cds_path))
_RunUpgrade(self.tmpdir, dry_run, True)
if dry_run:
expversion = from_version
checkfn = operator.not_
else:
expversion = constants.CONFIG_VERSION
checkfn = operator.truth
self.assert_(checkfn(os.path.isfile(self.rapi_cert_path)))
self.assert_(checkfn(os.path.isfile(self.confd_hmac_path)))
self.assert_(checkfn(os.path.isfile(self.cds_path)))
newcfg = self._LoadConfig()
self.assertEqual(newcfg["version"], expversion)
def testRapiUsers(self):
self.assertFalse(os.path.exists(self.rapi_users_path))
self.assertFalse(os.path.exists(self.rapi_users_path_pre24))
self.assertFalse(os.path.exists(os.path.dirname(self.rapi_users_path)))
utils.WriteFile(self.rapi_users_path_pre24, data="some user\n")
self._TestSimpleUpgrade(version.BuildVersion(2, 3, 0), False)
self.assertTrue(os.path.isdir(os.path.dirname(self.rapi_users_path)))
self.assert_(os.path.islink(self.rapi_users_path_pre24))
self.assert_(os.path.isfile(self.rapi_users_path))
self.assertEqual(os.readlink(self.rapi_users_path_pre24),
self.rapi_users_path)
for path in [self.rapi_users_path, self.rapi_users_path_pre24]:
self.assertEqual(utils.ReadFile(path), "some user\n")
def testRapiUsers24AndAbove(self):
self.assertFalse(os.path.exists(self.rapi_users_path))
self.assertFalse(os.path.exists(self.rapi_users_path_pre24))
os.mkdir(os.path.dirname(self.rapi_users_path))
utils.WriteFile(self.rapi_users_path, data="other user\n")
self._TestSimpleUpgrade(version.BuildVersion(2, 3, 0), False)
self.assert_(os.path.islink(self.rapi_users_path_pre24))
self.assert_(os.path.isfile(self.rapi_users_path))
self.assertEqual(os.readlink(self.rapi_users_path_pre24),
self.rapi_users_path)
for path in [self.rapi_users_path, self.rapi_users_path_pre24]:
self.assertEqual(utils.ReadFile(path), "other user\n")
def testRapiUsersExistingSymlink(self):
self.assertFalse(os.path.exists(self.rapi_users_path))
self.assertFalse(os.path.exists(self.rapi_users_path_pre24))
os.mkdir(os.path.dirname(self.rapi_users_path))
os.symlink(self.rapi_users_path, self.rapi_users_path_pre24)
utils.WriteFile(self.rapi_users_path, data="hello world\n")
self._TestSimpleUpgrade(version.BuildVersion(2, 2, 0), False)
self.assert_(os.path.isfile(self.rapi_users_path) and
not os.path.islink(self.rapi_users_path))
self.assert_(os.path.islink(self.rapi_users_path_pre24))
self.assertEqual(os.readlink(self.rapi_users_path_pre24),
self.rapi_users_path)
for path in [self.rapi_users_path, self.rapi_users_path_pre24]:
self.assertEqual(utils.ReadFile(path), "hello world\n")
def testRapiUsersExistingTarget(self):
self.assertFalse(os.path.exists(self.rapi_users_path))
self.assertFalse(os.path.exists(self.rapi_users_path_pre24))
os.mkdir(os.path.dirname(self.rapi_users_path))
utils.WriteFile(self.rapi_users_path, data="other user\n")
utils.WriteFile(self.rapi_users_path_pre24, data="hello world\n")
self.assertRaises(Exception, self._TestSimpleUpgrade,
version.BuildVersion(2, 2, 0), False)
for path in [self.rapi_users_path, self.rapi_users_path_pre24]:
self.assert_(os.path.isfile(path) and not os.path.islink(path))
self.assertEqual(utils.ReadFile(self.rapi_users_path), "other user\n")
self.assertEqual(utils.ReadFile(self.rapi_users_path_pre24),
"hello world\n")
def testRapiUsersDryRun(self):
self.assertFalse(os.path.exists(self.rapi_users_path))
self.assertFalse(os.path.exists(self.rapi_users_path_pre24))
utils.WriteFile(self.rapi_users_path_pre24, data="some user\n")
self._TestSimpleUpgrade(version.BuildVersion(2, 3, 0), True)
self.assertFalse(os.path.isdir(os.path.dirname(self.rapi_users_path)))
self.assertTrue(os.path.isfile(self.rapi_users_path_pre24) and
not os.path.islink(self.rapi_users_path_pre24))
self.assertFalse(os.path.exists(self.rapi_users_path))
def testRapiUsers24AndAboveDryRun(self):
self.assertFalse(os.path.exists(self.rapi_users_path))
self.assertFalse(os.path.exists(self.rapi_users_path_pre24))
os.mkdir(os.path.dirname(self.rapi_users_path))
utils.WriteFile(self.rapi_users_path, data="other user\n")
self._TestSimpleUpgrade(version.BuildVersion(2, 3, 0), True)
self.assertTrue(os.path.isfile(self.rapi_users_path) and
not os.path.islink(self.rapi_users_path))
self.assertFalse(os.path.exists(self.rapi_users_path_pre24))
self.assertEqual(utils.ReadFile(self.rapi_users_path), "other user\n")
def testRapiUsersExistingSymlinkDryRun(self):
self.assertFalse(os.path.exists(self.rapi_users_path))
self.assertFalse(os.path.exists(self.rapi_users_path_pre24))
os.mkdir(os.path.dirname(self.rapi_users_path))
os.symlink(self.rapi_users_path, self.rapi_users_path_pre24)
utils.WriteFile(self.rapi_users_path, data="hello world\n")
self._TestSimpleUpgrade(version.BuildVersion(2, 2, 0), True)
self.assertTrue(os.path.islink(self.rapi_users_path_pre24))
self.assertTrue(os.path.isfile(self.rapi_users_path) and
not os.path.islink(self.rapi_users_path))
self.assertEqual(os.readlink(self.rapi_users_path_pre24),
self.rapi_users_path)
for path in [self.rapi_users_path, self.rapi_users_path_pre24]:
self.assertEqual(utils.ReadFile(path), "hello world\n")
def testFileStoragePathsDryRun(self):
self.assertFalse(os.path.exists(self.file_storage_paths))
self._TestSimpleUpgrade(version.BuildVersion(2, 6, 0), True,
file_storage_dir=self.tmpdir,
shared_file_storage_dir="/tmp")
self.assertFalse(os.path.exists(self.file_storage_paths))
def testFileStoragePathsBoth(self):
self.assertFalse(os.path.exists(self.file_storage_paths))
self._TestSimpleUpgrade(version.BuildVersion(2, 6, 0), False,
file_storage_dir=self.tmpdir,
shared_file_storage_dir="/tmp")
lines = utils.ReadFile(self.file_storage_paths).splitlines()
self.assertTrue(lines.pop(0).startswith("# "))
self.assertTrue(lines.pop(0).startswith("# cfgupgrade"))
self.assertEqual(lines.pop(0), self.tmpdir)
self.assertEqual(lines.pop(0), "/tmp")
self.assertFalse(lines)
self.assertEqual(os.stat(self.file_storage_paths).st_mode & 0777,
0600, msg="Wrong permissions")
def testFileStoragePathsSharedOnly(self):
self.assertFalse(os.path.exists(self.file_storage_paths))
self._TestSimpleUpgrade(version.BuildVersion(2, 5, 0), False,
file_storage_dir=None,
shared_file_storage_dir=self.tmpdir)
lines = utils.ReadFile(self.file_storage_paths).splitlines()
self.assertTrue(lines.pop(0).startswith("# "))
self.assertTrue(lines.pop(0).startswith("# cfgupgrade"))
self.assertEqual(lines.pop(0), self.tmpdir)
self.assertFalse(lines)
def testUpgradeFrom_2_0(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 0, 0), False)
def testUpgradeFrom_2_1(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 1, 0), False)
def testUpgradeFrom_2_2(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 2, 0), False)
def testUpgradeFrom_2_3(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 3, 0), False)
def testUpgradeFrom_2_4(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 4, 0), False)
def testUpgradeFrom_2_5(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 5, 0), False)
def testUpgradeFrom_2_6(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 6, 0), False)
def testUpgradeFrom_2_7(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 7, 0), False)
def testUpgradeFullConfigFrom_2_7(self):
self._TestUpgradeFromFile("cluster_config_2.7.json", False)
def testUpgradeFullConfigFrom_2_8(self):
self._TestUpgradeFromFile("cluster_config_2.8.json", False)
def testUpgradeFullConfigFrom_2_9(self):
self._TestUpgradeFromFile("cluster_config_2.9.json", False)
def testUpgradeFullConfigFrom_2_10(self):
self._TestUpgradeFromFile("cluster_config_2.10.json", False)
def testUpgradeFullConfigFrom_2_11(self):
self._TestUpgradeFromFile("cluster_config_2.11.json", False)
def testUpgradeFullConfigFrom_2_12(self):
self._TestUpgradeFromFile("cluster_config_2.12.json", False)
def testUpgradeFullConfigFrom_2_13(self):
self._TestUpgradeFromFile("cluster_config_2.13.json", False)
def testUpgradeCurrent(self):
self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
def _RunDowngradeUpgrade(self):
oldconf = self._LoadConfig()
_RunUpgrade(self.tmpdir, False, True, downgrade=True)
_RunUpgrade(self.tmpdir, False, True)
newconf = self._LoadConfig()
self.assertEqual(oldconf, newconf)
def testDowngrade(self):
self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
self._RunDowngradeUpgrade()
def testDowngradeFullConfig(self):
"""Test for upgrade + downgrade combination."""
# This test can work only with the previous version of a configuration!
oldconfname = "cluster_config_2.13.json"
self._TestUpgradeFromFile(oldconfname, False)
_RunUpgrade(self.tmpdir, False, True, downgrade=True)
oldconf = self._LoadTestDataConfig(oldconfname)
newconf = self._LoadConfig()
self.maxDiff = None
self.assertEqual(oldconf, newconf)
def testDowngradeFullConfigBackwardFrom_2_7(self):
"""Test for upgrade + downgrade + upgrade combination."""
self._TestUpgradeFromFile("cluster_config_2.7.json", False)
self._RunDowngradeUpgrade()
def _RunDowngradeTwice(self):
"""Make sure that downgrade is idempotent."""
_RunUpgrade(self.tmpdir, False, True, downgrade=True)
oldconf = self._LoadConfig()
_RunUpgrade(self.tmpdir, False, True, downgrade=True)
newconf = self._LoadConfig()
self.assertEqual(oldconf, newconf)
def testDowngradeTwice(self):
self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
self._RunDowngradeTwice()
def testDowngradeTwiceFullConfigFrom_2_7(self):
self._TestUpgradeFromFile("cluster_config_2.7.json", False)
self._RunDowngradeTwice()
def testUpgradeDryRunFrom_2_0(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 0, 0), True)
def testUpgradeDryRunFrom_2_1(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 1, 0), True)
def testUpgradeDryRunFrom_2_2(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 2, 0), True)
def testUpgradeDryRunFrom_2_3(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 3, 0), True)
def testUpgradeDryRunFrom_2_4(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 4, 0), True)
def testUpgradeDryRunFrom_2_5(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 5, 0), True)
def testUpgradeDryRunFrom_2_6(self):
self._TestSimpleUpgrade(version.BuildVersion(2, 6, 0), True)
def testUpgradeCurrentDryRun(self):
self._TestSimpleUpgrade(constants.CONFIG_VERSION, True)
def testDowngradeDryRun(self):
self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
oldconf = self._LoadConfig()
_RunUpgrade(self.tmpdir, True, True, downgrade=True)
newconf = self._LoadConfig()
self.assertEqual(oldconf["version"], newconf["version"])
# Allow running this test module directly; GanetiTestProgram is the
# project's wrapper around unittest's test runner.
if __name__ == "__main__":
  testutils.GanetiTestProgram()
| bsd-2-clause |
thisisshi/cloud-custodian | tools/c7n_azure/c7n_azure/resources/key_vault.py | 2 | 11853 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging
from c7n.filters import Filter
from c7n.utils import type_schema
from c7n_azure.actions.base import AzureBaseAction
from c7n_azure.constants import GRAPH_AUTH_ENDPOINT
from c7n_azure.filters import FirewallBypassFilter, FirewallRulesFilter
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n_azure.session import Session
from c7n_azure.utils import GraphHelper
from netaddr import IPSet
log = logging.getLogger('custodian.azure.keyvault')
@resources.register('keyvault')
class KeyVault(ArmResourceManager):
    """Key Vault Resource

    :example:

    This policy will find all KeyVaults with 10 or less API Hits over the last 72 hours

    .. code-block:: yaml

        policies:
          - name: inactive-keyvaults
            resource: azure.keyvault
            filters:
              - type: metric
                metric: ServiceApiHit
                op: ge
                aggregation: total
                threshold: 10
                timeframe: 72

    :example:

    This policy will find all KeyVaults where Service Principals that
    have access permissions that exceed `read-only`.

    .. code-block:: yaml

        policies:
          - name: policy
            description:
              Ensure only authorized people have an access
            resource: azure.keyvault
            filters:
              - not:
                 - type: whitelist
                   key: principalName
                   users:
                     - account1@sample.com
                     - account2@sample.com
                   permissions:
                     keys:
                       - get
                     secrets:
                       - get
                     certificates:
                       - get

    :example:

    This policy will find all KeyVaults and add get and list permissions for keys.

    .. code-block:: yaml

        policies:
          - name: policy
            description:
              Add get and list permissions to keys access policy
            resource: azure.keyvault
            actions:
              - type: update-access-policy
                operation: add
                access-policies:
                  - tenant-id: 00000000-0000-0000-0000-000000000000
                    object-id: 11111111-1111-1111-1111-111111111111
                    permissions:
                      keys:
                        - get
                        - list
    """

    class resource_type(ArmResourceManager.resource_type):
        # Grouping used by the generated provider documentation.
        doc_groups = ['Security']

        # Azure SDK namespace and management client used for enumeration.
        service = 'azure.mgmt.keyvault'
        client = 'KeyVaultManagementClient'
        # (operations attr, method, extra args): vaults are enumerated
        # via KeyVaultManagementClient.vaults.list().
        enum_spec = ('vaults', 'list', None)
        resource_type = 'Microsoft.KeyVault/vaults'
@KeyVault.filter_registry.register('firewall-rules')
class KeyVaultFirewallRulesFilter(FirewallRulesFilter):
    """Filter key vaults by the IP ranges allowed through their firewall.

    Returns the set of permitted IP ranges for each vault; a vault with
    no network ACLs (or a default 'Allow' action) is treated as open to
    all addresses (0.0.0.0/0).
    """

    def __init__(self, data, manager=None):
        super(KeyVaultFirewallRulesFilter, self).__init__(data, manager)
        self._log = log

    @property
    def log(self):
        return self._log

    def _query_rules(self, resource):
        # The enumeration call does not include network ACLs; fetch the
        # full vault representation on demand and cache it on the resource.
        if 'properties' not in resource:
            vault = self.client.vaults.get(resource['resourceGroup'], resource['name'])
            resource['properties'] = vault.properties.serialize()
        # No ACLs at all means the vault accepts traffic from anywhere.
        if 'networkAcls' not in resource['properties']:
            return IPSet(['0.0.0.0/0'])
        if resource['properties']['networkAcls']['defaultAction'] == 'Deny':
            # BUG FIX: 'ipRules' may be absent from the serialized ACLs
            # when no explicit rules are configured; the previous direct
            # indexing raised KeyError in that case.
            ip_rules = resource['properties']['networkAcls'].get('ipRules', [])
            resource_rules = IPSet([r['value'] for r in ip_rules])
        else:
            resource_rules = IPSet(['0.0.0.0/0'])
        return resource_rules
@KeyVault.filter_registry.register('firewall-bypass')
class KeyVaultFirewallBypassFilter(FirewallBypassFilter):
    """Filters resources by the firewall bypass rules.

    :example:

    This policy will find all KeyVaults with enabled Azure Services bypass rules

    .. code-block:: yaml

        policies:
          - name: keyvault-bypass
            resource: azure.keyvault
            filters:
              - type: firewall-bypass
                mode: equal
                list:
                    - AzureServices
    """

    schema = FirewallBypassFilter.schema(['AzureServices'])

    def _query_bypass(self, resource):
        # The enumeration call omits the full properties; fetch them once
        # and cache them on the resource dictionary.
        if 'properties' not in resource:
            vault = self.client.vaults.get(resource['resourceGroup'], resource['name'])
            resource['properties'] = vault.properties.serialize()
        props = resource['properties']
        if 'networkAcls' not in props:
            return []
        acls = props['networkAcls']
        # A default 'Allow' lets every service through regardless of the
        # bypass setting.
        if acls['defaultAction'] == 'Allow':
            return ['AzureServices']
        # Strip spaces so 'AzureServices, Logging' splits cleanly, then
        # drop empty entries (e.g. when bypass is 'None' minus matches).
        raw = acls.get('bypass', '').replace(' ', '')
        return [entry for entry in raw.split(',') if entry]
@KeyVault.filter_registry.register('whitelist')
class WhiteListFilter(Filter):
    """Match key vaults whose access policies are fully whitelisted.

    A vault passes only when every access policy entry either belongs to
    a principal listed in ``users`` (compared on ``key``) or grants no
    permissions beyond those listed in ``permissions``.
    """

    schema = type_schema('whitelist', rinherit=None,
                         required=['key'],
                         key={'type': 'string'},
                         users={'type': 'array'},
                         permissions={
                             'certificates': {'type': 'array'},
                             'secrets': {'type': 'array'},
                             'keys': {'type': 'array'}})

    # Keys that can only be resolved through an AAD graph lookup.
    GRAPH_PROVIDED_KEYS = ['displayName', 'aadType', 'principalName']
    # Class-level cache of the graph client, shared across invocations.
    graph_client = None

    def __init__(self, data, manager=None):
        super(WhiteListFilter, self).__init__(data, manager)
        self.key = self.data['key']
        # If not specified, initialize with empty list or dictionary.
        self.users = self.data.get('users', [])
        self.permissions = self.data.get('permissions', {})

    def __call__(self, i):
        if 'accessPolicies' not in i:
            client = self.manager.get_client()
            vault = client.vaults.get(i['resourceGroup'], i['name'])
            # Retrieve access policies for the keyvault
            access_policies = []
            for policy in vault.properties.access_policies:
                access_policies.append({
                    'tenantId': policy.tenant_id,
                    'objectId': policy.object_id,
                    'applicationId': policy.application_id,
                    'permissions': {
                        'keys': policy.permissions.keys,
                        'secrets': policy.permissions.secrets,
                        'certificates': policy.permissions.certificates
                    }
                })
            # Enhance access policies with displayName, aadType and
            # principalName if necessary
            if self.key in self.GRAPH_PROVIDED_KEYS:
                access_policies = self._enhance_policies(access_policies)
            # BUG FIX: previously the policies were only cached on the
            # resource when self.key was graph-provided, so any other key
            # (e.g. 'objectId') raised KeyError in the loop below.
            i['accessPolicies'] = access_policies

        # Ensure each policy is
        # - User is whitelisted
        # - Permissions don't exceed allowed permissions
        for p in i['accessPolicies']:
            if self.key not in p or p[self.key] not in self.users:
                if not self.compare_permissions(p['permissions'], self.permissions):
                    return False
        return True

    @staticmethod
    def compare_permissions(user_permissions, permissions):
        """Return True iff user_permissions is a subset of permissions."""
        for v in user_permissions.keys():
            if user_permissions[v]:
                if v not in permissions.keys():
                    # If user_permissions is not empty, but allowed permissions is empty -- Failed.
                    return False
                # Use lowercase to compare sets
                lower_user_perm = {x.lower() for x in user_permissions[v]}
                lower_perm = {x.lower() for x in permissions[v]}
                if lower_user_perm.difference(lower_perm):
                    # If user has more permissions than allowed -- Failed
                    return False
        return True

    def _enhance_policies(self, access_policies):
        """Resolve objectIds through the AAD graph and annotate policies."""
        if not access_policies:
            return access_policies

        if self.graph_client is None:
            s = Session(resource_endpoint_type=GRAPH_AUTH_ENDPOINT)
            self.graph_client = s.client('azure.graphrbac.GraphRbacManagementClient')

        # Retrieve graph objects for all object_id
        object_ids = [p['objectId'] for p in access_policies]

        # GraphHelper.get_principal_dictionary returns empty AADObject if not found with graph
        # or if graph is not available.
        principal_dics = GraphHelper.get_principal_dictionary(
            self.graph_client, object_ids, True)

        for policy in access_policies:
            aad_object = principal_dics[policy['objectId']]
            if aad_object.object_id:
                policy['displayName'] = aad_object.display_name
                policy['aadType'] = aad_object.object_type
                policy['principalName'] = GraphHelper.get_principal_name(aad_object)

        return access_policies
@KeyVault.action_registry.register('update-access-policy')
class KeyVaultUpdateAccessPolicyAction(AzureBaseAction):
    """Add to or replace the access policies of matched key vaults.

    .. code-block:: yaml

        policies:
          - name: azure-keyvault-update-access-policies
            resource: azure.keyvault
            description: |
              Add key get and list to all keyvault access policies
            actions:
              - type: update-access-policy
                operation: add
                access-policies:
                  - tenant-id: 00000000-0000-0000-0000-000000000000
                    object-id: 11111111-1111-1111-1111-111111111111
                    permissions:
                      keys:
                        - Get
                        - List
    """

    schema = type_schema('update-access-policy',
                         required=['operation', 'access-policies'],
                         operation={'type': 'string', 'enum': ['add', 'replace']},
                         **{
                             "access-policies": {
                                 'type': 'array',
                                 'items': {
                                     'type': 'object',
                                     'tenant-id': {'type': 'string'},
                                     'object-id': {'type': 'string'},
                                     'permissions': {
                                         'type': 'object',
                                         'keys': {'type': 'array', 'items': {'type': 'string'}},
                                         'secrets': {'type': 'array', 'items': {'type': 'string'}},
                                         'certificates': {'type': 'array', 'items': {'type': 'string'}}
                                     }
                                 }
                             }
                         })

    def _prepare_processing(self):
        # Cache the management client once per policy execution.
        self.client = self.manager.get_client()

    def _process_resource(self, resource):
        kind = self.data.get('operation')
        payload = self._transform_access_policies(self.data.get('access-policies'))
        try:
            self.client.vaults.update_access_policy(
                resource_group_name=resource['resourceGroup'],
                vault_name=resource['name'],
                operation_kind=kind,
                properties=payload
            )
        except Exception as error:
            # Best effort: log the failure and keep processing the
            # remaining resources.
            log.warning(error)

    @staticmethod
    def _transform_access_policies(access_policies):
        """Convert the policy's YAML keys into the SDK's camelCase payload."""
        policies = []
        for entry in access_policies:
            policies.append({"objectId": entry['object-id'],
                             "tenantId": entry['tenant-id'],
                             "permissions": entry['permissions']})
        return {"accessPolicies": policies}
| apache-2.0 |
rickerc/cinder_audit | cinder/wsgi.py | 1 | 16411 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
from __future__ import print_function
import errno
import os
import socket
import ssl
import sys
import time
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from paste import deploy
import routes.middleware
import webob.dec
import webob.exc
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import utils
# Configuration options controlling the WSGI server's listening socket;
# registered on the global CONF object below.
socket_opts = [
    cfg.IntOpt('backlog',
               default=4096,
               help="Number of backlog requests to configure the socket with"),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X."),
    cfg.StrOpt('ssl_ca_file',
               default=None,
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('ssl_cert_file',
               default=None,
               help="Certificate file to use when starting "
                    "the server securely"),
    cfg.StrOpt('ssl_key_file',
               default=None,
               help="Private key file to use when starting "
                    "the server securely"),
]

CONF = cfg.CONF
CONF.register_opts(socket_opts)

# Module-level logger for this file.
LOG = logging.getLogger(__name__)

# Raise the default from 8192 to accommodate large tokens
eventlet.wsgi.MAX_HEADER_LINE = 16384
class Server(object):
    """Server class to manage a WSGI server, serving a WSGI application."""

    # Upper bound on concurrently handled requests when the caller does
    # not pass an explicit pool_size.
    default_pool_size = 1000

    def __init__(self, name, app, host=None, port=None, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to server the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param protocol: HTTP protocol class handed to eventlet.wsgi.
        :returns: None

        """
        self.name = name
        self.app = app
        # Defaults: listen on all interfaces, port 0 (the kernel picks a
        # free port at bind time; the real one is read back in start()).
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")
        # File-like adapter so eventlet's log writes go to our logger.
        self._wsgi_logger = logging.WritableLogger(self._logger)

    def _get_socket(self, host, port, backlog):
        """Create, optionally SSL-wrap and configure the listening socket.

        Retries binding for up to 30 seconds to ride out EADDRINUSE
        (e.g. a previous instance still shutting down).
        """
        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Address resolution failed; fall back to plain IPv4.
            family = socket.AF_INET

        # SSL is considered enabled as soon as either file is set; both
        # must then be configured and exist on disk.
        cert_file = CONF.ssl_cert_file
        key_file = CONF.ssl_key_file
        ca_file = CONF.ssl_ca_file
        use_ssl = cert_file or key_file

        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

        if ca_file and not os.path.exists(ca_file):
            raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

        if key_file and not os.path.exists(key_file):
            raise RuntimeError(_("Unable to find key_file : %s") % key_file)

        if use_ssl and (not cert_file or not key_file):
            raise RuntimeError(_("When running server in SSL mode, you must "
                                 "specify both a cert_file and key_file "
                                 "option value in your configuration file"))

        def wrap_ssl(sock):
            # Client certificates are only demanded when a CA file is
            # configured to verify them against.
            ssl_kwargs = {
                'server_side': True,
                'certfile': cert_file,
                'keyfile': key_file,
                'cert_reqs': ssl.CERT_NONE,
            }

            if CONF.ssl_ca_file:
                ssl_kwargs['ca_certs'] = ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

            return ssl.wrap_socket(sock, **ssl_kwargs)

        sock = None
        retry_until = time.time() + 30
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr,
                                       backlog=backlog,
                                       family=family)
                if use_ssl:
                    sock = wrap_ssl(sock)
            except socket.error as err:
                # Only "address already in use" is retried; anything
                # else is fatal.
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                                 "after trying for 30 seconds") %
                               {'host': host, 'port': port})
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock

    def _start(self):
        """Run the blocking eventlet WSGI server.

        :returns: None

        """
        eventlet.wsgi.server(self._socket,
                             self.app,
                             protocol=self._protocol,
                             custom_pool=self._pool,
                             log=self._wsgi_logger)

    def start(self, backlog=128):
        """Start serving a WSGI application.

        :param backlog: Maximum number of queued connections.
        :returns: None
        :raises: cinder.exception.InvalidInput

        """
        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')

        self._socket = self._get_socket(self._host,
                                        self._port,
                                        backlog=backlog)
        # Serve in a green thread; start() itself does not block.
        self._server = eventlet.spawn(self._start)
        # Read back the actually bound address (relevant when port was 0).
        (self._host, self._port) = self._socket.getsockname()[0:2]
        LOG.info(_("Started %(name)s on %(host)s:%(port)s") %
                 {'name': self.name, 'host': self.host, 'port': self.port})

    @property
    def host(self):
        # Actual bound host once start() has run.
        return self._host

    @property
    def port(self):
        # Actual bound port once start() has run.
        return self._port

    def stop(self):
        """Stop this server.

        This is not a very nice action, as currently the method by which a
        server is stopped is by killing its eventlet.

        :returns: None

        """
        LOG.info(_("Stopping WSGI server."))
        self._server.kill()

    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None

        """
        try:
            self._server.wait()
        except greenlet.GreenletExit:
            # Raised by stop(); this is a normal shutdown, not an error.
            LOG.info(_("WSGI server has stopped."))
# Thin alias of webob.Request so cinder code can extend the request
# object in one place without referencing webob directly.
class Request(webob.Request):
    pass
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (that is, values under the [app:APPNAME]
        section of the paste config) is forwarded into ``__init__`` as
        keyword arguments.

        For a hypothetical configuration such as::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = cinder.api.fancy_api:Wadl.factory

        paste ends up calling ``fancy_api.Wadl(latest_version='1.3')``.

        Subclasses could re-implement ``factory``, but the kwarg
        pass-through normally makes that unnecessary.
        """
        return cls(**local_config)

    def __call__(self, environ, start_response):
        r"""Subclasses will probably want to implement __call__ like this:

        @webob.dec.wsgify(RequestClass=Request)
        def __call__(self, req):
            # Any of the following objects work as responses:

            # Option 1: simple string
            res = 'message\n'

            # Option 2: a nicely formatted HTTP exception page
            res = exc.HTTPForbidden(detail='Nice try')

            # Option 3: a webob Response object (when you need to tweak
            # headers, or want to be treated like an iterable)
            res = Response();
            res.app_iter = open('somefile')

            # Option 4: any wsgi app to be run next
            res = self.application

            # Option 5: you can also get a Response object for a wsgi
            # app to play with headers etc
            res = req.get_response(self.application)

            # You can then just return your response...
            return res
            # ... or set req.response and return None.
            req.response = res

        See the end of http://pythonpaste.org/webob/modules/dec.html
        for more info.
        """
        raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
    """Base WSGI middleware.

    Wraps another WSGI application that will be called next. By default
    the middleware simply forwards every request to its wrapped app;
    subclasses override ``process_request``/``process_response`` (or
    ``__call__``) to customize behaviour.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste filter factories in paste.deploy config files.

        Any local configuration (values under the [filter:APPNAME]
        section of the paste config) is forwarded into ``__init__`` as
        keyword arguments.

        For a hypothetical configuration such as::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = cinder.api.analytics:Analytics.factory

        paste invokes the returned callable with the wrapped app, which
        results in ``analytics.Analytics(app_from_paste,
        redis_host='127.0.0.1')``.
        """
        def _factory(app):
            return cls(app, **local_config)
        return _factory

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """Called on each request.

        Returning None lets the wrapped application run next; returning
        a response short-circuits the chain and that response is used.
        """
        return None

    def process_response(self, response):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        short_circuit = self.process_request(req)
        if short_circuit:
            return short_circuit
        return self.process_response(req.get_response(self.application))
class Debug(Middleware):
    """Helper class for debugging a WSGI application.

    Can be inserted into any WSGI application chain to get information
    about the request and response.
    """

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Dump the request environ, response headers and body to stdout."""
        print(('*' * 40) + ' REQUEST ENVIRON')
        for key, value in req.environ.items():
            print(key, '=', value)
        print()
        resp = req.get_response(self.application)

        print(('*' * 40) + ' RESPONSE HEADERS')
        # BUG FIX: use items() instead of the Python 2-only iteritems()
        # so this helper also works under Python 3; this matches the
        # environ loop above, which already uses items().
        for (key, value) in resp.headers.items():
            print(key, '=', value)
        print()

        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """Iterator that prints the contents of a wrapper string."""
        print(('*' * 40) + ' BODY')
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in *mapper* must specify a 'controller', which is a
        WSGI app to call. Usually an 'action' is also specified so the
        controller object can route the request to the action-specific
        method.

        Examples::

            mapper = routes.Mapper()
            sc = ServerController()

            # Explicit mapping of one route to a controller+action
            mapper.connect(None, '/svrlist', controller=sc, action='list')

            # Actions are all implicitly defined
            mapper.resource('server', 'servers', controller=sc)

            # Pointing to an arbitrary WSGI app. You can specify the
            # {path_info:.*} parameter so the target app can be handed
            # just that section of the URL.
            mapper.connect(None, '/v1.0/{path_info:.*}',
                           controller=BlogApp())
        """
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch the request to the controller matched by the router.

        Invoked by self._router after the route match has been stored in
        req.environ; returns either a 404 or the routed WSGI app's
        response.
        """
        routing_args = req.environ['wsgiorg.routing_args'][1]
        if not routing_args:
            return webob.exc.HTTPNotFound()
        return routing_args['controller']
class Loader(object):
    """Used to load WSGI applications from paste configurations."""

    def __init__(self, config_path=None):
        """Initialize the loader, and attempt to find the config.

        :param config_path: Full or relative path to the paste config.
        :returns: None
        """
        # Fall back to the configured api_paste_config option when no
        # explicit path is given.
        config_path = config_path or CONF.api_paste_config
        self.config_path = utils.find_config(config_path)

    def load_app(self, name):
        """Return the paste URLMap wrapped WSGI application.

        :param name: Name of the application to load.
        :returns: Paste URLMap object wrapping the requested application.
        :raises: `cinder.exception.PasteAppNotFound`
        """
        try:
            return deploy.loadapp("config:%s" % self.config_path, name=name)
        except LookupError as err:
            # Translate paste's generic lookup failure into the project's
            # more descriptive exception, preserving the offending path.
            LOG.error(err)
            raise exception.PasteAppNotFound(name=name, path=self.config_path)
| apache-2.0 |
hpi-xnor/BMXNet | example/rnn/old/rnn_cell_demo.py | 38 | 6539 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A simple demo of new RNN cell with PTB language model."""
import os
import numpy as np
import mxnet as mx
from bucket_io import BucketSentenceIter, default_build_vocab
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def Perplexity(label, pred):
    """Compute the perplexity of `pred` against the reference `label`.

    Labels arrive batch-major from the data iterator while predictions
    are time-major (axes were swapped for the RNN cell), so the labels
    are transposed and flattened before the per-position cross-entropy
    is accumulated.
    """
    flat_labels = label.T.reshape((-1,))
    total_loss = 0.0
    for idx in range(pred.shape[0]):
        # Clamp probabilities away from zero to avoid log(0).
        prob = max(1e-10, pred[idx][int(flat_labels[idx])])
        total_loss -= np.log(prob)
    return np.exp(total_loss / flat_labels.size)
if __name__ == '__main__':
    # Model / training hyper-parameters for the PTB language-model demo.
    batch_size = 128
    buckets = [10, 20, 30, 40, 50, 60]
    num_hidden = 200
    num_embed = 200
    num_lstm_layer = 2

    num_epoch = 2
    learning_rate = 0.01
    momentum = 0.0

    # NOTE(review): hard-coded to 4 GPUs -- adjust for the local machine.
    contexts = [mx.context.gpu(i) for i in range(4)]

    vocab = default_build_vocab(os.path.join(data_dir, 'ptb.train.txt'))

    # Initial LSTM states; shapes are batch-major as produced by the iterator.
    init_h = [('LSTM_init_h', (batch_size, num_lstm_layer, num_hidden))]
    init_c = [('LSTM_init_c', (batch_size, num_lstm_layer, num_hidden))]
    init_states = init_c + init_h

    data_train = BucketSentenceIter(os.path.join(data_dir, 'ptb.train.txt'),
                                    vocab, buckets, batch_size, init_states)
    data_val = BucketSentenceIter(os.path.join(data_dir, 'ptb.valid.txt'),
                                  vocab, buckets, batch_size, init_states)

    def sym_gen(seq_len):
        """Build the (symbol, data_names, label_names) triple for one bucket."""
        data = mx.sym.Variable('data')
        label = mx.sym.Variable('softmax_label')
        embed = mx.sym.Embedding(data=data, input_dim=len(vocab),
                                 output_dim=num_embed, name='embed')

        # TODO(tofix)
        # The inputs and labels from IO are all in batch-major.
        # We need to transform them into time-major to use RNN cells.
        embed_tm = mx.sym.SwapAxis(embed, dim1=0, dim2=1)
        label_tm = mx.sym.SwapAxis(label, dim1=0, dim2=1)

        # TODO(tofix)
        # Create transformed RNN initial states. Normally we do
        # no need to do this. But the RNN symbol expects the state
        # to be time-major shape layout, while the current mxnet
        # IO and high-level training logic assume everything from
        # the data iter have batch_size as the first dimension.
        # So until we have extended our IO and training logic to
        # support this more general case, this dummy axis swap is
        # needed.
        rnn_h_init = mx.sym.SwapAxis(mx.sym.Variable('LSTM_init_h'),
                                     dim1=0, dim2=1)
        rnn_c_init = mx.sym.SwapAxis(mx.sym.Variable('LSTM_init_c'),
                                     dim1=0, dim2=1)

        # TODO(tofix)
        # currently all the LSTM parameters are concatenated as
        # a huge vector, and named '<name>_parameters'. By default
        # mxnet initializer does not know how to initilize this
        # guy because its name does not ends with _weight or _bias
        # or anything familiar. Here we just use a temp workaround
        # to create a variable and name it as LSTM_bias to get
        # this demo running. Note by default bias is initialized
        # as zeros, so this is not a good scheme. But calling it
        # LSTM_weight is not good, as this is 1D vector, while
        # the initialization scheme of a weight parameter needs
        # at least two dimensions.
        rnn_params = mx.sym.Variable('LSTM_bias')

        # RNN cell takes input of shape (time, batch, feature)
        rnn = mx.sym.RNN(data=embed_tm, state_size=num_hidden,
                         num_layers=num_lstm_layer, mode='lstm',
                         name='LSTM',
                         # The following params can be omitted
                         # provided we do not need to apply the
                         # workarounds mentioned above
                         state=rnn_h_init,
                         state_cell=rnn_c_init,
                         parameters=rnn_params)

        # the RNN cell output is of shape (time, batch, dim)
        # if we need the states and cell states in the last time
        # step (e.g. when building encoder-decoder models), we
        # can set state_outputs=True, and the RNN cell will have
        # extra outputs: rnn['LSTM_output'], rnn['LSTM_state']
        # and for LSTM, also rnn['LSTM_state_cell']

        # now we collapse the time and batch dimension to do the
        # final linear logistic regression prediction
        hidden = mx.sym.Reshape(data=rnn, shape=(-1, num_hidden))
        label_cl = mx.sym.Reshape(data=label_tm, shape=(-1,))

        pred = mx.sym.FullyConnected(data=hidden, num_hidden=len(vocab),
                                     name='pred')
        sm = mx.sym.SoftmaxOutput(data=pred, label=label_cl, name='softmax')

        data_names = ['data', 'LSTM_init_h', 'LSTM_init_c']
        label_names = ['softmax_label']

        return (sm, data_names, label_names)

    # A single bucket needs only a plain Module; multiple buckets require a
    # BucketingModule that switches symbols by sequence length.
    if len(buckets) == 1:
        mod = mx.mod.Module(*sym_gen(buckets[0]), context=contexts)
    else:
        mod = mx.mod.BucketingModule(sym_gen, default_bucket_key=data_train.default_bucket_key,
                                     context=contexts)

    import logging
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=head)

    mod.fit(data_train, eval_data=data_val, num_epoch=num_epoch,
            eval_metric=mx.metric.np(Perplexity),
            batch_end_callback=mx.callback.Speedometer(batch_size, 50),
            initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
            optimizer='sgd',
            optimizer_params={'learning_rate': learning_rate,
                              'momentum': momentum, 'wd': 0.00001})
| apache-2.0 |
sund/namebench | libnamebench/selectors_test.py | 175 | 3406 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the selector module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import selectors
import unittest
class SelectorsTest(unittest.TestCase):
  """Unit tests for the selection strategies in the selectors module."""

  def testMaxRepeatCount(self):
    self.assertEquals(selectors.MaxRepeatCount(range(1,10), 5),
                      selectors.MAX_REPEAT)
    self.assertEquals(selectors.MaxRepeatCount(range(1,10), 50),
                      2**32)

  def testRandomSelect(self):
    elements = range(10)
    result = selectors.RandomSelect(elements, 10)
    self.assertEquals(len(result), 10)
    # With overwhelming probability the shuffle differs from sorted input.
    self.assertNotEquals(result, range(10))

  def testRandomSelectConstrained(self):
    elements = range(5)
    result = selectors.RandomSelect(elements, 10)
    self.assertEquals(len(result), 10)
    ones = [x for x in result if x == 1]
    twos = [x for x in result if x == 2]
    self.assertTrue(len(ones) <= selectors.MAX_REPEAT)
    self.assertTrue(len(twos) <= selectors.MAX_REPEAT)

  def testRandomSelectVeryConstrained(self):
    """Test to make sure we don't infinite loop if count > len(elements)*3"""
    elements = range(2)
    result = selectors.RandomSelect(elements, 20)
    self.assertEquals(len(result), 20)
    ones = [x for x in result if x == 1]
    twos = [x for x in result if x == 2]
    # NOTE(review): these assertions compare a *list* with an int, which on
    # Python 2 is always True regardless of contents -- len(...) was almost
    # certainly intended. Also elements is range(2) == [0, 1], so `twos` is
    # always empty; the filters were probably meant to be x == 0 and x == 1.
    # Confirm the intended invariant before tightening.
    self.assertTrue(ones > selectors.MAX_REPEAT)
    self.assertTrue(twos > selectors.MAX_REPEAT)

  def testWeightedDistribution(self):
    """Ensure that a weighted distribution is indeed weighted."""
    elements = range(20)
    result = selectors.WeightedDistribution(elements, 10)
    self.assertEquals(len(result), 10)
    zeros = [x for x in result if x == 0]
    ones = [x for x in result if x == 1]
    low = [x for x in result if x < 3]
    mid = [x for x in result if x > 7 and x < 13]
    high = [x for x in result if x > 17]
    self.assertTrue(len(zeros) <= selectors.MAX_REPEAT)
    self.assertTrue(len(ones) <= selectors.MAX_REPEAT)
    # NOTE(review): these bounds are probabilistic; the test can flake.
    self.assertTrue(len(low) >= 3)
    self.assertTrue(len(mid) <= 3)
    self.assertTrue(len(high) <= 2)

  def testChuckSelect(self):
    # NOTE(review): method name is a typo for testChunkSelect; kept as-is to
    # avoid changing the unittest-discovered interface.
    elements = range(10000)
    result = selectors.ChunkSelect(elements, 5)
    self.assertEquals(len(result), 5)
    # Make sure our segment is a subset
    self.assertTrue(set(result).issubset(set(elements)))
    # Make sure our segment is contiguous
    self.assertEquals(result, range(result[0], result[0]+5))
    result2 = selectors.ChunkSelect(elements, 5)
    self.assertEquals(len(result), 5)
    # Two independently chosen chunks should (very likely) differ.
    self.assertNotEquals(result, result2)

  def testChunkSelectConstrained(self):
    """Make sure we aren't inventing bogus data."""
    elements = range(20)
    result = selectors.ChunkSelect(elements, 25)
    self.assertEquals(len(result), 20)
    self.assertEquals(elements, result)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
rothnic/bokeh | bokeh/charts/builder/tests/test_line_builder.py | 33 | 2376 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Line
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestLine(unittest.TestCase):
    """Tests the input formats accepted by the Line chart builder."""

    def test_supported_input(self):
        xyvalues = OrderedDict()
        y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
        y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
        y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
        xyvaluesdf = pd.DataFrame(xyvalues)

        # Keyed inputs (OrderedDict, DataFrame) must preserve the series
        # names as group labels.
        for i, _xy in enumerate([xyvalues, xyvaluesdf]):
            hm = create_chart(Line, _xy)
            builder = hm._builders[0]
            self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
            assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
            assert_array_equal(builder._data['y_python'], y_python)
            assert_array_equal(builder._data['y_pypy'], y_pypy)
            assert_array_equal(builder._data['y_jython'], y_jython)

        # Unkeyed inputs (list of lists, ndarray) fall back to stringified
        # indices as group labels.
        lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
        for _xy in [lvalues, np.array(lvalues)]:
            hm = create_chart(Line, _xy)
            builder = hm._builders[0]
            self.assertEqual(builder._groups, ['0', '1', '2'])
            assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
            assert_array_equal(builder._data['y_0'], y_python)
            assert_array_equal(builder._data['y_1'], y_pypy)
            assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
Karel-van-de-Plassche/bokeh | bokeh/server/views/ws.py | 2 | 9915 | ''' Provide a web socket handler for the Bokeh Server application.
'''
from __future__ import absolute_import, print_function
import logging
log = logging.getLogger(__name__)
import codecs
from six.moves.urllib.parse import urlparse
from tornado import gen, locks
from tornado.websocket import StreamClosedError, WebSocketHandler, WebSocketClosedError
from ..protocol_handler import ProtocolHandler
from ...protocol import Protocol
from ...protocol.exceptions import MessageError, ProtocolError, ValidationError
from ...protocol.message import Message
from ...protocol.receiver import Receiver
from bokeh.util.session_id import check_session_id_signature
class WSHandler(WebSocketHandler):
    ''' Implements a custom Tornado WebSocketHandler for the Bokeh Server.

    '''
    def __init__(self, tornado_app, *args, **kw):
        # Receiver/handler/connection are created lazily in _async_open().
        self.receiver = None
        self.handler = None
        self.connection = None
        self.application_context = kw['application_context']
        self.latest_pong = -1
        # write_lock allows us to lock the connection to send multiple
        # messages atomically.
        self.write_lock = locks.Lock()

        # Note: tornado_app is stored as self.application
        super(WSHandler, self).__init__(tornado_app, *args, **kw)

    def initialize(self, application_context, bokeh_websocket_path):
        # Arguments are consumed in __init__ via kw; nothing to do here.
        pass

    def check_origin(self, origin):
        ''' Implement a check_origin policy for Tornado to call.

        The supplied origin will be compared to the Bokeh server whitelist. If the
        origin is not allowed, an error will be logged and ``False`` will be returned.

        Args:
            origin (str) :
                The URL of the connection origin

        Returns:
            bool, True if the connection is allowed, False otherwise

        '''
        from ..util import check_whitelist
        parsed_origin = urlparse(origin)
        origin_host = parsed_origin.netloc.lower()

        allowed_hosts = self.application.websocket_origins

        allowed = check_whitelist(origin_host, allowed_hosts)
        if allowed:
            return True
        else:
            log.error("Refusing websocket connection from Origin '%s'; \
use --allow-websocket-origin=%s to permit this; currently we allow origins %r",
                      origin, origin_host, allowed_hosts)
            return False

    def open(self):
        ''' Initialize a connection to a client.

        Returns:
            None

        '''
        log.info('WebSocket connection opened')

        proto_version = self.get_argument("bokeh-protocol-version", default=None)
        if proto_version is None:
            self.close()
            raise ProtocolError("No bokeh-protocol-version specified")

        session_id = self.get_argument("bokeh-session-id", default=None)
        if session_id is None:
            self.close()
            raise ProtocolError("No bokeh-session-id specified")

        if not check_session_id_signature(session_id,
                                          signed=self.application.sign_sessions,
                                          secret_key=self.application.secret_key):
            log.error("Session id had invalid signature: %r", session_id)
            raise ProtocolError("Invalid session ID")

        def on_fully_opened(future):
            e = future.exception()
            if e is not None:
                # this isn't really an error (unless we have a
                # bug), it just means a client disconnected
                # immediately, most likely.
                log.debug("Failed to fully open connection %r", e)

        # The rest of the handshake is asynchronous; completion (or failure)
        # is observed via on_fully_opened above.
        future = self._async_open(session_id, proto_version)
        self.application.io_loop.add_future(future, on_fully_opened)

    @gen.coroutine
    def _async_open(self, session_id, proto_version):
        ''' Perform the specific steps needed to open a connection to a Bokeh session

        Specifically, this method coordinates:

        * Getting a session for a session ID (creating a new one if needed)
        * Creating a protocol receiver and handler
        * Opening a new ServerConnection and sending it an ACK

        Args:
            session_id (str) :
                A session ID to for a session to connect to

                If no session exists with the given ID, a new session is made

            proto_version (str):
                The protocol version requested by the connecting client.

        Returns:
            None

        '''
        try:
            yield self.application_context.create_session_if_needed(session_id, self.request)
            session = self.application_context.get_session(session_id)

            protocol = Protocol(proto_version)
            self.receiver = Receiver(protocol)
            log.debug("Receiver created for %r", protocol)

            self.handler = ProtocolHandler()
            log.debug("ProtocolHandler created for %r", protocol)

            self.connection = self.application.new_connection(protocol, self, self.application_context, session)
            log.info("ServerConnection created")

        except ProtocolError as e:
            log.error("Could not create new server session, reason: %s", e)
            self.close()
            raise e

        msg = self.connection.protocol.create('ACK')
        yield self.send_message(msg)

        raise gen.Return(None)

    @gen.coroutine
    def on_message(self, fragment):
        ''' Process an individual wire protocol fragment.

        The websocket RFC specifies opcodes for distinguishing text frames
        from binary frames. Tornado passes us either a text or binary string
        depending on that opcode, we have to look at the type of the fragment
        to see what we got.

        Args:
            fragment (unicode or bytes) : wire fragment to process

        '''
        # We shouldn't throw exceptions from on_message because the caller is
        # just Tornado and it doesn't know what to do with them other than
        # report them as an unhandled Future
        try:
            message = yield self._receive(fragment)
        except Exception as e:
            # If you go look at self._receive, it's catching the
            # expected error types... here we have something weird.
            log.error("Unhandled exception receiving a message: %r: %r", e, fragment, exc_info=True)
            self._internal_error("server failed to parse a message")

        try:
            if message:
                work = yield self._handle(message)
                if work:
                    yield self._schedule(work)
        except Exception as e:
            log.error("Handler or its work threw an exception: %r: %r", e, message, exc_info=True)
            self._internal_error("server failed to handle a message")

        raise gen.Return(None)

    def on_pong(self, data):
        # if we get an invalid integer or utf-8 back, either we
        # sent a buggy ping or the client is evil/broken.
        try:
            self.latest_pong = int(codecs.decode(data, 'utf-8'))
        except UnicodeDecodeError:
            log.trace("received invalid unicode in pong %r", data, exc_info=True)
        except ValueError:
            log.trace("received invalid integer in pong %r", data, exc_info=True)

    @gen.coroutine
    def send_message(self, message):
        ''' Send a Bokeh Server protocol message to the connected client.

        Args:
            message (Message) : a message to send

        '''
        try:
            yield message.send(self)
        except (WebSocketClosedError, StreamClosedError): # Tornado 4.x may raise StreamClosedError
            # on_close() is / will be called anyway
            log.warn("Failed sending message as connection was closed")
        raise gen.Return(None)

    @gen.coroutine
    def write_message(self, message, binary=False, locked=True):
        ''' Override parent write_message with a version that acquires a
        write lock before writing.

        '''
        if locked:
            with (yield self.write_lock.acquire()):
                yield super(WSHandler, self).write_message(message, binary)
        else:
            yield super(WSHandler, self).write_message(message, binary)

    def on_close(self):
        ''' Clean up when the connection is closed.

        '''
        log.info('WebSocket connection closed: code=%s, reason=%r', self.close_code, self.close_reason)
        if self.connection is not None:
            self.application.client_lost(self.connection)

    @gen.coroutine
    def _receive(self, fragment):
        # Receive fragments until a complete message is assembled
        try:
            message = yield self.receiver.consume(fragment)
            raise gen.Return(message)
        except (MessageError, ProtocolError, ValidationError) as e:
            self._protocol_error(str(e))
            raise gen.Return(None)

    @gen.coroutine
    def _handle(self, message):
        # Handle the message, possibly resulting in work to do
        try:
            work = yield self.handler.handle(message, self.connection)
            raise gen.Return(work)
        except (MessageError, ProtocolError, ValidationError) as e: # TODO (other exceptions?)
            self._internal_error(str(e))
            raise gen.Return(None)

    @gen.coroutine
    def _schedule(self, work):
        # Work produced by a handler is either a Message to send back, or
        # unexpected (treated as an internal error).
        if isinstance(work, Message):
            yield self.send_message(work)
        else:
            self._internal_error("expected a Message not " + repr(work))

        raise gen.Return(None)

    def _internal_error(self, message):
        # Close with the server's private "internal error" close code.
        log.error("Bokeh Server internal error: %s, closing connection", message)
        self.close(10000, message)

    def _protocol_error(self, message):
        # Close with the server's private "protocol error" close code.
        log.error("Bokeh Server protocol error: %s, closing connection", message)
        self.close(10001, message)
| bsd-3-clause |
koomik/CouchPotatoServer | libs/enzyme/__init__.py | 168 | 2414 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
import mimetypes
import os
import sys
from exceptions import *
# Registry of available sub-parsers: each entry is
# (module name, handled mimetypes, handled file extensions).
PARSERS = [('asf', ['video/asf'], ['asf', 'wmv', 'wma']),
           ('flv', ['video/flv'], ['flv']),
           ('mkv', ['video/x-matroska', 'application/mkv'], ['mkv', 'mka', 'webm']),
           ('mp4', ['video/quicktime', 'video/mp4'], ['mov', 'qt', 'mp4', 'mp4a', '3gp', '3gp2', '3g2', 'mk2']),
           ('mpeg', ['video/mpeg'], ['mpeg', 'mpg', 'mp4', 'ts']),
           ('ogm', ['application/ogg'], ['ogm', 'ogg', 'ogv']),
           ('real', ['video/real'], ['rm', 'ra', 'ram']),
           ('riff', ['video/avi'], ['wav', 'avi'])
           ]
def parse(path):
    """Parse metadata of the given video

    :param string path: path to the video file to parse
    :return: a parser corresponding to the video's mimetype or extension
    :rtype: :class:`~enzyme.core.AVContainer`

    """
    if not os.path.isfile(path):
        raise ValueError('Invalid path')
    extension = os.path.splitext(path)[1][1:]
    mimetype = mimetypes.guess_type(path)[0]
    parser_ext = None
    parser_mime = None
    for (parser_name, parser_mimetypes, parser_extensions) in PARSERS:
        if mimetype in parser_mimetypes:
            parser_mime = parser_name
        if extension in parser_extensions:
            parser_ext = parser_name
    # A mimetype match takes precedence over an extension match.
    parser = parser_mime or parser_ext
    if not parser:
        raise NoParserError()
    # NOTE(review): level=-1 (implicit relative import) only exists on
    # Python 2; this call will fail on Python 3 -- confirm targeted runtime.
    mod = __import__(parser, globals=globals(), locals=locals(), fromlist=[], level=-1)
    with open(path, 'rb') as f:
        p = mod.Parser(f)
    return p
| gpl-3.0 |
huiren/ece511 | src/arch/x86/isa/insts/general_purpose/compare_and_test/set_byte_on_condition.py | 91 | 7884 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop SALC_R
{
sbb reg, reg, reg, dataSize=1
};
def macroop SETZ_R
{
movi reg, reg, 1, flags=(CZF,)
movi reg, reg, 0, flags=(nCZF,)
};
def macroop SETZ_M
{
movi t1, t1, 1, flags=(CZF,)
movi t1, t1, 0, flags=(nCZF,)
st t1, seg, sib, disp
};
def macroop SETZ_P
{
rdip t7
movi t1, t1, 1, flags=(CZF,)
movi t1, t1, 0, flags=(nCZF,)
st t1, seg, riprel, disp
};
def macroop SETNZ_R
{
movi reg, reg, 1, flags=(nCZF,)
movi reg, reg, 0, flags=(CZF,)
};
def macroop SETNZ_M
{
movi t1, t1, 1, flags=(nCZF,)
movi t1, t1, 0, flags=(CZF,)
st t1, seg, sib, disp
};
def macroop SETNZ_P
{
rdip t7
movi t1, t1, 1, flags=(nCZF,)
movi t1, t1, 0, flags=(CZF,)
st t1, seg, riprel, disp
};
def macroop SETB_R
{
movi reg, reg, 1, flags=(CCF,)
movi reg, reg, 0, flags=(nCCF,)
};
def macroop SETB_M
{
movi t1, t1, 1, flags=(CCF,)
movi t1, t1, 0, flags=(nCCF,)
st t1, seg, sib, disp
};
def macroop SETB_P
{
rdip t7
movi t1, t1, 1, flags=(CCF,)
movi t1, t1, 0, flags=(nCCF,)
st t1, seg, riprel, disp
};
def macroop SETNB_R
{
movi reg, reg, 1, flags=(nCCF,)
movi reg, reg, 0, flags=(CCF,)
};
def macroop SETNB_M
{
movi t1, t1, 1, flags=(nCCF,)
movi t1, t1, 0, flags=(CCF,)
st t1, seg, sib, disp
};
def macroop SETNB_P
{
rdip t7
movi t1, t1, 1, flags=(nCCF,)
movi t1, t1, 0, flags=(CCF,)
st t1, seg, riprel, disp
};
def macroop SETBE_R
{
movi reg, reg, 1, flags=(CCvZF,)
movi reg, reg, 0, flags=(nCCvZF,)
};
def macroop SETBE_M
{
movi t1, t1, 1, flags=(CCvZF,)
movi t1, t1, 0, flags=(nCCvZF,)
st t1, seg, sib, disp
};
def macroop SETBE_P
{
rdip t7
movi t1, t1, 1, flags=(CCvZF,)
movi t1, t1, 0, flags=(nCCvZF,)
st t1, seg, riprel, disp
};
def macroop SETNBE_R
{
movi reg, reg, 1, flags=(nCCvZF,)
movi reg, reg, 0, flags=(CCvZF,)
};
def macroop SETNBE_M
{
movi t1, t1, 1, flags=(nCCvZF,)
movi t1, t1, 0, flags=(CCvZF,)
st t1, seg, sib, disp
};
def macroop SETNBE_P
{
rdip t7
movi t1, t1, 1, flags=(nCCvZF,)
movi t1, t1, 0, flags=(CCvZF,)
st t1, seg, riprel, disp
};
def macroop SETS_R
{
movi reg, reg, 1, flags=(CSF,)
movi reg, reg, 0, flags=(nCSF,)
};
def macroop SETS_M
{
movi t1, t1, 1, flags=(CSF,)
movi t1, t1, 0, flags=(nCSF,)
st t1, seg, sib, disp
};
def macroop SETS_P
{
rdip t7
movi t1, t1, 1, flags=(CSF,)
movi t1, t1, 0, flags=(nCSF,)
st t1, seg, riprel, disp
};
def macroop SETNS_R
{
movi reg, reg, 1, flags=(nCSF,)
movi reg, reg, 0, flags=(CSF,)
};
def macroop SETNS_M
{
movi t1, t1, 1, flags=(nCSF,)
movi t1, t1, 0, flags=(CSF,)
st t1, seg, sib, disp
};
def macroop SETNS_P
{
rdip t7
movi t1, t1, 1, flags=(nCSF,)
movi t1, t1, 0, flags=(CSF,)
st t1, seg, riprel, disp
};
def macroop SETP_R
{
movi reg, reg, 1, flags=(CPF,)
movi reg, reg, 0, flags=(nCPF,)
};
def macroop SETP_M
{
movi t1, t1, 1, flags=(CPF,)
movi t1, t1, 0, flags=(nCPF,)
st t1, seg, sib, disp
};
def macroop SETP_P
{
rdip t7
movi t1, t1, 1, flags=(CPF,)
movi t1, t1, 0, flags=(nCPF,)
st t1, seg, riprel, disp
};
def macroop SETNP_R
{
movi reg, reg, 1, flags=(nCPF,)
movi reg, reg, 0, flags=(CPF,)
};
def macroop SETNP_M
{
movi t1, t1, 1, flags=(nCPF,)
movi t1, t1, 0, flags=(CPF,)
st t1, seg, sib, disp
};
def macroop SETNP_P
{
rdip t7
movi t1, t1, 1, flags=(nCPF,)
movi t1, t1, 0, flags=(CPF,)
st t1, seg, riprel, disp
};
def macroop SETL_R
{
movi reg, reg, 1, flags=(CSxOF,)
movi reg, reg, 0, flags=(nCSxOF,)
};
def macroop SETL_M
{
movi t1, t1, 1, flags=(CSxOF,)
movi t1, t1, 0, flags=(nCSxOF,)
st t1, seg, sib, disp
};
def macroop SETL_P
{
rdip t7
movi t1, t1, 1, flags=(CSxOF,)
movi t1, t1, 0, flags=(nCSxOF,)
st t1, seg, riprel, disp
};
def macroop SETNL_R
{
movi reg, reg, 1, flags=(nCSxOF,)
movi reg, reg, 0, flags=(CSxOF,)
};
def macroop SETNL_M
{
movi t1, t1, 1, flags=(nCSxOF,)
movi t1, t1, 0, flags=(CSxOF,)
st t1, seg, sib, disp
};
def macroop SETNL_P
{
rdip t7
movi t1, t1, 1, flags=(nCSxOF,)
movi t1, t1, 0, flags=(CSxOF,)
st t1, seg, riprel, disp
};
def macroop SETLE_R
{
movi reg, reg, 1, flags=(CSxOvZF,)
movi reg, reg, 0, flags=(nCSxOvZF,)
};
def macroop SETLE_M
{
movi t1, t1, 1, flags=(CSxOvZF,)
movi t1, t1, 0, flags=(nCSxOvZF,)
st t1, seg, sib, disp
};
def macroop SETLE_P
{
rdip t7
movi t1, t1, 1, flags=(CSxOvZF,)
movi t1, t1, 0, flags=(nCSxOvZF,)
st t1, seg, riprel, disp
};
def macroop SETNLE_R
{
movi reg, reg, 1, flags=(nCSxOvZF,)
movi reg, reg, 0, flags=(CSxOvZF,)
};
def macroop SETNLE_M
{
movi t1, t1, 1, flags=(nCSxOvZF,)
movi t1, t1, 0, flags=(CSxOvZF,)
st t1, seg, sib, disp
};
def macroop SETNLE_P
{
rdip t7
movi t1, t1, 1, flags=(nCSxOvZF,)
movi t1, t1, 0, flags=(CSxOvZF,)
st t1, seg, riprel, disp
};
def macroop SETO_R
{
movi reg, reg, 1, flags=(COF,)
movi reg, reg, 0, flags=(nCOF,)
};
def macroop SETO_M
{
movi t1, t1, 1, flags=(COF,)
movi t1, t1, 0, flags=(nCOF,)
st t1, seg, sib, disp
};
def macroop SETO_P
{
rdip t7
movi t1, t1, 1, flags=(COF,)
movi t1, t1, 0, flags=(nCOF,)
st t1, seg, riprel, disp
};
def macroop SETNO_R
{
movi reg, reg, 1, flags=(nCOF,)
movi reg, reg, 0, flags=(COF,)
};
def macroop SETNO_M
{
movi t1, t1, 1, flags=(nCOF,)
movi t1, t1, 0, flags=(COF,)
st t1, seg, sib, disp
};
def macroop SETNO_P
{
rdip t7
movi t1, t1, 1, flags=(nCOF,)
movi t1, t1, 0, flags=(COF,)
st t1, seg, riprel, disp
};
'''
| bsd-3-clause |
julen/translate | translate/storage/workflow.py | 25 | 8994 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
A workflow is defined by a set of states that a translation unit can be in and
the (allowed) transitions between these states. A state is defined by a range
between -128 and 127, indicating its level of "completeness". The range is
closed at the beginning and open at the end. That is, if a workflow contains
states A, B and C where A < B < C, a unit with state number n is in state A if
A <= n < B, state B if B <= n < C or state C if C <= n < MAX.
A value of 0 is typically the "empty" or "new" state with negative values
reserved for states like "obsolete" or "do not use".
Format specific workflows should be defined in such a way that the numeric
state values correspond to similar states. For example state 0 should be
"untranslated" in PO and "new" or "empty" in XLIFF, state 100 should be
"translated" in PO and "final" in XLIFF. This allows formats to implicitly
define similar states.
"""
class StateEnum:
    """Only contains the constants for default states.

    Values are ordered by "completeness" (see the module docstring):
    negative values are reserved for unusable states, and positive
    values move from untranslated/empty toward final.
    """
    MIN = -128        # lowest representable state value
    OBSOLETE = -100   # "do not use" range starts here
    EMPTY = 0         # untranslated / new
    NEEDS_WORK = 30
    REJECTED = 60
    NEEDS_REVIEW = 80
    UNREVIEWED = 100  # translated but not yet reviewed
    FINAL = 120
    MAX = 127         # highest representable state value
class State(object):
    """A named workflow state with optional enter/leave callbacks.

    ``enter_action``/``leave_action``, when callable, are invoked with the
    workflow object whenever the state is entered or left.
    """

    def __init__(self, name, enter_action=None, leave_action=None):
        self.name = name
        self.enter_action = enter_action
        self.leave_action = leave_action

    def __eq__(self, rhs):
        # States are considered equal if their names match.
        return self.name == rhs.name

    def __ne__(self, rhs):
        # Python 2 does not derive __ne__ from __eq__, so without this
        # "state1 != state2" would silently fall back to identity.
        return not self.__eq__(rhs)

    def __repr__(self):
        return '<State "%s">' % (self.name)

    def enter(self, obj):
        """Run the enter action (if set and callable) with ``obj``."""
        if not self.enter_action or not callable(self.enter_action):
            return
        self.enter_action(obj)

    def leave(self, obj):
        """Run the leave action (if set and callable) with ``obj``."""
        if not self.leave_action or not callable(self.leave_action):
            return
        self.leave_action(obj)
class UnitState(State):
    """A State that carries a numeric state value and stamps it onto a
    translation unit when the state is entered."""

    def __init__(self, name, state_value):
        self.state_value = state_value
        # Entering this state writes the numeric value onto the unit.
        super(UnitState, self).__init__(name, self._apply_value)

    def __repr__(self):
        return '<UnitState name=%s value=%d>' % (self.name, self.state_value)

    def _apply_value(self, unit):
        unit.set_state_n(self.state_value)
class WorkflowError(Exception):
    """Base class for all workflow-related errors."""
    pass


class NoInitialStateError(WorkflowError):
    """Raised when a workflow is reset without an initial state."""
    pass


class TransitionError(WorkflowError):
    """Raised when a requested state transition is not allowed."""
    pass


class InvalidStateObjectError(WorkflowError):
    """Raised when an object that is not a ``State`` is used as one."""

    def __init__(self, obj):
        super(InvalidStateObjectError, self).__init__('Invalid state object: %s' % (obj))


class StateNotInWorkflowError(WorkflowError):
    """Raised when a state (or state name) is not part of the workflow."""

    def __init__(self, state):
        # Derive from WorkflowError (not bare Exception) so that callers
        # catching WorkflowError also catch this, consistent with the
        # other error classes in this module.
        super(StateNotInWorkflowError, self).__init__(
            'State not in workflow: %s' % (state))
class Workflow(object):
    """A set of states and the allowed transitions (edges) between them,
    tracking the current state of an associated workflow object."""

    # INITIALISERS #
    def __init__(self, wf_obj=None):
        self._current_state = None
        self._edges = []
        self._initial_state = None
        self._states = []
        self._workflow_obj = wf_obj

    # ACCESSORS #
    def _get_edges(self):
        # Return a copy so callers cannot mutate the internal list.
        return list(self._edges)
    edges = property(_get_edges)

    def _get_states(self):
        # Return a copy so callers cannot mutate the internal list.
        return list(self._states)
    states = property(_get_states)

    # METHODS #
    def add_edge(self, from_state, to_state):
        """Allow a transition from ``from_state`` to ``to_state``.

        Either argument may be a ``State`` object or a state name."""
        if isinstance(from_state, basestring):
            from_state = self.get_state_by_name(from_state)
        if isinstance(to_state, basestring):
            to_state = self.get_state_by_name(to_state)
        for s in (from_state, to_state):
            if s not in self.states:
                raise StateNotInWorkflowError(s)
        if (from_state, to_state) in self.edges:
            return  # Edge already exists. Return quietly
        self._edges.append((from_state, to_state))

    def add_state(self, state):
        """Add ``state`` to the workflow.  The first state added becomes
        the default initial state."""
        if not isinstance(state, State):
            raise InvalidStateObjectError(state)
        if state in self.states:
            raise ValueError('State already in workflow: %s' % (state))
        self._states.append(state)
        if self._initial_state is None:
            self._initial_state = state

    def get_from_states(self):
        """Returns a list of states that can be transitioned from to the
        current state."""
        return [e[0] for e in self.edges if e[1] is self._current_state]

    def get_to_states(self):
        """Returns a list of states that can be transitioned to from the
        current state."""
        return [e[1] for e in self.edges if e[0] is self._current_state]

    def get_state_by_name(self, state_name):
        """Get the ``State`` object for the given name."""
        for s in self.states:
            if s.name == state_name:
                return s
        else:
            raise StateNotInWorkflowError(state_name)

    def set_current_state(self, state):
        """Set the current state. This is absolute and not subject to edge
        constraints. The current state's ``leave`` and the new state's
        ``enter`` method is still called. For edge transitions, see the
        ``trans`` method."""
        if isinstance(state, basestring):
            state = self.get_state_by_name(state)
        if state not in self.states:
            raise StateNotInWorkflowError(state)
        if self._current_state:
            self._current_state.leave(self._workflow_obj)
        self._current_state = state
        self._current_state.enter(self._workflow_obj)

    def set_initial_state(self, state):
        """Sets the initial state, used by the :meth:`.reset` method."""
        if isinstance(state, basestring):
            state = self.get_state_by_name(state)
        if not isinstance(state, State):
            raise InvalidStateObjectError(state)
        if state not in self.states:
            raise StateNotInWorkflowError(state)
        self._initial_state = state

    def reset(self, wf_obj, init_state=None):
        """Reset the work flow to the initial state using the given object."""
        self._workflow_obj = wf_obj
        if init_state is not None:
            if isinstance(init_state, basestring):
                init_state = self.get_state_by_name(init_state)
            if init_state not in self.states:
                # Bug fix: StateNotInWorkflowError requires the offending
                # state as an argument; raising it bare was a TypeError.
                raise StateNotInWorkflowError(init_state)
            self._initial_state = init_state
            self._current_state = init_state
            return
        if self._initial_state is None:
            raise NoInitialStateError()
        # Clear the current state so no leave action fires on reset.
        self._current_state = None
        self.set_current_state(self._initial_state)

    def trans(self, to_state=None):
        """Transition to the given state. If no state is given, the first one
        returned by ``get_to_states`` is used."""
        if self._current_state is None:
            raise ValueError('No current state set')
        if isinstance(to_state, basestring):
            to_state = self.get_state_by_name(to_state)
        if to_state is None:
            to_state = self.get_to_states()
            if not to_state:
                raise TransitionError('No state to transition to')
            to_state = to_state[0]
        if to_state not in self.states:
            raise StateNotInWorkflowError(to_state)
        if (self._current_state, to_state) not in self.edges:
            # Bug fix in message: the missing link is between two *states*.
            raise TransitionError('No edge between states %s and %s' % (
                self._current_state, to_state))
        self._current_state.leave(self._workflow_obj)
        self._current_state = to_state
        self._current_state.enter(self._workflow_obj)
def create_unit_workflow(unit, state_names):
    """Build a Workflow for ``unit`` from its STATE ranges, using
    ``state_names`` to map state ids to display names."""
    workflow = Workflow(unit)
    first_state = None
    previous_name = None
    # Walk the state ranges in ascending order of state id.
    for state_id, state_range in sorted(unit.STATE.items(), key=lambda item: item[0]):
        low = state_range[0]
        if low < 0:
            # Negative ranges (obsolete etc.) are not part of the workflow.
            continue
        name = state_names[state_id]
        # We use the low end value, because the range is closed there.
        state = UnitState(name, low)
        workflow.add_state(state)
        # The first non-negative state becomes the initial state.
        if first_state is None:
            first_state = state
        if previous_name:
            workflow.add_edge(previous_name, name)
        previous_name = name
    if first_state:
        workflow.set_initial_state(first_state)
    return workflow
| gpl-2.0 |
ramondelafuente/ansible | lib/ansible/utils/cmd_functions.py | 61 | 2044 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import shlex
import subprocess
import select
def run_cmd(cmd, live=False, readsize=10):
    """Run a command, capturing its stdout and stderr.

    :param cmd: command string; split with shlex (no shell involved)
    :param live: if True, also echo output to sys.stdout as it is read
    :param readsize: number of bytes to read per pipe per select round
    :return: tuple of (returncode, stdout, stderr)
    """
    cmdargs = shlex.split(cmd)
    p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    stdout = ''
    stderr = ''
    rpipes = [p.stdout, p.stderr]
    while True:
        # Wait up to 1s for either pipe to become readable.
        rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)

        if p.stdout in rfd:
            dat = os.read(p.stdout.fileno(), readsize)
            if live:
                sys.stdout.write(dat)
            stdout += dat
            if dat == '':
                # EOF on stdout; stop selecting on it.
                rpipes.remove(p.stdout)
        if p.stderr in rfd:
            dat = os.read(p.stderr.fileno(), readsize)
            stderr += dat
            if live:
                sys.stdout.write(dat)
            if dat == '':
                # EOF on stderr; stop selecting on it.
                rpipes.remove(p.stderr)
        # only break out if we've emptied the pipes, or there is nothing to
        # read from and the process has finished.
        if (not rpipes or not rfd) and p.poll() is not None:
            break
        # Calling wait while there are still pipes to read can cause a lock
        elif not rpipes and p.poll() is None:
            p.wait()

    return p.returncode, stdout, stderr
| gpl-3.0 |
zcbenz/cefode-chromium | third_party/mesa/MesaLib/src/mesa/main/APIspec.py | 48 | 21602 | #!/usr/bin/python
#
# Copyright (C) 2009 Chia-I Wu <olv@0xlab.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
A parser for APIspec.
"""
class SpecError(Exception):
    """Error raised when the APIspec file is malformed or inconsistent."""
class Spec(object):
    """A Spec is an abstraction of the API spec.

    It indexes the <template> and <api> children of the parsed APIspec
    XML document and locates the <api> marked implementation="true".
    """
    def __init__(self, doc):
        # doc is a parsed XML document (libxml2 doc -- see main()).
        self.doc = doc
        self.spec_node = doc.getRootElement()
        self.tmpl_nodes = {}   # template name -> <template> node
        self.api_nodes = {}    # API name -> <api> node
        self.impl_node = None  # the <api> flagged as the implementation
        # parse <apispec>
        node = self.spec_node.children
        while node:
            if node.type == "element":
                if node.name == "template":
                    self.tmpl_nodes[node.prop("name")] = node
                elif node.name == "api":
                    self.api_nodes[node.prop("name")] = node
                else:
                    raise SpecError("unexpected node %s in apispec" %
                            node.name)
            node = node.next
        # find an implementation
        for name, node in self.api_nodes.iteritems():
            if node.prop("implementation") == "true":
                self.impl_node = node
                break
        if not self.impl_node:
            raise SpecError("unable to find an implementation")
    def get_impl(self):
        """Return the implementation."""
        return API(self, self.impl_node)
    def get_api(self, name):
        """Return an API."""
        return API(self, self.api_nodes[name])
class API(object):
    """An API consists of categories and functions.

    Functions are realized by combining each <function> element with the
    <template> it references in the owning Spec.
    """
    def __init__(self, spec, api_node):
        self.name = api_node.prop("name")
        self.is_impl = (api_node.prop("implementation") == "true")
        self.categories = []
        self.functions = []
        # parse <api>
        func_nodes = []
        node = api_node.children
        while node:
            if node.type == "element":
                if node.name == "category":
                    cat = node.prop("name")
                    self.categories.append(cat)
                elif node.name == "function":
                    func_nodes.append(node)
                else:
                    raise SpecError("unexpected node %s in api" % node.name)
            node = node.next
        # realize functions
        for func_node in func_nodes:
            tmpl_node = spec.tmpl_nodes[func_node.prop("template")]
            try:
                func = Function(tmpl_node, func_node, self.is_impl,
                                self.categories)
            except SpecError, e:
                # Re-raise with the function name for easier debugging.
                func_name = func_node.prop("name")
                raise SpecError("failed to parse %s: %s" % (func_name, e))
            self.functions.append(func)
    def match(self, func, conversions={}):
        """Find a matching function in the API.

        Returns (match, need_conv); match is None when nothing matched.
        """
        match = None
        need_conv = False
        for f in self.functions:
            matched, conv = f.match(func, conversions)
            if matched:
                match = f
                need_conv = conv
            # exact match
            # NOTE(review): this break sits at loop level, so the search
            # also stops after the first *non*-matching function (need_conv
            # still False) -- confirm the intended indentation.
            if not need_conv:
                break
        return (match, need_conv)
class Function(object):
    """Parse and realize a <template> node.

    A Function combines a <template> (the prototype and parameter
    descriptions) with a <function> element (name, prefix, GLtype and
    vector-expansion settings) from an <api>.
    """

    def __init__(self, tmpl_node, func_node, force_skip_desc=False, categories=[]):
        self.tmpl_name = tmpl_node.prop("name")
        self.direction = tmpl_node.prop("direction")

        self.name = func_node.prop("name")
        self.prefix = func_node.prop("default_prefix")
        self.is_external = (func_node.prop("external") == "true")

        if force_skip_desc:
            self._skip_desc = True
        else:
            self._skip_desc = (func_node.prop("skip_desc") == "true")

        self._categories = categories

        # these attributes decide how the template is realized
        self._gltype = func_node.prop("gltype")
        if func_node.hasProp("vector_size"):
            self._vector_size = int(func_node.prop("vector_size"))
        else:
            self._vector_size = 0
        self._expand_vector = (func_node.prop("expand_vector") == "true")

        self.return_type = "void"
        param_nodes = []

        # find <proto>
        proto_node = tmpl_node.children
        while proto_node:
            if proto_node.type == "element" and proto_node.name == "proto":
                break
            proto_node = proto_node.next
        if not proto_node:
            raise SpecError("no proto")

        # and parse it
        node = proto_node.children
        while node:
            if node.type == "element":
                if node.name == "return":
                    self.return_type = node.prop("type")
                elif node.name == "param" or node.name == "vector":
                    if self.support_node(node):
                        # make sure the node is not hidden when expanding
                        if not (self._expand_vector and
                                (node.prop("hide_if_expanded") == "true")):
                            param_nodes.append(node)
                else:
                    raise SpecError("unexpected node %s in proto" % node.name)
            node = node.next

        self._init_params(param_nodes)
        self._init_descs(tmpl_node, param_nodes)

    def __str__(self):
        return "%s %s%s(%s)" % (self.return_type, self.prefix, self.name,
                                self.param_string(True))

    def _init_params(self, param_nodes):
        """Parse and initialize parameters."""
        self.params = []

        for param_node in param_nodes:
            size = self.param_node_size(param_node)

            # when no expansion, vector is just like param
            if param_node.name == "param" or not self._expand_vector:
                param = Parameter(param_node, self._gltype, size)
                self.params.append(param)
                continue

            if not size or size > param_node.lsCountNode():
                # Bug fix: this message used to read ``param.name``, which
                # refers to the parameter of a *previous* iteration (and is
                # unbound when the first node is a vector).  Use the name
                # of the node that actually failed.
                raise SpecError("could not expand %s with unknown or "
                                "mismatch sizes" % param_node.prop("name"))

            # expand the vector into its named components
            expanded_params = []
            child = param_node.children
            while child:
                if (child.type == "element" and child.name == "param" and
                        self.support_node(child)):
                    expanded_params.append(Parameter(child, self._gltype))
                    if len(expanded_params) == size:
                        break
                child = child.next
            # just in case that lsCountNode counts unknown nodes
            if len(expanded_params) < size:
                raise SpecError("not enough named parameters")

            self.params.extend(expanded_params)

    def _init_descs(self, tmpl_node, param_nodes):
        """Parse and initialize parameter descriptions."""
        self.checker = Checker()
        if self._skip_desc:
            return

        node = tmpl_node.children
        while node:
            if node.type == "element" and node.name == "desc":
                if self.support_node(node):
                    # parse <desc>
                    desc = Description(node, self._categories)
                    self.checker.add_desc(desc)
            node = node.next

        self.checker.validate(self, param_nodes)

    def support_node(self, node):
        """Return true if a node is in the supported category."""
        return (not node.hasProp("category") or
                node.prop("category") in self._categories)

    def get_param(self, name):
        """Return the named parameter, or None."""
        for param in self.params:
            if param.name == name:
                return param
        return None

    def param_node_size(self, param):
        """Return the size of a vector (0 for a plain param)."""
        if param.name != "vector":
            return 0

        size = param.prop("size")
        if size.isdigit():
            size = int(size)
        else:
            size = 0
        if not size:
            # fall back to the function-level vector_size attribute
            size = self._vector_size
        if not size and self._expand_vector:
            # return the number of named parameters
            size = param.lsCountNode()
        return size

    def param_string(self, declaration):
        """Return the C code of the parameters.

        With declaration=True, include types (or "void" if empty)."""
        args = []
        if declaration:
            for param in self.params:
                sep = "" if param.type.endswith("*") else " "
                args.append("%s%s%s" % (param.type, sep, param.name))
            if not args:
                args.append("void")
        else:
            for param in self.params:
                args.append(param.name)
        return ", ".join(args)

    def match(self, other, conversions={}):
        """Return true if the functions match, probably with a conversion.

        Returns (matched, need_conv)."""
        if (self.tmpl_name != other.tmpl_name or
                self.return_type != other.return_type or
                len(self.params) != len(other.params)):
            return (False, False)

        need_conv = False
        for i in xrange(len(self.params)):
            src = other.params[i]
            dst = self.params[i]
            if (src.is_vector != dst.is_vector or src.size != dst.size):
                return (False, False)
            if src.type != dst.type:
                if dst.base_type() in conversions.get(src.base_type(), []):
                    need_conv = True
                else:
                    # unable to convert
                    return (False, False)
        return (True, need_conv)
class Parameter(object):
    """A single parameter of a function prototype."""

    def __init__(self, param_node, gltype=None, size=0):
        self.is_vector = (param_node.name == "vector")
        self.name = param_node.prop("name")
        self.size = size
        declared = param_node.prop("type")
        if gltype:
            # Substitute the placeholder GLtype with the concrete type.
            declared = declared.replace("GLtype", gltype)
        elif declared.find("GLtype") != -1:
            raise SpecError("parameter %s has unresolved type" % self.name)
        self.type = declared

    def base_type(self):
        """Return the base GL type by stripping qualifiers."""
        gl_tokens = [tok for tok in self.type.split(" ") if tok.startswith("GL")]
        return gl_tokens[0]
class Checker(object):
    """A checker is the collection of all descriptions on the same level.
    Descriptions of the same parameter are concatenated.
    """
    def __init__(self):
        self.switches = {}          # param name -> list of Description
        self.switch_constants = {}  # param name -> shared attrs of its descs
    def add_desc(self, desc):
        """Add a description."""
        # TODO allow index to vary
        const_attrs = ["index", "error", "convert", "size_str"]
        if desc.name not in self.switches:
            self.switches[desc.name] = []
            self.switch_constants[desc.name] = {}
            for attr in const_attrs:
                self.switch_constants[desc.name][attr] = None
        # some attributes, like error code, should be the same for all descs
        consts = self.switch_constants[desc.name]
        for attr in const_attrs:
            if getattr(desc, attr) is not None:
                if (consts[attr] is not None and
                        consts[attr] != getattr(desc, attr)):
                    raise SpecError("mismatch %s for %s" % (attr, desc.name))
                consts[attr] = getattr(desc, attr)
        self.switches[desc.name].append(desc)
    def validate(self, func, param_nodes):
        """Validate the checker against a function.

        Invalid or no-op descriptions are dropped; returns False if any
        switch ends up with no possible values."""
        tmp = Checker()
        for switch in self.switches.itervalues():
            valid_descs = []
            for desc in switch:
                if desc.validate(func, param_nodes):
                    valid_descs.append(desc)
            # no possible values
            if not valid_descs:
                return False
            for desc in valid_descs:
                if not desc._is_noop:
                    tmp.add_desc(desc)
        # keep only the surviving descriptions
        self.switches = tmp.switches
        self.switch_constants = tmp.switch_constants
        return True
    def flatten(self, name=None):
        """Return a flat list of all descriptions of the named parameter."""
        flat_list = []
        for switch in self.switches.itervalues():
            for desc in switch:
                if not name or desc.name == name:
                    flat_list.append(desc)
                # recurse into dependent descriptions
                flat_list.extend(desc.checker.flatten(name))
        return flat_list
    def always_check(self, name):
        """Return true if the parameter is checked in all possible paths."""
        if name in self.switches:
            return True
        # a param is always checked if any of the switch always checks it
        for switch in self.switches.itervalues():
            # a switch always checks it if all of the descs always check it
            always = True
            for desc in switch:
                if not desc.checker.always_check(name):
                    always = False
                    break
            if always:
                return True
        return False
    def _c_switch(self, name, indent="\t"):
        """Output C switch-statement for the named parameter, for debug."""
        switch = self.switches.get(name, [])
        # make sure there are valid values
        need_switch = False
        for desc in switch:
            if desc.values:
                need_switch = True
        if not need_switch:
            return []
        stmts = []
        var = switch[0].name
        if switch[0].index >= 0:
            # checking one element of a vector parameter
            var += "[%d]" % switch[0].index
        stmts.append("switch (%s) { /* assume GLenum */" % var)
        for desc in switch:
            if desc.values:
                for val in desc.values:
                    stmts.append("case %s:" % val)
                # emit nested switches for dependent descriptions
                for dep_name in desc.checker.switches.iterkeys():
                    dep_stmts = [indent + s for s in desc.checker._c_switch(dep_name, indent)]
                    stmts.extend(dep_stmts)
                stmts.append(indent + "break;")
        stmts.append("default:")
        stmts.append(indent + "ON_ERROR(%s);" % switch[0].error);
        stmts.append(indent + "break;")
        stmts.append("}")
        return stmts
    def dump(self, indent="\t"):
        """Dump the descriptions in C code."""
        stmts = []
        for name in self.switches.iterkeys():
            c_switch = self._c_switch(name)
            print "\n".join(c_switch)
class Description(object):
    """A description describes a parameter and its relationship with other
    parameters.
    """
    def __init__(self, desc_node, categories=[]):
        self._categories = categories
        self._is_noop = False
        self.name = desc_node.prop("name")
        self.index = -1
        self.error = desc_node.prop("error") or "GL_INVALID_ENUM"
        # vector_size may be C code
        self.size_str = desc_node.prop("vector_size")
        self._has_enum = False  # true once a GL_* value/range is seen
        self.values = []
        dep_nodes = []
        # parse <desc>
        valid_names = ["value", "range", "desc"]
        node = desc_node.children
        while node:
            if node.type == "element":
                if node.name in valid_names:
                    # ignore nodes that require unsupported categories
                    if (node.prop("category") and
                        node.prop("category") not in self._categories):
                        node = node.next
                        continue
                else:
                    raise SpecError("unexpected node %s in desc" % node.name)
                if node.name == "value":
                    val = node.prop("name")
                    if not self._has_enum and val.startswith("GL_"):
                        self._has_enum = True
                    self.values.append(val)
                elif node.name == "range":
                    first = int(node.prop("from"))
                    last = int(node.prop("to"))
                    base = node.prop("base") or ""
                    if not self._has_enum and base.startswith("GL_"):
                        self._has_enum = True
                    # expand range
                    for i in xrange(first, last + 1):
                        self.values.append("%s%d" % (base, i))
                else: # dependent desc
                    dep_nodes.append(node)
            node = node.next
        # default to convert if there is no enum
        self.convert = not self._has_enum
        if desc_node.hasProp("convert"):
            self.convert = (desc_node.prop("convert") == "true")
        self._init_deps(dep_nodes)
    def _init_deps(self, dep_nodes):
        """Parse and initialize dependents."""
        self.checker = Checker()
        for dep_node in dep_nodes:
            # recursion!
            dep = Description(dep_node, self._categories)
            self.checker.add_desc(dep)
    def _search_param_node(self, param_nodes, name=None):
        """Search the template parameters for the named node.

        Returns (node, index); index >= 0 when the name matched a named
        component inside a <vector>."""
        param_node = None
        param_index = -1
        if not name:
            name = self.name
        for node in param_nodes:
            if name == node.prop("name"):
                param_node = node
            elif node.name == "vector":
                child = node.children
                idx = 0
                while child:
                    if child.type == "element" and child.name == "param":
                        if name == child.prop("name"):
                            param_node = node
                            param_index = idx
                            break
                    idx += 1
                    child = child.next
                if param_node:
                    break
        return (param_node, param_index)
    def _find_final(self, func, param_nodes):
        """Find the final parameter.

        Returns (valid, param, param_index); param is None when the
        description is valid but refers to a vanished parameter."""
        param = func.get_param(self.name)
        param_index = -1
        # the described param is not in the final function
        if not param:
            # search the template parameters
            node, index = self._search_param_node(param_nodes)
            if not node:
                raise SpecError("invalid desc %s in %s" %
                        (self.name, func.name))
            # a named parameter of a vector
            if index >= 0:
                param = func.get_param(node.prop("name"))
                param_index = index
            elif node.name == "vector":
                # must be an expanded vector, check its size
                if self.size_str and self.size_str.isdigit():
                    size = int(self.size_str)
                    expanded_size = func.param_node_size(node)
                    if size != expanded_size:
                        return (False, None, -1)
        # otherwise, it is a valid, but no-op, description
        return (True, param, param_index)
    def validate(self, func, param_nodes):
        """Validate a description against certain function."""
        if self.checker.switches and not self.values:
            raise SpecError("no valid values for %s" % self.name)
        valid, param, param_index = self._find_final(func, param_nodes)
        if not valid:
            return False
        # the description is valid, but the param is gone
        # mark it no-op so that it will be skipped
        if not param:
            self._is_noop = True
            return True
        if param.is_vector:
            # if param was known, this should have been done in __init__
            if self._has_enum:
                self.size_str = "1"
            # size mismatch
            if (param.size and self.size_str and self.size_str.isdigit() and
                param.size != int(self.size_str)):
                return False
        elif self.size_str:
            # only vector accepts vector_size
            raise SpecError("vector_size is invalid for %s" % param.name)
        if not self.checker.validate(func, param_nodes):
            return False
        # update the description
        self.name = param.name
        self.index = param_index
        return True
def main():
    """Self-test: parse APIspec.xml and realize the GLES APIs."""
    import libxml2
    filename = "APIspec.xml"
    apinames = ["GLES1.1", "GLES2.0"]
    # validate against the DTD and strip ignorable whitespace
    doc = libxml2.readFile(filename, None,
            libxml2.XML_PARSE_DTDLOAD +
            libxml2.XML_PARSE_DTDVALID +
            libxml2.XML_PARSE_NOBLANKS)
    spec = Spec(doc)
    impl = spec.get_impl()
    for apiname in apinames:
        spec.get_api(apiname)
    doc.freeDoc()
    print "%s is successfully parsed" % filename
if __name__ == "__main__":
    main()
| bsd-3-clause |
subutai/cortipy | tests/integration/compare_test.py | 5 | 2373 | # The MIT License (MIT)
#
# Copyright (c) 2015 Numenta, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This test verifies that compare correctly does the call to Cortical.io's
API and gets a dictionary of distances
"""
import cortipy
import unittest
class CompareTest(unittest.TestCase):
  """Requires CORTICAL_API_KEY to be set"""

  def testCompare(self):
    """
    Tests client.compare(). Asserts the returned object is a dictionary
    containing a numeric entry for each expected distance metric.
    """
    client = cortipy.CorticalClient(useCache=False)
    bitmap1 = client.getBitmap("one")["fingerprint"]["positions"]
    bitmap2 = client.getBitmap("two")["fingerprint"]["positions"]
    distances = client.compare(bitmap1, bitmap2)

    types = ["cosineSimilarity", "euclideanDistance", "jaccardDistance",
             "overlappingAll", "overlappingLeftRight", "overlappingRightLeft",
             "sizeLeft", "sizeRight", "weightedScoring"]

    self.assertIsInstance(distances, dict,
      "The returned object is not a dictionary")
    for t in types:
      # Check presence first, then that the value is numeric.  The type
      # check previously reused the presence message by copy-paste.
      self.assertIn(t, distances,
        "No \'{}\' field in the distances".format(t))
      self.assertIsInstance(distances[t], (float, int),
        "The \'{}\' distance is not a number".format(t))
if __name__ == '__main__':
unittest.main()
| mit |
0x7678/youtube-dl | youtube_dl/extractor/daum.py | 16 | 2754 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
class DaumIE(InfoExtractor):
    # Matches both clip-view and short /v/ URLs on tvpot.daum.net.
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
    IE_NAME = 'daum.net'
    _TESTS = [{
        'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
        'info_dict': {
            'id': '52554690',
            'ext': 'mp4',
            'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
            'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
            'upload_date': '20130831',
            'duration': 3868,
        },
    }, {
        'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
        'only_matching': True,
    }, {
        'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
        webpage = self._download_webpage(canonical_url, video_id)
        # The page embeds the videofarm player; its vid= is the real id.
        full_id = self._search_regex(
            r'<iframe src="http://videofarm.daum.net/controller/video/viewer/Video.html\?.*?vid=(.+?)[&"]',
            webpage, 'full id')
        query = compat_urllib_parse.urlencode({'vid': full_id})
        # Clip metadata (title, description, duration, upload date).
        info = self._download_xml(
            'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,
            'Downloading video info')
        urls = self._download_xml(
            'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
            video_id, 'Downloading video formats info')
        formats = []
        # One extra request per profile resolves the actual media URL.
        for format_el in urls.findall('result/output_list/output_list'):
            profile = format_el.attrib['profile']
            format_query = compat_urllib_parse.urlencode({
                'vid': full_id,
                'profile': profile,
            })
            url_doc = self._download_xml(
                'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
                video_id, note='Downloading video data for %s format' % profile)
            format_url = url_doc.find('result/url').text
            formats.append({
                'url': format_url,
                'format_id': profile,
            })
        return {
            'id': video_id,
            'title': info.find('TITLE').text,
            'formats': formats,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': info.find('CONTENTS').text,
            'duration': int(info.find('DURATION').text),
            # REGDTTM is a timestamp; the first 8 chars are YYYYMMDD.
            'upload_date': info.find('REGDTTM').text[:8],
        }
| unlicense |
fadushin/esp8266 | micropython/uhttpd/demo/stats_api.py | 1 | 6615 | #
# Copyright (c) dushin.net All Rights Reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of dushin.net nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import uhttpd
import network
class Handler:
def __init__(self):
pass
#
# callbacks
#
def get(self, api_request):
components = api_request['context']
full_response = self.get_response()
return self.extract_response(components, full_response)
#
# internal operations
#
def get_response(self):
return {
'sys': self.get_sys_stats(),
'machine': self.get_machine_stats(),
'esp': self.get_esp_stats(),
'gc': self.get_gc_stats(),
'network': self.get_network_stats()
}
def get_sys_stats(self):
import sys
implementation = sys.implementation
return {
'byteorder': sys.byteorder,
'implementation': {
'name': implementation[0],
'version': implementation[1]
},
'maxsize': sys.maxsize,
'modules': self.keys(sys.modules),
'path': sys.path,
'platform': sys.platform,
'version': sys.version,
'vfs': self.get_vfs_stats()
}
def get_vfs_stats(self):
import os
stats = os.statvfs('/')
return {
'frsize': stats[1],
'blocks': stats[2],
'bavail': stats[4]
}
def keys(self, pairs):
ret = []
for k, v in pairs.items():
ret.append(k)
return ret
def get_machine_stats(self):
import machine
import ubinascii
id = "0x{}".format(ubinascii.hexlify(machine.unique_id()).decode().upper())
return {
'freq': machine.freq(),
'unique_id': id
}
def get_esp_stats(self):
import esp
return {
'flash_id': esp.flash_id(),
'flash_size': esp.flash_size(),
'free_mem': esp.freemem()
}
def get_gc_stats(self):
import gc
return {
'mem_alloc': gc.mem_alloc(),
'mem_free': gc.mem_free()
}
def get_network_stats(self):
return {
'phy_mode': self.get_phy_mode(),
'sta': self.get_sta_stats(),
'ap': self.get_ap_stats()
}
def get_sta_stats(self):
sta = network.WLAN(network.STA_IF)
return self.get_wlan_stats(sta)
def get_ap_stats(self):
ap = network.WLAN(network.AP_IF)
wlan_stats = self.get_wlan_stats(ap)
wlan_stats['config'] = self.get_wlan_config_stats(ap)
return wlan_stats
def get_wlan_stats(self, wlan):
if wlan.active():
ip, subnet, gateway, dns = wlan.ifconfig()
return {
'status': self.get_wlan_status(wlan),
'ifconfig': {
'ip': ip,
'subnet': subnet,
'gateway': gateway,
'dns': dns
}
}
else:
return {}
def get_wlan_config_stats(self, ap):
    """Access-point configuration: MAC, ESSID, channel, visibility, auth.

    Note: the MAC is hex-encoded lower-case with an '0x' prefix, unlike
    the upper-cased machine unique id produced elsewhere in this handler.
    """
    import ubinascii
    return {
        'mac': "0x{}".format(ubinascii.hexlify(ap.config('mac')).decode()),
        'essid': ap.config('essid'),
        'channel': ap.config('channel'),
        'hidden': ap.config('hidden'),
        'authmode': self.get_auth_mode(ap.config('authmode'))
    }
def get_auth_mode(self, mode):
    """Symbolic name for a network.AUTH_* constant, or a fallback string."""
    # Checked in the same order as the original if/elif chain; attribute
    # access stays lazy, so a match avoids touching later constants.
    for name in ("AUTH_OPEN", "AUTH_WEP", "AUTH_WPA_PSK",
                 "AUTH_WPA2_PSK", "AUTH_WPA_WPA2_PSK"):
        if mode == getattr(network, name):
            return name
    return "Unknown auth_mode: {}".format(mode)
def get_wlan_status(self, wlan):
    """Symbolic name for wlan.status(), or a fallback string."""
    status = wlan.status()
    # Same comparison order as the original if/elif chain; lazy lookups.
    for name in ("STAT_IDLE", "STAT_CONNECTING", "STAT_WRONG_PASSWORD",
                 "STAT_NO_AP_FOUND", "STAT_CONNECT_FAIL", "STAT_GOT_IP"):
        if status == getattr(network, name):
            return name
    return "Unknown wlan status: {}".format(status)
def extract_response(self, components, js):
    """Walk *js* down the list of path *components*.

    Raises uhttpd.NotFoundException (HTTP 404) as soon as a component
    is missing; an empty component list returns *js* unchanged.
    """
    node = js
    for key in components:
        if key not in node:
            raise uhttpd.NotFoundException(components)
        node = node[key]
    return node
def get_phy_mode(self):
    """Symbolic name for the radio's PHY mode, or a fallback string."""
    phy_mode = network.phy_mode()
    for name in ("MODE_11B", "MODE_11G", "MODE_11N"):
        if phy_mode == getattr(network, name):
            return name
    return "Unknown phy_mode: {}".format(phy_mode)
| bsd-2-clause |
ai0/sanic | examples/try_everything.py | 1 | 2254 | import os
from sanic import Sanic
from sanic.log import log
from sanic.response import json, text, file
from sanic.exceptions import ServerError
app = Sanic(__name__)


# Simplest async handler: JSON body for GET /.
@app.route("/")
async def test_async(request):
    return json({"test": True})
# Same JSON response from a plain (non-async) handler, on GET or POST —
# this file registers both coroutine and regular functions as routes.
@app.route("/sync", methods=['GET', 'POST'])
def test_sync(request):
    return json({"test": True})
# Path parameters: <name> arrives as a string, <id:int> is converted to int.
@app.route("/dynamic/<name>/<id:int>")
def test_params(request, name, id):
    return text("yeehaww {} {}".format(name, id))
# Deliberately raises ServerError to exercise the @app.exception handler.
@app.route("/exception")
def exception(request):
    raise ServerError("It's dead jim")
# Demonstrates awaiting inside a handler (artificial 5-second delay).
@app.route("/await")
async def test_await(request):
    import asyncio
    await asyncio.sleep(5)
    return text("I'm feeling sleepy")
# Serves this project's setup.py back as a file response.
@app.route("/file")
async def test_file(request):
    return await file(os.path.abspath("setup.py"))
# ----------------------------------------------- #
#  Exceptions
# ----------------------------------------------- #

# Converts any ServerError raised by a handler into a JSON error payload,
# echoing the exception's own status code.  (The `exception` parameter
# shadows the /exception route handler name above — harmless here.)
@app.exception(ServerError)
async def test(request, exception):
    return json({"exception": "{}".format(exception), "status": exception.status_code}, status=exception.status_code)
# ----------------------------------------------- #
#  Read from request
# ----------------------------------------------- #

# Echoes back a posted JSON body.
@app.route("/json")
def post_json(request):
    return json({"received": True, "message": request.json})
# Echoes back posted form data.
@app.route("/form")
def post_form(request):
    # Renamed from a second `post_json` definition: the duplicate name
    # shadowed the /json handler at module level (flake8 F811).  Routing
    # was unaffected — Sanic registers handlers at decoration time — but
    # the module attribute `post_json` silently pointed at this function.
    return json({"received": True, "form_data": request.form, "test": request.form.get('test')})
# Exposes the parsed query arguments alongside the raw query string and URL.
@app.route("/query_string")
def query_string(request):
    return json({"parsed": True, "args": request.args, "url": request.url, "query_string": request.query_string})
# ----------------------------------------------- #
#  Run Server
# ----------------------------------------------- #

# Lifecycle listeners: log each server start/stop phase.
@app.listener('before_server_start')
def before_start(app, loop):
    log.info("SERVER STARTING")
# Fires once the server has started accepting connections.
@app.listener('after_server_start')
def after_start(app, loop):
    log.info("OH OH OH OH OHHHHHHHH")
# Fires when shutdown begins, before connections are closed.
@app.listener('before_server_stop')
def before_stop(app, loop):
    log.info("SERVER STOPPING")
# Fires after the server has fully shut down.
@app.listener('after_server_stop')
def after_stop(app, loop):
    log.info("TRIED EVERYTHING")
# Entry point: serve on all interfaces, port 8000, with debug mode on.
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=8000, debug=True)
| mit |
bdh1011/wau | venv/lib/python2.7/site-packages/jsonschema/exceptions.py | 35 | 6973 | from collections import defaultdict, deque
import itertools
import pprint
import textwrap
from jsonschema import _utils
from jsonschema.compat import PY3, iteritems
# Validator names whose failures carry little information about the real
# problem (a failing branch inside anyOf/oneOf); used by by_relevance().
WEAK_MATCHES = frozenset(["anyOf", "oneOf"])
# Validators that should be prioritised by by_relevance(); currently none.
STRONG_MATCHES = frozenset()

# Sentinel distinguishing "argument not supplied" from an explicit None.
_unset = _utils.Unset()
class _Error(Exception):
    """Shared machinery for ValidationError and SchemaError.

    Carries the failing message plus enough context (instance, schema,
    both data- and schema-side paths, parent/child links) to render a
    verbose report.  Fields that were not supplied hold the ``_unset``
    sentinel so ``_set`` can fill them in later without clobbering
    explicit values.
    """

    def __init__(
        self,
        message,
        validator=_unset,
        path=(),
        cause=None,
        context=(),
        validator_value=_unset,
        instance=_unset,
        schema=_unset,
        schema_path=(),
        parent=None,
    ):
        # Forward everything to Exception so args/pickling stay sane.
        super(_Error, self).__init__(
            message,
            validator,
            path,
            cause,
            context,
            validator_value,
            instance,
            schema,
            schema_path,
            parent,
        )
        self.message = message
        # The "relative" paths are relative to self.parent; the
        # absolute_* properties below walk up the parent chain.
        self.path = self.relative_path = deque(path)
        self.schema_path = self.relative_schema_path = deque(schema_path)
        self.context = list(context)
        # Mirror PEP 3134 exception chaining (__cause__) on py3.
        self.cause = self.__cause__ = cause
        self.validator = validator
        self.validator_value = validator_value
        self.instance = instance
        self.schema = schema
        self.parent = parent
        # Adopt every sub-error so their absolute paths resolve through us.
        for error in context:
            error.parent = self

    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.message)

    def __str__(self):
        # py2-only path; replaced by __unicode__ under PY3 (see below).
        return unicode(self).encode("utf-8")

    def __unicode__(self):
        essential_for_verbose = (
            self.validator, self.validator_value, self.instance, self.schema,
        )
        # Without the full context we can only show the bare message.
        if any(m is _unset for m in essential_for_verbose):
            return self.message
        pschema = pprint.pformat(self.schema, width=72)
        pinstance = pprint.pformat(self.instance, width=72)
        return self.message + textwrap.dedent("""
            Failed validating %r in schema%s:
            %s
            On instance%s:
            %s
            """.rstrip()
        ) % (
            self.validator,
            _utils.format_as_index(list(self.relative_schema_path)[:-1]),
            _utils.indent(pschema),
            _utils.format_as_index(self.relative_path),
            _utils.indent(pinstance),
        )

    if PY3:
        __str__ = __unicode__

    @classmethod
    def create_from(cls, other):
        # Clone (possibly across ValidationError/SchemaError) via _contents.
        return cls(**other._contents())

    @property
    def absolute_path(self):
        # Path from the outermost instance, accumulated through parents.
        parent = self.parent
        if parent is None:
            return self.relative_path
        path = deque(self.relative_path)
        path.extendleft(reversed(parent.absolute_path))
        return path

    @property
    def absolute_schema_path(self):
        # Schema-side analogue of absolute_path.
        parent = self.parent
        if parent is None:
            return self.relative_schema_path
        path = deque(self.relative_schema_path)
        path.extendleft(reversed(parent.absolute_schema_path))
        return path

    def _set(self, **kwargs):
        # Fill in only the fields still holding the _unset sentinel.
        for k, v in iteritems(kwargs):
            if getattr(self, k) is _unset:
                setattr(self, k, v)

    def _contents(self):
        attrs = (
            "message", "cause", "context", "validator", "validator_value",
            "path", "schema_path", "instance", "schema", "parent",
        )
        return dict((attr, getattr(self, attr)) for attr in attrs)
class ValidationError(_Error):
    """An instance failed to validate against its schema."""
    pass
class SchemaError(_Error):
    """The schema itself is malformed."""
    pass
class RefResolutionError(Exception):
    """A JSON reference ($ref) could not be resolved."""
    pass
class UnknownType(Exception):
    """A schema referenced a type name the validator does not know."""

    def __init__(self, type, instance, schema):
        self.type = type
        self.instance = instance
        self.schema = schema

    def __str__(self):
        # py2-only path; replaced by __unicode__ under PY3 (see below).
        return unicode(self).encode("utf-8")

    def __unicode__(self):
        pschema = pprint.pformat(self.schema, width=72)
        pinstance = pprint.pformat(self.instance, width=72)
        return textwrap.dedent("""
            Unknown type %r for validator with schema:
            %s
            While checking instance:
            %s
            """.rstrip()
        ) % (self.type, _utils.indent(pschema), _utils.indent(pinstance))

    if PY3:
        __str__ = __unicode__
class FormatError(Exception):
    """A value failed a format checker (e.g. 'date-time', 'email')."""

    def __init__(self, message, cause=None):
        super(FormatError, self).__init__(message, cause)
        self.message = message
        # Mirror PEP 3134 chaining so py3 tracebacks show the cause.
        self.cause = self.__cause__ = cause

    def __str__(self):
        # py2-only path; replaced by __unicode__ under PY3 (see below).
        return self.message.encode("utf-8")

    def __unicode__(self):
        return self.message

    if PY3:
        __str__ = __unicode__
class ErrorTree(object):
    """
    ErrorTrees make it easier to check which validations failed.

    The tree mirrors the structure of the validated instance: indexing
    into the tree follows indexing into the instance, and ``errors``
    maps failed validator names at each level to their error objects.
    """

    # Sentinel until __init__ sees at least one error with an instance.
    _instance = _unset

    def __init__(self, errors=()):
        self.errors = {}
        # Child subtrees are created lazily on first access.
        self._contents = defaultdict(self.__class__)
        for error in errors:
            # Walk down the error's path, materialising subtrees as needed.
            container = self
            for element in error.path:
                container = container[element]
            container.errors[error.validator] = error
            self._instance = error.instance

    def __contains__(self, index):
        """
        Check whether ``instance[index]`` has any errors.
        """
        return index in self._contents

    def __getitem__(self, index):
        """
        Retrieve the child tree one level down at the given ``index``.
        If the index is not in the instance that this tree corresponds to and
        is not known by this tree, whatever error would be raised by
        ``instance.__getitem__`` will be propagated (usually this is some
        subclass of :class:`LookupError`.
        """
        # Probe the instance first so a bogus index raises the natural
        # LookupError instead of silently creating an empty subtree.
        if self._instance is not _unset and index not in self:
            self._instance[index]
        return self._contents[index]

    def __setitem__(self, index, value):
        self._contents[index] = value

    def __iter__(self):
        """
        Iterate (non-recursively) over the indices in the instance with errors.
        """
        return iter(self._contents)

    def __len__(self):
        """
        Same as :attr:`total_errors`.
        """
        return self.total_errors

    def __repr__(self):
        return "<%s (%s total errors)>" % (self.__class__.__name__, len(self))

    @property
    def total_errors(self):
        """
        The total number of errors in the entire tree, including children.
        """
        child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
        return len(self.errors) + child_errors
def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
    """Build a sort key ranking errors by relevance.

    Higher-ranked errors have shallower paths, are not produced by a
    "weak" validator (anyOf/oneOf branches), and are produced by a
    "strong" validator.
    """
    def relevance(error):
        kind = error.validator
        return -len(error.path), kind not in weak, kind in strong
    return relevance


# Default relevance key used by best_match().
relevance = by_relevance()
def best_match(errors, key=relevance):
    """Pick the most relevant error, or None when *errors* is empty.

    Takes the maximum over the top-level errors, then repeatedly
    descends into the least-relevant sub-error context to surface the
    underlying cause.
    """
    errors = iter(errors)
    first = next(errors, None)
    if first is None:
        return None
    winner = max(itertools.chain([first], errors), key=key)
    while winner.context:
        winner = min(winner.context, key=key)
    return winner
| mit |
allanlei/django-oauthentication | sample/myapp/settings.py | 1 | 6582 | # Django settings for myapp project.
import os
# Sample project settings: debug mode everywhere, template debugging on.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'default.db', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
# NOTE(review): protocol-relative URL pointing at a separate static server
# on port 8100 — presumably the sample's dev setup; confirm before reuse.
STATIC_URL = '//localhost:8100/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): hard-coded key is fine for a sample project only; a real
# deployment must generate its own and keep it out of source control.
SECRET_KEY = 'jr9ncvyg_bh6yi&u9k539!jlm!5(&x)6_^8u0r@-llo-ruol-v'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'myapp.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'myapp.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'myapp',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    # Root logger: everything at INFO and above goes to the console.
    'root': {
        'level': 'INFO',
        'handlers': ['console'],
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'simple': {
            'format': '%(levelname)s (%(module)s): %(message)s',
        },
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'request': {
            'format': '[%(asctime)s] %(message)s',
            'datefmt' : '%Y-%m-%d %H:%M:%S',
        },
    },
    'handlers': {
        # Emails ADMINS; active only when DEBUG is False (see the filter).
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages",
    'django.core.context_processors.request',
)

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)

INSTALLED_APPS += (
    # 'oauthentication',
)

# NOTE(review): this rebinding deliberately prepends the OAuth backends to
# the default tuple defined just above — the earlier assignment is not dead
# code, it supplies the trailing ModelBackend element here.
AUTHENTICATION_BACKENDS = (
    'oauthentication.contrib.oauth2.google.backends.EmailAuthenticationBackend',
    'oauthentication.contrib.oauth2.google.backends.GPlusAuthenticationBackend',
) + AUTHENTICATION_BACKENDS
| bsd-3-clause |
gauribhoite/personfinder | env/site-packages/django/contrib/gis/sitemaps/kml.py | 398 | 2544 | from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.sitemaps import Sitemap
from django.core import urlresolvers
from django.db import models
class KMLSitemap(Sitemap):
    """
    A minimal hook to produce KML sitemaps.
    """
    geo_format = 'kml'

    def __init__(self, locations=None):
        # If no locations specified, then we try to build for
        # every model in installed applications.
        self.locations = self._build_kml_sources(locations)

    def _build_kml_sources(self, sources):
        """
        Goes through the given sources and returns a 3-tuple of
        the application label, module name, and field name of every
        GeometryField encountered in the sources.

        If no sources are provided, every installed model is scanned.
        """
        kml_sources = []
        if sources is None:
            sources = apps.get_models()
        for source in sources:
            if isinstance(source, models.base.ModelBase):
                # A model class: emit one entry per geometry field it has.
                for field in source._meta.fields:
                    if isinstance(field, GeometryField):
                        kml_sources.append((source._meta.app_label,
                                            source._meta.model_name,
                                            field.name))
            elif isinstance(source, (list, tuple)):
                # A pre-built (app_label, model_name, field_name) triple.
                if len(source) != 3:
                    raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
                kml_sources.append(source)
            else:
                raise TypeError('KML Sources must be a model or a 3-tuple.')
        return kml_sources

    def get_urls(self, page=1, site=None, protocol=None):
        """
        This method is overridden so the appropriate `geo_format` attribute
        is placed on each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site, protocol=protocol)
        for url in urls:
            url['geo_format'] = self.geo_format
        return urls

    def items(self):
        # Sitemap items are the (app_label, model_name, field_name) triples.
        return self.locations

    def location(self, obj):
        # Reverse to the KML/KMZ serving view for this model's field.
        return urlresolvers.reverse('django.contrib.gis.sitemaps.views.%s' % self.geo_format,
                                    kwargs={'label': obj[0],
                                            'model': obj[1],
                                            'field_name': obj[2],
                                            }
                                    )
class KMZSitemap(KMLSitemap):
    """KMLSitemap variant routed to the KMZ (compressed KML) view."""
    geo_format = 'kmz'
| apache-2.0 |
asrar7787/Test-Frontools | node_modules/node-gyp/gyp/buildbot/buildbot_run.py | 1467 | 4228 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import os
import shutil
import subprocess
import sys
# Directory layout, derived from this script's location:
# ROOT_DIR / TRUNK_DIR (gyp checkout) / BUILDBOT_DIR (this script).
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
# Locally-built CMake (see PrepareCmake) lives beside the checkout.
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
  """Wrapper around subprocess.call which treats errors as build exceptions."""
  # stdin comes from /dev/null so children can never block waiting for input.
  with open(os.devnull) as devnull_fd:
    retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
  if retcode != 0:
    # Buildbot annotator tag marking this step as an exception, then abort.
    print '@@@STEP_EXCEPTION@@@'
    sys.exit(1)
def PrepareCmake():
  """Build CMake 2.8.8 since the version in Precise is 2.8.7."""
  if os.environ['BUILDBOT_CLOBBER'] == '1':
    print '@@@BUILD_STEP Clobber CMake checkout@@@'
    shutil.rmtree(CMAKE_DIR)

  # We always build CMake 2.8.8, so no need to do anything
  # if the directory already exists.
  if os.path.isdir(CMAKE_DIR):
    return

  print '@@@BUILD_STEP Initialize CMake checkout@@@'
  os.mkdir(CMAKE_DIR)

  print '@@@BUILD_STEP Sync CMake@@@'
  # Shallow, single-branch clone of the v2.8.8 tag only.
  CallSubProcess(
      ['git', 'clone',
       '--depth', '1',
       '--single-branch',
       '--branch', 'v2.8.8',
       '--',
       'git://cmake.org/cmake.git',
       CMAKE_DIR],
      cwd=CMAKE_DIR)

  print '@@@BUILD_STEP Build CMake@@@'
  CallSubProcess(
      ['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
      cwd=CMAKE_DIR)
  CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
0 for sucesss, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
  """Top-level bot entry: clean out/, run per-platform format tests, exit."""
  # Dump out/ directory.
  print '@@@BUILD_STEP cleanup@@@'
  print 'Removing %s...' % OUT_DIR
  shutil.rmtree(OUT_DIR, ignore_errors=True)
  print 'Done.'

  # Each GypTestFormat call returns 0/1; failures accumulate in retcode.
  retcode = 0
  if sys.platform.startswith('linux'):
    retcode += GypTestFormat('ninja')
    retcode += GypTestFormat('make')
    PrepareCmake()
    retcode += GypTestFormat('cmake')
  elif sys.platform == 'darwin':
    retcode += GypTestFormat('ninja')
    retcode += GypTestFormat('xcode')
    retcode += GypTestFormat('make')
  elif sys.platform == 'win32':
    retcode += GypTestFormat('ninja')
    # Only the win64 bot runs the MSVS generators.
    if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
      retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
                               msvs_version='2013',
                               tests=[
                                   r'test\generator-output\gyptest-actions.py',
                                   r'test\generator-output\gyptest-relocate.py',
                                   r'test\generator-output\gyptest-rules.py'])
      retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
  else:
    raise Exception('Unknown platform')
  if retcode:
    # TODO(bradnelson): once the annotator supports a postscript (section for
    # after the build proper that could be used for cumulative failures),
    # use that instead of this. This isolates the final return value so
    # that it isn't misattributed to the last stage.
    print '@@@BUILD_STEP failures@@@'
    sys.exit(retcode)
# Script entry point: the bots invoke this file with no arguments.
if __name__ == '__main__':
  GypBuild()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.