repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
lovetox/gajim | src/common/jingle_content.py | 1 | 9231 | ##
## Copyright (C) 2006 Gajim Team
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
"""
Handles Jingle contents (XEP 0166)
"""
import os
from common import gajim
import nbxmpp
from common.jingle_transport import JingleTransportIBB
from .jingle_xtls import SELF_SIGNED_CERTIFICATE
from .jingle_xtls import load_cert_file
contents = {}
def get_jingle_content(node):
namespace = node.getNamespace()
if namespace in contents:
return contents[namespace](node)
class JingleContentSetupException(Exception):
"""
Exception that should be raised when a content fails to setup.
"""
class JingleContent(object):
"""
An abstraction of content in Jingle sessions
"""
def __init__(self, session, transport):
self.session = session
self.transport = transport
# will be filled by JingleSession.add_content()
# don't uncomment these lines, we will catch more buggy code then
# (a JingleContent not added to session shouldn't send anything)
#self.creator = None
#self.name = None
self.accepted = False
self.sent = False
self.negotiated = False
self.media = None
self.senders = 'both' #FIXME
self.allow_sending = True # Used for stream direction, attribute 'senders'
self.callbacks = {
# these are called when *we* get stanzas
'content-accept': [self.__on_transport_info,
self.__on_content_accept],
'content-add': [self.__on_transport_info],
'content-modify': [],
'content-reject': [],
'content-remove': [],
'description-info': [],
'security-info': [],
'session-accept': [self.__on_transport_info,
self.__on_content_accept],
'session-info': [],
'session-initiate': [self.__on_transport_info],
'session-terminate': [],
'transport-info': [self.__on_transport_info],
'transport-replace': [self.__on_transport_replace],
'transport-accept': [],
'transport-reject': [],
'iq-result': [],
'iq-error': [],
# these are called when *we* sent these stanzas
'content-accept-sent': [self.__fill_jingle_stanza,
self.__on_content_accept],
'content-add-sent': [self.__fill_jingle_stanza],
'session-initiate-sent': [self.__fill_jingle_stanza],
'session-accept-sent': [self.__fill_jingle_stanza,
self.__on_content_accept],
'session-terminate-sent': [],
}
def is_ready(self):
return self.accepted and not self.sent
def __on_content_accept(self, stanza, content, error, action):
self.on_negotiated()
def on_negotiated(self):
if self.accepted:
self.negotiated = True
self.session.content_negotiated(self.media)
def add_remote_candidates(self, candidates):
"""
Add a list of candidates to the list of remote candidates
"""
self.transport.remote_candidates = candidates
def on_stanza(self, stanza, content, error, action):
"""
Called when something related to our content was sent by peer
"""
if action in self.callbacks:
for callback in self.callbacks[action]:
callback(stanza, content, error, action)
def __on_transport_replace(self, stanza, content, error, action):
content.addChild(node=self.transport.make_transport())
def __on_transport_info(self, stanza, content, error, action):
"""
Got a new transport candidate
"""
candidates = self.transport.parse_transport_stanza(
content.getTag('transport'))
if candidates:
self.add_remote_candidates(candidates)
def __content(self, payload=[]):
"""
Build a XML content-wrapper for our data
"""
return nbxmpp.Node('content',
attrs={'name': self.name, 'creator': self.creator},
payload=payload)
def send_candidate(self, candidate):
"""
Send a transport candidate for a previously defined transport.
"""
content = self.__content()
content.addChild(node=self.transport.make_transport([candidate]))
self.session.send_transport_info(content)
def send_error_candidate(self):
"""
Sends a candidate-error when we can't connect to a candidate.
"""
content = self.__content()
tp = self.transport.make_transport(add_candidates=False)
tp.addChild(name='candidate-error')
content.addChild(node=tp)
self.session.send_transport_info(content)
def send_description_info(self):
content = self.__content()
self._fill_content(content)
self.session.send_description_info(content)
def __fill_jingle_stanza(self, stanza, content, error, action):
"""
Add our things to session-initiate stanza
"""
self._fill_content(content)
self.sent = True
content.addChild(node=self.transport.make_transport())
def _fill_content(self, content):
description_node = nbxmpp.simplexml.Node(
tag=nbxmpp.NS_JINGLE_FILE_TRANSFER + ' description')
if self.session.werequest:
simode = nbxmpp.simplexml.Node(tag='request')
else:
simode = nbxmpp.simplexml.Node(tag='offer')
file_tag = simode.setTag('file')
if self.file_props.name:
node = nbxmpp.simplexml.Node(tag='name')
node.addData(self.file_props.name)
file_tag.addChild(node=node)
if self.file_props.date:
node = nbxmpp.simplexml.Node(tag='date')
node.addData(self.file_props.date)
file_tag.addChild(node=node)
if self.file_props.size:
node = nbxmpp.simplexml.Node(tag='size')
node.addData(self.file_props.size)
file_tag.addChild(node=node)
if self.file_props.type_ == 'r':
if self.file_props.hash_:
h = file_tag.addChild('hash', attrs={
'algo': self.file_props.algo}, namespace=nbxmpp.NS_HASHES,
payload=self.file_props.hash_)
else:
# if the file is less than 10 mb, then it is small
# lets calculate it right away
if self.file_props.size < 10000000 and not \
self.file_props.hash_:
h = self._calcHash()
if h:
file_tag.addChild(node=h)
pjid = gajim.get_jid_without_resource(self.session.peerjid)
file_info = {'name' : self.file_props.name,
'file-name' : self.file_props.file_name,
'hash' : self.file_props.hash_,
'size' : self.file_props.size,
'date' : self.file_props.date,
'peerjid' : pjid
}
self.session.connection.set_file_info(file_info)
desc = file_tag.setTag('desc')
if self.file_props.desc:
desc.setData(self.file_props.desc)
description_node.addChild(node=simode)
if self.use_security:
security = nbxmpp.simplexml.Node(
tag=nbxmpp.NS_JINGLE_XTLS + ' security')
certpath = os.path.join(gajim.MY_CERT_DIR, SELF_SIGNED_CERTIFICATE)\
+ '.cert'
cert = load_cert_file(certpath)
if cert:
try:
digest_algo = cert.get_signature_algorithm().decode('utf-8'
).split('With')[0]
except AttributeError as e:
# Old py-OpenSSL is missing get_signature_algorithm
digest_algo = "sha256"
security.addChild('fingerprint').addData(cert.digest(
digest_algo).decode('utf-8'))
for m in ('x509', ): # supported authentication methods
method = nbxmpp.simplexml.Node(tag='method')
method.setAttr('name', m)
security.addChild(node=method)
content.addChild(node=security)
content.addChild(node=description_node)
def destroy(self):
self.callbacks = None
del self.session.contents[(self.creator, self.name)]
| gpl-3.0 |
cliffe/SecGen | modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/collections.py | 28 | 25919 | __all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from itertools import imap as _imap
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
return dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next # update link_prev[NEXT]
link_next[0] = link_prev # update link_next[PREV]
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root[1] # start at the first node
while curr is not root:
yield curr[2] # yield the curr[KEY]
curr = curr[1] # move to next node
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root[0] # start at the last node
while curr is not root:
yield curr[2] # yield the curr[KEY]
curr = curr[0] # move to previous node
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
dict.clear(self)
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) pairs in od'
for k in self:
yield (k, self[k])
update = MutableMapping.update
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(_imap(_eq, self, other))
return dict.__eq__(self, other)
def __ne__(self, other):
'od.__ne__(y) <==> od!=y'
return not self == other
# -- the following methods support python 3.x style dictionary views --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
################################################################################
### namedtuple
################################################################################
_class_template = '''\
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return '{typename}({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
__dict__ = property(_asdict)
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
{field_defs}
'''
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessable by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split()
field_names = map(str, field_names)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not all(c.isalnum() or c=='_' for c in name)
or _iskeyword(name)
or not name
or name[0].isdigit()
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain '
'alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with '
'a number: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name)
for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
if verbose:
print class_definition
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
exec class_definition in namespace
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
########################################################################
### Counter
########################################################################
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super(Counter, self).__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.iteritems()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in the some of original untouched counts
# being mixed-in with all of the other counts for a mismash that
# doesn't have a straight-forward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
super(Counter, self).update(iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super(Counter, self).__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
if __name__ == '__main__':
# verify that instances can be pickled
from cPickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
__slots__ = ()
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5/7.):
print p
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
__slots__ = ()
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print Point(11, 22)._replace(x=100)
Point3D = namedtuple('Point3D', Point._fields + ('z',))
print Point3D.__doc__
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print TestResults(*doctest.testmod())
| gpl-3.0 |
ryfeus/lambda-packs | Spacy/source2.7/spacy/lang/entity_rules.py | 3 | 1732 | # coding: utf8
from __future__ import unicode_literals
from ..symbols import ORTH, ENT_TYPE, LOWER
ENT_ID = "ent_id"
ENTITY_RULES = []
for name, tag, patterns in [
("Reddit", "PRODUCT", [[{LOWER: "reddit"}]]),
("Linux", "PRODUCT", [[{LOWER: "linux"}]]),
("Haskell", "PRODUCT", [[{LOWER: "haskell"}]]),
("HaskellCurry", "PERSON", [[{LOWER: "haskell"}, {LOWER: "curry"}]]),
("Javascript", "PRODUCT", [[{LOWER: "javascript"}]]),
("CSS", "PRODUCT", [[{LOWER: "css"}], [{LOWER: "css3"}]]),
("HTML", "PRODUCT", [[{LOWER: "html"}], [{LOWER: "html5"}]]),
("Python", "PRODUCT", [[{ORTH: "Python"}]]),
("Ruby", "PRODUCT", [[{ORTH: "Ruby"}]]),
("spaCy", "PRODUCT", [[{LOWER: "spacy"}]]),
("displaCy", "PRODUCT", [[{LOWER: "displacy"}]]),
("Digg", "PRODUCT", [[{LOWER: "digg"}]]),
("FoxNews", "ORG", [[{LOWER: "foxnews"}], [{LOWER: "fox"}, {LOWER: "news"}]]),
("Google", "ORG", [[{LOWER: "google"}]]),
("Mac", "PRODUCT", [[{LOWER: "mac"}]]),
("Wikipedia", "PRODUCT", [[{LOWER: "wikipedia"}]]),
("Windows", "PRODUCT", [[{LOWER: "windows"}]]),
("Dell", "ORG", [[{LOWER: "dell"}]]),
("Facebook", "ORG", [[{LOWER: "facebook"}]]),
("Blizzard", "ORG", [[{LOWER: "blizzard"}]]),
("Ubuntu", "ORG", [[{LOWER: "ubuntu"}]]),
("YouTube", "PRODUCT", [[{LOWER: "youtube"}]]),]:
ENTITY_RULES.append({ENT_ID: name, 'attrs': {ENT_TYPE: tag}, 'patterns': patterns})
FALSE_POSITIVES = [
[{ORTH: "Shit"}],
[{ORTH: "Weed"}],
[{ORTH: "Cool"}],
[{ORTH: "Btw"}],
[{ORTH: "Bah"}],
[{ORTH: "Bullshit"}],
[{ORTH: "Lol"}],
[{ORTH: "Yo"}, {LOWER: "dawg"}],
[{ORTH: "Yay"}],
[{ORTH: "Ahh"}],
[{ORTH: "Yea"}],
[{ORTH: "Bah"}]
]
| mit |
mdxs/test-ttf-on-gae | main/lib/itsdangerous.py | 626 | 31840 | # -*- coding: utf-8 -*-
"""
itsdangerous
~~~~~~~~~~~~
A module that implements various functions to deal with untrusted
sources. Mainly useful for web applications.
:copyright: (c) 2014 by Armin Ronacher and the Django Software Foundation.
:license: BSD, see LICENSE for more details.
"""
import sys
import hmac
import zlib
import time
import base64
import hashlib
import operator
from datetime import datetime
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip
text_type = unicode
int_to_byte = chr
number_types = (int, long, float)
else:
from functools import reduce
izip = zip
text_type = str
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
number_types = (int, float)
try:
import simplejson as json
except ImportError:
import json
class _CompactJSON(object):
"""Wrapper around simplejson that strips whitespace.
"""
def loads(self, payload):
return json.loads(payload)
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':'))
compact_json = _CompactJSON()
# 2011/01/01 in UTC
EPOCH = 1293840000
def want_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type):
s = s.encode(encoding, errors)
return s
def is_text_serializer(serializer):
"""Checks wheather a serializer generates text or binary."""
return isinstance(serializer.dumps({}), text_type)
# Starting with 3.3 the standard library has a c-implementation for
# constant time string compares.
_builtin_constant_time_compare = getattr(hmac, 'compare_digest', None)
def constant_time_compare(val1, val2):
"""Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match. Do
not use this function for anything else than comparision with known
length targets.
This is should be implemented in C in order to get it completely right.
"""
if _builtin_constant_time_compare is not None:
return _builtin_constant_time_compare(val1, val2)
len_eq = len(val1) == len(val2)
if len_eq:
result = 0
left = val1
else:
result = 1
left = val2
for x, y in izip(bytearray(left), bytearray(val2)):
result |= x ^ y
return result == 0
class BadData(Exception):
    """Base class for every error this module raises when bad data of
    any sort is encountered.
    .. versionadded:: 0.15
    """
    message = None

    def __init__(self, message):
        super(BadData, self).__init__(message)
        self.message = message

    def __str__(self):
        return text_type(self.message)

    if PY2:
        # On Python 2, __str__ must return bytes; keep the text version as
        # __unicode__ and encode it for __str__.
        __unicode__ = __str__

        def __str__(self):
            return self.__unicode__().encode('utf-8')


class BadPayload(BadData):
    """Raised when a payload is loaded without checking the signature
    first and an exception happened as a result of that. The exception
    that originally caused it is stored on :attr:`original_error`.
    This can also happen with a :class:`JSONWebSignatureSerializer` that
    is subclassed and uses a different serializer for the payload than
    the expected one.
    .. versionadded:: 0.15
    """

    def __init__(self, message, original_error=None):
        super(BadPayload, self).__init__(message)
        #: If available, the error that indicates why the payload
        #: was not valid. This might be `None`.
        self.original_error = original_error


class BadSignature(BadData):
    """Raised when a signature does not match. As of itsdangerous 0.14
    the exception instances carry helpful attributes. You can also catch
    the baseclass :exc:`BadData` further down.
    """

    def __init__(self, message, payload=None):
        super(BadSignature, self).__init__(message)
        #: The payload that failed the signature test. In some
        #: situations you might still want to inspect this, even if
        #: you know it was tampered with.
        #:
        #: .. versionadded:: 0.14
        self.payload = payload


class BadTimeSignature(BadSignature):
    """Raised for time based signatures that fail. A subclass of
    :class:`BadSignature`, so catching that also catches this.
    """

    def __init__(self, message, payload=None, date_signed=None):
        super(BadTimeSignature, self).__init__(message, payload)
        #: If the signature expired this exposes the date of when the
        #: signature was created. This can be helpful in order to
        #: tell the user how long a link has been gone stale.
        #:
        #: .. versionadded:: 0.14
        self.date_signed = date_signed


class BadHeader(BadSignature):
    """Raised when a signed header is invalid in some form. Only occurs
    for serializers that carry a header along with the signature.
    .. versionadded:: 0.24
    """

    def __init__(self, message, payload=None, header=None,
                 original_error=None):
        super(BadHeader, self).__init__(message, payload)
        #: If the header is actually available but just malformed it
        #: might be stored here.
        self.header = header
        #: If available, the error that indicates why the payload
        #: was not valid. This might be `None`.
        self.original_error = original_error


class SignatureExpired(BadTimeSignature):
    """The signature's timestamp is older than the required max_age.
    A subclass of :exc:`BadTimeSignature`, so the baseclass can be used
    for catching the error.
    """
def base64_encode(string):
    """URL-safe base64 encode a single bytestring.

    Unicode input is tolerated (it is coerced to bytes first) and the
    trailing ``=`` padding is removed so the result is safe for URLs.
    """
    encoded = base64.urlsafe_b64encode(want_bytes(string))
    return encoded.strip(b'=')
def base64_decode(string):
    """URL-safe base64 decode a single value back to a bytestring.

    Unicode input is tolerated and the ``=`` padding that
    :func:`base64_encode` stripped is restored before decoding.
    """
    string = want_bytes(string, encoding='ascii', errors='ignore')
    padding = b'=' * (-len(string) % 4)
    return base64.urlsafe_b64decode(string + padding)
def int_to_bytes(num):
    """Serialize a non-negative integer to its big-endian bytes.

    Zero serializes to the empty bytestring.
    """
    assert num >= 0
    chunks = []
    # Peel off the least significant byte until nothing is left, then
    # flip the order to get big-endian output.
    while num:
        chunks.append(int_to_byte(num & 0xff))
        num >>= 8
    chunks.reverse()
    return b''.join(chunks)
def bytes_to_int(bytestr):
    """Deserialize a big-endian bytestring into an integer.

    The empty bytestring deserializes to zero.
    """
    value = 0
    for octet in bytearray(bytestr):
        value = (value << 8) | octet
    return value
class SigningAlgorithm(object):
    """Base class for signature algorithms.

    Subclasses have to implement :meth:`get_signature`; verification is
    provided on top of it using a constant time comparison.
    """

    def get_signature(self, key, value):
        """Returns the signature for the given key and value"""
        raise NotImplementedError()

    def verify_signature(self, key, value, sig):
        """Verifies the given signature matches the expected signature"""
        expected = self.get_signature(key, value)
        return constant_time_compare(sig, expected)
class NoneAlgorithm(SigningAlgorithm):
    """Degenerate algorithm that performs no signing at all.

    The signature it produces is always the empty bytestring.
    """

    def get_signature(self, key, value):
        return b''
class HMACAlgorithm(SigningAlgorithm):
    """Signature generation backed by HMAC."""

    #: The digest method to use with the MAC algorithm. This defaults to
    #: sha1 but any other function from the hashlib module may be used.
    default_digest_method = staticmethod(hashlib.sha1)

    def __init__(self, digest_method=None):
        if digest_method is None:
            digest_method = self.default_digest_method
        self.digest_method = digest_method

    def get_signature(self, key, value):
        """Return the raw HMAC digest of *value* keyed with *key*."""
        hasher = hmac.new(key, msg=value, digestmod=self.digest_method)
        return hasher.digest()
class Signer(object):
    """This class can sign bytes and unsign it and validate the signature
    provided.
    Salt can be used to namespace the hash, so that a signed string is only
    valid for a given namespace. Leaving this at the default value or re-using
    a salt value across different parts of your application where the same
    signed value in one part can mean something different in another part
    is a security risk.
    See :ref:`the-salt` for an example of what the salt is doing and how you
    can utilize it.
    .. versionadded:: 0.14
        `key_derivation` and `digest_method` were added as arguments to the
        class constructor.
    .. versionadded:: 0.18
        `algorithm` was added as an argument to the class constructor.
    """
    #: The digest method to use for the signer. This defaults to sha1 but can
    #: be changed for any other function in the hashlib module.
    #:
    #: .. versionchanged:: 0.14
    default_digest_method = staticmethod(hashlib.sha1)
    #: Controls how the key is derived. The default is Django style
    #: concatenation. Possible values are ``concat``, ``django-concat``
    #: and ``hmac``. This is used for deriving a key from the secret key
    #: with an added salt.
    #:
    #: .. versionadded:: 0.14
    default_key_derivation = 'django-concat'
    def __init__(self, secret_key, salt=None, sep='.', key_derivation=None,
                 digest_method=None, algorithm=None):
        self.secret_key = want_bytes(secret_key)
        self.sep = sep
        # Default salt namespaces the derived key to this library.
        self.salt = 'itsdangerous.Signer' if salt is None else salt
        if key_derivation is None:
            key_derivation = self.default_key_derivation
        self.key_derivation = key_derivation
        if digest_method is None:
            digest_method = self.default_digest_method
        self.digest_method = digest_method
        # The signing algorithm defaults to HMAC with the chosen digest.
        if algorithm is None:
            algorithm = HMACAlgorithm(self.digest_method)
        self.algorithm = algorithm
    def derive_key(self):
        """This method is called to derive the key. If you're unhappy with
        the default key derivation choices you can override them here.
        Keep in mind that the key derivation in itsdangerous is not intended
        to be used as a security method to make a complex key out of a short
        password. Instead you should use large random secret keys.
        """
        salt = want_bytes(self.salt)
        if self.key_derivation == 'concat':
            # digest(salt || secret_key)
            return self.digest_method(salt + self.secret_key).digest()
        elif self.key_derivation == 'django-concat':
            # Django style: digest(salt || b'signer' || secret_key)
            return self.digest_method(salt + b'signer' +
                                      self.secret_key).digest()
        elif self.key_derivation == 'hmac':
            # HMAC the salt using the secret key as the MAC key.
            mac = hmac.new(self.secret_key, digestmod=self.digest_method)
            mac.update(salt)
            return mac.digest()
        elif self.key_derivation == 'none':
            return self.secret_key
        else:
            raise TypeError('Unknown key derivation method')
    def get_signature(self, value):
        """Returns the signature for the given value"""
        value = want_bytes(value)
        key = self.derive_key()
        sig = self.algorithm.get_signature(key, value)
        # Signatures are transported URL-safe base64 encoded.
        return base64_encode(sig)
    def sign(self, value):
        """Signs the given string."""
        return value + want_bytes(self.sep) + self.get_signature(value)
    def verify_signature(self, value, sig):
        """Verifies the signature for the given value."""
        key = self.derive_key()
        try:
            sig = base64_decode(sig)
        except Exception:
            # Not even valid base64 -> it cannot possibly match.
            return False
        return self.algorithm.verify_signature(key, value, sig)
    def unsign(self, signed_value):
        """Unsigns the given string."""
        signed_value = want_bytes(signed_value)
        sep = want_bytes(self.sep)
        if sep not in signed_value:
            raise BadSignature('No %r found in value' % self.sep)
        # rsplit so values that themselves contain the separator still work.
        value, sig = signed_value.rsplit(sep, 1)
        if self.verify_signature(value, sig):
            return value
        raise BadSignature('Signature %r does not match' % sig,
                           payload=value)
    def validate(self, signed_value):
        """Just validates the given signed value. Returns `True` if the
        signature exists and is valid, `False` otherwise."""
        try:
            self.unsign(signed_value)
            return True
        except BadSignature:
            return False
class TimestampSigner(Signer):
    """Works like the regular :class:`Signer` but also records the time
    of the signing and can be used to expire signatures. The unsign
    method can raise a :exc:`SignatureExpired` exception if the unsigning
    failed because the signature is expired. This exception is a subclass
    of :exc:`BadSignature`.
    """

    def get_timestamp(self):
        """Returns the current timestamp. This implementation returns the
        seconds since 1/1/2011. The function must return an integer.
        """
        return int(time.time() - EPOCH)

    def timestamp_to_datetime(self, ts):
        """Used to convert the timestamp from `get_timestamp` into a
        datetime object.
        """
        return datetime.utcfromtimestamp(ts + EPOCH)

    def sign(self, value):
        """Signs the given string and also attaches a time information."""
        value = want_bytes(value)
        timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
        sep = want_bytes(self.sep)
        # The signature covers value *and* timestamp, so the timestamp
        # cannot be tampered with independently.
        value = value + sep + timestamp
        return value + sep + self.get_signature(value)

    def unsign(self, value, max_age=None, return_timestamp=False):
        """Works like the regular :meth:`~Signer.unsign` but can also
        validate the time. See the base docstring of the class for
        the general behavior. If `return_timestamp` is set to `True`
        the timestamp of the signature will be returned as naive
        :class:`datetime.datetime` object in UTC.
        """
        try:
            result = Signer.unsign(self, value)
            sig_error = None
        except BadSignature as e:
            # Keep the payload around so a more precise time-based error
            # can be raised further down.
            sig_error = e
            result = e.payload or b''
        sep = want_bytes(self.sep)
        # If there is no timestamp in the result there is something
        # seriously wrong. In case there was a signature error, we raise
        # that one directly, otherwise we have a weird situation in which
        # we shouldn't have come except someone uses a time-based serializer
        # on non-timestamp data, so catch that.
        # (Fixed: was the non-idiomatic ``if not sep in result``.)
        if sep not in result:
            if sig_error:
                raise sig_error
            raise BadTimeSignature('timestamp missing', payload=result)
        value, timestamp = result.rsplit(sep, 1)
        try:
            timestamp = bytes_to_int(base64_decode(timestamp))
        except Exception:
            timestamp = None
        # Signature is *not* okay. Raise a proper error now that we have
        # split the value and the timestamp.
        if sig_error is not None:
            raise BadTimeSignature(text_type(sig_error), payload=value,
                                   date_signed=timestamp)
        # Signature was okay but the timestamp is actually not there or
        # malformed. Should not happen, but well. We handle it nonetheless
        if timestamp is None:
            raise BadTimeSignature('Malformed timestamp', payload=value)
        # Check timestamp is not older than max_age
        if max_age is not None:
            age = self.get_timestamp() - timestamp
            if age > max_age:
                raise SignatureExpired(
                    'Signature age %s > %s seconds' % (age, max_age),
                    payload=value,
                    date_signed=self.timestamp_to_datetime(timestamp))
        if return_timestamp:
            return value, self.timestamp_to_datetime(timestamp)
        return value

    def validate(self, signed_value, max_age=None):
        """Just validates the given signed value. Returns `True` if the
        signature exists and is valid, `False` otherwise."""
        try:
            self.unsign(signed_value, max_age=max_age)
            return True
        except BadSignature:
            return False
class Serializer(object):
    """This class provides a serialization interface on top of the
    signer. It provides a similar API to json/pickle and other modules but is
    slightly differently structured internally. If you want to change the
    underlying implementation for parsing and loading you have to override the
    :meth:`load_payload` and :meth:`dump_payload` functions.
    This implementation uses simplejson if available for dumping and loading
    and will fall back to the standard library's json module if it's not
    available.
    Starting with 0.14 you do not need to subclass this class in order to
    switch out or customize the :class:`Signer`. You can instead also pass a
    different class to the constructor as well as keyword arguments as
    dictionary that should be forwarded::
        s = Serializer(signer_kwargs={'key_derivation': 'hmac'})
    .. versionchanged:: 0.14:
        The `signer` and `signer_kwargs` parameters were added to the
        constructor.
    """
    #: If a serializer module or class is not passed to the constructor
    #: this one is picked up. This currently defaults to :mod:`json`.
    default_serializer = json
    #: The default :class:`Signer` class that is being used by this
    #: serializer.
    #:
    #: .. versionadded:: 0.14
    default_signer = Signer
    def __init__(self, secret_key, salt=b'itsdangerous', serializer=None,
                 signer=None, signer_kwargs=None):
        self.secret_key = want_bytes(secret_key)
        self.salt = want_bytes(salt)
        if serializer is None:
            serializer = self.default_serializer
        self.serializer = serializer
        # Remember whether the serializer produces text so dumps() knows
        # to decode the signed bytes back into a string.
        self.is_text_serializer = is_text_serializer(serializer)
        if signer is None:
            signer = self.default_signer
        self.signer = signer
        self.signer_kwargs = signer_kwargs or {}
    def load_payload(self, payload, serializer=None):
        """Loads the encoded object. This function raises :class:`BadPayload`
        if the payload is not valid. The `serializer` parameter can be used to
        override the serializer stored on the class. The encoded payload is
        always byte based.
        """
        if serializer is None:
            serializer = self.serializer
            is_text = self.is_text_serializer
        else:
            is_text = is_text_serializer(serializer)
        try:
            if is_text:
                payload = payload.decode('utf-8')
            return serializer.loads(payload)
        except Exception as e:
            # Wrap every unserialization failure so callers only have to
            # deal with BadPayload; the cause is kept on original_error.
            raise BadPayload('Could not load the payload because an '
                'exception occurred on unserializing the data',
                original_error=e)
    def dump_payload(self, obj):
        """Dumps the encoded object. The return value is always a
        bytestring. If the internal serializer is text based the value
        will automatically be encoded to utf-8.
        """
        return want_bytes(self.serializer.dumps(obj))
    def make_signer(self, salt=None):
        """A method that creates a new instance of the signer to be used.
        The default implementation uses the :class:`Signer` baseclass.
        """
        if salt is None:
            salt = self.salt
        return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
    def dumps(self, obj, salt=None):
        """Returns a signed string serialized with the internal serializer.
        The return value can be either a byte or unicode string depending
        on the format of the internal serializer.
        """
        payload = want_bytes(self.dump_payload(obj))
        rv = self.make_signer(salt).sign(payload)
        if self.is_text_serializer:
            rv = rv.decode('utf-8')
        return rv
    def dump(self, obj, f, salt=None):
        """Like :meth:`dumps` but dumps into a file. The file handle has
        to be compatible with what the internal serializer expects.
        """
        f.write(self.dumps(obj, salt))
    def loads(self, s, salt=None):
        """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
        signature validation fails.
        """
        s = want_bytes(s)
        # Verify the signature first; only then unserialize the payload.
        return self.load_payload(self.make_signer(salt).unsign(s))
    def load(self, f, salt=None):
        """Like :meth:`loads` but loads from a file."""
        return self.loads(f.read(), salt)
    def loads_unsafe(self, s, salt=None):
        """Like :meth:`loads` but without verifying the signature. This is
        potentially very dangerous to use depending on how your serializer
        works. The return value is ``(signature_okay, payload)`` instead of
        just the payload. The first item will be a boolean that indicates
        if the signature is okay (``True``) or if it failed. This function
        never fails.
        Use it for debugging only and if you know that your serializer module
        is not exploitable (eg: do not use it with a pickle serializer).
        .. versionadded:: 0.15
        """
        return self._loads_unsafe_impl(s, salt)
    def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
                           load_payload_kwargs=None):
        """Lowlevel helper function to implement :meth:`loads_unsafe` in
        serializer subclasses.
        """
        try:
            return True, self.loads(s, salt=salt, **(load_kwargs or {}))
        except BadSignature as e:
            if e.payload is None:
                return False, None
            try:
                # Signature was bad but a payload is available; try to
                # unserialize it anyway and flag it as untrusted.
                return False, self.load_payload(e.payload,
                    **(load_payload_kwargs or {}))
            except BadPayload:
                return False, None
    def load_unsafe(self, f, *args, **kwargs):
        """Like :meth:`loads_unsafe` but loads from a file.
        .. versionadded:: 0.15
        """
        return self.loads_unsafe(f.read(), *args, **kwargs)
class TimedSerializer(Serializer):
    """Serializer variant that signs with a :class:`TimestampSigner`
    instead of the plain default :class:`Signer`.
    """

    default_signer = TimestampSigner

    def loads(self, s, max_age=None, return_timestamp=False, salt=None):
        """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
        signature validation fails. If a `max_age` is provided it will
        ensure the signature is not older than that time in seconds. In
        case the signature is outdated, :exc:`SignatureExpired` is raised
        which is a subclass of :exc:`BadSignature`. All arguments are
        forwarded to the signer's :meth:`~TimestampSigner.unsign` method.
        """
        signer = self.make_signer(salt)
        base64d, timestamp = signer.unsign(s, max_age, return_timestamp=True)
        payload = self.load_payload(base64d)
        if return_timestamp:
            return payload, timestamp
        return payload

    def loads_unsafe(self, s, max_age=None, salt=None):
        """Like :meth:`~Serializer.loads_unsafe` but with `max_age`."""
        return self._loads_unsafe_impl(s, salt,
                                       load_kwargs={'max_age': max_age},
                                       load_payload_kwargs={})
class JSONWebSignatureSerializer(Serializer):
    """This serializer implements JSON Web Signature (JWS) support. Only
    supports the JWS Compact Serialization.
    """
    # Supported signing algorithms, keyed by their JWS "alg" header value.
    jws_algorithms = {
        'HS256': HMACAlgorithm(hashlib.sha256),
        'HS384': HMACAlgorithm(hashlib.sha384),
        'HS512': HMACAlgorithm(hashlib.sha512),
        'none': NoneAlgorithm(),
    }
    #: The default algorithm to use for signature generation
    default_algorithm = 'HS256'
    default_serializer = compact_json
    def __init__(self, secret_key, salt=None, serializer=None,
                 signer=None, signer_kwargs=None, algorithm_name=None):
        Serializer.__init__(self, secret_key, salt, serializer,
                            signer, signer_kwargs)
        if algorithm_name is None:
            algorithm_name = self.default_algorithm
        self.algorithm_name = algorithm_name
        self.algorithm = self.make_algorithm(algorithm_name)
    def load_payload(self, payload, return_header=False):
        # The input here is "<b64 header>.<b64 payload>" -- the signature
        # segment was already stripped off by the signer's unsign().
        payload = want_bytes(payload)
        if b'.' not in payload:
            raise BadPayload('No "." found in value')
        base64d_header, base64d_payload = payload.split(b'.', 1)
        try:
            json_header = base64_decode(base64d_header)
        except Exception as e:
            raise BadHeader('Could not base64 decode the header because of '
                'an exception', original_error=e)
        try:
            json_payload = base64_decode(base64d_payload)
        except Exception as e:
            raise BadPayload('Could not base64 decode the payload because of '
                'an exception', original_error=e)
        try:
            # The header is always plain JSON, regardless of the serializer
            # configured for the payload.
            header = Serializer.load_payload(self, json_header,
                serializer=json)
        except BadData as e:
            raise BadHeader('Could not unserialize header because it was '
                'malformed', original_error=e)
        if not isinstance(header, dict):
            raise BadHeader('Header payload is not a JSON object',
                header=header)
        payload = Serializer.load_payload(self, json_payload)
        if return_header:
            return payload, header
        return payload
    def dump_payload(self, header, obj):
        # JWS compact form: base64(header) "." base64(payload).
        base64d_header = base64_encode(self.serializer.dumps(header))
        base64d_payload = base64_encode(self.serializer.dumps(obj))
        return base64d_header + b'.' + base64d_payload
    def make_algorithm(self, algorithm_name):
        # Resolve an "alg" name to a SigningAlgorithm instance.
        try:
            return self.jws_algorithms[algorithm_name]
        except KeyError:
            raise NotImplementedError('Algorithm not supported')
    def make_signer(self, salt=None, algorithm=None):
        if salt is None:
            salt = self.salt
        # JWS defines no salt concept, so skip key derivation entirely when
        # no salt is configured.  NOTE(review): salt was just defaulted
        # above, so 'none' applies only when self.salt is None as well --
        # confirm this is intended.
        key_derivation = 'none' if salt is None else None
        if algorithm is None:
            algorithm = self.algorithm
        return self.signer(self.secret_key, salt=salt, sep='.',
            key_derivation=key_derivation, algorithm=algorithm)
    def make_header(self, header_fields):
        header = header_fields.copy() if header_fields else {}
        header['alg'] = self.algorithm_name
        return header
    def dumps(self, obj, salt=None, header_fields=None):
        """Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It
        also allows for specifying additional fields to be included in the JWS
        Header.
        """
        header = self.make_header(header_fields)
        signer = self.make_signer(salt, self.algorithm)
        return signer.sign(self.dump_payload(header, obj))
    def loads(self, s, salt=None, return_header=False):
        """Reverse of :meth:`dumps`. If requested via `return_header` it will
        return a tuple of payload and header.
        """
        payload, header = self.load_payload(
            self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
            return_header=True)
        # Reject tokens signed with a different algorithm than configured.
        if header.get('alg') != self.algorithm_name:
            raise BadHeader('Algorithm mismatch', header=header,
                            payload=payload)
        if return_header:
            return payload, header
        return payload
    def loads_unsafe(self, s, salt=None, return_header=False):
        kwargs = {'return_header': return_header}
        return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
    """Works like the regular :class:`JSONWebSignatureSerializer` but also
    records the time of the signing and can be used to expire signatures.
    JWS currently does not specify this behavior but it mentions a possible
    extension like this in the spec. Expiry date is encoded into the header
    similarly as specified in `draft-ietf-oauth-json-web-token
    <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef>`_.
    The unsign method can raise a :exc:`SignatureExpired` exception if the
    unsigning failed because the signature is expired. This exception is a
    subclass of :exc:`BadSignature`.
    """
    #: Default token lifetime in seconds (one hour).
    DEFAULT_EXPIRES_IN = 3600
    def __init__(self, secret_key, expires_in=None, **kwargs):
        JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
        if expires_in is None:
            expires_in = self.DEFAULT_EXPIRES_IN
        self.expires_in = expires_in
    def make_header(self, header_fields):
        header = JSONWebSignatureSerializer.make_header(self, header_fields)
        # Record issue ("iat") and expiry ("exp") as integral seconds since
        # the UNIX epoch, matching the JWT draft's IntDate.
        iat = self.now()
        exp = iat + self.expires_in
        header['iat'] = iat
        header['exp'] = exp
        return header
    def loads(self, s, salt=None, return_header=False):
        payload, header = JSONWebSignatureSerializer.loads(
            self, s, salt, return_header=True)
        if 'exp' not in header:
            raise BadSignature('Missing expiry date', payload=payload)
        if not (isinstance(header['exp'], number_types)
                and header['exp'] > 0):
            raise BadSignature('expiry date is not an IntDate',
                               payload=payload)
        if header['exp'] < self.now():
            raise SignatureExpired('Signature expired', payload=payload,
                                   date_signed=self.get_issue_date(header))
        if return_header:
            return payload, header
        return payload
    def get_issue_date(self, header):
        # Returns None when "iat" is absent or not numeric.
        rv = header.get('iat')
        if isinstance(rv, number_types):
            return datetime.utcfromtimestamp(int(rv))
    def now(self):
        # Unlike TimestampSigner, JWT times use the real UNIX epoch.
        return int(time.time())
class URLSafeSerializerMixin(object):
    """Mixed in with a regular serializer it will attempt to zlib compress
    the string to make it shorter if necessary. It will also base64 encode
    the string so that it can safely be placed in a URL.
    """

    def load_payload(self, payload):
        """Base64 decode (and possibly zlib decompress) *payload*, then
        defer to the wrapped serializer's ``load_payload``.

        A leading ``'.'`` marks a payload that :meth:`dump_payload`
        compressed.
        """
        decompress = False
        if payload.startswith(b'.'):
            payload = payload[1:]
            decompress = True
        try:
            # Fix: this local was previously named ``json``, shadowing the
            # module-level ``json`` import.
            data = base64_decode(payload)
        except Exception as e:
            raise BadPayload('Could not base64 decode the payload because of '
                'an exception', original_error=e)
        if decompress:
            try:
                data = zlib.decompress(data)
            except Exception as e:
                raise BadPayload('Could not zlib decompress the payload before '
                    'decoding the payload', original_error=e)
        return super(URLSafeSerializerMixin, self).load_payload(data)

    def dump_payload(self, obj):
        """Serialize *obj*, compress when that actually helps, and base64
        encode the result (prefixing ``'.'`` when compressed).
        """
        data = super(URLSafeSerializerMixin, self).dump_payload(obj)
        is_compressed = False
        compressed = zlib.compress(data)
        # Only keep the compressed form when it is shorter even after
        # paying one byte for the '.' marker.
        if len(compressed) < (len(data) - 1):
            data = compressed
            is_compressed = True
        base64d = base64_encode(data)
        if is_compressed:
            base64d = b'.' + base64d
        return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
    """A :class:`Serializer` whose output is URL safe: it consists only
    of upper and lowercase letters of the alphabet as well as ``'_'``,
    ``'-'`` and ``'.'``.
    """
    default_serializer = compact_json


class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
    """A :class:`TimedSerializer` whose output is URL safe: it consists
    only of upper and lowercase letters of the alphabet as well as
    ``'_'``, ``'-'`` and ``'.'``.
    """
    default_serializer = compact_json
| apache-2.0 |
Permutatrix/servo | tests/wpt/web-platform-tests/webdriver/tests/support/asserts.py | 5 | 4859 | from webdriver import Element, WebDriverException
# WebDriver specification ID: dfn-error-response-data
# Maps each WebDriver error-code string to the HTTP status code a
# conforming remote end must use when sending that error.
errors = {
    "element click intercepted": 400,
    "element not selectable": 400,
    "element not interactable": 400,
    "insecure certificate": 400,
    "invalid argument": 400,
    "invalid cookie domain": 400,
    "invalid coordinates": 400,
    "invalid element state": 400,
    "invalid selector": 400,
    "invalid session id": 404,
    "javascript error": 500,
    "move target out of bounds": 500,
    "no such alert": 404,
    "no such cookie": 404,
    "no such element": 404,
    "no such frame": 404,
    "no such window": 404,
    "script timeout": 408,
    "session not created": 500,
    "stale element reference": 400,
    "timeout": 408,
    "unable to set cookie": 500,
    "unable to capture screen": 500,
    "unexpected alert open": 500,
    "unknown command": 404,
    "unknown error": 500,
    "unknown method": 405,
    "unsupported operation": 500,
}
# WebDriver specification ID: dfn-send-an-error
#
# > When required to send an error, with error code, a remote end must run the
# > following steps:
# >
# > 1. Let http status and name be the error response data for error code.
# > 2. Let message be an implementation-defined string containing a
# > human-readable description of the reason for the error.
# > 3. Let stacktrace be an implementation-defined string containing a stack
# > trace report of the active stack frames at the time when the error
# > occurred.
# > 4. Let data be a new JSON Object initialised with the following properties:
# >
# > error
# > name
# > message
# > message
# > stacktrace
# > stacktrace
# >
# > 5. Send a response with status and data as arguments.
def assert_error(response, error_code):
    """Verify that the provided wdclient.Response instance describes a
    valid error response as defined by `dfn-send-an-error` and the
    provided error code.

    :param response: wdclient.Response instance
    :param error_code: string value of the expected "error code"
    """
    expected_status = errors[error_code]
    assert response.status == expected_status
    assert "value" in response.body
    value = response.body["value"]
    assert value["error"] == error_code
    assert isinstance(value["message"], basestring)
    assert isinstance(value["stacktrace"], basestring)
def assert_success(response, value=None):
    """Verify that the provided wdclient.Response instance describes a
    successful (HTTP 200) response.

    :param response: wdclient.Response instance.
    :param value: Expected value of the response body, if any.
    :returns: the response body's "value" entry (or None if absent).
    """
    assert response.status == 200, str(response.error)
    body_value = response.body.get("value")
    if value is not None:
        assert response.body["value"] == value
    return body_value
def assert_dialog_handled(session, expected_text):
    """Assert that a dialog showing *expected_text* is no longer open.

    :param session: active WebDriver session.
    :param expected_text: text of the dialog that should have been handled.
    """
    result = session.transport.send("GET",
        "session/%s/alert/text" % session.session_id)
    # If there were any existing dialogs prior to the creation of this
    # fixture's dialog, then the "Get Alert Text" command will return
    # successfully. In that case, the text must be different than that
    # of this fixture's dialog.
    try:
        assert_error(result, "no such alert")
    except Exception:
        # Fix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt.
        assert (result.status == 200 and
                result.body["value"] != expected_text), (
                "Dialog with text '%s' was not handled." % expected_text)
def assert_same_element(session, a, b):
    """Verify that two element references describe the same element."""
    def reference_id(ref, label):
        # Accept either a serialized element (dict carrying the WebElement
        # identifier) or an Element instance, and return its id.
        if isinstance(ref, dict):
            assert Element.identifier in ref, \
                "%s value does not describe an element" % label
            return ref[Element.identifier]
        if isinstance(ref, Element):
            return ref.id
        raise AssertionError(
            "%s value is not a dictionary or web element" % label)

    a_id = reference_id(a, "Actual")
    b_id = reference_id(b, "Expected")
    if a_id == b_id:
        return

    message = ("Expected element references to describe the same element, " +
               "but they did not.")
    # Attempt to provide more information, accounting for possible errors such
    # as stale element references or not visible elements.
    try:
        a_markup = session.execute_script("return arguments[0].outerHTML;", args=(a,))
        b_markup = session.execute_script("return arguments[0].outerHTML;", args=(b,))
        message += " Actual: `%s`. Expected: `%s`." % (a_markup, b_markup)
    except WebDriverException:
        pass
    raise AssertionError(message)
| mpl-2.0 |
yamila-moreno/django | django/contrib/gis/geos/prototypes/prepared.py | 288 | 1214 | from ctypes import c_char
from django.contrib.gis.geos.libgeos import (
GEOM_PTR, PREPGEOM_PTR, GEOSFuncFactory,
)
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
# Prepared geometry constructor and destructors.
geos_prepare = GEOSFuncFactory('GEOSPrepare', argtypes=[GEOM_PTR], restype=PREPGEOM_PTR)
# Fix: the keyword was misspelled "argtpes", so GEOSPreparedGeom_destroy was
# registered without its ctypes argument types.
prepared_destroy = GEOSFuncFactory('GEOSPreparedGeom_destroy', argtypes=[PREPGEOM_PTR])
# Prepared geometry binary predicate support.
class PreparedPredicate(GEOSFuncFactory):
    # Each predicate takes (prepared geometry, ordinary geometry) and
    # returns a C char that check_predicate converts to a Python bool.
    argtypes = [PREPGEOM_PTR, GEOM_PTR]
    restype = c_char
    errcheck = staticmethod(check_predicate)
prepared_contains = PreparedPredicate('GEOSPreparedContains')
prepared_contains_properly = PreparedPredicate('GEOSPreparedContainsProperly')
prepared_covers = PreparedPredicate('GEOSPreparedCovers')
prepared_intersects = PreparedPredicate('GEOSPreparedIntersects')
# Functions added in GEOS 3.3
prepared_crosses = PreparedPredicate('GEOSPreparedCrosses')
prepared_disjoint = PreparedPredicate('GEOSPreparedDisjoint')
prepared_overlaps = PreparedPredicate('GEOSPreparedOverlaps')
prepared_touches = PreparedPredicate('GEOSPreparedTouches')
prepared_within = PreparedPredicate('GEOSPreparedWithin')
| bsd-3-clause |
shoelzer/buildbot | master/buildbot/util/state.py | 11 | 1756 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
class StateMixin(object):
    """Mixin giving a master service persistent key/value state, scoped
    to a database object id derived from ``self.name`` and the concrete
    class name.
    """

    # state management

    # Cached database object id; resolved lazily on first use.
    _objectid = None

    @defer.inlineCallbacks
    def _getObjectId(self):
        """Look up (and cache) the database object id for this object.

        Extracted because getState and setState previously duplicated
        this resolution logic.
        """
        if self._objectid is None:
            self._objectid = yield self.master.db.state.getObjectId(
                self.name, self.__class__.__name__)
        defer.returnValue(self._objectid)

    @defer.inlineCallbacks
    def getState(self, *args, **kwargs):
        """Fetch a state value; arguments are forwarded to
        ``db.state.getState``."""
        objectid = yield self._getObjectId()
        rv = yield self.master.db.state.getState(objectid, *args,
                                                 **kwargs)
        defer.returnValue(rv)

    @defer.inlineCallbacks
    def setState(self, key, value):
        """Persist ``key`` = ``value`` for this object."""
        objectid = yield self._getObjectId()
        yield self.master.db.state.setState(objectid, key, value)
| gpl-2.0 |
janebeckman/gpdb | gpAux/extensions/pxf/regression/input/regression/createData.py | 23 | 7087 | #!/usr/bin/env python
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
############################################################################
# Set up some globals, and import gptest
# [YOU DO NOT NEED TO CHANGE THESE]
#
import sys,string, os, subprocess, signal,time
#MYD = os.path.abspath(os.path.dirname(__file__))
#mkpath = lambda *x: os.path.join(MYD, *x)
#UPD = os.path.abspath(mkpath('..'))
#if UPD not in sys.path:
# sys.path.append(UPD)
#import gptest
#from gptest import psql, shell
#import re
#from time import sleep
# Command-line handling (Python 2 print statements): require the row count;
# the datatype selector is read from argv[2] below.
# NOTE(review): "smallest" in the usage text looks like a typo for
# "smallint" -- the dispatch loop below checks 'smallint'.
if len(sys.argv) < 2:
    print "usage: createData.py <number of lines> <datatype>\n"
    print "datatype:"
    print "all:regression:time:timestamp:date:bigint:int:smallest:real:float:boolean:varchar:bpchar:numeric:text\n"
    sys.exit()

# Number of rows to emit and which column family to generate.
LINE_NUM=int(sys.argv[1])
DATATYPE=sys.argv[2]
def printtimestr(x, datatype):
    """Return one tab-separated row of temporal test values derived from x.

    The integer x deterministically seeds every component (hour, minute,
    month, ...) via modular arithmetic.  Each row ends with a literal \\N
    (NULL marker for COPY-style loaders).  datatype is 'time', 'timestamp'
    or 'date'; any other value falls through and returns None.
    """
    # Zero-padded clock fields, all derived from x by modulo.
    HH=str(x % 24).zfill(2)
    MM=str(x % 60).zfill(2)
    SS=str(x % 60).zfill(2)
    sss=str(x % 999)
    h=str(x % 12).zfill(2)  # 12-hour clock, for the AM/PM column
    ampm=x % 2
    ampmlist= ['AM','PM']
    timezone= x % 5
    #timezonelist= ['ACDT','ACT','PST','ADT','ACWST','GMT0','EST5EDT','zulu']
    timezonelist= ['ACT','PST','ADT','GMT0','zulu']
    year = str((x % 1000) + 1).zfill(4)
    month = str((x % 12) + 1).zfill(2)
    # monthlist interleaves full and abbreviated month names, hence the
    # %24 index below.
    monthindex = x%24
    monthlist = ['January','Jan','February','Feb','March','Mar','April','Apr','May','May','June','Jun','July','Jul','August','Aug','September','Sept','October','Oct','November','Nov','December','Dec']
    day = str((x % 30) + 1).zfill(2)
    daynofill = str((x % 30) + 1)
    if (datatype == 'time'):
        #col1 - HH:MM:SS
        col1 = HH+ ':' +MM+ ':' +SS
        #col2 - HH:MM:SS.sss
        col2 = col1+ '.' +sss
        #col3 - HHMMSS
        col3 = HH+MM+SS
        #col4 - HH:MM AM/PM
        col4 = h+ ':' +MM+ ' ' +ampmlist[ampm]
        #col5 - HH:MM:SS.sss-h (timeoffset)
        col5 = col2+ '-' +str(timezone)
        #col6 - HH:MM:SS-HH:MM(timeoffset)
        col6 = col1+ '-' +h+ ':00'
        #col7 - HH:MM-HH:MM(timeoffset)
        col7 = HH+':'+MM+ '-' +h+ ':00'
        #col8 - HHMMSS-HH(timeoffset)
        col8 = col3+ '-' +h
        #col9 - HH:MM:SS XXX(timezone)
        col9 = col1+ " " +timezonelist[timezone]
        return col1+'\t'+col2+'\t'+col3+'\t'+col4+'\t'+col5+'\t'+col6+'\t'+col7+'\t'+col8+'\t'+col9+'\t\\N'
    elif (datatype == 'timestamp'):
        #1999-01-08 04:05:06
        col1 = year+'-' +month+ '-' +day+ ' ' +HH+ ':' +MM+ ':' +SS
        #1999-01-08 04:05:06 -8:00
        col2 = col1+ ' -' +str(timezone)+ ':00'
        #January 8 04:05:06 1999 PST
        col3 = monthlist[monthindex]+ ' ' +daynofill+ ' ' +HH+ ':' +MM+ ':' +SS+ ' ' +year+ ' ' +timezonelist[timezone]
        return col1+'\t'+col2+'\t'+col3+'\t\\N'
    elif (datatype == 'date'):
        #1900-01-01
        col1 = year+ '-' +month+ '-' +day
        #September 01, 1999
        col2 = monthlist[monthindex]+ ' ' +day+ ', ' +year
        #1/8/1999
        col3 = month+ '/' +day+ '/' +year
        #1999-Jan-08
        col4 = year+ '-' +monthlist[monthindex]+ '-' +day
        #Jan-08-1999
        # NOTE(review): uses the numeric month where the example suggests a
        # day-of-month -- possibly a slip; preserved as-is (test data only).
        col5 = monthlist[monthindex]+ '-' +month+ '-' +year
        #08-Jan-1999
        col6 = month+ '-' +monthlist[monthindex]+ '-' +year
        #January 8, 99 BC
        # NOTE(review): 'month' used where the example shows the day -- verify.
        col7 = monthlist[monthindex]+' ' +month+ ', ' +year+ ' BC'
        return col1+'\t'+col2+'\t'+col3+'\t'+col4+'\t'+col5+'\t'+col6+'\t'+col7+'\t\\N'
def regression(x):
    """Return a tab-separated row: x, hex(x), and the value of several
    classic linear congruential generators seeded with x (no trailing NULL
    marker, unlike the other row builders)."""
    # Well-known LCG formulas, in output order.
    lcg_values = [
        1664525 * x + 1013904223,      # Numerical Recipes
        22695477 * x + 1,              # Borland C/C++
        1103515245 * x + 12345,        # glibc
        (16807 * x) % 2147483647,      # Apple CarbonLib (MINSTD)
        69069 * x + 1,                 # VAX
        25214903917 * x + 11,          # java.util.Random
    ]
    fields = [str(x), str(hex(x))] + [str(v) for v in lcg_values]
    return '\t'.join(fields)
def printintstr(x, max, min):
    """Return a row exercising an integer type's extremes.

    Columns: max, min, m (x when x < max, else 0), max - m, min + m,
    terminated by a literal \\N NULL marker.  Parameter names shadow the
    builtins but are kept for interface compatibility.
    """
    m = x if x < max else 0
    fields = (max, min, m, max - m, min + m)
    return '\t'.join(str(value) for value in fields) + '\t\\N'
def printfloatstr(x, max, min):
    """Return a row of float test values: max, min, the 22/7 pi
    approximation, and that approximation scaled by x, terminated by a
    literal \\N NULL marker."""
    pi_approx = float(22) / float(7)
    fields = (max, min, pi_approx, pi_approx * x)
    return '\t'.join(str(value) for value in fields) + '\t\\N'
def printbool(x):
    """Return 'true' for even x, 'false' for odd x, followed by a tab and
    a literal \\N NULL marker."""
    return ('true' if x % 2 == 0 else 'false') + '\t\\N'
def printchar(x):
    """Return x, then chr(x % 128) repeated (x % 128) times, then \\N.

    Tab, LF and CR (codes 9, 10, 13) would corrupt the tab/line-separated
    output, so those rows carry the literal string 'skip' instead.
    """
    code = x % 128
    payload = 'skip' if code in (9, 10, 13) else chr(code) * code
    return str(x) + '\t' + payload + '\t\\N'
# Emit LINE_NUM rows of the requested datatype to stdout (Python 2 print
# statements).  Each row is prefixed with the datatype label except for
# 'regression' and 'all'.
for x in range(LINE_NUM):
    if (DATATYPE == 'regression'):
        print regression(x)
    elif (DATATYPE == 'time'):
        print 'time\t'+printtimestr(x,'time')
    elif (DATATYPE == 'timestamp'):
        print 'timestamp\t'+printtimestr(x,'timestamp')
    elif (DATATYPE == 'date'):
        print 'date\t'+printtimestr(x,'date')
    elif (DATATYPE == 'bigint'):
        print 'bigint\t'+printintstr(x,9223372036854775807,-9223372036854775808)
    elif (DATATYPE == 'int'):
        print 'int\t'+printintstr(x,2147483647,-2147483648)
    elif (DATATYPE == 'smallint'):
        print 'smallint\t'+printintstr(x,32767,-32768)
    elif (DATATYPE == 'real'):
        print 'real\t'+printfloatstr(x, 3.4028235E+38, -3.4028234E+38)
    elif (DATATYPE == 'float'):
        print 'float\t'+printfloatstr(x,+1.797693134862315E+308, -1.797693134862315E+308)
    elif (DATATYPE == 'boolean'):
        print 'boolean\t'+printbool(x)
    elif (DATATYPE == 'varchar'):
        print 'varchar\t'+printchar(x)
    elif (DATATYPE == 'bpchar'):
        print 'bpchar\t'+printchar(x)
    elif (DATATYPE == 'numeric'):
        # Values deliberately exceed the bigint range to exercise numeric.
        print 'numeric\t'+printintstr(x, 9223372036854775807000, -9223372036854775808000)
    elif (DATATYPE == 'text'):
        print 'text\t'+printchar(x)
    elif (DATATYPE == 'all'):
        # One row containing every column family, joined by tabs.
        print regression(x)+ '\t' +printtimestr(x,'time')+ '\t' +printtimestr(x,'timestamp')+ '\t' +printtimestr(x,'date')+ '\t' +printintstr(x,9223372036854775807,-9223372036854775808)+ '\t' +printintstr(x,2147483647,-2147483648)+ '\t' +printintstr(x,32767,-32768)+ '\t' +printfloatstr(x, 3.4028235E+38, -3.4028234E+38)+ '\t' +printfloatstr(x, +1.797693134862315E+308, -1.797693134862315E+308)+ '\t' +printbool(x)+ '\t' +printchar(x)+ '\t'+printchar(x)+ '\t'+printintstr(x,9223372036854775807000,-9223372036854775808000)+ '\t'+printchar(x)
| apache-2.0 |
geowurster/FS-Nav | fsnav/__init__.py | 1 | 1782 | """
FS Nav - File System Navigation shortcuts for the commandline
"""
from .core import Aliases, CONFIGFILE, DEFAULT_ALIASES
__version__ = '0.9.2'
__release__ = '2014-06-28'
__author__ = 'Kevin Wurster'
__email__ = 'wursterk@gmail.com'
__source__ = 'https://github.com/geowurster/FS-Nav'
__license__ = '''
New BSD License
Copyright (c) 2014, Kevin D. Wurster
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The names of its contributors may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| bsd-3-clause |
biddisco/VTK | ThirdParty/Twisted/twisted/conch/test/test_scripts.py | 41 | 1874 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the command-line interfaces to conch.
"""
# Optional-dependency probes.  Each try/except records a skip message (or
# None when the dependency is available); the test classes below attach
# these to `skip` attributes so trial skips the affected tests.
# The imported modules themselves are intentionally unused.
try:
    import pyasn1
except ImportError:
    pyasn1Skip = "Cannot run without PyASN1"
else:
    pyasn1Skip = None

try:
    import Crypto
except ImportError:
    cryptoSkip = "can't run w/o PyCrypto"
else:
    cryptoSkip = None

try:
    import tty
except ImportError:
    ttySkip = "can't run w/o tty"
else:
    ttySkip = None

try:
    import Tkinter
except ImportError:
    tkskip = "can't run w/o Tkinter"
else:
    # Importing Tkinter is not enough: actually open (and immediately
    # destroy) a root window to verify a working display.
    # (Python 2 `except ..., e` syntax.)
    try:
        Tkinter.Tk().destroy()
    except Tkinter.TclError, e:
        tkskip = "Can't test Tkinter: " + str(e)
    else:
        tkskip = None
from twisted.trial.unittest import TestCase
from twisted.scripts.test.test_scripts import ScriptTestsMixin
from twisted.python.test.test_shellcomp import ZshScriptTestMixin
class ScriptTests(TestCase, ScriptTestsMixin):
    """
    Tests for the Conch scripts.
    """
    # Class-wide skip: all conch scripts need PyASN1 and PyCrypto.
    skip = pyasn1Skip or cryptoSkip

    def test_conch(self):
        self.scriptTest("conch/conch")
    # Per-test skip attributes; `skip` here refers to the class-body local
    # computed above (evaluated while the class body executes).
    test_conch.skip = ttySkip or skip

    def test_cftp(self):
        self.scriptTest("conch/cftp")
    test_cftp.skip = ttySkip or skip

    def test_ckeygen(self):
        self.scriptTest("conch/ckeygen")

    def test_tkconch(self):
        self.scriptTest("conch/tkconch")
    test_tkconch.skip = tkskip or skip
class ZshIntegrationTestCase(TestCase, ZshScriptTestMixin):
    """
    Test that zsh completion functions are generated without error
    """
    # (script name, fully-qualified Options class) pairs consumed by
    # ZshScriptTestMixin to generate one completion test per script.
    generateFor = [('conch', 'twisted.conch.scripts.conch.ClientOptions'),
                   ('cftp', 'twisted.conch.scripts.cftp.ClientOptions'),
                   ('ckeygen', 'twisted.conch.scripts.ckeygen.GeneralOptions'),
                   ('tkconch', 'twisted.conch.scripts.tkconch.GeneralOptions'),
                   ]
| bsd-3-clause |
qsnake/pygments | external/markdown-processor.py | 46 | 1945 | # -*- coding: utf-8 -*-
"""
The Pygments Markdown Preprocessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
from markdown import Markdown
md = Markdown()
md.textPreprocessors.insert(0, CodeBlockPreprocessor())
html = md.convert(someText)
markdown is then a callable that can be passed to the context of
a template and used in that template, for example.
This uses CSS classes by default, so use
``pygmentize -S <some style> -f html > pygments.css``
to create a stylesheet to be added to the website.
You can then highlight source code in your markdown markup::
[sourcecode:lexer]
some code
[/sourcecode]
.. _Markdown: http://www.freewisdom.org/projects/python-markdown/
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import re
from markdown import TextPreprocessor
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
class CodeBlockPreprocessor(TextPreprocessor):
    """Markdown preprocessor replacing [sourcecode:lexer]...[/sourcecode]
    blocks with Pygments-highlighted HTML."""

    # Matches the [sourcecode:<lexer>] ... [/sourcecode] markup; re.S lets
    # the code body span multiple lines (non-greedy to stop at the first
    # closing tag).
    pattern = re.compile(
        r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)

    formatter = HtmlFormatter(noclasses=INLINESTYLES)

    def run(self, lines):
        def repl(m):
            # Fall back to a plain-text lexer when the requested one is
            # unknown rather than failing the whole conversion.
            try:
                lexer = get_lexer_by_name(m.group(1))
            except ValueError:
                lexer = TextLexer()
            code = highlight(m.group(2), lexer, self.formatter)
            # Protect newlines from Markdown's own paragraph handling by
            # converting them to explicit <br /> markup first.
            code = code.replace('\n\n', '\n \n').replace('\n', '<br />')
            return '\n\n<div class="code">%s</div>\n\n' % code
        return self.pattern.sub(
repl, lines) | bsd-2-clause |
cocasse/linux-h3 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require exactly one argument: the object file whose unwind info to check.
# (Python 2 print statements throughout this script.)
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary (e.g. a cross toolchain) via $READELF.
readelf = os.getenv("READELF", "readelf")

# `readelf -u` output: "<name>: [0xSTART-0xEND]" function headers and
# "... rlen=N" region-length lines.
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    """Report an error when a function's instruction-slot count does not
    equal the sum of its unwind-region lengths.

    Reads module-level `start`/`end` for anonymous functions and bumps the
    global `num_errors` counter.
    """
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        # Anonymous function: identify it by its address range instead.
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
# Running totals across all functions in the object file.
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # A new function header: validate the previous function first.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # IA-64 bundles: every 16 bytes hold 3 instruction slots.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the final function (no trailing header triggers the check).
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
BigTone2009/sms-tools | lectures/09-Sound-description/plots-code/freesoundDownload.py | 21 | 4979 | import os, sys
import freesound as fs
import json
# obtain the API key from freesound.org and add it here
Key = "????????????"
descriptors = [ 'lowlevel.spectral_centroid.mean',
'lowlevel.spectral_centroid.var',
'lowlevel.mfcc.mean',
'lowlevel.mfcc.var',
'lowlevel.pitch_salience.mean',
'lowlevel.pitch_salience.var',
'sfx.logattacktime.mean']
stats = ['mean', 'var']
def downloadSoundsFreesound(queryText = "", API_Key = "", outputDir = "", topNResults = 5, tag=None, duration=None, featureExt = '.json'):
    """
    This function downloads sounds and their descriptors from freesound based on the queryText and the tag specified in the
    input. Additionally to filter the sounds based on the duration you can also specify the duration range.
    Inputs:
        queryText (string): query text for the sounds (eg. "violin", "trumpet", "bass", "Carnatic" etc.)
        tag* (string): tag to be used while searching for sounds. (eg. "multisample" etc.)
        duration* (tuple): min and the max duration (seconds) of the sound to filter (eg (1,15))
        API_Key (string): your api key, which you can obtain from : www.freesound.org/apiv2/apply/
        outputDir (string): path to the directory where you want to store the sounds and their descriptors
        topNResults (integer): number of results/sounds that you want to download
    output:
        This function downloads sounds and descriptors and stores them in appropriate folders within outputDir.
        The name of the directory for each sound is the freesound id of that sound.
        Returns -1 on invalid arguments, None otherwise.
    NOTE: input parameters with * are optional.
    """
    #checking if the compulsory input parameters are provided
    if queryText == "":
        print "\n"
        print "Provide a query text to search for sounds"
        return -1
    if API_Key == "":
        print "\n"
        print "You need a valid freesound API key to be able to download sounds."
        print "Please apply for one here: www.freesound.org/apiv2/apply/"
        print "\n"
        return -1
    if outputDir == "" or not os.path.exists(outputDir):
        print "\n"
        print "Please provide a valid output directory"
        return -1
    #checking authentication stuff
    fsClnt = fs.FreesoundClient()
    fsClnt.set_token(API_Key,"token")
    #creating a filter string which freesound API understands
    if duration and type(duration) == tuple:
        flt_dur = " duration:[" + str(duration[0])+ " TO " +str(duration[1]) + "]"
    else:
        flt_dur = ""
    if tag and type(tag) == str:
        flt_tag = "tag:"+tag
    else:
        flt_tag = ""
    #querying freesund
    page_size = 20
    if not flt_tag + flt_dur == "":
        qRes = fsClnt.text_search(query=queryText ,filter = flt_tag + flt_dur,sort="rating_desc",fields="id,name,previews,username,url,analysis", descriptors=','.join(descriptors), page_size=page_size, normalized=1)
    else:
        qRes = fsClnt.text_search(query=queryText ,sort="rating_desc",fields="id,name,previews,username,url,analysis", descriptors=','.join(descriptors), page_size=page_size, normalized=1)
    # Fresh per-query output directory: any previous results are removed.
    # NOTE(review): os.system with an unquoted, unsanitized path -- a
    # queryText containing shell metacharacters would be executed by the
    # shell; consider shutil.rmtree/os.makedirs instead.
    outDir2 = os.path.join(outputDir, queryText)
    if os.path.exists(outDir2):
        os.system("rm -r " + outDir2)
    os.mkdir(outDir2)
    pageNo = 1
    sndCnt = 0   # sounds successfully downloaded
    indCnt = 0   # results examined so far (across pages)
    totalSnds = qRes.count
    #creating directories to store output and downloading sounds and their descriptors
    while(1):
        # Index within the current result page.
        sound = qRes[indCnt - ((pageNo-1)*page_size)]
        outDir1 = os.path.join(outputDir, queryText, str(sound.id))
        if os.path.exists(outDir1):
            os.system("rm -r " + outDir1)
        os.system("mkdir " + outDir1)
        mp3Path = os.path.join(outDir1, str(sound.previews.preview_lq_mp3.split("/")[-1]))
        ftrPath = mp3Path.replace('.mp3', featureExt)
        try:
            fs.FSRequest.retrieve(sound.previews.preview_lq_mp3, fsClnt, mp3Path)
            #initialize dictionary to store features/descriptors
            features = {}
            #obtaining all the features/descriptors
            # NOTE(review): eval() on the descriptor path; safe only while
            # `descriptors` is the hard-coded module-level list.
            for desc in descriptors:
                features[desc]=[]
                features[desc].append(eval("sound.analysis."+desc))
            #once we have all the descriptors, lets store them in a json file
            json.dump(features, open(ftrPath,'w'))
            sndCnt+=1
        except:
            # Bare except: any failure (network, missing descriptor, ...)
            # silently discards this sound's directory and moves on.
            if os.path.exists(outDir1):
                os.system("rm -r " + outDir1)
        indCnt +=1
        # Page boundary reached: fetch the next page of results.
        if indCnt%page_size==0:
            qRes = qRes.next_page()
            pageNo+=1
        if sndCnt>=topNResults or indCnt >= totalSnds:
            break
# Example usage: fetch the 20 top-rated single-note samples per instrument,
# 0.5-4 seconds long, into ./freesound-sounds/<query>/<sound id>/.
downloadSoundsFreesound(queryText = 'trumpet', API_Key = Key, tag = 'single-note', duration=(0.5, 4), topNResults = 20, outputDir = 'freesound-sounds')
downloadSoundsFreesound(queryText = 'violin', API_Key = Key, tag = 'single-note', duration=(0.5, 4), topNResults = 20, outputDir = 'freesound-sounds')
downloadSoundsFreesound(queryText = 'flute', API_Key = Key, tag = 'single-note', duration=(0.5, 4), topNResults = 20, outputDir = 'freesound-sounds')
| agpl-3.0 |
imang/gcore_kernel | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
        nr, uaddr, op, val, utime, uaddr2, val3):
    """perf hook: on futex() entry, record which lock the thread is about
    to block on and when it started waiting (FUTEX_WAIT only)."""
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
        nr, ret):
    """perf hook: on futex() exit, fold the elapsed blocked time into the
    per-(tid, lock) contention statistics and clear the pending entry."""
    # `in` instead of the deprecated dict.has_key() (removed in Python 3);
    # behavior is identical on Python 2.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """perf hook: called once when tracing starts."""
    print "Press control+C to stop and show the summary"
def trace_end():
    """perf hook: print accumulated per-(thread, lock) contention stats."""
    for (tid, lock) in lock_waits:
        # add_stats entries unpack as (min, max, avg, count); the names
        # shadow builtins but are local to this loop.
        min, max, avg, count = lock_waits[tid, lock]
        print "%s[%d] lock %x contended %d times, %d avg ns" % \
            (process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
idncom/odoo | addons/l10n_fr_hr_payroll/l10n_fr_hr_payroll.py | 340 | 2012 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class res_company(osv.osv):
    """Extend res.company with French payroll parameters (labels kept in
    French, as in the rest of this localization module)."""
    _inherit = 'res.company'
    _columns = {
        # Social-security ceiling used in contribution computations.
        'plafond_secu': fields.float('Plafond de la Securite Sociale', digits_compute=dp.get_precision('Payroll')),
        'nombre_employes': fields.integer('Nombre d\'employes'),
        # Employer-paid provident-fund contribution.
        'cotisation_prevoyance': fields.float('Cotisation Patronale Prevoyance', digits_compute=dp.get_precision('Payroll')),
        'org_ss': fields.char('Organisme de securite sociale'),
        'conv_coll': fields.char('Convention collective'),
    }
class hr_contract(osv.osv):
    """Extend hr.contract with French collective-agreement grading fields."""
    _inherit = 'hr.contract'
    _columns = {
        'qualif': fields.char('Qualification'),
        'niveau': fields.char('Niveau'),
        'coef': fields.char('Coefficient'),
    }
class hr_payslip(osv.osv):
    """Extend hr.payslip with the payment-mode field required on French
    payslips."""
    _inherit = 'hr.payslip'
    _columns = {
        'payment_mode': fields.char('Mode de paiement'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
funkypawz/MakerRobot | peewee-master/playhouse/migrate.py | 11 | 22700 | """
Lightweight schema migrations.
NOTE: Currently tested with SQLite and Postgresql. MySQL may be missing some
features.
Example Usage
-------------
Instantiate a migrator:
# Postgres example:
my_db = PostgresqlDatabase(...)
migrator = PostgresqlMigrator(my_db)
# SQLite example:
my_db = SqliteDatabase('my_database.db')
migrator = SqliteMigrator(my_db)
Then you will use the `migrate` function to run various `Operation`s which
are generated by the migrator:
migrate(
migrator.add_column('some_table', 'column_name', CharField(default=''))
)
Migrations are not run inside a transaction, so if you wish the migration to
run in a transaction you will need to wrap the call to `migrate` in a
transaction block, e.g.:
with my_db.transaction():
migrate(...)
Supported Operations
--------------------
Add new field(s) to an existing model:
# Create your field instances. For non-null fields you must specify a
# default value.
pubdate_field = DateTimeField(null=True)
comment_field = TextField(default='')
# Run the migration, specifying the database table, field name and field.
migrate(
migrator.add_column('comment_tbl', 'pub_date', pubdate_field),
migrator.add_column('comment_tbl', 'comment', comment_field),
)
Renaming a field:
# Specify the table, original name of the column, and its new name.
migrate(
migrator.rename_column('story', 'pub_date', 'publish_date'),
migrator.rename_column('story', 'mod_date', 'modified_date'),
)
Dropping a field:
migrate(
migrator.drop_column('story', 'some_old_field'),
)
Making a field nullable or not nullable:
# Note that when making a field not null that field must not have any
# NULL values present.
migrate(
# Make `pub_date` allow NULL values.
migrator.drop_not_null('story', 'pub_date'),
# Prevent `modified_date` from containing NULL values.
migrator.add_not_null('story', 'modified_date'),
)
Renaming a table:
migrate(
migrator.rename_table('story', 'stories_tbl'),
)
Adding an index:
# Specify the table, column names, and whether the index should be
# UNIQUE or not.
migrate(
# Create an index on the `pub_date` column.
migrator.add_index('story', ('pub_date',), False),
# Create a multi-column index on the `pub_date` and `status` fields.
migrator.add_index('story', ('pub_date', 'status'), False),
# Create a unique index on the category and title fields.
migrator.add_index('story', ('category_id', 'title'), True),
)
Dropping an index:
# Specify the index name.
migrate(migrator.drop_index('story', 'story_pub_date_status'))
"""
from collections import namedtuple
import functools
import re
from peewee import *
from peewee import CommaClause
from peewee import EnclosedClause
from peewee import Entity
from peewee import Expression
from peewee import Node
from peewee import OP
class Operation(object):
    """Encapsulate a single schema altering operation.

    Produced by the @operation decorator; holds the migrator, the name of
    the migrator method to call, and its arguments, deferring execution
    until run() is invoked (typically via migrate()).
    """

    def __init__(self, migrator, method, *args, **kwargs):
        self.migrator = migrator
        self.method = method      # migrator method name, resolved in run()
        self.args = args
        self.kwargs = kwargs

    def _parse_node(self, node):
        """Compile a peewee Node into (sql, params) with the db's compiler."""
        compiler = self.migrator.database.compiler()
        return compiler.parse_node(node)

    def execute(self, node):
        """Compile and run a single SQL clause against the database."""
        sql, params = self._parse_node(node)
        self.migrator.database.execute_sql(sql, params)

    def _handle_result(self, result):
        # Migrator methods may return a single Node, a nested Operation,
        # or a list/tuple of either; recurse until everything is executed.
        if isinstance(result, Node):
            self.execute(result)
        elif isinstance(result, Operation):
            result.run()
        elif isinstance(result, (list, tuple)):
            for item in result:
                self._handle_result(item)

    def run(self):
        """Invoke the wrapped migrator method (with generate=True so it
        returns SQL rather than another Operation) and execute the result."""
        kwargs = self.kwargs.copy()
        kwargs['generate'] = True
        self._handle_result(
            getattr(self.migrator, self.method)(*self.args, **kwargs))
def operation(fn):
    """Decorator for SchemaMigrator methods.

    A normal call returns a deferred Operation wrapping the method; passing
    generate=True invokes the original function immediately and returns its
    SQL node(s).  The 'generate' flag is consumed here and never forwarded.
    """
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        if kwargs.pop('generate', False):
            return fn(self, *args, **kwargs)
        return Operation(self, fn.__name__, *args, **kwargs)
    return wrapper
class SchemaMigrator(object):
    """Backend-agnostic implementation of the schema-altering operations.

    Methods decorated with @operation return Operation objects when called
    normally (executed later by migrate()), or raw SQL clause Nodes when
    called with generate=True.
    """
    # Subclasses set these when the backend needs separate ALTER TABLE
    # statements to create/drop foreign-key constraints (MySQL does;
    # Postgres/SQLite use inline REFERENCES clauses).
    explicit_create_foreign_key = False
    explicit_delete_foreign_key = False

    def __init__(self, database):
        self.database = database

    @classmethod
    def from_database(cls, database):
        """Return the migrator subclass appropriate for `database`."""
        if isinstance(database, PostgresqlDatabase):
            return PostgresqlMigrator(database)
        elif isinstance(database, MySQLDatabase):
            return MySQLMigrator(database)
        else:
            return SqliteMigrator(database)

    @operation
    def apply_default(self, table, column_name, field):
        """UPDATE every row, setting column_name to the field's default
        (calling it first if the default is callable)."""
        default = field.default
        if callable(default):
            default = default()
        return Clause(
            SQL('UPDATE'),
            Entity(table),
            SQL('SET'),
            Expression(
                Entity(column_name),
                OP.EQ,
                Param(field.db_value(default)),
                flat=True))

    @operation
    def alter_add_column(self, table, column_name, field):
        """ALTER TABLE ... ADD COLUMN; the column is always added as
        nullable here (add_column() tightens it afterwards if needed)."""
        # Make field null at first.
        field_null, field.null = field.null, True
        field.name = field.db_column = column_name
        field_clause = self.database.compiler().field_definition(field)
        field.null = field_null  # restore the caller's field object
        parts = [
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('ADD COLUMN'),
            field_clause]
        if isinstance(field, ForeignKeyField):
            parts.extend(self.get_inline_fk_sql(field))
        return Clause(*parts)

    def get_inline_fk_sql(self, field):
        """Inline REFERENCES clause fragments; MySQL overrides to []."""
        return [
            SQL('REFERENCES'),
            Entity(field.rel_model._meta.db_table),
            EnclosedClause(Entity(field.to_field.db_column))
        ]

    @operation
    def add_foreign_key_constraint(self, table, column_name, field):
        # Only required when explicit_create_foreign_key is set; see the
        # MySQL subclass for a concrete implementation.
        # NOTE(review): the subclass signature is (table, column_name,
        # rel, rel_column), which is how add_column() calls it -- this
        # base signature appears out of date.
        raise NotImplementedError

    @operation
    def add_column(self, table, column_name, field):
        # Adding a column is complicated by the fact that if there are rows
        # present and the field is non-null, then we need to first add the
        # column as a nullable field, then set the value, then add a not null
        # constraint.
        if not field.null and field.default is None:
            raise ValueError('%s is not null but has no default' % column_name)

        is_foreign_key = isinstance(field, ForeignKeyField)

        # Foreign key fields must explicitly specify a `to_field`.
        if is_foreign_key and not field.to_field:
            raise ValueError('Foreign keys must specify a `to_field`.')

        operations = [self.alter_add_column(table, column_name, field)]

        # In the event the field is *not* nullable, update with the default
        # value and set not null.
        if not field.null:
            operations.extend([
                self.apply_default(table, column_name, field),
                self.add_not_null(table, column_name)])

        if is_foreign_key and self.explicit_create_foreign_key:
            operations.append(
                self.add_foreign_key_constraint(
                    table,
                    column_name,
                    field.rel_model._meta.db_table,
                    field.to_field.db_column))

        return operations

    @operation
    def drop_foreign_key_constraint(self, table, column_name):
        # Only required when explicit_delete_foreign_key is set (MySQL).
        raise NotImplementedError

    @operation
    def drop_column(self, table, column_name, cascade=True):
        """DROP COLUMN, first dropping an explicit FK constraint on
        backends that require it."""
        nodes = [
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('DROP COLUMN'),
            Entity(column_name)]
        if cascade:
            nodes.append(SQL('CASCADE'))
        drop_column_node = Clause(*nodes)
        fk_columns = [
            foreign_key.column
            for foreign_key in self.database.get_foreign_keys(table)]
        if column_name in fk_columns and self.explicit_delete_foreign_key:
            return [
                self.drop_foreign_key_constraint(table, column_name),
                drop_column_node]
        else:
            return drop_column_node

    @operation
    def rename_column(self, table, old_name, new_name):
        return Clause(
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('RENAME COLUMN'),
            Entity(old_name),
            SQL('TO'),
            Entity(new_name))

    def _alter_column(self, table, column):
        """Shared ALTER TABLE ... ALTER COLUMN prefix for the not-null ops."""
        return [
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('ALTER COLUMN'),
            Entity(column)]

    @operation
    def add_not_null(self, table, column):
        nodes = self._alter_column(table, column)
        nodes.append(SQL('SET NOT NULL'))
        return Clause(*nodes)

    @operation
    def drop_not_null(self, table, column):
        nodes = self._alter_column(table, column)
        nodes.append(SQL('DROP NOT NULL'))
        return Clause(*nodes)

    @operation
    def rename_table(self, old_name, new_name):
        return Clause(
            SQL('ALTER TABLE'),
            Entity(old_name),
            SQL('RENAME TO'),
            Entity(new_name))

    @operation
    def add_index(self, table, columns, unique=False):
        """CREATE [UNIQUE] INDEX over `columns`, named by the compiler's
        index-naming convention."""
        compiler = self.database.compiler()
        statement = 'CREATE UNIQUE INDEX' if unique else 'CREATE INDEX'
        return Clause(
            SQL(statement),
            Entity(compiler.index_name(table, columns)),
            SQL('ON'),
            Entity(table),
            EnclosedClause(*[Entity(column) for column in columns]))

    @operation
    def drop_index(self, table, index_name):
        # `table` is unused here but kept for interface symmetry with
        # add_index (and backends that need it).
        return Clause(
            SQL('DROP INDEX'),
            Entity(index_name))
class PostgresqlMigrator(SchemaMigrator):
    """Postgres-specific overrides: renaming a table also renames the
    serial primary key's sequence, when one exists."""

    def _primary_key_columns(self, tbl):
        """Return the primary-key column names of `tbl` from the catalogs."""
        query = """
            SELECT pg_attribute.attname
            FROM pg_index, pg_class, pg_attribute
            WHERE
                pg_class.oid = '%s'::regclass AND
                indrelid = pg_class.oid AND
                pg_attribute.attrelid = pg_class.oid AND
                pg_attribute.attnum = any(pg_index.indkey) AND
                indisprimary;
        """
        # NOTE(review): table name is %-interpolated, not parameterized --
        # acceptable only because migrations run with trusted input.
        cursor = self.database.execute_sql(query % tbl)
        return [row[0] for row in cursor.fetchall()]

    @operation
    def rename_table(self, old_name, new_name):
        pk_names = self._primary_key_columns(old_name)
        ParentClass = super(PostgresqlMigrator, self)

        operations = [
            ParentClass.rename_table(old_name, new_name, generate=True)]

        if len(pk_names) == 1:
            # Check for existence of primary key sequence.
            seq_name = '%s_%s_seq' % (old_name, pk_names[0])
            query = """
                SELECT 1
                FROM information_schema.sequences
                WHERE LOWER(sequence_name) = LOWER(%s)
            """
            cursor = self.database.execute_sql(query, (seq_name,))
            if bool(cursor.fetchone()):
                new_seq_name = '%s_%s_seq' % (new_name, pk_names[0])
                operations.append(ParentClass.rename_table(
                    seq_name, new_seq_name, generate=True))

        return operations
# Column order of a MySQL `DESCRIBE <table>` result row.
_column_attributes = ('name', 'definition', 'null', 'pk', 'default', 'extra')


class MySQLColumn(namedtuple('_Column', _column_attributes)):
    """One row of `DESCRIBE <table>` output, with helpers to regenerate
    the column's DDL clause for ALTER TABLE ... CHANGE/MODIFY."""

    @property
    def is_pk(self):
        # DESCRIBE's Key column is 'PRI' for primary-key members.
        return self.pk == 'PRI'

    @property
    def is_unique(self):
        return self.pk == 'UNI'

    @property
    def is_null(self):
        return self.null == 'YES'

    def sql(self, column_name=None, is_null=None):
        """Build the column-definition clause, optionally overriding the
        name (for renames) or nullability (for MODIFY NULL/NOT NULL)."""
        if is_null is None:
            is_null = self.is_null
        if column_name is None:
            column_name = self.name
        parts = [
            Entity(column_name),
            SQL(self.definition)]
        if self.is_unique:
            parts.append(SQL('UNIQUE'))
        if is_null:
            parts.append(SQL('NULL'))
        else:
            parts.append(SQL('NOT NULL'))
        if self.is_pk:
            parts.append(SQL('PRIMARY KEY'))
        if self.extra:
            # e.g. 'auto_increment'
            parts.append(SQL(self.extra))
        return Clause(*parts)
class MySQLMigrator(SchemaMigrator):
    """Schema-migration operations implemented with MySQL-flavored SQL."""
    # MySQL manages foreign keys through named constraints, so they are
    # created and dropped with explicit ALTER TABLE statements rather than
    # inline REFERENCES clauses.
    explicit_create_foreign_key = True
    explicit_delete_foreign_key = True
    @operation
    def rename_table(self, old_name, new_name):
        """Rename a table via MySQL's RENAME TABLE statement."""
        return Clause(
            SQL('RENAME TABLE'),
            Entity(old_name),
            SQL('TO'),
            Entity(new_name))
    def _get_column_definition(self, table, column_name):
        """Return the MySQLColumn for `column_name`, or False if `table`
        has no such column.

        NOTE(review): `table` is interpolated directly into the DESCRIBE
        statement; callers are expected to pass trusted identifiers only.
        """
        cursor = self.database.execute_sql('DESCRIBE %s;' % table)
        rows = cursor.fetchall()
        for row in rows:
            column = MySQLColumn(*row)
            if column.name == column_name:
                return column
        return False
    @operation
    def add_foreign_key_constraint(self, table, column_name, rel, rel_column):
        """Add a named FK constraint from `table`.`column_name` to
        `rel`.`rel_column`."""
        # TODO: refactor, this duplicates QueryCompiler._create_foreign_key
        constraint = 'fk_%s_%s_refs_%s' % (table, column_name, rel)
        return Clause(
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('ADD CONSTRAINT'),
            Entity(constraint),
            SQL('FOREIGN KEY'),
            EnclosedClause(Entity(column_name)),
            SQL('REFERENCES'),
            Entity(rel),
            EnclosedClause(Entity(rel_column)))
    def get_foreign_key_constraint(self, table, column_name):
        """Look up the constraint name backing the FK on
        `table`.`column_name`; raises AttributeError if none exists."""
        cursor = self.database.execute_sql(
            ('SELECT constraint_name '
             'FROM information_schema.key_column_usage WHERE '
             'table_schema = DATABASE() AND '
             'table_name = %s AND '
             'column_name = %s;'),
            (table, column_name))
        result = cursor.fetchone()
        if not result:
            raise AttributeError(
                'Unable to find foreign key constraint for '
                '"%s" on table "%s".' % (table, column_name))
        return result[0]
    @operation
    def drop_foreign_key_constraint(self, table, column_name):
        """Drop the FK constraint on `table`.`column_name` (resolved by
        name via information_schema)."""
        return Clause(
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('DROP FOREIGN KEY'),
            Entity(self.get_foreign_key_constraint(table, column_name)))
    def get_inline_fk_sql(self, field):
        # Foreign keys are added explicitly (see class attributes above),
        # so no inline clause is emitted with the column definition.
        return []
    @operation
    def add_not_null(self, table, column):
        """Make `column` NOT NULL by re-issuing its definition via MODIFY."""
        column = self._get_column_definition(table, column)
        return Clause(
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('MODIFY'),
            column.sql(is_null=False))
    @operation
    def drop_not_null(self, table, column):
        """Allow NULLs in `column`; refuses to operate on primary keys."""
        column = self._get_column_definition(table, column)
        if column.is_pk:
            raise ValueError('Primary keys can not be null')
        return Clause(
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('MODIFY'),
            column.sql(is_null=True))
    @operation
    def rename_column(self, table, old_name, new_name):
        """Rename a column; if it participates in a foreign key, the FK
        constraint is dropped first and re-added afterwards."""
        fk_objects = dict(
            (fk.column, fk)
            for fk in self.database.get_foreign_keys(table))
        is_foreign_key = old_name in fk_objects
        column = self._get_column_definition(table, old_name)
        rename_clause = Clause(
            SQL('ALTER TABLE'),
            Entity(table),
            SQL('CHANGE'),
            Entity(old_name),
            column.sql(column_name=new_name))
        if is_foreign_key:
            fk_metadata = fk_objects[old_name]
            return [
                self.drop_foreign_key_constraint(table, old_name),
                rename_clause,
                self.add_foreign_key_constraint(
                    table,
                    new_name,
                    fk_metadata.dest_table,
                    fk_metadata.dest_column),
            ]
        else:
            return rename_clause
    @operation
    def drop_index(self, table, index_name):
        """Drop an index; MySQL requires the owning table in DROP INDEX."""
        return Clause(
            SQL('DROP INDEX'),
            Entity(index_name),
            SQL('ON'),
            Entity(table))
class SqliteMigrator(SchemaMigrator):
    """
    SQLite supports a subset of ALTER TABLE queries, view the docs for the
    full details http://sqlite.org/lang_altertable.html

    Unsupported alterations are emulated by rebuilding the table: the
    original CREATE TABLE SQL is parsed with the regexes below, rewritten,
    and the data is copied into the rewritten table.
    """
    # Raw string literals: escapes such as \( and \w are regex syntax, not
    # string escapes. The previous non-raw forms were invalid escape
    # sequences, deprecated since Python 3.6.
    column_re = re.compile(r'(.+?)\((.+)\)')
    column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
    column_name_re = re.compile(r'["`]?([\w]+)')
    fk_re = re.compile(r'FOREIGN KEY\s+\("?([\w]+)"?\)\s+', re.I)

    def _get_column_names(self, table):
        """Return the column names of `table` (probed via LIMIT 1 select)."""
        res = self.database.execute_sql('select * from "%s" limit 1' % table)
        return [item[0] for item in res.description]

    def _get_create_table(self, table):
        """Return (name, create_sql) for `table` from sqlite_master."""
        res = self.database.execute_sql(
            ('select name, sql from sqlite_master '
             'where type=? and LOWER(name)=?'),
            ['table', table.lower()])
        return res.fetchone()

    @operation
    def _update_column(self, table, column_to_update, fn):
        """Rebuild `table`, applying `fn` to one column's definition.

        `fn(column_name, column_def)` returns the replacement definition,
        or a falsy value to drop the column entirely. Returns the queries
        that create a temp table with the new schema, copy the data, swap
        the temp table in, and re-create the surviving indexes.
        """
        columns = set(column.name.lower()
                      for column in self.database.get_columns(table))
        if column_to_update.lower() not in columns:
            raise ValueError('Column "%s" does not exist on "%s"' %
                             (column_to_update, table))
        # Get the SQL used to create the given table.
        table, create_table = self._get_create_table(table)
        # Get the indexes and SQL to re-create indexes.
        indexes = self.database.get_indexes(table)
        # Find any foreign keys we may need to remove.
        # NOTE(review): the return value is discarded; FK clauses are
        # handled via `fk_re` below. Confirm whether this call is needed.
        self.database.get_foreign_keys(table)
        # Make sure the create_table does not contain any newlines or tabs,
        # allowing the regex to work correctly.
        create_table = re.sub(r'\s+', ' ', create_table)
        # Parse out the `CREATE TABLE` and column list portions of the query.
        raw_create, raw_columns = self.column_re.search(create_table).groups()
        # Clean up the individual column definitions.
        column_defs = [
            col.strip() for col in self.column_split_re.findall(raw_columns)]
        new_column_defs = []
        new_column_names = []
        original_column_names = []
        for column_def in column_defs:
            column_name, = self.column_name_re.match(column_def).groups()
            if column_name == column_to_update:
                new_column_def = fn(column_name, column_def)
                if new_column_def:
                    new_column_defs.append(new_column_def)
                    original_column_names.append(column_name)
                    column_name, = self.column_name_re.match(
                        new_column_def).groups()
                    new_column_names.append(column_name)
            else:
                new_column_defs.append(column_def)
                # Table-level constraints (FOREIGN KEY .../PRIMARY KEY ...)
                # are not data columns, so exclude them from the copy lists.
                if not column_name.lower().startswith(('foreign', 'primary')):
                    new_column_names.append(column_name)
                    original_column_names.append(column_name)
        # Create a mapping of original columns to new columns.
        original_to_new = dict(zip(original_column_names, new_column_names))
        new_column = original_to_new.get(column_to_update)
        fk_filter_fn = lambda column_def: column_def
        if not new_column:
            # Remove any foreign keys associated with this column.
            fk_filter_fn = lambda column_def: None
        elif new_column != column_to_update:
            # Update any foreign keys for this column.
            fk_filter_fn = lambda column_def: self.fk_re.sub(
                'FOREIGN KEY ("%s") ' % new_column,
                column_def)
        cleaned_columns = []
        for column_def in new_column_defs:
            match = self.fk_re.match(column_def)
            if match is not None and match.groups()[0] == column_to_update:
                column_def = fk_filter_fn(column_def)
            if column_def:
                cleaned_columns.append(column_def)
        # Update the name of the new CREATE TABLE query.
        temp_table = table + '__tmp__'
        rgx = re.compile('("?)%s("?)' % table, re.I)
        create = rgx.sub(
            '\\1%s\\2' % temp_table,
            raw_create)
        # Create the new table.
        columns = ', '.join(cleaned_columns)
        queries = [
            Clause(SQL('DROP TABLE IF EXISTS'), Entity(temp_table)),
            SQL('%s (%s)' % (create.strip(), columns))]
        # Populate new table.
        populate_table = Clause(
            SQL('INSERT INTO'),
            Entity(temp_table),
            EnclosedClause(*[Entity(col) for col in new_column_names]),
            SQL('SELECT'),
            CommaClause(*[Entity(col) for col in original_column_names]),
            SQL('FROM'),
            Entity(table))
        queries.append(populate_table)
        # Drop existing table and rename temp table.
        queries.append(Clause(
            SQL('DROP TABLE'),
            Entity(table)))
        queries.append(self.rename_table(temp_table, table))
        # Re-create indexes.
        for index in indexes:
            # Auto-generated indexes in SQLite will not have associated SQL,
            # so pass over them.
            if not index.sql:
                continue
            if column_to_update in index.columns:
                if new_column:
                    queries.append(
                        SQL(index.sql.replace(column_to_update, new_column)))
            else:
                queries.append(SQL(index.sql))
        return queries

    @operation
    def drop_column(self, table, column_name, cascade=True):
        """Drop a column. `cascade` is accepted for API parity but has no
        effect on SQLite."""
        return self._update_column(table, column_name, lambda a, b: None)

    @operation
    def rename_column(self, table, old_name, new_name):
        """Rename a column by rewriting its definition in CREATE TABLE."""
        def _rename(column_name, column_def):
            return column_def.replace(column_name, new_name)
        return self._update_column(table, old_name, _rename)

    @operation
    def add_not_null(self, table, column):
        """Append NOT NULL to the column's definition."""
        def _add_not_null(column_name, column_def):
            return column_def + ' NOT NULL'
        return self._update_column(table, column, _add_not_null)

    @operation
    def drop_not_null(self, table, column):
        """Strip NOT NULL from the column's definition."""
        def _drop_not_null(column_name, column_def):
            return column_def.replace('NOT NULL', '')
        return self._update_column(table, column, _drop_not_null)
def migrate(*operations, **kwargs):
    """Run each migration operation in the order given.

    `kwargs` is accepted for API compatibility but is currently unused.
    """
    for op in operations:
        op.run()
| gpl-3.0 |
chris-chris/tensorflow | tensorflow/contrib/integrate/python/ops/odes.py | 69 | 20508 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ODE solvers for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
# A Butcher tableau fully specifies an explicit Runge-Kutta scheme:
# `alpha`/`beta` give the stage times/weights, `c_sol` the solution weights,
# `c_mid` the midpoint-evaluation weights (for dense output), and `c_error`
# the weights for the embedded error estimate.
_ButcherTableau = collections.namedtuple(
    '_ButcherTableau', 'alpha beta c_sol c_mid c_error')
# Parameters from Shampine (1986), section 4.
# (True division is in effect via `from __future__ import division`.)
_DORMAND_PRINCE_TABLEAU = _ButcherTableau(
    alpha=[1/5, 3/10, 4/5, 8/9, 1., 1.],
    beta=[[1/5],
          [3/40, 9/40],
          [44/45, -56/15, 32/9],
          [19372/6561, -25360/2187, 64448/6561, -212/729],
          [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656],
          [35/384, 0, 500/1113, 125/192, -2187/6784, 11/84]],
    c_sol=[35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0],
    c_mid=[6025192743/30085553152 / 2, 0, 51252292925/65400821598 / 2,
           -2691868925/45128329728 / 2, 187940372067/1594534317056 / 2,
           -1776094331/19743644256 / 2, 11237099/235043384 / 2],
    c_error=[1951/21600 - 35/384,
             0,
             22642/50085 - 500/1113,
             451/720 - 125/192,
             -12231/42400 - -2187/6784,
             649/6300 - 11/84,
             1/60],
    )
def _possibly_nonzero(x):
  """Return True if `x` is a Tensor or a nonzero Python number."""
  if isinstance(x, ops.Tensor):
    # A Tensor's value is unknown at graph-construction time, so it must be
    # treated as possibly nonzero.
    return True
  return x != 0
def _scaled_dot_product(scale, xs, ys, name=None):
  """Calculate a scaled, vector inner product between lists of Tensors."""
  with ops.name_scope(name, 'scaled_dot_product', [scale, xs, ys]) as scope:
    # The Butcher tableau contains zeros; dropping terms where both factors
    # are known-zero constants avoids wasted computation in the graph.
    terms = [(scale * x) * y
             for x, y in zip(xs, ys)
             if _possibly_nonzero(x) or _possibly_nonzero(y)]
    return math_ops.add_n(terms, name=scope)
def _dot_product(xs, ys, name=None):
  """Calculate the vector inner product between two lists of Tensors."""
  with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
    products = [x * y for x, y in zip(xs, ys)]
    return math_ops.add_n(products, name=scope)
def _runge_kutta_step(func, y0, f0, t0, dt, tableau=_DORMAND_PRINCE_TABLEAU,
                      name=None):
  """Take an arbitrary Runge-Kutta step and estimate error.
  Args:
    func: Function to evaluate like `func(y, t)` to compute the time derivative
      of `y`.
    y0: Tensor initial value for the state.
    f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
    t0: float64 scalar Tensor giving the initial time.
    dt: float64 scalar Tensor giving the size of the desired time step.
    tableau: optional _ButcherTableau describing how to take the Runge-Kutta
      step.
    name: optional name for the operation.
  Returns:
    Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
    the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
    estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
    calculating these terms.
  """
  with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
    y0 = ops.convert_to_tensor(y0, name='y0')
    f0 = ops.convert_to_tensor(f0, name='f0')
    t0 = ops.convert_to_tensor(t0, name='t0')
    dt = ops.convert_to_tensor(dt, name='dt')
    dt_cast = math_ops.cast(dt, y0.dtype)
    # Evaluate the intermediate RK stages; k[i] holds func at stage i.
    k = [f0]
    for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
      ti = t0 + alpha_i * dt
      yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
      k.append(func(yi, ti))
    if not (tableau.c_sol[-1] == 0 and tableau.c_sol == tableau.beta[-1]):
      # This property (true for Dormand-Prince) lets us save a few FLOPs.
      yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)
    y1 = array_ops.identity(yi, name='%s/y1' % scope)
    f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
    # Error estimate from the embedded lower-order solution (c_error holds
    # the difference of the two sets of solution weights).
    y1_error = _scaled_dot_product(dt_cast, tableau.c_error, k,
                                   name='%s/y1_error' % scope)
    return (y1, f1, y1_error, k)
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
  """Fit coefficients for 4th order polynomial interpolation.
  Args:
    y0: function value at the start of the interval.
    y1: function value at the end of the interval.
    y_mid: function value at the mid-point of the interval.
    f0: derivative value at the start of the interval.
    f1: derivative value at the end of the interval.
    dt: width of the interval.
  Returns:
    List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
    `p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
    between 0 (start of interval) and 1 (end of interval).
  """
  # Coefficients obtained by solving the five interpolation constraints
  # symbolically (sympy):
  #   p(0) = y0, p(1/2) = y_mid, p(1) = y1,
  #   p'(0)/dt = f0, p'(1)/dt = f1
  # giving:
  #   a = -2*dt*f0 + 2*dt*f1 -  8*y0 -  8*y1 + 16*y_mid
  #   b =  5*dt*f0 - 3*dt*f1 + 18*y0 + 14*y1 - 32*y_mid
  #   c = -4*dt*f0 +   dt*f1 - 11*y0 -  5*y1 + 16*y_mid
  #   d = dt*f0
  #   e = y0
  values = [f0, f1, y0, y1, y_mid]
  a = _dot_product([-2 * dt, 2 * dt, -8, -8, 16], values)
  b = _dot_product([5 * dt, -3 * dt, 18, 14, -32], values)
  c = _dot_product([-4 * dt, dt, -11, -5, 16], values)
  d = dt * f0
  e = y0
  return [a, b, c, d, e]
def _interp_fit_rk(y0, y1, k, dt, tableau=_DORMAND_PRINCE_TABLEAU):
  """Fit an interpolating polynomial to the results of a Runge-Kutta step."""
  with ops.name_scope('interp_fit_rk'):
    dt = math_ops.cast(dt, y0.dtype)
    # The tableau's c_mid weights evaluate the solution at the midpoint of
    # the step; the first and last RK stages are the endpoint derivatives.
    y_mid = y0 + _scaled_dot_product(dt, tableau.c_mid, k)
    return _interp_fit(y0, y1, y_mid, k[0], k[-1], dt)
def _interp_evaluate(coefficients, t0, t1, t):
  """Evaluate polynomial interpolation at the given time point.
  Args:
    coefficients: list of Tensor coefficients as created by `interp_fit`.
    t0: scalar float64 Tensor giving the start of the interval.
    t1: scalar float64 Tensor giving the end of the interval.
    t: scalar float64 Tensor giving the desired interpolation point.
  Returns:
    Polynomial interpolation of the coefficients at time `t`.
  """
  with ops.name_scope('interp_evaluate'):
    t0 = ops.convert_to_tensor(t0)
    t1 = ops.convert_to_tensor(t1)
    t = ops.convert_to_tensor(t)
    dtype = coefficients[0].dtype
    assert_op = control_flow_ops.Assert(
        (t0 <= t) & (t <= t1),
        ['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
    with ops.control_dependencies([assert_op]):
      # Normalize t to x in [0, 1] and build the powers [1, x, x**2, ...].
      x = math_ops.cast((t - t0) / (t1 - t0), dtype)
      xs = [constant_op.constant(1, dtype), x]
      for _ in range(2, len(coefficients)):
        xs.append(xs[-1] * x)
      # Coefficients are ordered highest degree first, hence reversed().
      return _dot_product(coefficients, reversed(xs))
def _optimal_step_size(last_step,
                       error_ratio,
                       safety=0.9,
                       ifactor=10.0,
                       dfactor=0.2,
                       order=5,
                       name=None):
  """Calculate the optimal size for the next Runge-Kutta step."""
  with ops.name_scope(
      name, 'optimal_step_size', [last_step, error_ratio]) as scope:
    error_ratio = math_ops.cast(error_ratio, last_step.dtype)
    exponent = math_ops.cast(1 / order, last_step.dtype)
    # Keep error_ratio in the numerator so a zero error cannot cause a
    # division by zero; clamp growth/shrink to [1/ifactor, 1/dfactor].
    raw_factor = error_ratio ** exponent / safety
    factor = math_ops.maximum(
        1 / ifactor, math_ops.minimum(raw_factor, 1 / dfactor))
    return math_ops.div(last_step, factor, name=scope)
def _abs_square(x):
  """Return |x|**2, handling complex dtypes as re(x)**2 + im(x)**2."""
  if not x.dtype.is_complex:
    return math_ops.square(x)
  real_part = math_ops.real(x)
  imag_part = math_ops.imag(x)
  return math_ops.square(real_part) + math_ops.square(imag_part)
def _ta_append(tensor_array, value):
"""Append a value to the end of a tf.TensorArray."""
return tensor_array.write(tensor_array.size(), value)
class _RungeKuttaState(collections.namedtuple(
    '_RungeKuttaState', 'y1, f1, t0, t1, dt, interp_coeff')):
  """Saved state of the Runge Kutta solver.
  Attributes:
    y1: Tensor giving the function value at the end of the last time step.
    f1: Tensor giving derivative at the end of the last time step.
    t0: scalar float64 Tensor giving start of the last time step.
    t1: scalar float64 Tensor giving end of the last time step.
    dt: scalar float64 Tensor giving the size for the next time step.
    interp_coeff: list of Tensors giving coefficients for polynomial
      interpolation between `t0` and `t1`.
  """
class _History(collections.namedtuple(
    '_History', 'integrate_points, error_ratio')):
  """Saved integration history for use in `info_dict`.
  Only materialized by `odeint` when `full_output=True`.
  Attributes:
    integrate_points: tf.TensorArray storing integrating time points.
    error_ratio: tf.TensorArray storing computed error ratios at each
      integration step.
  """
def _dopri5(func,
            y0,
            t,
            rtol,
            atol,
            full_output=False,
            first_step=None,
            safety=0.9,
            ifactor=10.0,
            dfactor=0.2,
            max_num_steps=1000,
            name=None):
  """Solve an ODE for `odeint` using method='dopri5'.

  Builds a graph with two nested while_loops: an outer loop over the
  requested output times `t`, and an inner loop taking adaptive
  Runge-Kutta steps until each output time is bracketed, after which the
  solution is obtained by polynomial interpolation.
  """
  if first_step is None:
    # at some point, we might want to switch to picking the step size
    # automatically
    first_step = 1.0
  with ops.name_scope(
      name, 'dopri5',
      [y0, t, rtol, atol, safety, ifactor, dfactor, max_num_steps]) as scope:
    first_step = ops.convert_to_tensor(first_step, dtype=t.dtype,
                                       name='first_step')
    safety = ops.convert_to_tensor(safety, dtype=t.dtype, name='safety')
    ifactor = ops.convert_to_tensor(ifactor, dtype=t.dtype, name='ifactor')
    dfactor = ops.convert_to_tensor(dfactor, dtype=t.dtype, name='dfactor')
    max_num_steps = ops.convert_to_tensor(max_num_steps, dtype=dtypes.int32,
                                          name='max_num_steps')
    def adaptive_runge_kutta_step(rk_state, history, n_steps):
      """Take an adaptive Runge-Kutta step to integrate the ODE."""
      y0, f0, _, t0, dt, interp_coeff = rk_state
      with ops.name_scope('assertions'):
        check_underflow = control_flow_ops.Assert(
            t0 + dt > t0, ['underflow in dt', dt])
        check_max_num_steps = control_flow_ops.Assert(
            n_steps < max_num_steps, ['max_num_steps exceeded'])
        check_numerics = control_flow_ops.Assert(
            math_ops.reduce_all(math_ops.is_finite(abs(y0))),
            ['non-finite values in state `y`', y0])
      with ops.control_dependencies(
          [check_underflow, check_max_num_steps, check_numerics]):
        y1, f1, y1_error, k = _runge_kutta_step(func, y0, f0, t0, dt)
      with ops.name_scope('error_ratio'):
        # We use the same approach as the dopri5 fortran code.
        error_tol = atol + rtol * math_ops.maximum(abs(y0), abs(y1))
        tensor_error_ratio = _abs_square(y1_error) / _abs_square(error_tol)
        # Could also use reduce_maximum here.
        error_ratio = math_ops.sqrt(math_ops.reduce_mean(tensor_error_ratio))
        accept_step = error_ratio <= 1
      with ops.name_scope('update/rk_state'):
        # If we don't accept the step, the _RungeKuttaState will be useless
        # (covering a time-interval of size 0), but that's OK, because in such
        # cases we always immediately take another Runge-Kutta step.
        y_next = control_flow_ops.cond(accept_step, lambda: y1, lambda: y0)
        f_next = control_flow_ops.cond(accept_step, lambda: f1, lambda: f0)
        t_next = control_flow_ops.cond(accept_step, lambda: t0 + dt, lambda: t0)
        interp_coeff = control_flow_ops.cond(
            accept_step,
            lambda: _interp_fit_rk(y0, y1, k, dt),
            lambda: interp_coeff)
        dt_next = _optimal_step_size(dt, error_ratio, safety, ifactor, dfactor)
        rk_state = _RungeKuttaState(
            y_next, f_next, t0, t_next, dt_next, interp_coeff)
      with ops.name_scope('update/history'):
        history = _History(_ta_append(history.integrate_points, t0 + dt),
                           _ta_append(history.error_ratio, error_ratio))
      return rk_state, history, n_steps + 1
    def interpolate(solution, history, rk_state, i):
      """Interpolate through the next time point, integrating as necessary."""
      with ops.name_scope('interpolate'):
        # Step until the last completed step covers t[i], then interpolate.
        rk_state, history, _ = control_flow_ops.while_loop(
            lambda rk_state, *_: t[i] > rk_state.t1,
            adaptive_runge_kutta_step,
            (rk_state, history, 0),
            name='integrate_loop')
        y = _interp_evaluate(
            rk_state.interp_coeff, rk_state.t0, rk_state.t1, t[i])
        solution = solution.write(i, y)
        return solution, history, rk_state, i + 1
    assert_increasing = control_flow_ops.Assert(
        math_ops.reduce_all(t[1:] > t[:-1]),
        ['`t` must be monotonic increasing'])
    with ops.control_dependencies([assert_increasing]):
      num_times = array_ops.size(t)
    solution = tensor_array_ops.TensorArray(
        y0.dtype, size=num_times).write(0, y0)
    history = _History(
        integrate_points=tensor_array_ops.TensorArray(
            t.dtype, size=0, dynamic_size=True),
        error_ratio=tensor_array_ops.TensorArray(
            rtol.dtype, size=0, dynamic_size=True))
    # Initial state: a degenerate zero-length step ending at t[0].
    rk_state = _RungeKuttaState(
        y0, func(y0, t[0]), t[0], t[0], first_step, interp_coeff=[y0] * 5)
    solution, history, _, _ = control_flow_ops.while_loop(
        lambda _, __, ___, i: i < num_times,
        interpolate,
        (solution, history, rk_state, 1),
        name='interpolate_loop')
    y = solution.stack(name=scope)
    y.set_shape(t.get_shape().concatenate(y0.get_shape()))
    if not full_output:
      return y
    else:
      integrate_points = history.integrate_points.stack()
      # 6 derivative evaluations per attempted step, plus the initial f(y0).
      info_dict = {'num_func_evals': 6 * array_ops.size(integrate_points) + 1,
                   'integrate_points': integrate_points,
                   'error_ratio': history.error_ratio.stack()}
      return (y, info_dict)
def odeint(func,
           y0,
           t,
           rtol=1e-6,
           atol=1e-12,
           method=None,
           options=None,
           full_output=False,
           name=None):
  """Integrate a system of ordinary differential equations.
  Solves the initial value problem for a non-stiff system of first order ODEs:
  ```
  dy/dt = func(y, t), y(t[0]) = y0
  ```
  where y is a Tensor of any shape.
  For example:
  ```
  # solve `dy/dt = -y`, corresponding to exponential decay
  tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
  => [1, exp(-1), exp(-2)]
  ```
  Output dtypes and numerical precision are based on the dtypes of the inputs
  `y0` and `t`.
  Currently, implements 5th order Runge-Kutta with adaptive step size control
  and dense output, using the Dormand-Prince method. Similar to the 'dopri5'
  method of `scipy.integrate.ode` and MATLAB's `ode45`.
  Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
  Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
  doi:10.2307/2008219
  Args:
    func: Function that maps a Tensor holding the state `y` and a scalar Tensor
      `t` into a Tensor of state derivatives with respect to time.
    y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
      have any floating point or complex dtype.
    t: 1-D Tensor holding a sequence of time points for which to solve for
      `y`. The initial time point should be the first element of this sequence,
      and each time must be larger than the previous time. May have any floating
      point dtype. If not provided as a Tensor, converted to a Tensor with
      float64 dtype.
    rtol: optional float64 Tensor specifying an upper bound on relative error,
      per element of `y`.
    atol: optional float64 Tensor specifying an upper bound on absolute error,
      per element of `y`.
    method: optional string indicating the integration method to use. Currently,
      the only valid option is `'dopri5'`.
    options: optional dict of configuring options for the indicated integration
      method. Can only be provided if a `method` is explicitly set. For
      `'dopri5'`, valid options include:
      * first_step: an initial guess for the size of the first integration
        (current default: 1.0, but may later be changed to use heuristics based
        on the gradient).
      * safety: safety factor for adaptive step control, generally a constant
        in the range 0.8-1 (default: 0.9).
      * ifactor: maximum factor by which the adaptive step may be increased
        (default: 10.0).
      * dfactor: maximum factor by which the adaptive step may be decreased
        (default: 0.2).
      * max_num_steps: integer maximum number of integrate steps between time
        points in `t` (default: 1000).
    full_output: optional boolean. If True, `odeint` returns a tuple
      `(y, info_dict)` describing the integration process.
    name: Optional name for this operation.
  Returns:
    y: (N+1)-D tensor, where the first dimension corresponds to different
      time points. Contains the solved value of y for each desired time point in
      `t`, with the initial value `y0` being the first element along the first
      dimension.
    info_dict: only if `full_output == True`. A dict with the following values:
      * num_func_evals: integer Tensor counting the number of function
        evaluations.
      * integrate_points: 1D float64 Tensor with the upper bound of each
        integration time step.
      * error_ratio: 1D float Tensor with the estimated ratio of the integration
        error to the error tolerance at each integration step. A ratio greater
        than 1 corresponds to rejected steps.
  Raises:
    ValueError: if an invalid `method` is provided.
    TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
      an invalid dtype.
  """
  if method is not None and method != 'dopri5':
    raise ValueError('invalid method: %r' % method)
  if options is None:
    options = {}
  elif method is None:
    raise ValueError('cannot supply `options` without specifying `method`')
  with ops.name_scope(name, 'odeint', [y0, t, rtol, atol]) as scope:
    # TODO(shoyer): use nest.flatten (like tf.while_loop) to allow `y0` to be an
    # arbitrarily nested tuple. This will help performance and usability by
    # avoiding the need to pack/unpack in user functions.
    y0 = ops.convert_to_tensor(y0, name='y0')
    if not (y0.dtype.is_floating or y0.dtype.is_complex):
      raise TypeError('`y0` must have a floating point or complex floating '
                      'point dtype')
    t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
    if not t.dtype.is_floating:
      raise TypeError('`t` must have a floating point dtype')
    # Error tolerances follow the real-valued magnitude of y0 (abs() maps a
    # complex dtype to the corresponding float dtype).
    error_dtype = abs(y0).dtype
    rtol = ops.convert_to_tensor(rtol, dtype=error_dtype, name='rtol')
    atol = ops.convert_to_tensor(atol, dtype=error_dtype, name='atol')
    return _dopri5(func, y0, t,
                   rtol=rtol,
                   atol=atol,
                   full_output=full_output,
                   name=scope,
                   **options)
| apache-2.0 |
magfest/ubersystem | alembic/versions/4947b38a18b1_add_confirmed_column.py | 1 | 1940 | """Add confirmed column
Revision ID: 4947b38a18b1
Revises: 6aef7396c197
Create Date: 2017-08-06 12:48:43.186696
"""
# revision identifiers, used by Alembic.
revision = '4947b38a18b1'
down_revision = '6aef7396c197'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa
import residue

try:
    is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
    # No live migration context available (e.g. module imported outside a
    # migration run); assume a non-SQLite backend. Narrowed from a bare
    # `except:` so SystemExit/KeyboardInterrupt are not swallowed.
    is_sqlite = False

if is_sqlite:
    # SQLite does not enforce foreign keys unless explicitly enabled.
    op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
    utcnow_server_default = "(datetime('now', 'utc'))"
else:
    utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
    """Adds parenthesis around SQLite datetime defaults for utcnow.

    SQLAlchemy reflects the server default without the wrapping parens
    that are needed to re-emit it; rewrite `column_info` in place.
    """
    reflected_default = column_info['default']
    if reflected_default == "datetime('now', 'utc')":
        column_info['default'] = utcnow_server_default
# Passed to batch_alter_table so reflected SQLite defaults are re-quoted
# by sqlite_column_reflect_listener during table rebuilds.
sqlite_reflect_kwargs = {
    'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
    """Add the nullable `confirmed` UTC timestamp column to `attendee`."""
    confirmed_column = sa.Column('confirmed', residue.UTCDateTime(), nullable=True)
    if is_sqlite:
        # SQLite cannot ALTER TABLE directly; use Alembic's batch mode,
        # which rebuilds the table with the reflected schema.
        with op.batch_alter_table('attendee', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
            batch_op.add_column(confirmed_column)
    else:
        op.add_column('attendee', confirmed_column)
def downgrade():
    """Remove the `confirmed` column from `attendee`.

    NOTE(review): not wrapped in batch_alter_table; confirm DROP COLUMN
    works on the SQLite versions this project supports.
    """
    op.drop_column('attendee', 'confirmed')
| agpl-3.0 |
esaezgil/aiohttp | tests/test_pytest_plugin.py | 2 | 3580 | pytest_plugins = 'pytester'
def test_myplugin(testdir):
    """End-to-end exercise of aiohttp.pytest_plugin via a nested pytest run.

    The generated module defines 12 tests covering the `test_client` and
    `loop` fixtures; `test_hello_fails` asserts the wrong response text on
    purpose so exactly one failure is expected.
    """
    testdir.makepyfile("""\
    import asyncio
    import pytest
    from unittest import mock
    from aiohttp import web
    pytest_plugins = 'aiohttp.pytest_plugin'
    @asyncio.coroutine
    def hello(request):
        return web.Response(body=b'Hello, world')
    def create_app(loop):
        app = web.Application(loop=loop)
        app.router.add_route('GET', '/', hello)
        return app
    @asyncio.coroutine
    def test_hello(test_client):
        client = yield from test_client(create_app)
        resp = yield from client.get('/')
        assert resp.status == 200
        text = yield from resp.text()
        assert 'Hello, world' in text
    @asyncio.coroutine
    def test_hello_from_app(test_client, loop):
        app = web.Application(loop=loop)
        app.router.add_get('/', hello)
        client = yield from test_client(app)
        resp = yield from client.get('/')
        assert resp.status == 200
        text = yield from resp.text()
        assert 'Hello, world' in text
    @asyncio.coroutine
    def test_hello_with_loop(test_client, loop):
        client = yield from test_client(create_app)
        resp = yield from client.get('/')
        assert resp.status == 200
        text = yield from resp.text()
        assert 'Hello, world' in text
    @asyncio.coroutine
    def test_hello_fails(test_client):
        client = yield from test_client(create_app)
        resp = yield from client.get('/')
        assert resp.status == 200
        text = yield from resp.text()
        assert 'Hello, wield' in text
    @asyncio.coroutine
    def test_hello_with_fake_loop(test_client):
        with pytest.raises(AssertionError):
            fake_loop = mock.Mock()
            yield from test_client(web.Application(loop=fake_loop))
    @asyncio.coroutine
    def test_set_args(test_client, loop):
        with pytest.raises(AssertionError):
            app = web.Application(loop=loop)
            yield from test_client(app, 1, 2, 3)
    @asyncio.coroutine
    def test_set_keyword_args(test_client, loop):
        with pytest.raises(AssertionError):
            app = web.Application(loop=loop)
            yield from test_client(app, param=1)
    @asyncio.coroutine
    def test_noop():
        pass
    @asyncio.coroutine
    def previous(request):
        if request.method == 'POST':
            request.app['value'] = (yield from request.post())['value']
            return web.Response(body=b'thanks for the data')
        else:
            v = request.app.get('value', 'unknown')
            return web.Response(body='value: {}'.format(v).encode())
    def create_stateful_app(loop):
        app = web.Application(loop=loop)
        app.router.add_route('*', '/', previous)
        return app
    @pytest.fixture
    def cli(loop, test_client):
        return loop.run_until_complete(test_client(create_stateful_app))
    @asyncio.coroutine
    def test_set_value(cli):
        resp = yield from cli.post('/', data={'value': 'foo'})
        assert resp.status == 200
        text = yield from resp.text()
        assert text == 'thanks for the data'
        assert cli.app['value'] == 'foo'
    @asyncio.coroutine
    def test_get_value(cli):
        resp = yield from cli.get('/')
        assert resp.status == 200
        text = yield from resp.text()
        assert text == 'value: unknown'
        cli.app['value'] = 'bar'
        resp = yield from cli.get('/')
        assert resp.status == 200
        text = yield from resp.text()
        assert text == 'value: bar'
    def test_noncoro():
        assert True
    @asyncio.coroutine
    def test_client_failed_to_create(test_client):
        def make_app(loop):
            raise RuntimeError()
        with pytest.raises(RuntimeError):
            yield from test_client(make_app)
    """)
    # 11 of the generated tests must pass; only test_hello_fails fails.
    result = testdir.runpytest('-p', 'no:sugar')
    result.assert_outcomes(passed=11, failed=1)
| apache-2.0 |
wang1352083/pythontool | python-2.7.12-lib/test/test_telnetlib.py | 12 | 16064 | import socket
import telnetlib
import time
import Queue
import unittest
from unittest import TestCase
from test import test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
# Sentinel placed in the data queue to tell server() to stop sending.
EOF_sigil = object()
def server(evt, serv, dataq=None):
    """Run a one-shot TCP server used as a test fixture.

    1) listen and set `evt` so the parent thread knows we are ready;
    2) accept a single connection;
    3) if `dataq` is given, stream items from dataq.get() to the socket:
       string items are payload, numeric items are sleep delays, and
       EOF_sigil stops sending.
    The server socket is always closed on exit.
    """
    serv.listen(5)
    evt.set()
    try:
        conn, addr = serv.accept()
        if dataq:
            data = ''
            new_data = dataq.get(True, 0.5)
            dataq.task_done()
            for item in new_data:
                if item == EOF_sigil:
                    break
                # Numbers in the queue are pacing delays, not payload.
                if type(item) in [int, float]:
                    time.sleep(item)
                else:
                    data += item
                # Send whatever has accumulated; keep any unsent remainder
                # for the next iteration.
                written = conn.send(data)
                data = data[written:]
        conn.close()
    except socket.timeout:
        pass
    finally:
        serv.close()
class GeneralTests(TestCase):
    """Connection-establishment, timeout, and accessor tests for telnetlib."""
    def setUp(self):
        # Start a throwaway TCP server in a background thread and wait for
        # it to signal readiness before the test body runs.
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. Look issue 11812
        self.port = test_support.bind_port(self.sock)
        self.thread = threading.Thread(target=server, args=(self.evt,self.sock))
        self.thread.setDaemon(True)
        self.thread.start()
        self.evt.wait()
    def tearDown(self):
        # Ensure the server thread has exited before the next test binds.
        self.thread.join()
    def testBasic(self):
        # connects
        telnet = telnetlib.Telnet(HOST, self.port)
        telnet.sock.close()
    def testTimeoutDefault(self):
        # The global default timeout should be picked up at connect time.
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()
    def testTimeoutNone(self):
        # None, having other default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(telnet.sock.gettimeout() is None)
        telnet.sock.close()
    def testTimeoutValue(self):
        # An explicit timeout argument takes precedence.
        telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()
    def testTimeoutOpen(self):
        # Timeout is also honored by the two-step construct-then-open path.
        telnet = telnetlib.Telnet()
        telnet.open(HOST, self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()
    def testGetters(self):
        # Test telnet getter methods
        telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
        t_sock = telnet.sock
        self.assertEqual(telnet.get_socket(), t_sock)
        self.assertEqual(telnet.fileno(), t_sock.fileno())
        telnet.sock.close()
def _read_setUp(self):
    """Shared setUp: start a data-driven server thread (see ``server``).

    Defined at module level so several TestCase classes can install it
    as their ``setUp`` attribute.
    """
    self.evt = threading.Event()
    self.dataq = Queue.Queue()
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.settimeout(10)
    self.port = test_support.bind_port(self.sock)
    self.thread = threading.Thread(target=server, args=(self.evt, self.sock, self.dataq))
    self.thread.start()
    self.evt.wait()
def _read_tearDown(self):
    """Shared tearDown: wait for the server thread to finish."""
    self.thread.join()
class ReadTests(TestCase):
    """Exercise every ``Telnet.read_*()`` flavour against a scripted server.

    The server thread (see ``server``/``_read_setUp``) replays a list of
    items placed on ``self.dataq``: strings become payload, numbers become
    pauses, and ``EOF_sigil`` closes the connection.
    """

    setUp = _read_setUp
    tearDown = _read_tearDown

    # use a similar approach to testing timeouts as test_timeout.py
    # these will never pass 100% but make the fuzz big enough that it is rare
    block_long = 0.6
    block_short = 0.3

    def test_read_until_A(self):
        """
        read_until(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.
        """
        want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        data = telnet.read_until('match')
        self.assertEqual(data, ''.join(want[:-2]))

    def test_read_until_B(self):
        # test the timeout - it does NOT raise socket.timeout
        want = ['hello', self.block_long, 'not seen', EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        data = telnet.read_until('not seen', self.block_short)
        self.assertEqual(data, want[0])
        self.assertEqual(telnet.read_all(), 'not seen')

    def test_read_until_with_poll(self):
        """Use select.poll() to implement telnet.read_until()."""
        want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        if not telnet._has_poll:
            raise unittest.SkipTest('select.poll() is required')
        telnet._has_poll = True
        self.dataq.join()
        data = telnet.read_until('match')
        self.assertEqual(data, ''.join(want[:-2]))

    def test_read_until_with_select(self):
        """Use select.select() to implement telnet.read_until()."""
        want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        # Force the select.select() fallback code path.
        telnet._has_poll = False
        self.dataq.join()
        data = telnet.read_until('match')
        self.assertEqual(data, ''.join(want[:-2]))

    def test_read_all_A(self):
        """
        read_all()
        Read all data until EOF; may block.
        """
        want = ['x' * 500, 'y' * 500, 'z' * 500, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        data = telnet.read_all()
        self.assertEqual(data, ''.join(want[:-1]))

    def _test_blocking(self, func):
        # Helper: assert that func() blocks for at least block_short
        # seconds while the server sleeps for block_long.
        self.dataq.put([self.block_long, EOF_sigil])
        self.dataq.join()
        start = time.time()
        data = func()
        self.assertTrue(self.block_short <= time.time() - start)

    def test_read_all_B(self):
        self._test_blocking(telnetlib.Telnet(HOST, self.port).read_all)

    def test_read_all_C(self):
        self.dataq.put([EOF_sigil])
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        telnet.read_all()
        telnet.read_all() # shouldn't raise

    def test_read_some_A(self):
        """
        read_some()
        Read at least one byte or EOF; may block.
        """
        # test 'at least one byte'
        want = ['x' * 500, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        # BUGFIX: this previously called read_all(), so read_some() was
        # never exercised by the test named for it. Call the method
        # under test; it must return at least one byte of the payload.
        data = telnet.read_some()
        self.assertTrue(len(data) >= 1)

    def test_read_some_B(self):
        # test EOF
        self.dataq.put([EOF_sigil])
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        self.assertEqual('', telnet.read_some())

    def test_read_some_C(self):
        self._test_blocking(telnetlib.Telnet(HOST, self.port).read_some)

    def _test_read_any_eager_A(self, func_name):
        """
        read_very_eager()
        Read all data available already queued or on the socket,
        without blocking.
        """
        want = [self.block_long, 'x' * 100, 'y' * 100, EOF_sigil]
        expects = want[1] + want[2]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        func = getattr(telnet, func_name)
        data = ''
        while True:
            try:
                data += func()
                self.assertTrue(expects.startswith(data))
            except EOFError:
                break
        self.assertEqual(expects, data)

    def _test_read_any_eager_B(self, func_name):
        # test EOF
        self.dataq.put([EOF_sigil])
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        time.sleep(self.block_short)
        func = getattr(telnet, func_name)
        self.assertRaises(EOFError, func)

    # read_eager and read_very_eager make the same guarantees
    # (they behave differently but we only test the guarantees)
    def test_read_very_eager_A(self):
        self._test_read_any_eager_A('read_very_eager')

    def test_read_very_eager_B(self):
        self._test_read_any_eager_B('read_very_eager')

    def test_read_eager_A(self):
        self._test_read_any_eager_A('read_eager')

    def test_read_eager_B(self):
        self._test_read_any_eager_B('read_eager')

    # NB -- we need to test the IAC block which is mentioned in the docstring
    # but not in the module docs

    def _test_read_any_lazy_B(self, func_name):
        # Lazy reads must raise EOFError once the raw queue has seen EOF.
        self.dataq.put([EOF_sigil])
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        func = getattr(telnet, func_name)
        telnet.fill_rawq()
        self.assertRaises(EOFError, func)

    def test_read_lazy_A(self):
        want = ['x' * 100, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        time.sleep(self.block_short)
        # Nothing processed yet, so the first lazy read is empty.
        self.assertEqual('', telnet.read_lazy())
        data = ''
        while True:
            try:
                read_data = telnet.read_lazy()
                data += read_data
                if not read_data:
                    telnet.fill_rawq()
            except EOFError:
                break
        self.assertTrue(want[0].startswith(data))
        self.assertEqual(data, want[0])

    def test_read_lazy_B(self):
        self._test_read_any_lazy_B('read_lazy')

    def test_read_very_lazy_A(self):
        want = ['x' * 100, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        time.sleep(self.block_short)
        self.assertEqual('', telnet.read_very_lazy())
        data = ''
        while True:
            try:
                read_data = telnet.read_very_lazy()
            except EOFError:
                break
            data += read_data
            if not read_data:
                telnet.fill_rawq()
                # read_very_lazy only drains the cooked queue; it stays
                # empty until process_rawq() cooks the raw data.
                self.assertEqual('', telnet.cookedq)
                telnet.process_rawq()
        self.assertTrue(want[0].startswith(data))
        self.assertEqual(data, want[0])

    def test_read_very_lazy_B(self):
        self._test_read_any_lazy_B('read_very_lazy')
class nego_collector(object):
    """Records every option-negotiation callback a Telnet object makes.

    Each (command, option) byte pair is appended to ``seen``. When the
    end of a subnegotiation (SE) arrives and a getter was supplied, the
    pending subnegotiation buffer is drained into ``sb_seen``.
    """

    def __init__(self, sb_getter=None):
        # Accumulated command/option bytes, in arrival order.
        self.seen = ''
        # Accumulated subnegotiation payloads (filled only via sb_getter).
        self.sb_seen = ''
        self.sb_getter = sb_getter

    def do_nego(self, sock, cmd, opt):
        """Callback invoked by telnetlib for each negotiation command."""
        self.seen = self.seen + cmd + opt
        if cmd == tl.SE and self.sb_getter:
            self.sb_seen += self.sb_getter()
tl = telnetlib
class OptionTests(TestCase):
    """IAC command and subnegotiation (RFC 854/855) handling."""

    # Reuse the module-level data-driven server fixtures.
    setUp = _read_setUp
    tearDown = _read_tearDown

    # RFC 854 commands
    cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]

    def _test_command(self, data):
        """ helper for testing IAC + cmd """
        # Runs its own setUp/tearDown so it can be called repeatedly
        # from a single test method.
        self.setUp()
        self.dataq.put(data)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        nego = nego_collector()
        telnet.set_option_negotiation_callback(nego.do_nego)
        txt = telnet.read_all()
        cmd = nego.seen
        self.assertTrue(len(cmd) > 0) # we expect at least one command
        self.assertIn(cmd[0], self.cmds)
        self.assertEqual(cmd[1], tl.NOOPT)
        # Payload + stripped command bytes must account for everything sent.
        self.assertEqual(len(''.join(data[:-1])), len(txt + cmd))
        nego.sb_getter = None # break the nego => telnet cycle
        self.tearDown()

    def test_IAC_commands(self):
        # reset our setup
        self.dataq.put([EOF_sigil])
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        self.tearDown()
        for cmd in self.cmds:
            self._test_command(['x' * 100, tl.IAC + cmd, 'y'*100, EOF_sigil])
            self._test_command(['x' * 10, tl.IAC + cmd, 'y'*10, EOF_sigil])
            self._test_command([tl.IAC + cmd, EOF_sigil])
        # all at once
        self._test_command([tl.IAC + cmd for (cmd) in self.cmds] + [EOF_sigil])
        self.assertEqual('', telnet.read_sb_data())

    def test_SB_commands(self):
        # RFC 855, subnegotiations portion
        send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
                tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
                tl.IAC + tl.SB + tl.IAC + tl.IAC + 'aa' + tl.IAC + tl.SE,
                tl.IAC + tl.SB + 'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
                tl.IAC + tl.SB + 'cc' + tl.IAC + tl.IAC + 'dd' + tl.IAC + tl.SE,
                EOF_sigil,
               ]
        self.dataq.put(send)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        nego = nego_collector(telnet.read_sb_data)
        telnet.set_option_negotiation_callback(nego.do_nego)
        txt = telnet.read_all()
        self.assertEqual(txt, '')
        # Doubled IAC bytes inside SB...SE collapse to a single IAC.
        want_sb_data = tl.IAC + tl.IAC + 'aabb' + tl.IAC + 'cc' + tl.IAC + 'dd'
        self.assertEqual(nego.sb_seen, want_sb_data)
        self.assertEqual('', telnet.read_sb_data())
        nego.sb_getter = None # break the nego => telnet cycle
class ExpectTests(TestCase):
    """Telnet.expect(): regex-based variant of read_until()."""

    def setUp(self):
        # Same data-driven server fixture as _read_setUp, inlined here.
        self.evt = threading.Event()
        self.dataq = Queue.Queue()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(10)
        self.port = test_support.bind_port(self.sock)
        self.thread = threading.Thread(target=server, args=(self.evt, self.sock,
                                                            self.dataq))
        self.thread.start()
        self.evt.wait()

    def tearDown(self):
        self.thread.join()

    # use a similar approach to testing timeouts as test_timeout.py
    # these will never pass 100% but make the fuzz big enough that it is rare
    block_long = 0.6
    block_short = 0.3

    def test_expect_A(self):
        """
        expect(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.
        """
        want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        (_, _, data) = telnet.expect(['match'])
        self.assertEqual(data, ''.join(want[:-2]))

    def test_expect_B(self):
        # test the timeout - it does NOT raise socket.timeout
        want = ['hello', self.block_long, 'not seen', EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        self.dataq.join()
        (_, _, data) = telnet.expect(['not seen'], self.block_short)
        self.assertEqual(data, want[0])
        self.assertEqual(telnet.read_all(), 'not seen')

    def test_expect_with_poll(self):
        """Use select.poll() to implement telnet.expect()."""
        want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        if not telnet._has_poll:
            raise unittest.SkipTest('select.poll() is required')
        telnet._has_poll = True
        self.dataq.join()
        (_, _, data) = telnet.expect(['match'])
        self.assertEqual(data, ''.join(want[:-2]))

    def test_expect_with_select(self):
        """Use select.select() to implement telnet.expect()."""
        want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
        self.dataq.put(want)
        telnet = telnetlib.Telnet(HOST, self.port)
        # Force the select.select() fallback code path.
        telnet._has_poll = False
        self.dataq.join()
        (_, _, data) = telnet.expect(['match'])
        self.assertEqual(data, ''.join(want[:-2]))
def test_main(verbose=None):
    """Entry point for regrtest: run every test class in this module."""
    test_support.run_unittest(GeneralTests, ReadTests, OptionTests,
                              ExpectTests)

if __name__ == '__main__':
    test_main()
| mit |
babycaseny/audacity | lib-src/lv2/lv2/waflib/Tools/cs.py | 198 | 4165 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Utils,Task,Options,Logs,Errors
from waflib.TaskGen import before_method,after_method,feature
from waflib.Tools import ccroot
from waflib.Configure import conf
import os,tempfile
# C# task generators consume compiler flags, assembly references and
# resources (no C-style include/lib variables).
ccroot.USELIB_VARS['cs']=set(['CSFLAGS','ASSEMBLIES','RESOURCES'])
# Shared C# libraries are looked up by their exact file name
# (no 'lib' prefix or platform suffix).
ccroot.lib_patterns['csshlib']=['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
    """Create the 'mcs' compilation task for a C# task generator.

    Splits the sources into .cs files (compiled here) and everything
    else (left for other features), derives the target kind from the
    output name, and schedules installation of the result.
    """
    cs_nodes=[]
    no_nodes=[]
    for x in self.to_nodes(self.source):
        if x.name.endswith('.cs'):
            cs_nodes.append(x)
        else:
            no_nodes.append(x)
    self.source=no_nodes
    # A '.dll' output means a library, anything else an executable,
    # unless 'bintype' was set explicitly on the task generator.
    bintype=getattr(self,'bintype',self.gen.endswith('.dll')and'library'or'exe')
    self.cs_task=tsk=self.create_task('mcs',cs_nodes,self.path.find_or_declare(self.gen))
    tsk.env.CSTYPE='/target:%s'%bintype
    tsk.env.OUT='/out:%s'%tsk.outputs[0].abspath()
    self.env.append_value('CSFLAGS','/platform:%s'%getattr(self,'platform','anycpu'))
    # Executables install to ${BINDIR}, libraries to ${LIBDIR} by default.
    inst_to=getattr(self,'install_path',bintype=='exe'and'${BINDIR}'or'${LIBDIR}')
    if inst_to:
        mod=getattr(self,'chmod',bintype=='exe'and Utils.O755 or Utils.O644)
        self.install_task=self.bld.install_files(inst_to,self.cs_task.outputs[:],env=self.env,chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
    """Process the 'use' attribute: reference sibling C# build targets.

    Names that do not match a task generator in this build are passed
    straight to the compiler as assembly references.
    """
    names=self.to_list(getattr(self,'use',[]))
    get=self.bld.get_tgen_by_name
    for x in names:
        try:
            y=get(x)
        except Errors.WafError:
            # Not a target in this build: assume a system assembly.
            self.env.append_value('CSFLAGS','/reference:%s'%x)
            continue
        y.post()
        tsk=getattr(y,'cs_task',None)or getattr(y,'link_task',None)
        if not tsk:
            self.bld.fatal('cs task has no link task for use %r'%self)
        # Make the dependency build first and reference its output.
        self.cs_task.dep_nodes.extend(tsk.outputs)
        self.cs_task.set_run_after(tsk)
        self.env.append_value('CSFLAGS','/reference:%s'%tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs','use_cs')
def debug_cs(self):
    """Add debug-information flags and register the .mdb/.pdb output."""
    csdebug=getattr(self,'csdebug',self.env.CSDEBUG)
    if not csdebug:
        return
    node=self.cs_task.outputs[0]
    # Mono writes <assembly>.mdb next to the output; csc writes a .pdb.
    if self.env.CS_NAME=='mono':
        out=node.parent.find_or_declare(node.name+'.mdb')
    else:
        out=node.change_ext('.pdb')
    self.cs_task.outputs.append(out)
    try:
        self.install_task.source.append(out)
    except AttributeError:
        # No install task was created for this target.
        pass
    if csdebug=='pdbonly':
        val=['/debug+','/debug:pdbonly']
    elif csdebug=='full':
        val=['/debug+','/debug:full']
    else:
        val=['/debug-']
    self.env.append_value('CSFLAGS',val)
class mcs(Task.Task):
    """Task that compiles C# sources with the configured compiler (${MCS})."""
    color='YELLOW'
    run_str='${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
    def exec_command(self,cmd,**kw):
        """Run the compiler, switching to a temporary response file when
        the command line would exceed ~8192 characters (Windows limit)."""
        bld=self.generator.bld
        try:
            if not kw.get('cwd',None):
                kw['cwd']=bld.cwd
        except AttributeError:
            bld.cwd=kw['cwd']=bld.variant_dir
        try:
            tmp=None
            if isinstance(cmd,list)and len(' '.join(cmd))>=8192:
                program=cmd[0]
                cmd=[self.quote_response_command(x)for x in cmd]
                (fd,tmp)=tempfile.mkstemp()
                os.write(fd,'\r\n'.join(i.replace('\\','\\\\')for i in cmd[1:]))
                os.close(fd)
                # '@file' tells the compiler to read arguments from 'file'.
                cmd=[program,'@'+tmp]
            ret=self.generator.bld.exec_command(cmd,**kw)
        finally:
            if tmp:
                try:
                    os.remove(tmp)
                except OSError:
                    pass
        return ret
    def quote_response_command(self,flag):
        """Quote a single compiler flag for use inside a response file."""
        if flag.lower()=='/noconfig':
            # /noconfig cannot appear in a response file; drop it.
            return''
        if flag.find(' ')>-1:
            for x in('/r:','/reference:','/resource:','/lib:','/out:'):
                if flag.startswith(x):
                    # Quote each comma-separated value of the option.
                    flag='%s"%s"'%(x,'","'.join(flag[len(x):].split(',')))
                    break
            else:
                flag='"%s"'%flag
        return flag
def configure(conf):
    """Locate a C# compiler and set the reference/resource flag formats."""
    csc=getattr(Options.options,'cscbinary',None)
    if csc:
        # Explicit --with-csc-binary wins over PATH search.
        conf.env.MCS=csc
    conf.find_program(['csc','mcs','gmcs'],var='MCS')
    conf.env.ASS_ST='/r:%s'
    conf.env.RES_ST='/resource:%s'
    # Mono's compiler needs different debug-file handling (see debug_cs).
    conf.env.CS_NAME='csc'
    if str(conf.env.MCS).lower().find('mcs')>-1:
        conf.env.CS_NAME='mono'
def options(opt):
    """Add the command-line option that selects the C# compiler binary."""
    opt.add_option('--with-csc-binary',type='string',dest='cscbinary')
class fake_csshlib(Task.Task):
    """Dummy task standing in for a prebuilt C# shared library.

    It never executes; it only signs its outputs so dependent tasks see
    a stable, up-to-date state.
    """
    color='YELLOW'
    inst_to=None
    def runnable_status(self):
        for x in self.outputs:
            x.sig=Utils.h_file(x.abspath())
        return Task.SKIP_ME
@conf
def read_csshlib(self,name,paths=[]):
    """Declare a prebuilt C# shared library for use with 'use'.

    :param name: assembly name (file name of the library)
    :param paths: directories to search for the assembly
    """
    return self(name=name,features='fake_lib',lib_paths=paths,lib_type='csshlib')
| gpl-2.0 |
develersrl/bertos | wizard/BFinalPage.py | 8 | 4486 | #!/usr/bin/env python
# encoding: utf-8
#
# This file is part of BeRTOS.
#
# Bertos is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# As a special exception, you may use this file as part of a free software
# library without restriction. Specifically, if other files instantiate
# templates or use macros or inline functions from this file, or you compile
# this file and link it with other files to produce an executable, this
# file does not by itself cause the resulting executable to be covered by
# the GNU General Public License. This exception does not however
# invalidate any other reasons why the executable file might be covered by
# the GNU General Public License.
#
# Copyright 2008 Develer S.r.l. (http://www.develer.com/)
#
#
# Author: Lorenzo Berni <duplo@develer.com>
#
import os
from PyQt4.QtGui import *
from BWizardPage import *
import bertos_utils
from const import *
class BFinalPage(BWizardPage):
    """
    Last page of the wizard. It creates the project and show a success message.
    """

    def __init__(self):
        BWizardPage.__init__(self, UI_LOCATION + "/final_page.ui")
        self.setTitle(self.tr("Project created successfully!"))

    ## Overloaded BWizardPage methods ##

    def reloadData(self, previous_id=None):
        """
        Overload of the BWizardPage reloadData method.

        Creates the BeRTOS project on disk (with a wait cursor while it
        runs) and, on Windows, offers checkboxes to open the new project
        in any detected IDE plugin.
        """
        self.setVisible(False)
        try:
            QApplication.instance().setOverrideCursor(Qt.WaitCursor)
            try:
                # This operation can throw WindowsError, if the directory is
                # locked.
                self.project.createBertosProject()
            except OSError, e:
                QMessageBox.critical(
                    self,
                    self.tr("Error removing destination directory"),
                    self.tr("Error removing the destination directory. This directory or a file in it is in use by another user or application.\nClose the application which is using the directory and retry."))
                # Send the user back to fix the destination directory.
                self.wizard().back()
                return
        finally:
            QApplication.instance().restoreOverrideCursor()
        self.setVisible(True)
        self._plugin_dict = {}
        if os.name == "nt":
            # Windows only: IDE launch command lines are discovered via
            # the registry (winreg_importer is a Windows-only module).
            output = self.projectInfo("OUTPUT")
            import winreg_importer
            command_lines = winreg_importer.getCommandLines()
            self.setProjectInfo("COMMAND_LINES", command_lines)
            layout = QVBoxLayout()
            for plugin in output:
                if plugin in command_lines:
                    module = bertos_utils.loadPlugin(plugin)
                    check = QCheckBox(self.tr("Open project in %s" %module.PLUGIN_NAME))
                    # Pre-tick the checkbox when there is only one choice.
                    if len(output) == 1:
                        check.setCheckState(Qt.Checked)
                    else:
                        check.setCheckState(Qt.Unchecked)
                    layout.addWidget(check)
                    self._plugin_dict[check] = plugin
            widget = QWidget()
            widget.setLayout(layout)
            if len(self._plugin_dict) > 0:
                self.pageContent.scrollArea.setVisible(True)
                self.pageContent.scrollArea.setWidget(widget)
            for plugin in self._plugin_dict:
                self.connect(plugin, SIGNAL("stateChanged(int)"), self.modeChecked)
            self.modeChecked()

    def setupUi(self):
        """
        Overload of the BWizardPage setupUi method.
        """
        self.pageContent.scrollArea.setVisible(False)

    ####

    ## Slots ##

    def modeChecked(self):
        # Record which IDE plugins the user ticked, so the wizard can
        # launch them after finishing.
        to_be_opened = []
        for check, plugin in self._plugin_dict.items():
            if check.checkState() == Qt.Checked:
                to_be_opened.append(plugin)
        self.setProjectInfo("TO_BE_OPENED", to_be_opened)

    ####
| gpl-2.0 |
J861449197/edx-platform | lms/djangoapps/teams/models.py | 3 | 7907 | """Django models related to teams functionality."""
from datetime import datetime
from uuid import uuid4
import pytz
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy
from django_countries.fields import CountryField
from django_comment_common.signals import (
thread_created,
thread_edited,
thread_deleted,
thread_voted,
comment_created,
comment_edited,
comment_deleted,
comment_voted,
comment_endorsed
)
from xmodule_django.models import CourseKeyField
from util.model_utils import slugify
from student.models import LanguageField, CourseEnrollment
from .errors import AlreadyOnTeamInCourse, NotEnrolledInCourseForTeam
from teams import TEAM_DISCUSSION_CONTEXT
@receiver(thread_voted)
@receiver(thread_created)
@receiver(comment_voted)
@receiver(comment_created)
def post_create_vote_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """Update the user's last activity date upon creating or voting for a
    post."""
    handle_activity(kwargs['user'], kwargs['post'])
@receiver(thread_edited)
@receiver(thread_deleted)
@receiver(comment_edited)
@receiver(comment_deleted)
def post_edit_delete_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """Update the user's last activity date upon editing or deleting a
    post."""
    post = kwargs['post']
    # Only count this as activity when the actor is the post's author
    # (handle_activity compares against the original author id).
    handle_activity(kwargs['user'], post, long(post.user_id))
@receiver(comment_endorsed)
def comment_endorsed_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """Update the user's last activity date upon endorsing a comment."""
    comment = kwargs['post']
    # The relevant author is the thread's author, not the comment's.
    handle_activity(kwargs['user'], comment, long(comment.thread.user_id))
def handle_activity(user, post, original_author_id=None):
    """Handle user activity from django_comment_client and discussion_api
    and update the user's last activity date. Checks if the user who
    performed the action is the original author, and that the
    discussion has the team context.
    """
    # When an author id is supplied, ignore actions by anyone else.
    if original_author_id is not None and user.id != original_author_id:
        return
    # Posts without an explicit context default to "course", which is
    # not team activity.
    if getattr(post, "context", "course") == TEAM_DISCUSSION_CONTEXT:
        CourseTeamMembership.update_last_activity(user, post.commentable_id)
class CourseTeam(models.Model):
    """This model represents team related info."""

    team_id = models.CharField(max_length=255, unique=True)
    discussion_topic_id = models.CharField(max_length=255, unique=True)
    name = models.CharField(max_length=255, db_index=True)
    is_active = models.BooleanField(default=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    topic_id = models.CharField(max_length=255, db_index=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    description = models.CharField(max_length=300)
    country = CountryField(blank=True)
    language = LanguageField(
        blank=True,
        help_text=ugettext_lazy("Optional language the team uses as ISO 639-1 code."),
    )
    last_activity_at = models.DateTimeField()
    users = models.ManyToManyField(User, db_index=True, related_name='teams', through='CourseTeamMembership')

    @classmethod
    def create(cls, name, course_id, description, topic_id=None, country=None, language=None):
        """Create a complete CourseTeam object.

        Note: the instance is constructed but NOT saved to the database;
        the caller is responsible for calling save().

        Args:
            name (str): The name of the team to be created.
            course_id (str): The ID string of the course associated
              with this team.
            description (str): A description of the team.
            topic_id (str): An optional identifier for the topic the
              team formed around.
            country (str, optional): An optional country where the team
              is based, as ISO 3166-1 code.
            language (str, optional): An optional language which the
              team uses, as ISO 639-1 code.
        """
        unique_id = uuid4().hex
        # Slugified name keeps team ids readable; the hex suffix keeps
        # them unique even when names collide.
        team_id = slugify(name)[0:20] + '-' + unique_id
        discussion_topic_id = unique_id
        course_team = cls(
            team_id=team_id,
            discussion_topic_id=discussion_topic_id,
            name=name,
            course_id=course_id,
            topic_id=topic_id if topic_id else '',
            description=description,
            country=country if country else '',
            language=language if language else '',
            last_activity_at=datetime.utcnow().replace(tzinfo=pytz.utc)
        )
        return course_team

    def add_user(self, user):
        """Adds the given user to the CourseTeam.

        Raises NotEnrolledInCourseForTeam if the user is not enrolled in
        this team's course, and AlreadyOnTeamInCourse if the user is
        already on a team in this course.
        """
        if not CourseEnrollment.is_enrolled(user, self.course_id):
            raise NotEnrolledInCourseForTeam
        if CourseTeamMembership.user_in_team_for_course(user, self.course_id):
            raise AlreadyOnTeamInCourse
        return CourseTeamMembership.objects.create(
            user=user,
            team=self
        )
class CourseTeamMembership(models.Model):
    """This model represents the membership of a single user in a single team."""

    class Meta(object):
        """Stores meta information for the model."""
        # A user may appear at most once per team.
        unique_together = (('user', 'team'),)

    user = models.ForeignKey(User)
    team = models.ForeignKey(CourseTeam, related_name='membership')
    date_joined = models.DateTimeField(auto_now_add=True)
    last_activity_at = models.DateTimeField()

    def save(self, *args, **kwargs):
        """ Customize save method to set the last_activity_at if it does not currently exist. """
        if not self.last_activity_at:
            self.last_activity_at = datetime.utcnow().replace(tzinfo=pytz.utc)
        super(CourseTeamMembership, self).save(*args, **kwargs)

    @classmethod
    def get_memberships(cls, username=None, course_ids=None, team_id=None):
        """
        Get a queryset of memberships.

        All filters are optional and combined with AND.

        Args:
            username (unicode, optional): The username to filter on.
            course_ids (list of unicode, optional) Course IDs to filter on.
            team_id (unicode, optional): The team_id to filter on.
        """
        queryset = cls.objects.all()
        if username is not None:
            queryset = queryset.filter(user__username=username)
        if course_ids is not None:
            queryset = queryset.filter(team__course_id__in=course_ids)
        if team_id is not None:
            queryset = queryset.filter(team__team_id=team_id)
        return queryset

    @classmethod
    def user_in_team_for_course(cls, user, course_id):
        """
        Checks whether or not a user is already in a team in the given course.

        Args:
            user: the user that we want to query on
            course_id: the course_id of the course we're interested in

        Returns:
            True if the user is on a team in the course already
            False if not
        """
        return cls.objects.filter(user=user, team__course_id=course_id).exists()

    @classmethod
    def update_last_activity(cls, user, discussion_topic_id):
        """Set the `last_activity_at` for both this user and their team in the
        given discussion topic. No-op if the user is not a member of
        the team for this discussion.
        """
        try:
            membership = cls.objects.get(user=user, team__discussion_topic_id=discussion_topic_id)
        # If a privileged user is active in the discussion of a team
        # they do not belong to, do not update their last activity
        # information.
        except ObjectDoesNotExist:
            return
        now = datetime.utcnow().replace(tzinfo=pytz.utc)
        membership.last_activity_at = now
        membership.team.last_activity_at = now
        membership.team.save()
        membership.save()
| agpl-3.0 |
abelhj/svtools | tests/bedpe_tests.py | 1 | 6338 | from unittest import TestCase, main
from svtools.bedpe import Bedpe
class BedpeTests(TestCase):
def test_parse_score(self):
self.assertEqual(Bedpe.parse_score('20'), 20)
self.assertEqual(Bedpe.parse_score('.'), '.')
def test_parse_info_tag(self):
self.assertEqual(Bedpe.parse_info_tag('SVTYPE', 'SVTYPE'), True)
self.assertEqual(Bedpe.parse_info_tag('SVTYPE', 'AF='), False)
self.assertEqual(Bedpe.parse_info_tag('SVTYPE=BND;AF=0.2', 'AF='), '0.2')
self.assertEqual(Bedpe.parse_info_tag('SVTYPE=BND;AF=0.2', 'SVTYPE='), 'BND')
self.assertEqual(Bedpe.parse_info_tag('SVTYPE=BND;SECONDARY;AF=0.2', 'SECONDARY'), True)
def test_parse_info_tag(self):
self.assertEqual(Bedpe.update_info_tag('SNAME=sample', 'SNAME=', 'sample,sample2'), 'SNAME=sample,sample2')
self.assertEqual(Bedpe.update_info_tag('SNAME=sample;AF=0.75', 'SNAME=', 'sample,sample2'), 'SNAME=sample,sample2;AF=0.75')
with self.assertRaises(ValueError):
Bedpe.update_info_tag('AF=0.75', 'SNAME=', 'sample,sample2')
with self.assertRaises(ValueError):
Bedpe.update_info_tag('SECONDARY;AF=0.5', 'SECONDARY', 'NEW_VALUE')
with self.assertRaises(ValueError):
Bedpe.update_info_tag('AF=0.5;SECONDARY', 'SECONDARY', 'NEW_VALUE')
def test_malformed(self):
entry1 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'MISSING', 'SVTYPE=BND;AF=0.2' ]
b1 = Bedpe(entry1)
self.assertEqual(b1.malformedFlag, 1)
entry2 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND;AF=0.2', 'MISSING' ]
b2 = Bedpe(entry2)
self.assertEqual(b2.malformedFlag, 2)
self.assertEqual(b2.info1, entry2[18])
def test_info(self):
entry1 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'MISSING', 'SVTYPE=BND;AF=0.2' ]
b1 = Bedpe(entry1)
self.assertEqual(b1.info, 'SVTYPE=BND;AF=0.2')
entry2 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND;AF=0.2', 'MISSING' ]
b2 = Bedpe(entry2)
self.assertEqual(b2.info, 'SVTYPE=BND;AF=0.2')
entry3 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND;AF=0.2', 'SECONDARY' ]
b3 = Bedpe(entry3)
self.assertEqual(b3.info, 'SVTYPE=BND;AF=0.2')
def test_set_info(self):
entry1 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'MISSING', 'SVTYPE=BND' ]
b1 = Bedpe(entry1)
b1.set_info('AF', '0.2')
self.assertEqual(b1.info, 'SVTYPE=BND;AF=0.2')
entry2 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND', 'MISSING' ]
b2 = Bedpe(entry2)
b2.set_info('AF', '0.2')
self.assertEqual(b2.info, 'SVTYPE=BND;AF=0.2')
entry3 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND', 'SECONDARY' ]
b3 = Bedpe(entry3)
b3.set_info('AF', '0.2')
self.assertEqual(b3.info1, 'SVTYPE=BND;AF=0.2')
self.assertEqual(b3.info2, 'SECONDARY;AF=0.2')
entry4 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND', '.' ]
b4 = Bedpe(entry4)
b4.set_info('PRESENT', None)
self.assertEqual(b4.info, 'SVTYPE=BND;PRESENT')
self.assertEqual(b4.info2, '.')
def test_retrieve_svtype(self):
entry1 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND;AF=0.2', 'SVTYPE=BND;AF=0.2' ]
b1 = Bedpe(entry1)
self.assertEqual(b1.retrieve_svtype(), 'BND')
entry2 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'AF=0.2', 'AF=0.2' ]
with self.assertRaises(SystemExit):
b = Bedpe(entry2)
def test_retrieve_af(self):
entry1 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND;AF=0.2', 'SVTYPE=BND;AF=0.2' ]
b1 = Bedpe(entry1)
self.assertEqual(b1.retrieve_af(), '0.2')
entry2 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND', 'SVTYPE=BND' ]
b2 = Bedpe(entry2)
self.assertIsNone(b2.retrieve_af())
def test_str(self):
# Note that we are testing float to float equivalence. Actually passing in an integer will result in it being converted to float with
# with decimal place
entry1 = [ '1', '200', '300', '2', '300', '400', '777_1', '57.0', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'SVTYPE=BND;AF=0.2', 'SVTYPE=BND;AF=0.2' ]
b1 = Bedpe(entry1)
self.assertEqual(str(b1), '\t'.join(entry1))
def test_sname_value(self):
self.assertEqual(Bedpe.sname_value('SNAME=sample1:2,sample2:3'), 'sample1:2,sample2:3')
self.assertIsNone(Bedpe.sname_value('AF'))
self.assertIsNone(Bedpe.sname_value('SNAME='))
def test__combine_sname_values(self):
self.assertEqual(set(Bedpe._combine_sname_values('sample1:2', 'sample2:4,sample3:5').split(',')), set(['sample1:2', 'sample2:4', 'sample3:5']))
self.assertEqual(Bedpe._combine_sname_values(None, 'sample2:4,sample3:5'), 'sample2:4,sample3:5')
self.assertEqual(Bedpe._combine_sname_values('sample2:4,sample3:5', None), 'sample2:4,sample3:5')
def test__update_sname_field(self):
    """_update_sname_field() keeps the SNAME tag and unions the values."""
    merged = Bedpe._update_sname_field('SNAME=sample2:4', 'SNAME=sample3:12')
    tag, payload = merged.split('=')
    self.assertEqual(tag, 'SNAME')
    # Order of the merged values is unspecified; compare as a set.
    self.assertEqual(set(payload.split(',')), {'sample2:4', 'sample3:12'})
if __name__ == "__main__":
    # Run the test suite; `main` is presumably unittest.main imported
    # earlier in the file -- TODO confirm against the file header.
    main()
| mit |
craigds/mapnik2 | scons/scons-local-1.2.0/SCons/Platform/win32.py | 12 | 12279 | """SCons.Platform.win32
Platform-specific initialization for Win32 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/win32.py 3842 2008/12/20 22:59:52 scons"
import os
import os.path
import string
import sys
import tempfile
from SCons.Platform.posix import exitvalmap
from SCons.Platform import TempFileMunge
import SCons.Util
# Probe for the pywin32 extensions.  When they are available, wrap the
# builtin file type and open() so every file handle SCons creates has its
# inherit flag cleared; otherwise record a warning message for -j builds.
try:
    import msvcrt
    import win32api
    import win32con

    # Touch the attributes we need so an old pywin32 lacking them fails
    # here (AttributeError) instead of at first use.
    msvcrt.get_osfhandle
    win32api.SetHandleInformation
    win32con.HANDLE_FLAG_INHERIT
except ImportError:
    parallel_msg = \
        "you do not seem to have the pywin32 extensions installed;\n" + \
        "\tparallel (-j) builds may not work reliably with open Python files."
except AttributeError:
    parallel_msg = \
        "your pywin32 extensions do not support file handle operations;\n" + \
        "\tparallel (-j) builds may not work reliably with open Python files."
else:
    parallel_msg = None

    import __builtin__

    _builtin_file = __builtin__.file
    _builtin_open = __builtin__.open

    def _scons_file(*args, **kw):
        # Open via the real builtin, then clear the inherit flag on the
        # underlying OS handle so child processes do not keep it open.
        fp = apply(_builtin_file, args, kw)
        win32api.SetHandleInformation(msvcrt.get_osfhandle(fp.fileno()),
                                      win32con.HANDLE_FLAG_INHERIT,
                                      0)
        return fp

    def _scons_open(*args, **kw):
        # Same treatment for open() as for file() above.
        fp = apply(_builtin_open, args, kw)
        win32api.SetHandleInformation(msvcrt.get_osfhandle(fp.fileno()),
                                      win32con.HANDLE_FLAG_INHERIT,
                                      0)
        return fp

    # Monkey-patch the builtins so all of SCons (and user SConscripts)
    # pick up the non-inheritable behaviour.
    __builtin__.file = _scons_file
    __builtin__.open = _scons_open

# The upshot of all this is that, if you are using Python 1.5.2,
# you had better have cmd or command.com in your PATH when you run
# scons.
def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
    """Run *cmd* through the command interpreter *sh* and copy the child's
    output into the *stdout*/*stderr* file objects (Python 2 code).

    Returns the child's exit status, or 127 when no shell is available.
    """
    # There is no direct way to do that in python. What we do
    # here should work for most cases:
    # In case stdout (stderr) is not redirected to a file,
    # we redirect it into a temporary file tmpFileStdout
    # (tmpFileStderr) and copy the contents of this file
    # to stdout (stderr) given in the argument
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    else:
        # one temporary file for stdout and stderr
        # NOTE(review): tempfile.mktemp() is race-prone; kept unchanged to
        # preserve byte-compatibility with the upstream SCons source.
        tmpFileStdout = os.path.normpath(tempfile.mktemp())
        tmpFileStderr = os.path.normpath(tempfile.mktemp())

        # check if output is redirected
        stdoutRedirected = 0
        stderrRedirected = 0
        for arg in args:
            # are there more possibilities to redirect stdout ?
            if (string.find( arg, ">", 0, 1 ) != -1 or
                string.find( arg, "1>", 0, 2 ) != -1):
                stdoutRedirected = 1
            # are there more possibilities to redirect stderr ?
            if string.find( arg, "2>", 0, 2 ) != -1:
                stderrRedirected = 1

        # redirect output of non-redirected streams to our tempfiles
        if stdoutRedirected == 0:
            args.append(">" + str(tmpFileStdout))
        if stderrRedirected == 0:
            args.append("2>" + str(tmpFileStderr))

        # actually do the spawn
        try:
            args = [sh, '/C', escape(string.join(args)) ]
            ret = os.spawnve(os.P_WAIT, sh, args, env)
        except OSError, e:
            # catch any error
            try:
                ret = exitvalmap[e[0]]
            except KeyError:
                sys.stderr.write("scons: unknown OSError exception code %d - %s: %s\n" % (e[0], cmd, e[1]))
            if stderr != None:
                stderr.write("scons: %s: %s\n" % (cmd, e[1]))

        # copy child output from tempfiles to our streams
        # and do clean up stuff
        if stdout != None and stdoutRedirected == 0:
            try:
                stdout.write(open( tmpFileStdout, "r" ).read())
                os.remove( tmpFileStdout )
            except (IOError, OSError):
                pass

        if stderr != None and stderrRedirected == 0:
            try:
                stderr.write(open( tmpFileStderr, "r" ).read())
                os.remove( tmpFileStderr )
            except (IOError, OSError):
                pass
        return ret
def exec_spawn(l, env):
    """Spawn the argv list *l* with environment *env*, mapping OSError
    codes to shell-style exit values (Python 2 code)."""
    try:
        result = os.spawnve(os.P_WAIT, l[0], l, env)
    except OSError, e:
        try:
            result = exitvalmap[e[0]]
            sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
        except KeyError:
            result = 127
            # Build a short command description for the error message;
            # a very long third argument is elided.
            if len(l) > 2:
                if len(l[2]) < 1000:
                    command = string.join(l[0:3])
                else:
                    command = l[0]
            else:
                command = l[0]
            sys.stderr.write("scons: unknown OSError exception code %d - '%s': %s\n" % (e[0], command, e[1]))
    return result
def spawn(sh, escape, cmd, args, env):
    """Spawn *args* through the interpreter *sh*; return 127 when no
    interpreter was found (Python 2 code)."""
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    return exec_spawn([sh, '/C', escape(string.join(args))], env)
# Windows does not allow special characters in file names anyway, so no
# need for a complex escape function, we will just quote the arg, except
# that "cmd /c" requires that if an argument ends with a backslash it
# needs to be escaped so as not to interfere with closing double quote
# that we add.
def escape(x):
    """Wrap *x* in double quotes for ``cmd /c``, doubling a trailing
    backslash so it cannot swallow the closing quote we append."""
    quoted = x
    if quoted[-1] == '\\':
        quoted = quoted + '\\'
    return '"%s"' % quoted
# Get the windows system directory name
def get_system_root():
    """Return the Windows system root directory.

    Prefers the registry value; falls back to %SYSTEMROOT% or
    ``C:/WINDOWS`` when the registry is unavailable.
    """
    # A resonable default if we can't read the registry
    try:
        val = os.environ['SYSTEMROOT']
    except KeyError:
        val = "C:/WINDOWS"
        pass

    # First see if we can look in the registry...
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows NT system root
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Microsoft\\Windows NT\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
        except SCons.Util.RegError:
            try:
                # Okay, try the Windows 9x system root
                k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                          'Software\\Microsoft\\Windows\\CurrentVersion')
                val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
            except KeyboardInterrupt:
                raise
            except:
                # NOTE(review): deliberately broad -- any registry failure
                # falls back to the environment-derived default above.
                pass
    return val
# Get the location of the program files directory
def get_program_files_dir():
    """Return the Windows "Program Files" directory, preferring the
    registry value and falling back to <system-root>/../Program Files."""
    # Now see if we can look in the registry...
    val = ''
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows Program Files directory
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Microsoft\\Windows\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'ProgramFilesDir')
        except SCons.Util.RegError:
            val = ''
            pass

    if val == '':
        # A reasonable default if we can't read the registry
        # (Actually, it's pretty reasonable even if we can :-)
        val = os.path.join(os.path.dirname(get_system_root()),"Program Files")
    return val
def generate(env):
    """Populate construction environment *env* with Windows defaults:
    command interpreter, spawn/escape hooks and file prefixes/suffixes
    (Python 2 code)."""
    # Attempt to find cmd.exe (for WinNT/2k/XP) or
    # command.com for Win9x
    cmd_interp = ''
    # First see if we can look in the registry...
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows NT system root
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Microsoft\\Windows NT\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
            cmd_interp = os.path.join(val, 'System32\\cmd.exe')
        except SCons.Util.RegError:
            try:
                # Okay, try the Windows 9x system root
                k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                          'Software\\Microsoft\\Windows\\CurrentVersion')
                val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
                cmd_interp = os.path.join(val, 'command.com')
            except KeyboardInterrupt:
                raise
            except:
                # NOTE(review): broad except kept -- failure just means we
                # fall through to the PATH-based search below.
                pass

    # For the special case of not having access to the registry, we
    # use a temporary path and pathext to attempt to find the command
    # interpreter. If we fail, we try to find the interpreter through
    # the env's PATH. The problem with that is that it might not
    # contain an ENV and a PATH.
    if not cmd_interp:
        systemroot = r'C:\Windows'
        if os.environ.has_key('SYSTEMROOT'):
            systemroot = os.environ['SYSTEMROOT']
        tmp_path = systemroot + os.pathsep + \
                   os.path.join(systemroot,'System32')
        tmp_pathext = '.com;.exe;.bat;.cmd'
        if os.environ.has_key('PATHEXT'):
            tmp_pathext = os.environ['PATHEXT']
        cmd_interp = SCons.Util.WhereIs('cmd', tmp_path, tmp_pathext)
        if not cmd_interp:
            cmd_interp = SCons.Util.WhereIs('command', tmp_path, tmp_pathext)

    if not cmd_interp:
        cmd_interp = env.Detect('cmd')
        if not cmd_interp:
            cmd_interp = env.Detect('command')

    if not env.has_key('ENV'):
        env['ENV'] = {}

    # Import things from the external environment to the construction
    # environment's ENV. This is a potential slippery slope, because we
    # *don't* want to make builds dependent on the user's environment by
    # default. We're doing this for SYSTEMROOT, though, because it's
    # needed for anything that uses sockets, and seldom changes, and
    # for SYSTEMDRIVE because it's related.
    #
    # Weigh the impact carefully before adding other variables to this list.
    import_env = [ 'SYSTEMDRIVE', 'SYSTEMROOT', 'TEMP', 'TMP' ]
    for var in import_env:
        v = os.environ.get(var)
        if v:
            env['ENV'][var] = v

    env['ENV']['PATHEXT'] = '.COM;.EXE;.BAT;.CMD'
    env['OBJPREFIX'] = ''
    env['OBJSUFFIX'] = '.obj'
    env['SHOBJPREFIX'] = '$OBJPREFIX'
    env['SHOBJSUFFIX'] = '$OBJSUFFIX'
    env['PROGPREFIX'] = ''
    env['PROGSUFFIX'] = '.exe'
    env['LIBPREFIX'] = ''
    env['LIBSUFFIX'] = '.lib'
    env['SHLIBPREFIX'] = ''
    env['SHLIBSUFFIX'] = '.dll'
    env['LIBPREFIXES'] = [ '$LIBPREFIX' ]
    env['LIBSUFFIXES'] = [ '$LIBSUFFIX' ]
    env['PSPAWN'] = piped_spawn
    env['SPAWN'] = spawn
    env['SHELL'] = cmd_interp
    env['TEMPFILE'] = TempFileMunge
    env['TEMPFILEPREFIX'] = '@'
    env['MAXLINELENGTH'] = 2048
    env['ESCAPE'] = escape
| lgpl-2.1 |
husigeza/pycom-micropython-sigfox | esp32/tools/lopy_initial_test_board_script.py | 2 | 1475 | #
# Copyright (c) 2016, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
import time
import machine
import pycom
from network import WLAN
from machine import Pin
# Station-mode WLAN interface used for the access-point scan below.
wlan = WLAN(mode=WLAN.STA)

# Result flags updated by test_wifi() / test_lora() further down.
wifi_passed = False
lora_passed = False

# Status LEDs: red = failure, green = success (both start off).
red_led = Pin('P10', mode=Pin.OUT, value=0)
green_led = Pin('P11', mode=Pin.OUT, value=0)

time.sleep(1.0)  # give the radio a moment to come up before scanning
def test_wifi():
    """Scan for the 'pycom-test-ap' SSID and flag success globally."""
    global wifi_passed
    if any(net.ssid == 'pycom-test-ap' for net in wlan.scan()):
        wifi_passed = True
test_wifi()
if not wifi_passed: # try twice
    time.sleep(1.0)
    test_wifi()

from network import LoRa
import socket

# Raw (non-LoRaWAN) LoRa socket used to listen for the test beacon.
lora = LoRa(mode=LoRa.LORA, public=False)
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
s.setblocking(False)
time.sleep(0.5)
def test_lora(ls):
import time
global lora_passed
for i in range(5):
if ls.recv(16) == b'Pycom':
lora_passed = True
break
time.sleep(1.5)
test_lora(s)

# Report the combined result on the RGB LED, the discrete LEDs and the
# serial console, then reset the board.
if wifi_passed and lora_passed:
    pycom.heartbeat(False)
    pycom.rgbled(0x008000) # green
    green_led(1)
    print('Test OK')
else:
    pycom.heartbeat(False)
    pycom.rgbled(0x800000) # red
    red_led(1)
    print('Test failed')

time.sleep(0.5)  # let the message flush before resetting
machine.reset()
| mit |
ntiufalara/openerp7 | openerp/addons/account_bank_statement_extensions/report/__init__.py | 415 | 1128 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank_statement_balance_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| mit |
shelsonjava/TeaJS | deps/v8/tools/testrunner/local/commands.py | 65 | 5069 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import signal
import subprocess
import sys
import tempfile
import time
from ..local import utils
from ..objects import output
def KillProcessWithID(pid):
    """Terminate the process *pid*: SIGTERM on POSIX, ``taskkill`` (with
    the whole process tree) on Windows."""
    if not utils.IsWindows():
        os.kill(pid, signal.SIGTERM)
    else:
        os.popen('taskkill /T /F /PID %d' % pid)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
    """Set the Win32 process error mode via ctypes, returning the previous
    mode, or SEM_INVALID_VALUE when the call is unavailable.

    Fix: also catch AttributeError.  On non-Windows platforms (or a
    stripped ctypes) ``import ctypes`` succeeds but ``ctypes.windll`` does
    not exist, which raised an unhandled AttributeError; the pywin32 probe
    at the top of this file already treats AttributeError as an expected
    "feature missing" signal.
    """
    prev_error_mode = SEM_INVALID_VALUE
    try:
        import ctypes
        prev_error_mode = \
            ctypes.windll.kernel32.SetErrorMode(mode) #@UndefinedVariable
    except (ImportError, AttributeError):
        pass
    return prev_error_mode
def RunProcess(verbose, timeout, args, **rest):
    """Spawn *args* as a subprocess and poll until it exits or *timeout*
    seconds elapse; returns ``(exit_code, timed_out)`` (Python 2 code)."""
    if verbose: print "#", " ".join(args)
    popen_args = args
    prev_error_mode = SEM_INVALID_VALUE
    if utils.IsWindows():
        popen_args = subprocess.list2cmdline(args)
        # Try to change the error mode to avoid dialogs on fatal errors. Don't
        # touch any existing error mode flags by merging the existing error mode.
        # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
        error_mode = SEM_NOGPFAULTERRORBOX
        prev_error_mode = Win32SetErrorMode(error_mode)
        Win32SetErrorMode(error_mode | prev_error_mode)
    process = subprocess.Popen(
        shell=utils.IsWindows(),
        args=popen_args,
        **rest
    )
    if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
        Win32SetErrorMode(prev_error_mode)
    # Compute the end time - if the process crosses this limit we
    # consider it timed out.
    if timeout is None: end_time = None
    else: end_time = time.time() + timeout
    timed_out = False
    # Repeatedly check the exit code from the process in a
    # loop and keep track of whether or not it times out.
    exit_code = None
    # Polling interval backs off geometrically up to MAX_SLEEP_TIME to
    # avoid busy-waiting on long-running children.
    sleep_time = INITIAL_SLEEP_TIME
    while exit_code is None:
        if (not end_time is None) and (time.time() >= end_time):
            # Kill the process and wait for it to exit.
            KillProcessWithID(process.pid)
            exit_code = process.wait()
            timed_out = True
        else:
            exit_code = process.poll()
            time.sleep(sleep_time)
            sleep_time = sleep_time * SLEEP_TIME_FACTOR
            if sleep_time > MAX_SLEEP_TIME:
                sleep_time = MAX_SLEEP_TIME
    return (exit_code, timed_out)
def PrintError(string):
    """Write *string*, newline-terminated, to standard error."""
    sys.stderr.write("%s\n" % string)
def CheckedUnlink(name):
    """Remove file *name*, retrying with growing delays because Windows
    may briefly keep temp files locked under parallel builds."""
    # On Windows, when run with -jN in parallel processes,
    # OS often fails to unlink the temp file. Not sure why.
    # Need to retry.
    # Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
    retry_count = 0
    while retry_count < 30:
        try:
            os.unlink(name)
            return
        except OSError, e:
            retry_count += 1
            time.sleep(retry_count * 0.1)
            PrintError("os.unlink() " + str(e))
def Execute(args, verbose=False, timeout=None):
    """Run *args*, capturing stdout/stderr through temp files, and return
    an ``output.Output`` record (Python 2 code)."""
    try:
        # Drop empty arguments before spawning.
        args = [ c for c in args if c != "" ]
        (fd_out, outname) = tempfile.mkstemp()
        (fd_err, errname) = tempfile.mkstemp()
        (exit_code, timed_out) = RunProcess(
            verbose,
            timeout,
            args=args,
            stdout=fd_out,
            stderr=fd_err
        )
    finally:
        # TODO(machenbach): A keyboard interrupt before the assignment to
        # fd_out|err can lead to reference errors here.
        os.close(fd_out)
        os.close(fd_err)
    out = file(outname).read()
    errors = file(errname).read()
    CheckedUnlink(outname)
    CheckedUnlink(errname)
    return output.Output(exit_code, timed_out, out, errors)
| bsd-3-clause |
mentionllc/google-visualization-python | examples/dynamic_example.py | 13 | 1525 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of dynamic use of Google Visualization Python API."""
__author__ = "Misha Seltzer"
import gviz_api

# Table schema: column id -> (type, label).
description = {"name": ("string", "Name"),
               "salary": ("number", "Salary"),
               "full_time": ("boolean", "Full Time Employee")}
# Salary cells carry a (raw value, formatted string) pair.
data = [{"name": "Mike", "salary": (10000, "$10,000"), "full_time": True},
        {"name": "Jim", "salary": (800, "$800"), "full_time": False},
        {"name": "Alice", "salary": (12500, "$12,500"), "full_time": True},
        {"name": "Bob", "salary": (7000, "$7,000"), "full_time": True}]

data_table = gviz_api.DataTable(description)
data_table.LoadData(data)

# Python 2 print statements: emit a CGI-style plain-text response.
print "Content-type: text/plain"
print
print data_table.ToJSonResponse(columns_order=("name", "salary", "full_time"),
                                order_by="salary")
| apache-2.0 |
resmo/ansible | lib/ansible/modules/cloud/amazon/iam_role_info.py | 12 | 8310 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_role_info
short_description: Gather information on IAM roles
description:
- Gathers information about IAM roles
- This module was called C(iam_role_facts) before Ansible 2.9. The usage did not change.
version_added: "2.5"
requirements: [ boto3 ]
author:
- "Will Thames (@willthames)"
options:
name:
description:
- Name of a role to search for
- Mutually exclusive with C(prefix)
aliases:
- role_name
path_prefix:
description:
- Prefix of role I(path) to restrict IAM role search for
- Mutually exclusive with C(name)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# find all existing IAM roles
- iam_role_info:
register: result
# describe a single role
- iam_role_info:
name: MyIAMRole
# describe all roles matching a path prefix
- iam_role_info:
path_prefix: /application/path
'''
RETURN = '''
iam_roles:
description: List of IAM roles
returned: always
type: complex
contains:
arn:
description: Amazon Resource Name for IAM role
returned: always
type: str
sample: arn:aws:iam::123456789012:role/AnsibleTestRole
assume_role_policy_document:
description: Policy Document describing what can assume the role
returned: always
type: str
create_date:
description: Date IAM role was created
returned: always
type: str
sample: '2017-10-23T00:05:08+00:00'
inline_policies:
description: List of names of inline policies
returned: always
type: list
sample: []
managed_policies:
description: List of attached managed policies
returned: always
type: complex
contains:
policy_arn:
description: Amazon Resource Name for the policy
returned: always
type: str
sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy
policy_name:
description: Name of managed policy
returned: always
type: str
sample: AnsibleTestEC2Policy
instance_profiles:
description: List of attached instance profiles
returned: always
type: complex
contains:
arn:
description: Amazon Resource Name for the instance profile
returned: always
type: str
sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy
create_date:
description: Date instance profile was created
returned: always
type: str
sample: '2017-10-23T00:05:08+00:00'
instance_profile_id:
description: Amazon Identifier for the instance profile
returned: always
type: str
sample: AROAII7ABCD123456EFGH
instance_profile_name:
description: Name of instance profile
returned: always
type: str
sample: AnsibleTestEC2Policy
path:
description: Path of instance profile
returned: always
type: str
sample: /
roles:
description: List of roles associated with this instance profile
returned: always
type: list
sample: []
path:
description: Path of role
returned: always
type: str
sample: /
role_id:
description: Amazon Identifier for the role
returned: always
type: str
sample: AROAII7ABCD123456EFGH
role_name:
description: Name of the role
returned: always
type: str
sample: AnsibleTestRole
'''
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
@AWSRetry.exponential_backoff()
def list_iam_roles_with_backoff(client, **kwargs):
    """Return the full paginated ListRoles result, retrying on throttling."""
    return client.get_paginator('list_roles').paginate(**kwargs).build_full_result()
@AWSRetry.exponential_backoff()
@AWSRetry.exponential_backoff()
def list_iam_role_policies_with_backoff(client, role_name):
    """Return the names of all inline policies attached to *role_name*."""
    pages = client.get_paginator('list_role_policies').paginate(RoleName=role_name)
    return pages.build_full_result()['PolicyNames']
@AWSRetry.exponential_backoff()
@AWSRetry.exponential_backoff()
def list_iam_attached_role_policies_with_backoff(client, role_name):
    """Return the managed policies attached to *role_name*."""
    pages = client.get_paginator('list_attached_role_policies').paginate(RoleName=role_name)
    return pages.build_full_result()['AttachedPolicies']
@AWSRetry.exponential_backoff()
@AWSRetry.exponential_backoff()
def list_iam_instance_profiles_for_role_with_backoff(client, role_name):
    """Return the instance profiles associated with *role_name*."""
    pages = client.get_paginator('list_instance_profiles_for_role').paginate(RoleName=role_name)
    return pages.build_full_result()['InstanceProfiles']
def describe_iam_role(module, client, role):
    """Augment *role* in place with its inline policies, managed policies
    and instance profiles; fail the module on any AWS error."""
    name = role['RoleName']
    # (result key, fetch helper, label used in the failure message)
    lookups = (
        ('InlinePolicies', list_iam_role_policies_with_backoff, 'inline policies'),
        ('ManagedPolicies', list_iam_attached_role_policies_with_backoff, 'managed policies'),
        ('InstanceProfiles', list_iam_instance_profiles_for_role_with_backoff, 'instance profiles'),
    )
    for key, fetch, label in lookups:
        try:
            role[key] = fetch(client, name)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't get %s for role %s" % (label, name))
    return role
def describe_iam_roles(module, client):
    """Return snake_cased, fully-described role dicts for the module's
    ``name`` or ``path_prefix`` parameter (all roles when neither is set)."""
    name = module.params['name']
    path_prefix = module.params['path_prefix']
    if name:
        try:
            roles = [client.get_role(RoleName=name)['Role']]
        except botocore.exceptions.ClientError as e:
            # A missing role is not an error for an info module.
            if e.response['Error']['Code'] == 'NoSuchEntity':
                return []
            module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
    else:
        params = {}
        if path_prefix:
            # Normalise the prefix to the '/like/this/' form IAM expects.
            normalized = path_prefix
            if not normalized.startswith('/'):
                normalized = '/' + normalized
            if not normalized.endswith('/'):
                normalized += '/'
            params['PathPrefix'] = normalized
        try:
            roles = list_iam_roles_with_backoff(client, **params)['Roles']
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't list IAM roles")
    return [camel_dict_to_snake_dict(describe_iam_role(module, client, role))
            for role in roles]
def main():
    """Entry point: build the AnsibleAWSModule, create the IAM client and
    exit with the matching roles."""
    spec = ec2_argument_spec()
    spec.update(dict(
        name=dict(aliases=['role_name']),
        path_prefix=dict(),
    ))
    module = AnsibleAWSModule(
        argument_spec=spec,
        supports_check_mode=True,
        mutually_exclusive=[['name', 'path_prefix']],
    )
    if module._name == 'iam_role_facts':
        module.deprecate(
            "The 'iam_role_facts' module has been renamed to 'iam_role_info'",
            version='2.13')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='iam',
                        region=region, endpoint=ec2_url, **aws_connect_params)

    module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client))
if __name__ == '__main__':
main()
| gpl-3.0 |
zangsir/ANNIS | Misc/pygments-main/pygments/formatters/rtf.py | 50 | 5049 | # -*- coding: utf-8 -*-
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_int_opt, _surrogatepair
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
    """
    Format tokens as RTF markup. This formatter automatically outputs full RTF
    documents with color information and other useful stuff. Perfect for Copy and
    Paste into Microsoft(R) Word(R) documents.

    Please note that ``encoding`` and ``outencoding`` options are ignored.
    The RTF format is ASCII natively, but handles unicode characters correctly
    thanks to escape sequences.

    .. versionadded:: 0.6

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `fontface`
        The used font famliy, for example ``Bitstream Vera Sans``. Defaults to
        some generic font which is supposed to have fixed width.

    `fontsize`
        Size of the font used. Size is specified in half points. The
        default is 24 half-points, giving a size 12 font.

        .. versionadded:: 2.0
    """
    name = 'RTF'
    aliases = ['rtf']
    filenames = ['*.rtf']

    def __init__(self, **options):
        r"""
        Additional options accepted:

        ``fontface``
            Name of the font used. Could for example be ``'Courier New'``
            to further specify the default which is ``'\fmodern'``. The RTF
            specification claims that ``\fmodern`` are "Fixed-pitch serif
            and sans serif fonts". Hope every RTF implementation thinks
            the same about modern...
        """
        Formatter.__init__(self, **options)
        self.fontface = options.get('fontface') or ''
        self.fontsize = get_int_opt(options, 'fontsize', 0)

    def _escape(self, text):
        # Backslash first, then braces -- these are RTF's control characters.
        return text.replace(u'\\', u'\\\\') \
                   .replace(u'{', u'\\{') \
                   .replace(u'}', u'\\}')

    def _escape_text(self, text):
        # empty strings, should give a small performance improvment
        if not text:
            return u''

        # escape text
        text = self._escape(text)

        buf = []
        for c in text:
            cn = ord(c)
            if cn < (2**7):
                # ASCII character
                buf.append(str(c))
            elif (2**7) <= cn < (2**16):
                # single unicode escape sequence
                buf.append(u'{\\u%d}' % cn)
            elif (2**16) <= cn:
                # RTF limits unicode to 16 bits.
                # Force surrogate pairs
                buf.append(u'{\\u%d}{\\u%d}' % _surrogatepair(cn))

        return u''.join(buf).replace(u'\n', u'\\par\n')

    def format_unencoded(self, tokensource, outfile):
        # rtf 1.8 header
        outfile.write(u'{\\rtf1\\ansi\\uc0\\deff0'
                      u'{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
                      u'{\\colortbl;' % (self.fontface and
                                         u' ' + self._escape(self.fontface) or
                                         u''))

        # convert colors and save them in a mapping to access them later.
        color_mapping = {}
        offset = 1
        for _, style in self.style:
            for color in style['color'], style['bgcolor'], style['border']:
                if color and color not in color_mapping:
                    color_mapping[color] = offset
                    # Emit the \colortbl entry for this new color.
                    outfile.write(u'\\red%d\\green%d\\blue%d;' % (
                        int(color[0:2], 16),
                        int(color[2:4], 16),
                        int(color[4:6], 16)
                    ))
                    offset += 1
        outfile.write(u'}\\f0 ')
        if self.fontsize:
            outfile.write(u'\\fs%d' % (self.fontsize))

        # highlight stream
        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled parent is found.
            while not self.style.styles_token(ttype) and ttype.parent:
                ttype = ttype.parent
            style = self.style.style_for_token(ttype)
            buf = []
            if style['bgcolor']:
                buf.append(u'\\cb%d' % color_mapping[style['bgcolor']])
            if style['color']:
                buf.append(u'\\cf%d' % color_mapping[style['color']])
            if style['bold']:
                buf.append(u'\\b')
            if style['italic']:
                buf.append(u'\\i')
            if style['underline']:
                buf.append(u'\\ul')
            if style['border']:
                buf.append(u'\\chbrdr\\chcfpat%d' %
                           color_mapping[style['border']])
            start = u''.join(buf)
            if start:
                outfile.write(u'{%s ' % start)
            outfile.write(self._escape_text(value))
            if start:
                outfile.write(u'}')

        outfile.write(u'}')
| apache-2.0 |
mraspaud/dask | dask/dataframe/tests/test_optimize_dataframe.py | 3 | 1663 | import pytest
from operator import getitem
from toolz import merge
import dask
from dask.dataframe.optimize import dataframe_from_ctable
import dask.dataframe as dd
import pandas as pd
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
dfs = list(dsk.values())
def test_column_optimizations_with_bcolz_and_rewrite():
    # Skip the whole test when the optional bcolz backend is missing.
    bcolz = pytest.importorskip('bcolz')

    bc = bcolz.ctable([[1, 2, 3], [10, 20, 30]], names=['a', 'b'])
    # The column argument should be rewritten whether it is absent, a
    # scalar, or a list.
    for cols in [None, 'abc', ['abc']]:
        dsk2 = merge(dict((('x', i),
                           (dataframe_from_ctable, bc, slice(0, 2), cols, {}))
                          for i in [1, 2, 3]),
                     dict((('y', i),
                           (getitem, ('x', i), ['a', 'b']))
                          for i in [1, 2, 3]))
        # The downstream getitem on ['a', 'b'] should be fused back into
        # the ctable read, so the optimized graph reads just those columns.
        expected = dict((('y', i), (dataframe_from_ctable,
                                    bc, slice(0, 2), ['a', 'b'], {}))
                        for i in [1, 2, 3])

        result = dd.optimize(dsk2, [('y', i) for i in [1, 2, 3]])
        assert result == expected
def test_fuse_ave_width():
    df = pd.DataFrame({'x': range(10)})
    df = dd.from_pandas(df, npartitions=5)

    s = ((df.x + 1) + (df.x + 2))

    with dask.set_options(fuse_ave_width=4):
        a = s._optimize(s.dask, s._keys())
    # NOTE(review): indentation in this chunk is ambiguous; ``b`` is taken
    # to be computed OUTSIDE the option context (default fuse width) --
    # otherwise the inequality below could not hold.  Confirm upstream.
    b = s._optimize(s.dask, s._keys())

    # A wider fuse width should produce a smaller (more fused) graph.
    assert len(a) < len(b)
    assert len(a) <= 15
| bsd-3-clause |
manaschaturvedi/oscarbuddy | beautifulsoup-master/bs4/tests/test_docs.py | 607 | 1067 | "Test harness for doctests."
# pylint: disable-msg=E0611,W0142
__metaclass__ = type
__all__ = [
'additional_tests',
]
import atexit
import doctest
import os
#from pkg_resources import (
# resource_filename, resource_exists, resource_listdir, cleanup_resources)
import unittest
# Option flags applied to any doctest suite built from this module:
# allow "..." wildcards, ignore whitespace differences, and show ndiff output.
DOCTEST_FLAGS = (
    doctest.ELLIPSIS |
    doctest.NORMALIZE_WHITESPACE |
    doctest.REPORT_NDIFF)
# def additional_tests():
# "Run the doc tests (README.txt and docs/*, if any exist)"
# doctest_files = [
# os.path.abspath(resource_filename('bs4', 'README.txt'))]
# if resource_exists('bs4', 'docs'):
# for name in resource_listdir('bs4', 'docs'):
# if name.endswith('.txt'):
# doctest_files.append(
# os.path.abspath(
# resource_filename('bs4', 'docs/%s' % name)))
# kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
# atexit.register(cleanup_resources)
# return unittest.TestSuite((
# doctest.DocFileSuite(*doctest_files, **kwargs)))
| mit |
soldag/home-assistant | homeassistant/components/netgear_lte/__init__.py | 16 | 11560 | """Support for Netgear LTE modems."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import attr
import eternalegypt
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from . import sensor_types
_LOGGER = logging.getLogger(__name__)

# How often each modem is polled for fresh information.
SCAN_INTERVAL = timedelta(seconds=10)
# Dispatcher signal sent after every ModemData.async_update() so entities
# refresh their state.
DISPATCHER_NETGEAR_LTE = "netgear_lte_update"

DOMAIN = "netgear_lte"
DATA_KEY = "netgear_lte"
# Bus event fired for each SMS received by a modem.
EVENT_SMS = "netgear_lte_sms"

# Service names registered under the netgear_lte domain.
SERVICE_DELETE_SMS = "delete_sms"
SERVICE_SET_OPTION = "set_option"
SERVICE_CONNECT_LTE = "connect_lte"
SERVICE_DISCONNECT_LTE = "disconnect_lte"

# Service-call / event payload keys.
ATTR_HOST = "host"
ATTR_SMS_ID = "sms_id"
ATTR_FROM = "from"
ATTR_MESSAGE = "message"
ATTR_FAILOVER = "failover"
ATTR_AUTOCONNECT = "autoconnect"

# Allowed values for the set_option service.
FAILOVER_MODES = ["auto", "wire", "mobile"]
AUTOCONNECT_MODES = ["never", "home", "always"]
# Per-modem options for the notify platform (service name + SMS recipients).
NOTIFY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
        vol.Optional(CONF_RECIPIENT, default=[]): vol.All(cv.ensure_list, [cv.string]),
    }
)

# Which sensors to create for a modem (defaults from sensor_types).
SENSOR_SCHEMA = vol.Schema(
    {
        vol.Optional(
            CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_SENSORS
        ): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_SENSORS)])
    }
)

# Which binary sensors to create for a modem.
BINARY_SENSOR_SCHEMA = vol.Schema(
    {
        vol.Optional(
            CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_BINARY_SENSORS
        ): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_BINARY_SENSORS)])
    }
)

# configuration.yaml schema: netgear_lte is a list of modem entries, each with
# a required host/password and optional per-platform configuration.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Required(CONF_HOST): cv.string,
                        vol.Required(CONF_PASSWORD): cv.string,
                        vol.Optional(NOTIFY_DOMAIN, default={}): vol.All(
                            cv.ensure_list, [NOTIFY_SCHEMA]
                        ),
                        vol.Optional(SENSOR_DOMAIN, default={}): SENSOR_SCHEMA,
                        vol.Optional(
                            BINARY_SENSOR_DOMAIN, default={}
                        ): BINARY_SENSOR_SCHEMA,
                    }
                )
            ],
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Payload schemas for the registered services.  ATTR_HOST is optional when
# only one modem is configured (see LTEData.get_modem_data).
DELETE_SMS_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_HOST): cv.string,
        vol.Required(ATTR_SMS_ID): vol.All(cv.ensure_list, [cv.positive_int]),
    }
)

SET_OPTION_SCHEMA = vol.Schema(
    vol.All(
        cv.has_at_least_one_key(ATTR_FAILOVER, ATTR_AUTOCONNECT),
        {
            vol.Optional(ATTR_HOST): cv.string,
            vol.Optional(ATTR_FAILOVER): vol.In(FAILOVER_MODES),
            vol.Optional(ATTR_AUTOCONNECT): vol.In(AUTOCONNECT_MODES),
        },
    )
)

CONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string})

DISCONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string})
@attr.s
class ModemData:
    """Class for modem state."""

    hass = attr.ib()
    host = attr.ib()
    modem = attr.ib()

    # Latest modem.information() payload; None while the modem is unreachable.
    data = attr.ib(init=False, default=None)
    # Start optimistic so the very first failure logs a "lost connection"
    # warning (and a later success logs "connected").
    connected = attr.ib(init=False, default=True)

    async def async_update(self):
        """Call the API to update the data."""
        try:
            self.data = await self.modem.information()
            if not self.connected:
                _LOGGER.warning("Connected to %s", self.host)
                self.connected = True
        except eternalegypt.Error:
            # Log only on the transition to disconnected, not on every poll.
            if self.connected:
                _LOGGER.warning("Lost connection to %s", self.host)
                self.connected = False
            self.data = None

        # Notify all entities (even on failure, so they flip to unavailable).
        async_dispatcher_send(self.hass, DISPATCHER_NETGEAR_LTE)
@attr.s
class LTEData:
    """Shared state."""

    websession = attr.ib()
    # Mapping of host -> ModemData for every configured modem.
    modem_data = attr.ib(init=False, factory=dict)

    def get_modem_data(self, config):
        """Get modem_data for the host in config."""
        if config[CONF_HOST] is not None:
            return self.modem_data.get(config[CONF_HOST])
        # No host given: only unambiguous when exactly one modem is set up.
        if len(self.modem_data) != 1:
            return None

        return next(iter(self.modem_data.values()))
async def async_setup(hass, config):
    """Set up Netgear LTE component."""
    # One-time global setup: shared aiohttp session + service registration.
    if DATA_KEY not in hass.data:
        # The modem web UI sets cookies against a bare IP address; an
        # "unsafe" cookie jar is required for aiohttp to accept those.
        websession = async_create_clientsession(
            hass, cookie_jar=aiohttp.CookieJar(unsafe=True)
        )
        hass.data[DATA_KEY] = LTEData(websession)

        async def service_handler(service):
            """Apply a service."""
            host = service.data.get(ATTR_HOST)
            conf = {CONF_HOST: host}
            modem_data = hass.data[DATA_KEY].get_modem_data(conf)

            if not modem_data:
                _LOGGER.error("%s: host %s unavailable", service.service, host)
                return

            # Dispatch on the service name; schemas have already validated
            # the payload shape.
            if service.service == SERVICE_DELETE_SMS:
                for sms_id in service.data[ATTR_SMS_ID]:
                    await modem_data.modem.delete_sms(sms_id)
            elif service.service == SERVICE_SET_OPTION:
                failover = service.data.get(ATTR_FAILOVER)
                if failover:
                    await modem_data.modem.set_failover_mode(failover)
                autoconnect = service.data.get(ATTR_AUTOCONNECT)
                if autoconnect:
                    await modem_data.modem.set_autoconnect_mode(autoconnect)
            elif service.service == SERVICE_CONNECT_LTE:
                await modem_data.modem.connect_lte()
            elif service.service == SERVICE_DISCONNECT_LTE:
                await modem_data.modem.disconnect_lte()

        service_schemas = {
            SERVICE_DELETE_SMS: DELETE_SMS_SCHEMA,
            SERVICE_SET_OPTION: SET_OPTION_SCHEMA,
            SERVICE_CONNECT_LTE: CONNECT_LTE_SCHEMA,
            SERVICE_DISCONNECT_LTE: DISCONNECT_LTE_SCHEMA,
        }

        for service, schema in service_schemas.items():
            hass.services.async_register(
                DOMAIN, service, service_handler, schema=schema
            )

    netgear_lte_config = config[DOMAIN]

    # Set up each modem
    tasks = [_setup_lte(hass, lte_conf) for lte_conf in netgear_lte_config]
    await asyncio.wait(tasks)

    # Load platforms for each modem
    for lte_conf in netgear_lte_config:
        # Notify
        for notify_conf in lte_conf[NOTIFY_DOMAIN]:
            discovery_info = {
                CONF_HOST: lte_conf[CONF_HOST],
                CONF_NAME: notify_conf.get(CONF_NAME),
                NOTIFY_DOMAIN: notify_conf,
            }
            hass.async_create_task(
                discovery.async_load_platform(
                    hass, NOTIFY_DOMAIN, DOMAIN, discovery_info, config
                )
            )

        # Sensor
        sensor_conf = lte_conf.get(SENSOR_DOMAIN)
        discovery_info = {CONF_HOST: lte_conf[CONF_HOST], SENSOR_DOMAIN: sensor_conf}
        hass.async_create_task(
            discovery.async_load_platform(
                hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config
            )
        )

        # Binary Sensor
        binary_sensor_conf = lte_conf.get(BINARY_SENSOR_DOMAIN)
        discovery_info = {
            CONF_HOST: lte_conf[CONF_HOST],
            BINARY_SENSOR_DOMAIN: binary_sensor_conf,
        }
        hass.async_create_task(
            discovery.async_load_platform(
                hass, BINARY_SENSOR_DOMAIN, DOMAIN, discovery_info, config
            )
        )

    return True
async def _setup_lte(hass, lte_config):
    """Set up a Netgear LTE modem."""

    host = lte_config[CONF_HOST]
    password = lte_config[CONF_PASSWORD]

    websession = hass.data[DATA_KEY].websession
    modem = eternalegypt.Modem(hostname=host, websession=websession)

    modem_data = ModemData(hass, host, modem)

    try:
        await _login(hass, modem_data, password)
    except eternalegypt.Error:
        # Initial login failed: keep retrying in the background, and make
        # sure the retry task is cancelled when Home Assistant stops.
        retry_task = hass.loop.create_task(_retry_login(hass, modem_data, password))

        @callback
        def cleanup_retry(event):
            """Clean up retry task resources."""
            if not retry_task.done():
                retry_task.cancel()

        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry)
async def _login(hass, modem_data, password):
    """Log in and complete setup."""
    await modem_data.modem.login(password=password)

    def fire_sms_event(sms):
        """Send an SMS event."""
        data = {
            ATTR_HOST: modem_data.host,
            ATTR_SMS_ID: sms.id,
            ATTR_FROM: sms.sender,
            ATTR_MESSAGE: sms.message,
        }
        hass.bus.async_fire(EVENT_SMS, data)

    await modem_data.modem.add_sms_listener(fire_sms_event)

    # Prime the data before registering, so entities have state immediately.
    await modem_data.async_update()
    hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data

    async def _update(now):
        """Periodic update."""
        await modem_data.async_update()

    update_unsub = async_track_time_interval(hass, _update, SCAN_INTERVAL)

    async def cleanup(event):
        """Clean up resources."""
        # Stop polling, log out of the modem, and drop the shared reference.
        update_unsub()
        await modem_data.modem.logout()
        del hass.data[DATA_KEY].modem_data[modem_data.host]

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
async def _retry_login(hass, modem_data, password):
    """Sleep and retry setup."""

    _LOGGER.warning("Could not connect to %s. Will keep trying", modem_data.host)

    modem_data.connected = False

    # Exponential backoff: 15s doubling up to a 5-minute ceiling, until a
    # login attempt succeeds (which flips modem_data.connected to True).
    backoff = 15
    while True:
        if modem_data.connected:
            break
        await asyncio.sleep(backoff)
        try:
            await _login(hass, modem_data, password)
        except eternalegypt.Error:
            backoff = min(2 * backoff, 300)
@attr.s
class LTEEntity(Entity):
    """Base LTE entity."""

    modem_data = attr.ib()
    sensor_type = attr.ib()

    _unique_id = attr.ib(init=False)

    @_unique_id.default
    def _init_unique_id(self):
        """Register unique_id while we know data is valid."""
        # Computed eagerly at construction time because modem_data.data can
        # become None later when the connection drops.
        return f"{self.sensor_type}_{self.modem_data.data.serial_number}"

    async def async_added_to_hass(self):
        """Register callback."""
        # State updates are pushed via the dispatcher signal fired from
        # ModemData.async_update(), not by polling (see should_poll).
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, DISPATCHER_NETGEAR_LTE, self.async_write_ha_state
            )
        )

    async def async_update(self):
        """Force update of state."""
        await self.modem_data.async_update()

    @property
    def should_poll(self):
        """Return that the sensor should not be polled."""
        return False

    @property
    def available(self):
        """Return the availability of the sensor."""
        return self.modem_data.data is not None

    @property
    def unique_id(self):
        """Return a unique ID like 'usage_5TG365AB0078V'."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"Netgear LTE {self.sensor_type}"
| apache-2.0 |
vitan/hue | desktop/libs/liboozie/src/liboozie/credentials_tests.py | 33 | 2933 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nose.tools import assert_equal, assert_true
import beeswax.conf
from liboozie.credentials import Credentials
LOG = logging.getLogger(__name__)
class TestCredentials():
  # Mapping used to seed a Credentials object directly, mirroring what
  # _parse_oozie would extract from the Oozie server configuration.
  CREDENTIALS = {
      "hcat": "org.apache.oozie.action.hadoop.HCatCredentials",
      "hive2": "org.apache.oozie.action.hadoop.Hive2Credentials",
      "hbase": "org.apache.oozie.action.hadoop.HbaseCredentials"
  }

  def test_parse_oozie(self):
    # The raw property value is a comma-separated name=class list with
    # arbitrary surrounding whitespace; parsing should strip both.
    oozie_credentialclasses = """
           hbase=org.apache.oozie.action.hadoop.HbaseCredentials,
           hcat=org.apache.oozie.action.hadoop.HCatCredentials,
           hive2=org.apache.oozie.action.hadoop.Hive2Credentials
        """
    oozie_config = {'oozie.credentials.credentialclasses': oozie_credentialclasses}

    creds = Credentials()

    assert_equal({
        'hive2': 'org.apache.oozie.action.hadoop.Hive2Credentials',
        'hbase': 'org.apache.oozie.action.hadoop.HbaseCredentials',
        'hcat': 'org.apache.oozie.action.hadoop.HCatCredentials'
      }, creds._parse_oozie(oozie_config)
    )

  def test_gen_properties(self):
    creds = Credentials(credentials=TestCredentials.CREDENTIALS.copy())

    hive_properties = {
      'thrift_uri': 'thrift://hue-koh-chang:9999',
      'kerberos_principal': 'hive',
      'hive2.server.principal': 'hive',
    }

    # Override the Hive server host/port conf for the duration of the test;
    # each set_for_testing() returns a finish() callable that restores it.
    finish = (
      beeswax.conf.HIVE_SERVER_HOST.set_for_testing('hue-koh-chang'),
      beeswax.conf.HIVE_SERVER_PORT.set_for_testing(12345),
    )

    try:
      assert_equal({
        'hcat': {
          'xml_name': 'hcat',
          'properties': [
            ('hcat.metastore.uri', 'thrift://hue-koh-chang:9999'),
            ('hcat.metastore.principal', 'hive')
          ]},
        'hive2': {
          'xml_name': 'hive2',
          'properties': [
            ('hive2.jdbc.url', 'jdbc:hive2://hue-koh-chang:12345/default'),
            ('hive2.server.principal', 'hive')
          ]},
        'hbase': {
          'xml_name': 'hbase',
          'properties': []
        }
      }, creds.get_properties(hive_properties))
    finally:
      # Always restore the patched configuration values.
      for f in finish:
        f()
| apache-2.0 |
fourwood/OutflowCone | Cone.py | 1 | 9228 | #!/usr/bin/env python3
import OutflowCone as oc
import numpy as np
import numpy.ma as ma
class Cone:
    """ Galactic wind outflow cone model.
    """
    def __init__(self, inc=0, PA=0, theta=60, r_in=0.0, r_out=5.0):
        """ Create a new outflow cone.

            Keywords:
            inc     -- The inclination of the cone with respect to the
                       line-of-sight.
            PA      -- The position angle of the cone with respect to
                       the line-of-sight.
            theta   -- The opening half-angle of the cone (degrees).
            r_in    -- The inner radius of the cone (kpc).
            r_out   -- The outer radius of the cone (kpc).
        """
        self.inc = inc
        self.PA = PA
        self.theta = theta
        self.r_in = r_in
        self.r_out = r_out

        # Filled in by GenerateClouds().
        self.positions = None

    def GenerateClouds(self, n_clouds, bicone=False, falloff=1,
                       zero_z=False, flatten=False):
        """ Generate 'n' model clouds within the cone bounds.

            Arguments:
            n_clouds -- Number of clouds to generate.

            Keywords:
            bicone  -- Create a single cone or a bi-cone. Default is False.
            falloff -- Radial density distribution exponent. Default is 1 for
                       a mass-conserving outflow (density goes as r^-2).
                       A value of 1/3 creates a constant-density profile.
            zero_z  -- r_in makes the z-height of the base of the cone non-zero.
                       Should the clouds all get translated down? (e.g. z -= r_in)
            flatten -- Keep the inner radius spherical? Or flatten it?

            Returns:
            None. Creates "positions" member variable, containing Cartesian
            position vectors for 'n' clouds, and "velocities" member variable,
            containing zero-velocity vectors for all clouds (ostensibly these
            are Cartesian as well).
        """
        self._n_clouds = n_clouds

        # Even spread in cos(theta) to avoid clustering at poles.
        theta_rad = np.radians(self.theta)
        if bicone:
            # BUG FIX: np.random.random() requires an integer count -- the
            # old code passed n_clouds/2. (a float), which modern NumPy
            # rejects.  Splitting as n_up/n_down also keeps the full cloud
            # count when n_clouds is odd.
            n_up = self._n_clouds // 2
            n_down = self._n_clouds - n_up
            vs1 = np.random.random(n_up) * \
                    (1-np.cos(theta_rad)) + np.cos(theta_rad)
            vs2 = -(np.random.random(n_down) * \
                    (1-np.cos(theta_rad)) + np.cos(theta_rad))
            vs = np.concatenate((vs1, vs2))
        else:
            vs = np.random.random(self._n_clouds) * \
                    (1-np.cos(theta_rad)) + np.cos(theta_rad)
        thetas = np.arccos(vs)

        us = np.random.random(self._n_clouds)
        phis = us * np.pi * 2.0

        # falloff: 1/3 ~ constant density, 1/2 ~ 1/r fall-off, 1 ~ r^-2 fall-off
        rs = np.random.random(self._n_clouds)**falloff * \
                (self.r_out - self.r_in) + self.r_in

        # Still need to rotate them!
        self._sph_pts = np.vstack((rs, thetas, phis)).transpose()

        # Convert to Cartesian so we can rotate things around.
        self._cart_pts = oc.SphPosToCart(self._sph_pts, radians=True)

        if flatten:
            # Flatten the (spherical) inner cap into a plane.
            self._cart_pts[:,2] -= self.r_in * \
                    (self._local_zs / self._local_rs)
            if not zero_z:
                self._cart_pts[:,2] += self.r_in
        elif zero_z:
            self._cart_pts[:,2] -= self.r_in * np.cos(theta_rad)

        # Coord system will be:
        # -Z-axis is LOS, making X go right and Y go up (+Z out of the page)
        # Rot in -X for inc (or rotate -inc in +X) and in Z for PA
        self.positions = oc.Rot(self._cart_pts, x=-self.inc, z=self.PA)
        self.velocities = np.zeros_like(self.positions)

    def SetLocalVels(self, vels, radians=True):
        """ Sets the velocities of the clouds in the galaxy/cone's frame,
            meaning the input velocities should be in r/theta/phi of the galaxy,
            and *without* inclination/PA taken into account. This function
            applies the inclination effects automatically.

            Arguments:
            vels -- List/array of velocities to apply to the clouds.
                    Given velocities should be in (r, theta, phi) format.
                    List/array length must be equal to --- and assumed to
                    be in the same order as --- that of self.positions.

            Keywords:
            radians -- Are the passed theta/phi values in radians?

            Returns:
            None. self.velocities is set to the equivalent Cartesian
            velocities with inclination accounted for.
        """
        self._sph_vels = vels
        # Convert each (v_r, v_theta, v_phi) vector at its spherical position
        # to Cartesian, then tilt by the inclination.
        cart_vels = oc.SphVecToCart(self._sph_pts, vels, radians=radians)
        self.velocities = oc.Rot(cart_vels, x=-self.inc)

    def ProjectVels(self, LOS):
        """ Projects the 3D Cartesian velocities into the given line-of-sight.

            NOTE: UNTESTED! So far I do a lot of assuming that -z is the LOS,
            but maybe this still works okay.
            NOTE2: If you just want it projected into a Cartesian axis,
            use self._vxs, ._vys, and ._vzs to avoid all the dot products.

            Arguments:
            LOS -- 3-element NumPy array representing the LOS vector.

            Returns:
            NumPy array of length == len(self.velocities), where each value
            is the velocity vector dotted into the given LOS.
        """
        # Vectorized matrix-vector product; equivalent to dotting each row
        # into LOS individually, but in a single C-level call.
        return np.asarray(self.velocities).dot(LOS)

    def GetLOSCloudVels(self, coord, dx, dy=None):
        """ Returns an array of all the projected velocities along a line of sight.

            Arguments:
            coord -- x/y (~RA/dec) coordinate pair, in projected kpc,
                     of the requested line-of-sight.
            dx    -- Full width of the x-direction spatial bin, in kpc.

            Keywords:
            dy -- Optional. Full width of the y-direction spatial bin,
                  in kpc. If None/omitted, then dy = dx is assumed.

            Returns:
            NumPy masked array containing all LOS velocities, with velocities for
            clouds located inside the requested LOS bin *not* masked (i.e. set
            to *False* in return.mask).
        """
        dy = dx if dy is None else dy
        x, y = coord
        # Select clouds whose projected position falls inside the bin.
        x_mask = (self._xs > x) & (self._xs < x+dx)
        y_mask = (self._ys > y) & (self._ys < y+dy)
        mask = x_mask & y_mask
        return ma.array(self.LOS_vs, mask=~mask)

    # Properties for nicely slicing the 2D array of positions and velocities
    # into lists for each coordinate.
    # TODO: Property for spherical-coord velocities.
    @property
    def _local_rs(self):
        return self._sph_pts[:,0]

    @property
    def _local_thetas(self):
        return self._sph_pts[:,1]

    @property
    def _local_phis(self):
        return self._sph_pts[:,2]

    @property
    def _xs(self):
        return self.positions[:,0]

    @property
    def _ys(self):
        return self.positions[:,1]

    @property
    def _zs(self):
        return self.positions[:,2]

    @property
    def _local_xs(self):
        return self._cart_pts[:,0]

    @property
    def _local_ys(self):
        return self._cart_pts[:,1]

    @property
    def _local_zs(self):
        return self._cart_pts[:,2]

    @property
    def _vxs(self):
        return self.velocities[:,0]

    @_vxs.setter
    def _vxs(self, vxs):
        if np.shape(vxs) == np.shape(self._vxs):
            self.velocities[:,0] = vxs
        else:
            raise Exception("Array is not the same length as self._vxs.")

    @property
    def _vys(self):
        return self.velocities[:,1]

    @_vys.setter
    def _vys(self, vys):
        if np.shape(vys) == np.shape(self._vys):
            self.velocities[:,1] = vys
        else:
            raise Exception("Array is not the same length as self._vys.")

    @property
    def _vzs(self):
        return self.velocities[:,2]

    @_vzs.setter
    def _vzs(self, vzs):
        if np.shape(vzs) == np.shape(self._vzs):
            self.velocities[:,2] = vzs
        else:
            raise Exception("Array is not the same length as self._vzs.")

    @property
    def LOS_vs(self):
        """ Returns the line-of-sight velocities of all clouds.
            NOTE: Line-of-sight is assumed to be along the -z axis.
        """
        return -self._vzs

    @property
    def _local_vrs(self):
        return self._sph_vels[:,0]

    @property
    def _local_vthetas(self):
        return self._sph_vels[:,1]

    @property
    def _local_vphis(self):
        return self._sph_vels[:,2]
if __name__ == "__main__":
    # This module is a library; nothing to do when run as a script.
    pass
| mit |
siliconsmiley/QGIS | python/plugins/processing/algs/qgis/ExtentFromLayer.py | 2 | 5365 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExtentFromLayer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsField, QgsPoint, QgsGeometry, QgsFeature
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class ExtentFromLayer(GeoAlgorithm):
    """Create a polygon layer describing the bounding box of an input layer,
    either one polygon for the whole layer or one per feature."""

    INPUT_LAYER = 'INPUT_LAYER'
    BY_FEATURE = 'BY_FEATURE'

    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        """Declare the algorithm name/group, its parameters and its output."""
        self.name, self.i18n_name = self.trAlgorithm('Polygon from layer extent')
        self.group, self.i18n_group = self.trAlgorithm('Vector general tools')

        self.addParameter(ParameterVector(self.INPUT_LAYER,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterBoolean(self.BY_FEATURE,
                                           self.tr('Calculate extent for each feature separately'), False))

        self.addOutput(OutputVector(self.OUTPUT, self.tr('Extent')))

    def processAlgorithm(self, progress):
        """Entry point: build the writer and dispatch on the BY_FEATURE flag."""
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT_LAYER))
        byFeature = self.getParameterValue(self.BY_FEATURE)

        fields = [
            QgsField('MINX', QVariant.Double),
            QgsField('MINY', QVariant.Double),
            QgsField('MAXX', QVariant.Double),
            QgsField('MAXY', QVariant.Double),
            QgsField('CNTX', QVariant.Double),
            QgsField('CNTY', QVariant.Double),
            QgsField('AREA', QVariant.Double),
            QgsField('PERIM', QVariant.Double),
            QgsField('HEIGHT', QVariant.Double),
            QgsField('WIDTH', QVariant.Double),
        ]

        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
                                                                     QGis.WKBPolygon, layer.crs())

        if byFeature:
            self.featureExtent(layer, writer, progress)
        else:
            self.layerExtent(layer, writer, progress)

        del writer

    @staticmethod
    def _rectGeometryAndAttrs(rect):
        """Return (polygon geometry, attribute list) describing *rect*.

        Shared by layerExtent() and featureExtent(); the attribute order
        matches the field list declared in processAlgorithm().
        """
        minx = rect.xMinimum()
        miny = rect.yMinimum()
        maxx = rect.xMaximum()
        maxy = rect.yMaximum()
        height = rect.height()
        width = rect.width()
        cntx = minx + width / 2.0
        cnty = miny + height / 2.0
        area = width * height
        perim = 2 * width + 2 * height

        # Closed ring (first point repeated) around the bounding box.
        ring = [QgsPoint(minx, miny), QgsPoint(minx, maxy), QgsPoint(maxx,
                maxy), QgsPoint(maxx, miny), QgsPoint(minx, miny)]
        geometry = QgsGeometry().fromPolygon([ring])
        attrs = [
            minx,
            miny,
            maxx,
            maxy,
            cntx,
            cnty,
            area,
            perim,
            height,
            width,
        ]
        return geometry, attrs

    def layerExtent(self, layer, writer, progress):
        """Write a single polygon covering the whole layer extent."""
        geometry, attrs = self._rectGeometryAndAttrs(layer.extent())

        feat = QgsFeature()
        feat.setGeometry(geometry)
        feat.setAttributes(attrs)
        writer.addFeature(feat)

    def featureExtent(self, layer, writer, progress):
        """Write one bounding-box polygon per input feature."""
        features = vector.features(layer)
        count = len(features)
        # BUG FIX: the original divided by len(features) unconditionally,
        # raising ZeroDivisionError for an empty layer.
        total = 100.0 / float(count) if count else 0.0

        feat = QgsFeature()
        for current, f in enumerate(features, start=1):
            geometry, attrs = self._rectGeometryAndAttrs(
                f.geometry().boundingBox())
            feat.setGeometry(geometry)
            feat.setAttributes(attrs)
            writer.addFeature(feat)

            progress.setPercentage(int(current * total))
| gpl-2.0 |
ntuecon/server | pyenv/Lib/site-packages/win32com/test/testDCOM.py | 4 | 1701 | # testDCOM
usage="""\
testDCOM.py - Simple DCOM test
Usage: testDCOM.py serverName
Attempts to start the Python.Interpreter object on the named machine,
and checks that the object is indeed running remotely.
Requires the named server be configured to run DCOM (using dcomcnfg.exe),
and the Python.Interpreter object installed and registered on that machine.
The Python.Interpreter object must be installed on the local machine,
but no special DCOM configuration should be necessary.
"""
# NOTE: If you configured the object locally using dcomcnfg, you could
# simple use Dispatch rather than DispatchEx.
import pythoncom, win32com.client, win32api, string, sys
def test(serverName):
if string.lower(serverName)==string.lower(win32api.GetComputerName()):
print "You must specify a remote server name, not the local machine!"
return
# Hack to overcome a DCOM limitation. As the Python.Interpreter object
# is probably installed locally as an InProc object, DCOM seems to ignore
# all settings, and use the local object.
clsctx = pythoncom.CLSCTX_SERVER & ~pythoncom.CLSCTX_INPROC_SERVER
ob = win32com.client.DispatchEx("Python.Interpreter", serverName, clsctx=clsctx)
ob.Exec("import win32api")
actualName = ob.Eval("win32api.GetComputerName()")
if string.lower(serverName) != string.lower(actualName):
print "Error: The object created on server '%s' reported its name as '%s'" % (serverName, actualName)
else:
print "Object created and tested OK on server '%s'" % serverName
if __name__=='__main__':
    # Exactly one argument expected: the remote server name to test against.
    if len(sys.argv) == 2:
        test(sys.argv[1])
    else:
        print usage
| bsd-3-clause |
sameetb-cuelogic/edx-platform-test | cms/djangoapps/contentstore/views/user.py | 9 | 7393 | from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django_future.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from util.json_request import JsonResponse, expect_json
from student.roles import CourseInstructorRole, CourseStaffRole, LibraryUserRole
from course_creators.views import user_requested_access
from student.auth import STUDIO_EDIT_ROLES, STUDIO_VIEW_USERS, get_user_permissions
from student.models import CourseEnrollment
from django.http import HttpResponseNotFound
from student import auth
__all__ = ['request_course_creator', 'course_team_handler']
@require_POST
@login_required
def request_course_creator(request):
    """
    User has requested course creation access.
    """
    # Record the request for the logged-in user; approval is handled by the
    # course_creators workflow, so this endpoint always reports success.
    user_requested_access(request.user)
    return JsonResponse({"Status": "OK"})
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def course_team_handler(request, course_key_string=None, email=None):
    """
    The restful handler for course team users.

    GET
        html: return html page for managing course team
        json: return json representation of a particular course team member (email is required).
    POST or PUT
        json: modify the permissions for a particular course team member (email is required, as well as role in the payload).
    DELETE:
        json: remove a particular course team member from the course team (email is required).
    """
    course_key = CourseKey.from_string(course_key_string) if course_key_string else None
    # No permissions check here - each helper method does its own check.

    # JSON is the default when no explicit Accept header is sent; HTML is
    # only served for GET requests that ask for it.
    if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
        return _course_team_user(request, course_key, email)
    elif request.method == 'GET':  # assume html
        return _manage_users(request, course_key)
    else:
        return HttpResponseNotFound()
def _manage_users(request, course_key):
    """
    This view will return all CMS users who are editors for the specified course
    """
    # check that logged in user has permissions to this item
    user_perms = get_user_permissions(request.user, course_key)
    if not user_perms & STUDIO_VIEW_USERS:
        raise PermissionDenied()

    course_module = modulestore().get_course(course_key)
    instructors = CourseInstructorRole(course_key).users_with_role()
    # the page only lists staff and assumes they're a superset of instructors. Do a union to ensure.
    staff = set(CourseStaffRole(course_key).users_with_role()).union(instructors)

    return render_to_response('manage_users.html', {
        'context_course': course_module,
        'staff': staff,
        'instructors': instructors,
        # Edit/remove buttons are rendered only for users who may edit roles.
        'allow_actions': bool(user_perms & STUDIO_EDIT_ROLES),
    })
@expect_json
def _course_team_user(request, course_key, email):
    """
    Handle the add, remove, promote, demote requests ensuring the requester has authority
    """
    # check that logged in user has permissions to this item
    requester_perms = get_user_permissions(request.user, course_key)
    permissions_error_response = JsonResponse({"error": _("Insufficient permissions")}, 403)
    if (requester_perms & STUDIO_VIEW_USERS) or (email == request.user.email):
        # This user has permissions to at least view the list of users or is editing themself
        pass
    else:
        # This user is not even allowed to know who the authorized users are.
        return permissions_error_response

    try:
        user = User.objects.get(email=email)
    except Exception:
        msg = {
            "error": _("Could not find user by email address '{email}'.").format(email=email),
        }
        return JsonResponse(msg, 404)

    is_library = isinstance(course_key, LibraryLocator)
    # Ordered list of roles: can always move self to the right, but need STUDIO_EDIT_ROLES to move any user left
    if is_library:
        role_hierarchy = (CourseInstructorRole, CourseStaffRole, LibraryUserRole)
    else:
        role_hierarchy = (CourseInstructorRole, CourseStaffRole)

    if request.method == "GET":
        # just return info about the user
        msg = {
            "email": user.email,
            "active": user.is_active,
            "role": None,
        }
        # what's the highest role that this user has? (How should this report global staff?)
        for role in role_hierarchy:
            if role(course_key).has_user(user):
                msg["role"] = role.ROLE
                break
        return JsonResponse(msg)

    # All of the following code is for editing/promoting/deleting users.
    # Check that the user has STUDIO_EDIT_ROLES permission or is editing themselves:
    if not ((requester_perms & STUDIO_EDIT_ROLES) or (user.id == request.user.id)):
        return permissions_error_response

    # can't modify an inactive user
    if not user.is_active:
        msg = {
            "error": _('User {email} has registered but has not yet activated his/her account.').format(email=email),
        }
        return JsonResponse(msg, 400)

    if request.method == "DELETE":
        new_role = None
    else:
        # only other operation supported is to promote/demote a user by changing their role:
        # role may be None or "" (equivalent to a DELETE request) but must be set.
        # Check that the new role was specified:
        if "role" in request.json or "role" in request.POST:
            new_role = request.json.get("role", request.POST.get("role"))
        else:
            return JsonResponse({"error": _("No `role` specified.")}, 400)

    # Walk the hierarchy from highest role to lowest: add the user to the
    # requested role, and collect any other roles they currently hold for
    # removal afterwards (a user ends up in at most one role).
    old_roles = set()
    role_added = False
    for role_type in role_hierarchy:
        role = role_type(course_key)
        if role_type.ROLE == new_role:
            if (requester_perms & STUDIO_EDIT_ROLES) or (user.id == request.user.id and old_roles):
                # User has STUDIO_EDIT_ROLES permission or
                # is currently a member of a higher role, and is thus demoting themself
                auth.add_users(request.user, role, user)
                role_added = True
            else:
                return permissions_error_response
        elif role.has_user(user):
            # Remove the user from this old role:
            old_roles.add(role)

    if new_role and not role_added:
        return JsonResponse({"error": _("Invalid `role` specified.")}, 400)

    for role in old_roles:
        if isinstance(role, CourseInstructorRole) and role.users_with_role().count() == 1:
            msg = {"error": _("You may not remove the last Admin. Add another Admin first.")}
            return JsonResponse(msg, 400)
        auth.remove_users(request.user, role, user)

    if new_role and not is_library:
        # The user may be newly added to this course.
        # auto-enroll the user in the course so that "View Live" will work.
        CourseEnrollment.enroll(user, course_key)

    return JsonResponse()
| agpl-3.0 |
chaowyc/youtube-dl | youtube_dl/extractor/footyroom.py | 104 | 1590 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class FootyRoomIE(InfoExtractor):
    """Extract the match-highlight video playlist from a footyroom.com page."""
    _VALID_URL = r'http://footyroom\.com/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://footyroom.com/schalke-04-0-2-real-madrid-2015-02/',
        'info_dict': {
            'id': 'schalke-04-0-2-real-madrid-2015-02',
            'title': 'Schalke 04 0 – 2 Real Madrid',
        },
        'playlist_count': 3,
    }, {
        'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/',
        'info_dict': {
            'id': 'georgia-0-2-germany-2015-03',
            'title': 'Georgia 0 – 2 Germany',
        },
        'playlist_count': 1,
    }]
    def _real_extract(self, url):
        # The page slug doubles as the playlist id.
        page_id = self._match_id(url)
        html = self._download_webpage(url, page_id)
        # The page embeds the video list as a JSON array passed to
        # VideoSelector.load(...).
        videos = self._parse_json(
            self._search_regex(
                r'VideoSelector\.load\((\[.+?\])\);', html, 'video selector'),
            page_id)
        title = self._og_search_title(html)
        entries = []
        for item in videos:
            markup = item.get('payload')
            if not markup:
                continue
            # Each payload is an HTML snippet carrying a Playwire config URL.
            config_url = self._search_regex(
                r'data-config="([^"]+)"', markup,
                'playwire url', default=None)
            if not config_url:
                continue
            entries.append(self.url_result(self._proto_relative_url(
                config_url, 'http:'), 'Playwire'))
        return self.playlist_result(entries, page_id, title)
| unlicense |
washort/zamboni | mkt/zadmin/urls.py | 11 | 1837 | from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from . import views
# Hijack the admin's login to use our pages.
def login(request):
    """Replacement for the Django admin login view.

    An authenticated user can only land here because they lack admin
    permissions, so refuse outright; anonymous users are bounced to the
    site's own login page with a return-to parameter.
    """
    if request.user.is_authenticated():
        raise PermissionDenied
    return redirect(reverse('users.login') + '?to=' + request.path)
# Swap the Django admin's login view for our custom one defined above.
admin.site.login = login
# URL map for the zadmin app; route names are namespaced 'zadmin.*'
# (plus 'mkt.env') so templates and views can reverse() them.
urlpatterns = patterns(
    '',
    # AMO stuff.
    url('^$', views.index, name='zadmin.index'),
    url('^models$', lambda r: redirect('admin:index'), name='zadmin.home'),
    url('^env$', views.env, name='mkt.env'),
    url('^memcache$', views.memcache, name='zadmin.memcache'),
    url('^settings', views.show_settings, name='zadmin.settings'),
    url(r'^email_preview/(?P<topic>.*)\.csv$',
        views.email_preview_csv, name='zadmin.email_preview_csv'),
    url('^mail$', views.mail, name='zadmin.mail'),
    url('^email-devs$', views.email_devs, name='zadmin.email_devs'),
    url('^generate-error$', views.generate_error,
        name='zadmin.generate-error'),
    url('^export_email_addresses$', views.export_email_addresses,
        name='zadmin.export_email_addresses'),
    url('^email_addresses_file$', views.email_addresses_file,
        name='zadmin.email_addresses_file'),
    url('^price-tiers$', views.price_tiers, name='zadmin.price_tiers'),
    # The Django admin.
    url('^models/', include(admin.site.urls)),
    url('^elastic$', views.elastic, name='zadmin.elastic'),
    url('^manifest-revalidation$', views.manifest_revalidation,
        name='zadmin.manifest_revalidation'),
)
| bsd-3-clause |
DataViva/dataviva-scripts | scripts/crosswalk/format_raw_data.py | 1 | 1434 | # -*- coding: utf-8 -*-
import os, sys, time, bz2, click
import pandas as pd
import pandas.io.sql as sql
import numpy as np
import itertools
@click.command()
@click.argument('file_path', type=click.Path(exists=True))
@click.option('output_path', '--output', '-o', help='Path to save files to.', type=click.Path(), required=True, prompt="Output path")
def main(file_path, output_path):
nestings = []
fieldA = "hs"
fieldB = "cnae"
df = pd.read_csv(file_path, converters={fieldA: str, fieldB: str})
df = df[ (df[fieldA].str.len() > 0) & (df[fieldB].str.len() >0)]
df = df[[fieldA, fieldB]]
if fieldA == "hs":
df.hs = df.hs.str.slice(2, 6)
df = df.drop_duplicates()
print df
print
print
# depths = {"hs" : [2, 6], "cnae": [1, 5]}
# for depthcol, lengths in depths.items():
# my_nesting.append(lengths)
# my_nesting_cols.append(depthcol)
# print my_nesting, my_nesting_cols
# for depths in itertools.product(*my_nesting):
# series = {}
# print depths
# for col_name, l in zip(my_nesting_cols, depths):
# series[col_name] = df[col_name].str.slice(0, l)
# addtl_rows = pd.DataFrame(series)
# full_table = pd.concat([addtl_rows, full_table])
# # print pk
# print full_table
df.to_csv("pi_crosswalk.csv", index=False)
if __name__ == "__main__":
main()
| mit |
goofwear/raspberry_pwn | src/pentest/sqlmap/plugins/dbms/sqlite/connector.py | 7 | 3003 | #!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import sqlite3
except ImportError:
pass
import logging
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapMissingDependence
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
    """
    Direct connector for local SQLite database files.

    Homepage: http://pysqlite.googlecode.com/ and http://packages.ubuntu.com/quantal/python-sqlite
    User guide: http://docs.python.org/release/2.5/lib/module-sqlite3.html
    API: http://docs.python.org/library/sqlite3.html
    Debian package: python-sqlite (SQLite 2), python-pysqlite3 (SQLite 3)
    License: MIT
    Possible connectors: http://wiki.python.org/moin/SQLite
    """
    def __init__(self):
        GenericConnector.__init__(self)
        # Default to the stdlib SQLite 3 driver; connect() may swap this
        # for the legacy 'sqlite' (SQLite 2) module on failure.
        self.__sqlite = sqlite3
    def connect(self):
        """Open the database file, probing it with a harmless query and
        falling back from the SQLite 3 driver to the SQLite 2 one."""
        self.initConnection()
        self.checkFileDb()
        try:
            self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
            # Query sqlite_master to verify the file is a valid SQLite 3 db.
            cursor = self.connector.cursor()
            cursor.execute("SELECT * FROM sqlite_master")
            cursor.close()
        except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
            warnMsg = "unable to connect using SQLite 3 library, trying with SQLite 2"
            logger.warn(warnMsg)
            try:
                try:
                    # Third-party driver for legacy SQLite 2 files.
                    import sqlite
                except ImportError:
                    errMsg = "sqlmap requires 'python-sqlite' third-party library "
                    errMsg += "in order to directly connect to the database '%s'" % self.db
                    raise SqlmapMissingDependence(errMsg)
                self.__sqlite = sqlite
                self.connector = self.__sqlite.connect(database=self.db, check_same_thread=False, timeout=conf.timeout)
            except (self.__sqlite.DatabaseError, self.__sqlite.OperationalError), msg:
                raise SqlmapConnectionException(msg[0])
        self.initCursor()
        self.printConnected()
    def fetchall(self):
        """Return all rows of the last query, or None on operational errors."""
        try:
            return self.cursor.fetchall()
        except self.__sqlite.OperationalError, msg:
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
            return None
    def execute(self, query):
        """Run a single statement and commit; operational errors are only
        logged so enumeration can continue, database errors abort."""
        try:
            self.cursor.execute(utf8encode(query))
        except self.__sqlite.OperationalError, msg:
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg[0])
        except self.__sqlite.DatabaseError, msg:
            raise SqlmapConnectionException(msg[0])
        self.connector.commit()
    def select(self, query):
        """Convenience wrapper: execute the query, then fetch all results."""
        self.execute(query)
        return self.fetchall()
| gpl-3.0 |
JiminHong/mine | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/win_tool.py | 395 | 12634 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
  """Dispatch a gyp-win-tool command line; exit with its code when given."""
  code = WinTool().Dispatch(args)
  if code is not None:
    sys.exit(code)
class WinTool(object):
  """This class performs all the Windows tooling steps. The methods can either
  be executed directly, or dispatched from an argument list."""
  def _UseSeparateMspdbsrv(self, env, args):
    """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
    shared one."""
    if len(args) < 1:
      raise Exception("Not enough arguments")
    if args[0] != 'link.exe':
      return
    # Use the output filename passed to the linker to generate an endpoint name
    # for mspdbsrv.exe.
    endpoint_name = None
    for arg in args:
      m = _LINK_EXE_OUT_ARG.match(arg)
      if m:
        # Endpoint combines the /OUT: path with the current PID, reduced
        # to word characters only.
        endpoint_name = re.sub(r'\W+', '',
                               '%s_%d' % (m.group('out'), os.getpid()))
        break
    if endpoint_name is None:
      return
    # Adds the appropriate environment variable. This will be read by link.exe
    # to know which instance of mspdbsrv.exe it should connect to (if it's
    # not set then the default endpoint is used).
    env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
  def Dispatch(self, args):
    """Dispatches a string command to a method."""
    if len(args) < 1:
      raise Exception("Not enough arguments")
    # E.g. 'recursive-mirror' dispatches to ExecRecursiveMirror below.
    method = "Exec%s" % self._CommandifyName(args[0])
    return getattr(self, method)(*args[1:])
  def _CommandifyName(self, name_string):
    """Transforms a tool name like recursive-mirror to RecursiveMirror."""
    return name_string.title().replace('-', '')
  def _GetEnv(self, arch):
    """Gets the saved environment from a file for a given architecture."""
    # The environment is saved as an "environment block" (see CreateProcess
    # and msvs_emulation for details). We convert to a dict here.
    # Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
    pairs = open(arch).read()[:-2].split('\0')
    kvs = [item.split('=', 1) for item in pairs]
    return dict(kvs)
  def ExecStamp(self, path):
    """Simple stamp command."""
    open(path, 'w').close()
  def ExecRecursiveMirror(self, source, dest):
    """Emulation of rm -rf out && cp -af in out."""
    if os.path.exists(dest):
      if os.path.isdir(dest):
        def _on_error(fn, path, excinfo):
          # The operation failed, possibly because the file is set to
          # read-only. If that's why, make it writable and try the op again.
          if not os.access(path, os.W_OK):
            os.chmod(path, stat.S_IWRITE)
          fn(path)
        shutil.rmtree(dest, onerror=_on_error)
      else:
        if not os.access(dest, os.W_OK):
          # Attempt to make the file writable before deleting it.
          os.chmod(dest, stat.S_IWRITE)
        os.unlink(dest)
    if os.path.isdir(source):
      shutil.copytree(source, dest)
    else:
      shutil.copy2(source, dest)
  def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
    """Filter diagnostic output from link that looks like:
    '   Creating library ui.dll.lib and object ui.dll.exp'
    This happens when there are exports from the dll or exe.
    """
    env = self._GetEnv(arch)
    if use_separate_mspdbsrv == 'True':
      self._UseSeparateMspdbsrv(env, args)
    link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
                            shell=True,
                            env=env,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = link.communicate()
    for line in out.splitlines():
      if not line.startswith('   Creating library '):
        print line
    return link.returncode
  def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
                            mt, rc, intermediate_manifest, *manifests):
    """A wrapper for handling creating a manifest resource and then executing
    a link command."""
    # The 'normal' way to do manifests is to have link generate a manifest
    # based on gathering dependencies from the object files, then merge that
    # manifest with other manifests supplied as sources, convert the merged
    # manifest to a resource, and then *relink*, including the compiled
    # version of the manifest resource. This breaks incremental linking, and
    # is generally overly complicated. Instead, we merge all the manifests
    # provided (along with one that includes what would normally be in the
    # linker-generated one, see msvs_emulation.py), and include that into the
    # first and only link. We still tell link to generate a manifest, but we
    # only use that to assert that our simpler process did not miss anything.
    variables = {
      'python': sys.executable,
      'arch': arch,
      'out': out,
      'ldcmd': ldcmd,
      'resname': resname,
      'mt': mt,
      'rc': rc,
      'intermediate_manifest': intermediate_manifest,
      'manifests': ' '.join(manifests),
    }
    add_to_ld = ''
    if manifests:
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(manifests)s -out:%(out)s.manifest' % variables)
      if embed_manifest == 'True':
        subprocess.check_call(
            '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
            ' %(out)s.manifest.rc %(resname)s' % variables)
        subprocess.check_call(
            '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
            '%(out)s.manifest.rc' % variables)
        add_to_ld = ' %(out)s.manifest.res' % variables
    subprocess.check_call(ldcmd + add_to_ld)
    # Run mt.exe on the theoretically complete manifest we generated, merging
    # it with the one the linker generated to confirm that the linker
    # generated one does not add anything. This is strictly unnecessary for
    # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
    # used in a #pragma comment.
    if manifests:
      # Merge the intermediate one with ours to .assert.manifest, then check
      # that .assert.manifest is identical to ours.
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(out)s.manifest %(intermediate_manifest)s '
          '-out:%(out)s.assert.manifest' % variables)
      assert_manifest = '%(out)s.assert.manifest' % variables
      our_manifest = '%(out)s.manifest' % variables
      # Load and normalize the manifests. mt.exe sometimes removes whitespace,
      # and sometimes doesn't unfortunately.
      with open(our_manifest, 'rb') as our_f:
        with open(assert_manifest, 'rb') as assert_f:
          our_data = our_f.read().translate(None, string.whitespace)
          assert_data = assert_f.read().translate(None, string.whitespace)
      if our_data != assert_data:
        os.unlink(out)
        def dump(filename):
          sys.stderr.write('%s\n-----\n' % filename)
          with open(filename, 'rb') as f:
            sys.stderr.write(f.read() + '\n-----\n')
        dump(intermediate_manifest)
        dump(our_manifest)
        dump(assert_manifest)
        sys.stderr.write(
            'Linker generated manifest "%s" added to final manifest "%s" '
            '(result in "%s"). '
            'Were /MANIFEST switches used in #pragma statements? ' % (
              intermediate_manifest, our_manifest, assert_manifest))
        return 1
  def ExecManifestWrapper(self, arch, *args):
    """Run manifest tool with environment set. Strip out undesirable warning
    (some XML blocks are recognized by the OS loader, but not the manifest
    tool)."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if line and 'manifest authoring warning 81010002' not in line:
        print line
    return popen.returncode
  def ExecManifestToRc(self, arch, *args):
    """Creates a resource file pointing a SxS assembly manifest.
    |args| is tuple containing path to resource file, path to manifest file
    and resource name which can be "1" (for executables) or "2" (for DLLs)."""
    manifest_path, resource_path, resource_name = args
    with open(resource_path, 'wb') as output:
      output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
          resource_name,
          os.path.abspath(manifest_path).replace('\\', '/')))
  def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
                      *flags):
    """Filter noisy filenames output from MIDL compile step that isn't
    quietable via command line flags.
    """
    args = ['midl', '/nologo'] + list(flags) + [
        '/out', outdir,
        '/tlb', tlb,
        '/h', h,
        '/dlldata', dlldata,
        '/iid', iid,
        '/proxy', proxy,
        idl]
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    # Filter junk out of stdout, and write filtered versions. Output we want
    # to filter is pairs of lines that look like this:
    # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
    # objidl.idl
    lines = out.splitlines()
    prefixes = ('Processing ', '64 bit Processing ')
    processing = set(os.path.basename(x)
                     for x in lines if x.startswith(prefixes))
    for line in lines:
      if not line.startswith(prefixes) and line not in processing:
        print line
    return popen.returncode
  def ExecAsmWrapper(self, arch, *args):
    """Filter logo banner from invocations of asm.exe."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Copyright (C) Microsoft Corporation') and
          not line.startswith('Microsoft (R) Macro Assembler') and
          not line.startswith(' Assembling: ') and
          line):
        print line
    return popen.returncode
  def ExecRcWrapper(self, arch, *args):
    """Filter logo banner from invocations of rc.exe. Older versions of RC
    don't support the /nologo flag."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
          not line.startswith('Copyright (C) Microsoft Corporation') and
          line):
        print line
    return popen.returncode
  def ExecActionWrapper(self, arch, rspfile, *dir):
    """Runs an action command line from a response file using the environment
    for |arch|. If |dir| is supplied, use that as the working directory."""
    env = self._GetEnv(arch)
    # TODO(scottmg): This is a temporary hack to get some specific variables
    # through to actions that are set after gyp-time. http://crbug.com/333738.
    for k, v in os.environ.iteritems():
      if k not in env:
        env[k] = v
    args = open(rspfile).read()
    dir = dir[0] if dir else None
    return subprocess.call(args, shell=True, env=env, cwd=dir)
  def ExecClCompile(self, project_dir, selected_files):
    """Executed by msvs-ninja projects when the 'ClCompile' target is used to
    build selected C/C++ files."""
    project_dir = os.path.relpath(project_dir, BASE_DIR)
    selected_files = selected_files.split(';')
    # NOTE(review): the '^^' suffix appears to ask ninja to build the outputs
    # of each source file — confirm against the ninja documentation.
    ninja_targets = [os.path.join(project_dir, filename) + '^^'
                     for filename in selected_files]
    cmd = ['ninja.exe']
    cmd.extend(ninja_targets)
    return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
# Script entry point: forward the CLI args (minus argv[0]) to the dispatcher.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| mit |
googlecodelabs/gcp-marketplace-integrated-saas | python2.7/impl/database/database.py | 2 | 1736 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
DATABASE_FILE = os.environ['PROCUREMENT_CODELAB_DATABASE']
class JsonDatabase(object):
  """JSON-based implementation of a simple file-based database.

  Records live in a single JSON object persisted at DATABASE_FILE; every
  mutation rewrites the whole file via commit().
  """

  def __init__(self):
    # BUG FIX: open the backing file with a context manager so the file
    # handle is closed deterministically instead of leaking until GC.
    with open(DATABASE_FILE, 'r') as f:
      self.database = json.load(f)

  def read(self, key):
    """Read the record with the given key from the database, if it exists."""
    # dict.get returns None for a missing key, matching the old behavior.
    return self.database.get(key)

  def write(self, key, value):
    """Write the record with the given key to the database."""
    self.database[key] = value
    self.commit()

  def delete(self, key):
    """Delete the record with the given key from the database, if it exists."""
    if key in self.database:
      del self.database[key]
      self.commit()

  def commit(self):
    """Commits changes to the database by writing the in-memory dictionary."""
    with open(DATABASE_FILE, 'w') as f:
      json.dump(self.database, f)

  def items(self):
    """Provides a way to iterate over all elements in the database."""
    return self.database.items()
| apache-2.0 |
exploreodoo/datStruct | odoo/addons/mail/wizard/invite.py | 268 | 5847 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
class invite_wizard(osv.osv_memory):
    """ Wizard to invite partners and make them followers. """
    _name = 'mail.wizard.invite'
    _description = 'Invite wizard'
    def default_get(self, cr, uid, fields, context=None):
        """Pre-fill the wizard's 'message' field with an HTML invitation.

        When the target model/record are known, the message names the
        document being followed; otherwise a generic invitation is used.
        """
        result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
        user_name = self.pool.get('res.users').name_get(cr, uid, [uid], context=context)[0][1]
        model = result.get('res_model')
        res_id = result.get('res_id')
        if 'message' in fields and model and res_id:
            # Resolve the human-readable names of the model and the document.
            ir_model = self.pool.get('ir.model')
            model_ids = ir_model.search(cr, uid, [('model', '=', self.pool[model]._name)], context=context)
            model_name = ir_model.name_get(cr, uid, model_ids, context=context)[0][1]
            document_name = self.pool[model].name_get(cr, uid, [res_id], context=context)[0][1]
            message = _('<div><p>Hello,</p><p>%s invited you to follow %s document: %s.<p></div>') % (user_name, model_name, document_name)
            result['message'] = message
        elif 'message' in fields:
            result['message'] = _('<div><p>Hello,</p><p>%s invited you to follow a new document.</p></div>') % user_name
        return result
    _columns = {
        'res_model': fields.char('Related Document Model',
                        required=True, select=1,
                        help='Model of the followed resource'),
        'res_id': fields.integer('Related Document ID', select=1,
                        help='Id of the followed resource'),
        'partner_ids': fields.many2many('res.partner', string='Recipients',
            help="List of partners that will be added as follower of the current document."),
        'message': fields.html('Message'),
        'send_mail': fields.boolean('Send Email',
            help="If checked, the partners will receive an email warning they have been "
                 "added in the document's followers."),
    }
    _defaults = {
        'send_mail': True,
    }
    def add_followers(self, cr, uid, ids, context=None):
        """Subscribe the selected partners to the target document and,
        optionally, email the newly added followers."""
        for wizard in self.browse(cr, uid, ids, context=context):
            model_obj = self.pool[wizard.res_model]
            document = model_obj.browse(cr, uid, wizard.res_id, context=context)
            # filter partner_ids to get the new followers, to avoid sending email to already following partners
            new_follower_ids = [p.id for p in wizard.partner_ids if p not in document.message_follower_ids]
            model_obj.message_subscribe(cr, uid, [wizard.res_id], new_follower_ids, context=context)
            ir_model = self.pool.get('ir.model')
            model_ids = ir_model.search(cr, uid, [('model', '=', model_obj._name)], context=context)
            model_name = ir_model.name_get(cr, uid, model_ids, context=context)[0][1]
            # send an email if option checked and if a message exists (do not send void emails)
            if wizard.send_mail and wizard.message and not wizard.message == '<br>': # when deleting the message, cleditor keeps a <br>
                # add signature
                # FIXME 8.0: use notification_email_send, send a wall message and let mail handle email notification + message box
                signature_company = self.pool.get('mail.notification').get_signature_footer(cr, uid, user_id=uid, res_model=wizard.res_model, res_id=wizard.res_id, context=context)
                wizard.message = tools.append_content_to_html(wizard.message, signature_company, plaintext=False, container_tag='div')
                # send mail to new followers
                # the invite wizard should create a private message not related to any object -> no model, no res_id
                mail_mail = self.pool.get('mail.mail')
                mail_id = mail_mail.create(cr, uid, {
                    'model': wizard.res_model,
                    'res_id': wizard.res_id,
                    'record_name': document.name_get()[0][1],
                    'email_from': self.pool['mail.message']._get_default_from(cr, uid, context=context),
                    'reply_to': self.pool['mail.message']._get_default_from(cr, uid, context=context),
                    'subject': _('Invitation to follow %s: %s') % (model_name, document.name_get()[0][1]),
                    'body_html': '%s' % wizard.message,
                    'auto_delete': True,
                    'message_id': self.pool['mail.message']._get_message_id(cr, uid, {'no_auto_thread': True}, context=context),
                    'recipient_ids': [(4, id) for id in new_follower_ids]
                }, context=context)
                mail_mail.send(cr, uid, [mail_id], context=context)
        return {'type': 'ir.actions.act_window_close'}
| gpl-2.0 |
jbbskinny/sympy | sympy/plotting/experimental_lambdify.py | 41 | 25165 | """ rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self contained. Especially it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly it will never
ever support anything else than sympy expressions (no Matrices, dictionaries
and so on).
"""
from __future__ import print_function, division
import re
from sympy import Symbol, NumberSymbol, I, zoo, oo
from sympy.core.compatibility import exec_
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we translate also some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function can not be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the sympy expression and
# all the name of modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it wont need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm wont work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care for by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there some way to fix all possible problems?
# A: Probably by constructing our strings ourself by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
from sympy.external import import_module
import warnings
# TODO: debugging output
class vectorized_lambdify(object):
    """ Return a sufficiently smart, vectorized and lambdified function.
    Returns only reals.
    This function uses experimental_lambdify to create a lambdified
    expression ready to be used with numpy. Many of the functions in sympy
    are not implemented in numpy so in some cases we resort to python cmath or
    even to evalf.
    The following translations are tried:
    only numpy complex
    - on errors raised by sympy trying to work with ndarray:
    only python cmath and then vectorize complex128
    When using python cmath there is no need for evalf or float/complex
    because python cmath calls those.
    This function never tries to mix numpy directly with evalf because numpy
    does not understand sympy Float. If this is needed one can use the
    float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
    better one can be explicit about the dtypes that numpy works with.
    Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
    types of errors to expect.
    """
    def __init__(self, args, expr):
        self.args = args
        self.expr = expr
        # First attempt: translate everything to numpy.
        self.lambda_func = experimental_lambdify(args, expr, use_np=True)
        self.vector_func = self.lambda_func
        # Set once the last-resort evalf fallback has been tried, so a second
        # failure propagates instead of looping.
        self.failure = False
    def __call__(self, *args):
        np = import_module('numpy')
        np_old_err = np.seterr(invalid='raise')
        try:
            temp_args = (np.array(a, dtype=np.complex) for a in args)
            results = self.vector_func(*temp_args)
            # Mask points whose imaginary part is non-negligible; only the
            # real part is returned for plotting.
            results = np.ma.masked_where(
                np.abs(results.imag) > 1e-7 * np.abs(results),
                results.real, copy=False)
        except Exception as e:
            #DEBUG: print 'Error', type(e), e
            if ((isinstance(e, TypeError)
                 and 'unhashable type: \'numpy.ndarray\'' in str(e))
                    or
                    (isinstance(e, ValueError)
                     and ('Invalid limits given:' in str(e)
                          or 'negative dimensions are not allowed' in str(e)  # XXX
                          or 'sequence too large; must be smaller than 32' in str(e)))):  # XXX
                # Almost all functions were translated to numpy, but some were
                # left as sympy functions. They received an ndarray as an
                # argument and failed.
                # sin(ndarray(...)) raises "unhashable type"
                # Integral(x, (x, 0, ndarray(...))) raises "Invalid limits"
                # other ugly exceptions that are not well understood (marked with XXX)
                # TODO: Cleanup the ugly special cases marked with xxx above.
                # Solution: use cmath and vectorize the final lambda.
                self.lambda_func = experimental_lambdify(
                    self.args, self.expr, use_python_cmath=True)
                self.vector_func = np.vectorize(
                    self.lambda_func, otypes=[np.complex])
                results = self.vector_func(*args)
                results = np.ma.masked_where(
                    np.abs(results.imag) > 1e-7 * np.abs(results),
                    results.real, copy=False)
            else:
                # Complete failure. One last try with no translations, only
                # wrapping in complex((...).evalf()) and returning the real
                # part.
                if self.failure:
                    raise e
                else:
                    self.failure = True
                    self.lambda_func = experimental_lambdify(
                        self.args, self.expr, use_evalf=True,
                        complex_wrap_evalf=True)
                    self.vector_func = np.vectorize(
                        self.lambda_func, otypes=[np.complex])
                    results = self.vector_func(*args)
                    results = np.ma.masked_where(
                        np.abs(results.imag) > 1e-7 * np.abs(results),
                        results.real, copy=False)
                    warnings.warn('The evaluation of the expression is'
                                  ' problematic. We are trying a failback method'
                                  ' that may still work. Please report this as a bug.')
        finally:
            # Restore numpy's error state even when evaluation failed.
            np.seterr(**np_old_err)
        return results
class lambdify(object):
    """Returns the lambdified function.
    This function uses experimental_lambdify to create a lambdified
    expression. It uses cmath to lambdify the expression. If the function
    is not implemented in python cmath, python cmath calls evalf on those
    functions.
    """
    def __init__(self, args, expr):
        self.args = args
        self.expr = expr
        self.lambda_func = experimental_lambdify(args, expr, use_evalf=True,
                                                 use_python_cmath=True)
        # Set once the last-resort evalf fallback has been tried, so a second
        # failure propagates instead of looping.
        self.failure = False
    def __call__(self, args):
        """Evaluate at a single point; return the real part, or None when the
        result has a significant imaginary component."""
        args = complex(args)
        try:
            #The result can be sympy.Float. Hence wrap it with complex type.
            result = complex(self.lambda_func(args))
            if abs(result.imag) > 1e-7 * abs(result):
                return None
            else:
                return result.real
        except Exception as e:
            # The exceptions raised by sympy, cmath are not consistent and
            # hence it is not possible to specify all the exceptions that
            # are to be caught. Presently there are no cases for which the code
            # reaches this block other than ZeroDivisionError and complex
            # comparison. Also the exception is caught only once. If the
            # exception repeats itself,
            # then it is not caught and the corresponding error is raised.
            # XXX: Remove catching all exceptions once the plotting module
            # is heavily tested.
            if isinstance(e, ZeroDivisionError):
                return None
            elif isinstance(e, TypeError) and ('no ordering relation is'
                                               ' defined for complex numbers'
                                               in str(e)):
                # Comparison failed on a complex value: retry with real math.
                self.lambda_func = experimental_lambdify(self.args, self.expr,
                                                         use_evalf=True,
                                                         use_python_math=True)
                result = self.lambda_func(args.real)
                return result
            else:
                if self.failure:
                    raise e
                #Failure
                #Try wrapping it with complex(..).evalf()
                self.failure = True
                self.lambda_func = experimental_lambdify(self.args, self.expr,
                                                         use_evalf=True,
                                                         complex_wrap_evalf=True)
                result = self.lambda_func(args)
                warnings.warn('The evaluation of the expression is'
                              ' problematic. We are trying a failback method'
                              ' that may still work. Please report this as a bug.')
                if abs(result.imag) > 1e-7 * abs(result):
                    return None
                else:
                    return result.real
def experimental_lambdify(*args, **kwargs):
    """Build a :class:`Lambdifier` for the given arguments and return its
    compiled lambda function."""
    return Lambdifier(*args, **kwargs).lambda_func
class Lambdifier(object):
    """Compiles a sympy expression into a Python lambda.

    The constructor prints the expression, translates function and constant
    names for the requested target module (builtins, numpy, math, cmath or
    intervalmath) using the class-level dictionaries below, and exec-s the
    resulting ``lambda`` source in a namespace built from the expression.
    The result is exposed as ``self.lambda_func``.
    """
    def __init__(self, args, expr, print_lambda=False, use_evalf=False,
                 float_wrap_evalf=False, complex_wrap_evalf=False,
                 use_np=False, use_python_math=False, use_python_cmath=False,
                 use_interval=False):
        # Translation switches; they select which of the class-level
        # dictionaries get_dict_str/get_dict_fun will use.
        self.print_lambda = print_lambda
        self.use_evalf = use_evalf
        self.float_wrap_evalf = float_wrap_evalf
        self.complex_wrap_evalf = complex_wrap_evalf
        self.use_np = use_np
        self.use_python_math = use_python_math
        self.use_python_cmath = use_python_cmath
        self.use_interval = use_interval
        # Constructing the argument string
        # - check
        if not all([isinstance(a, Symbol) for a in args]):
            raise ValueError('The arguments must be Symbols.')
        # - use numbered symbols
        # Renaming the arguments to fresh numbered symbols avoids clashes
        # between user symbol names and names used in the translation.
        syms = numbered_symbols(exclude=expr.free_symbols)
        newargs = [next(syms) for i in args]
        expr = expr.xreplace(dict(zip(args, newargs)))
        argstr = ', '.join([str(a) for a in newargs])
        del syms, newargs, args
        # Constructing the translation dictionaries and making the translation
        self.dict_str = self.get_dict_str()
        self.dict_fun = self.get_dict_fun()
        exprstr = str(expr)
        newexpr = self.tree2str_translate(self.str2tree(exprstr))
        # Constructing the namespaces
        namespace = {}
        namespace.update(self.sympy_atoms_namespace(expr))
        namespace.update(self.sympy_expression_namespace(expr))
        # XXX Workaround
        # Ugly workaround because Pow(a,Half) prints as sqrt(a)
        # and sympy_expression_namespace can not catch it.
        from sympy import sqrt
        namespace.update({'sqrt': sqrt})
        namespace.update({'Eq': lambda x, y: x == y})
        # End workaround.
        if use_python_math:
            namespace.update({'math': __import__('math')})
        if use_python_cmath:
            namespace.update({'cmath': __import__('cmath')})
        if use_np:
            try:
                namespace.update({'np': __import__('numpy')})
            except ImportError:
                raise ImportError(
                    'experimental_lambdify failed to import numpy.')
        if use_interval:
            namespace.update({'imath': __import__(
                'sympy.plotting.intervalmath', fromlist=['intervalmath'])})
            namespace.update({'math': __import__('math')})
        # Construct the lambda
        if self.print_lambda:
            print(newexpr)
        eval_str = 'lambda %s : ( %s )' % (argstr, newexpr)
        exec_("from __future__ import division; MYNEWLAMBDA = %s" % eval_str, namespace)
        self.lambda_func = namespace['MYNEWLAMBDA']
    ##############################################################################
    # Dicts for translating from sympy to other modules
    ##############################################################################
    ###
    # builtins
    ###
    # Functions with different names in builtins
    builtin_functions_different = {
        'Min': 'min',
        'Max': 'max',
        'Abs': 'abs',
    }
    # Strings that should be translated
    builtin_not_functions = {
        'I': '1j',
        'oo': '1e400',
    }
    ###
    # numpy
    ###
    # Functions that are the same in numpy
    numpy_functions_same = [
        'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log',
        'sqrt', 'floor', 'conjugate',
    ]
    # Functions with different names in numpy
    numpy_functions_different = {
        "acos": "arccos",
        "acosh": "arccosh",
        "arg": "angle",
        "asin": "arcsin",
        "asinh": "arcsinh",
        "atan": "arctan",
        "atan2": "arctan2",
        "atanh": "arctanh",
        "ceiling": "ceil",
        "im": "imag",
        "ln": "log",
        "Max": "amax",
        "Min": "amin",
        "re": "real",
        "Abs": "abs",
    }
    # Strings that should be translated
    numpy_not_functions = {
        'pi': 'np.pi',
        'oo': 'np.inf',
        'E': 'np.e',
    }
    ###
    # python math
    ###
    # Functions that are the same in math
    math_functions_same = [
        'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
        'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
        'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma',
    ]
    # Functions with different names in math
    math_functions_different = {
        'ceiling': 'ceil',
        'ln': 'log',
        'loggamma': 'lgamma'
    }
    # Strings that should be translated
    math_not_functions = {
        'pi': 'math.pi',
        'E': 'math.e',
    }
    ###
    # python cmath
    ###
    # Functions that are the same in cmath
    cmath_functions_same = [
        'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
        'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
        'exp', 'log', 'sqrt',
    ]
    # Functions with different names in cmath
    cmath_functions_different = {
        'ln': 'log',
        'arg': 'phase',
    }
    # Strings that should be translated
    cmath_not_functions = {
        'pi': 'cmath.pi',
        'E': 'cmath.e',
    }
    ###
    # intervalmath
    ###
    interval_not_functions = {
        'pi': 'math.pi',
        'E': 'math.e'
    }
    interval_functions_same = [
        'sin', 'cos', 'exp', 'tan', 'atan', 'log',
        'sqrt', 'cosh', 'sinh', 'tanh', 'floor',
        'acos', 'asin', 'acosh', 'asinh', 'atanh',
        'Abs', 'And', 'Or'
    ]
    interval_functions_different = {
        'Min': 'imin',
        'Max': 'imax',
        'ceiling': 'ceil',
    }
    ###
    # mpmath, etc
    ###
    #TODO
    ###
    # Create the final ordered tuples of dictionaries
    ###
    # For strings
    def get_dict_str(self):
        """Merge the string-translation dicts selected by the active flags."""
        dict_str = dict(self.builtin_not_functions)
        if self.use_np:
            dict_str.update(self.numpy_not_functions)
        if self.use_python_math:
            dict_str.update(self.math_not_functions)
        if self.use_python_cmath:
            dict_str.update(self.cmath_not_functions)
        if self.use_interval:
            dict_str.update(self.interval_not_functions)
        return dict_str
    # For functions
    def get_dict_fun(self):
        """Merge the function-name translation dicts selected by the flags,
        prefixing each target name with its module (np./math./cmath./imath.)."""
        dict_fun = dict(self.builtin_functions_different)
        if self.use_np:
            for s in self.numpy_functions_same:
                dict_fun[s] = 'np.' + s
            for k, v in self.numpy_functions_different.items():
                dict_fun[k] = 'np.' + v
        if self.use_python_math:
            for s in self.math_functions_same:
                dict_fun[s] = 'math.' + s
            for k, v in self.math_functions_different.items():
                dict_fun[k] = 'math.' + v
        if self.use_python_cmath:
            for s in self.cmath_functions_same:
                dict_fun[s] = 'cmath.' + s
            for k, v in self.cmath_functions_different.items():
                dict_fun[k] = 'cmath.' + v
        if self.use_interval:
            for s in self.interval_functions_same:
                dict_fun[s] = 'imath.' + s
            for k, v in self.interval_functions_different.items():
                dict_fun[k] = 'imath.' + v
        return dict_fun
    ##############################################################################
    # The translator functions, tree parsers, etc.
    ##############################################################################
    def str2tree(self, exprstr):
        """Converts an expression string to a tree.
        Functions are represented by ('func_name(', tree_of_arguments).
        Other expressions are (head_string, mid_tree, tail_str).
        Expressions that do not contain functions are directly returned.
        Examples
        ========
        >>> from sympy.abc import x, y, z
        >>> from sympy import Integral, sin
        >>> from sympy.plotting.experimental_lambdify import Lambdifier
        >>> str2tree = Lambdifier([x], x).str2tree
        >>> str2tree(str(Integral(x, (x, 1, y))))
        ('', ('Integral(', 'x, (x, 1, y)'), ')')
        >>> str2tree(str(x+y))
        'x + y'
        >>> str2tree(str(x+y*sin(z)+1))
        ('x + y*', ('sin(', 'z'), ') + 1')
        >>> str2tree('sin(y*(y + 1.1) + (sin(y)))')
        ('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')')
        """
        #matches the first 'function_name('
        first_par = re.search(r'(\w+\()', exprstr)
        if first_par is None:
            return exprstr
        else:
            start = first_par.start()
            end = first_par.end()
            head = exprstr[:start]
            func = exprstr[start:end]
            tail = exprstr[end:]
            count = 0
            # Scan for the matching ')' of the function call; count tracks
            # the nesting level of parentheses inside the argument list.
            for i, c in enumerate(tail):
                if c == '(':
                    count += 1
                elif c == ')':
                    count -= 1
                if count == -1:
                    break
            func_tail = self.str2tree(tail[:i])
            tail = self.str2tree(tail[i:])
            return (head, (func, func_tail), tail)
    @classmethod
    def tree2str(cls, tree):
        """Converts a tree to string without translations.
        Examples
        ========
        >>> from sympy.abc import x, y, z
        >>> from sympy import Integral, sin
        >>> from sympy.plotting.experimental_lambdify import Lambdifier
        >>> str2tree = Lambdifier([x], x).str2tree
        >>> tree2str = Lambdifier([x], x).tree2str
        >>> tree2str(str2tree(str(x+y*sin(z)+1)))
        'x + y*sin(z) + 1'
        """
        if isinstance(tree, str):
            return tree
        else:
            return ''.join(map(cls.tree2str, tree))
    def tree2str_translate(self, tree):
        """Converts a tree to string with translations.
        Function names are translated by translate_func.
        Other strings are translated by translate_str.
        """
        if isinstance(tree, str):
            return self.translate_str(tree)
        elif isinstance(tree, tuple) and len(tree) == 2:
            # A 2-tuple is a function node: ('name(', argument_tree).
            return self.translate_func(tree[0][:-1], tree[1])
        else:
            return ''.join([self.tree2str_translate(t) for t in tree])
    def translate_str(self, estr):
        """Translate substrings of estr using in order the dictionaries in
        dict_tuple_str."""
        # NOTE(review): the dict keys are used as raw regex patterns without
        # word boundaries, so a short key such as 'I' could in principle
        # match inside a longer name -- confirm whether the numbered-symbol
        # renaming in __init__ makes this safe in practice.
        for pattern, repl in self.dict_str.items():
            estr = re.sub(pattern, repl, estr)
        return estr
    def translate_func(self, func_name, argtree):
        """Translate function names and the tree of arguments.
        If the function name is not in the dictionaries of dict_tuple_fun then the
        function is surrounded by a float((...).evalf()).
        The use of float is necessary as np.<function>(sympy.Float(..)) raises an
        error."""
        if func_name in self.dict_fun:
            new_name = self.dict_fun[func_name]
            argstr = self.tree2str_translate(argtree)
            # The closing ')' for the call comes from the tail of the tree.
            return new_name + '(' + argstr
        else:
            template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s'
            if self.float_wrap_evalf:
                template = 'float(%s)' % template
            elif self.complex_wrap_evalf:
                template = 'complex(%s)' % template
            return template % (func_name, self.tree2str(argtree))
    ##############################################################################
    # The namespace constructors
    ##############################################################################
    @classmethod
    def sympy_expression_namespace(cls, expr):
        """Traverses the (func, args) tree of an expression and creates a sympy
        namespace. All other modules are imported only as a module name. That way
        the namespace is not poluted and rests quite small. It probably causes much
        more variable lookups and so it takes more time, but there are no tests on
        that for the moment."""
        if expr is None:
            return {}
        else:
            funcname = str(expr.func)
            # XXX Workaround
            # Here we add an ugly workaround because str(func(x))
            # is not always the same as str(func). Eg
            # >>> str(Integral(x))
            # "Integral(x)"
            # >>> str(Integral)
            # "<class 'sympy.integrals.integrals.Integral'>"
            # >>> str(sqrt(x))
            # "sqrt(x)"
            # >>> str(sqrt)
            # "<function sqrt at 0x3d92de8>"
            # >>> str(sin(x))
            # "sin(x)"
            # >>> str(sin)
            # "sin"
            # Either one of those can be used but not all at the same time.
            # The code considers the sin example as the right one.
            regexlist = [
                r'<class \'sympy[\w.]*?.([\w]*)\'>$',
                # the example Integral
                r'<function ([\w]*) at 0x[\w]*>$', # the example sqrt
            ]
            for r in regexlist:
                m = re.match(r, funcname)
                if m is not None:
                    funcname = m.groups()[0]
            # End of the workaround
            # XXX debug: print funcname
            args_dict = {}
            # Recurse into subexpressions, skipping atoms (they are handled
            # by sympy_atoms_namespace).
            for a in expr.args:
                if (isinstance(a, Symbol) or
                    isinstance(a, NumberSymbol) or
                    a in [I, zoo, oo]):
                    continue
                else:
                    args_dict.update(cls.sympy_expression_namespace(a))
            args_dict.update({funcname: expr.func})
            return args_dict
    @staticmethod
    def sympy_atoms_namespace(expr):
        """For no real reason this function is separated from
        sympy_expression_namespace. It can be moved to it."""
        atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo)
        d = {}
        for a in atoms:
            # XXX debug: print 'atom:' + str(a)
            d[str(a)] = a
        return d
| bsd-3-clause |
play113/swer | opencamlib-read-only/scripts/kdtree_movie1.py | 8 | 5347 | import ocl as cam
import camvtk
import time
import vtk
import datetime
if __name__ == "__main__":
    # Python 2 script: visualize OpenCAMLib drop-cutter sampling over an STL
    # model with VTK, drawing the triangles under the cutter at each cutter
    # location and (optionally) writing movie frames to PNG files.
    myscreen = camvtk.VTKScreen()
    myscreen.setAmbient(20,20,20)
    #stl = camvtk.STLSurf(filename="demo.stl")
    stl = camvtk.STLSurf(filename="demo2.stl")
    print "STL surface read"
    myscreen.addActor(stl)
    stl.SetWireframe()
    stl.SetColor((0.5,0.5,0.5))
    #stl.SetFlat()
    # Convert the VTK polydata into an OpenCAMLib STLSurf.
    polydata = stl.src.GetOutput()
    s= cam.STLSurf()
    camvtk.vtkPolyData2OCLSTL(polydata, s)
    print "STLSurf with ", s.size(), " triangles"
    # Cylindrical cutter used for the drop-cutter simulation.
    cutterDiameter=0.6
    cutter = cam.CylCutter(cutterDiameter)
    #print cutter.str()
    #print cc.type
    # Rectangular XY grid of cutter-location sample points at height z.
    minx=-20
    dx=1
    maxx=20
    miny=-20
    dy=01
    # NOTE(review): '01' above is a Python 2 octal literal equal to 1; it is
    # invalid syntax on Python 3.
    maxy=20
    z=-0.2
    # Bucket size for the kd-tree used by ParallelFinish.
    bucketSize = 20
    #pftp = cam.ParallelFinish()
    #pftp.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
    #pftp.initSTLSurf(s, bucketSize)
    #pftp.dropCutterSTL1(cutter)
    #print " made ", pftp.dcCalls, " drop-cutter calls"
    #exit
    # Run the drop-cutter algorithm over the whole grid.
    pf2 = cam.ParallelFinish()
    pf2.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
    pf2.initSTLSurf(s, bucketSize)
    pf2.dropCutterSTL2(cutter)
    print " made ", pf2.dcCalls, " drop-cutter calls"
    #clpoints = pftp.getCLPoints()
    #ccpoints = pftp.getCCPoints()
    # Cutter-location points and the cutter-contact points they came from.
    clpoints = pf2.getCLPoints()
    ccpoints = pf2.getCCPoints()
    #CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
    # Counters for the contact types encountered (vertex/none/edge/facet).
    nv=0
    nn=0
    ne=0
    nf=0
    myscreen.camera.SetPosition(3, 100, 15)
    myscreen.camera.SetFocalPoint(50, 50, 0)
    # On-screen text overlays: clock, CL coordinates, triangle counts, camera.
    t = camvtk.Text()
    t.SetPos( (myscreen.width-200, myscreen.height-30) )
    myscreen.addActor( t)
    t2 = camvtk.Text()
    t2.SetPos( (myscreen.width-200, 30) )
    myscreen.addActor( t2)
    t3 = camvtk.Text()
    t3.SetPos( (30, 30))
    myscreen.addActor( t3)
    t4 = camvtk.Text()
    t4.SetPos( (30, myscreen.height-60))
    myscreen.addActor( t4)
    n=0
    precl = cam.Point()
    # Screenshot pipeline (render window -> PNG writer).
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(myscreen.renWin)
    lwr = vtk.vtkPNGWriter()
    lwr.SetInput( w2if.GetOutput() )
    w2if.Modified()
    lwr.SetFileName("tux1.png")
    # Animate: one frame per cutter-location point.
    for cl,cc in zip(clpoints,ccpoints):
        camEye = myscreen.camera.GetFocalPoint()
        camPos = myscreen.camera.GetPosition()
        postext = "(%3.3f, %3.3f, %3.3f)" % (camPos[0], camPos[1], camPos[2])
        eyetext = "(%3.3f, %3.3f, %3.3f)" % (camEye[0], camEye[1], camEye[2])
        camtext = "Camera LookAt: "+eyetext+"\nCamera Pos: "+ postext
        t4.SetText(camtext)
        t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        xtext = "%3.3f" % cl.x
        ytext = "%3.3f" % cl.y
        ztext = "%3.3f" % cl.z
        t2.SetText( "X: " + xtext + "\nY: " + ytext + "\nZ: " + ztext )
        # Color-code the cutter by the type of cutter-contact point.
        if cc.type==cam.CCType.FACET:
            nf+=1
            col = (0,1,1)
        elif cc.type == cam.CCType.VERTEX:
            nv+=1
            col = (0,1,0)
        elif cc.type == cam.CCType.EDGE:
            ne+=1
            col = (1,0,0)
        elif cc.type == cam.CCType.NONE:
            #print "type=NONE!"
            nn+=1
            col = (1,1,1)
        #if cl.isInside(t):
        #    col = (0, 1, 0)
        #else:
        #    col = (1, 0, 0)
        # Highlight the triangles the kd-tree reports under the cutter.
        trilist = pf2.getTrianglesUnderCutter(cl, cutter)
        #print "at cl=", cl.str() , " where len(trilist)=", len(trilist)
        t3.SetText("Total Triangles: "+ str(s.size()) +"\nUnder Cutter (red): "+str(len(trilist)))
        stl2 = camvtk.STLSurf(filename=None, triangleList=trilist, color=(1,0,0)) # a new surface with only triangles under cutter
        stl2.SetWireframe()
        #stl2.SetFlat()
        myscreen.addActor(stl2)
        trilist=[]
        cutactor = camvtk.Cylinder(center=(cl.x,cl.y,cl.z), radius=cutterDiameter/2, height=2, color=(0.7,1,1))
        myscreen.addActor( cutactor )
        #myscreen.addActor( camvtk.Point(center=(cl.x,cl.y,cl.z) , color=col) )
        # Connect consecutive CL points with a line, unless the jump is large
        # (norm >= 9), which indicates a move to a new scan row.
        if n==0:
            precl = cl
        else:
            d = cl-precl
            if (d.norm() < 9):
                myscreen.addActor( camvtk.Line( p1=(precl.x, precl.y, precl.z), p2=(cl.x, cl.y, cl.z), color=(0,1,1) ) )
        precl = cl
        n=n+1
        #myscreen.addActor( camvtk.Point(center=(cl2.x,cl2.y,cl2.z+0.2) , color=(0.6,0.2,0.9)) )
        #myscreen.addActor( camvtk.Point(center=(cc.x,cc.y,cc.z), color=col) )
        #print cc.type
        myscreen.camera.Azimuth( 0.2 )
        #time.sleep(0.01)
        myscreen.render()
        w2if.Modified()
        lwr.SetFileName("kdmov"+ ('%05d' % n)+".png")
        #lwr.Write()
        #raw_input("Press Enter to continue")
        # Remove the per-frame actors before drawing the next frame.
        myscreen.removeActor(stl2)
        myscreen.removeActor( cutactor )
    print "none=",nn," vertex=",nv, " edge=",ne, " facet=",nf, " sum=", nn+nv+ne+nf
    print len(clpoints), " cl points evaluated"
    #lwr.Write()
    # Final fly-around: rotate the camera one degree per frame.
    for n in range(1,36):
        t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        myscreen.camera.Azimuth( 1 )
        time.sleep(0.01)
        myscreen.render()
        lwr.SetFileName("kd_frame"+ ('%03d' % n)+".png")
        w2if.Modified()
        #lwr.Write()
    myscreen.iren.Start()
    raw_input("Press Enter to terminate")
| mit |
boooka/GeoPowerOff | venv/lib/python2.7/site-packages/django/contrib/gis/geometry/test_data.py | 105 | 3009 | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import json
import os
from django.contrib import gis
from django.utils import six
from django.utils._os import upath
# This global used to store reference geometry data.
GEOMETRIES = None
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(upath(gis.__file__)), 'tests', 'data')
def tuplize(seq):
    """Recursively convert every nested list/tuple in ``seq`` to a tuple.

    Non-sequence values (and strings) are returned unchanged.
    """
    if not isinstance(seq, (list, tuple)):
        return seq
    return tuple(tuplize(item) for item in seq)
def strconvert(d):
    """Return a copy of dict ``d`` with every key coerced to ``str``.

    Used so that JSON-loaded mappings (whose keys may be unicode on
    Python 2) can safely be splatted as ``**kwargs``. A plain dict
    comprehension replaces the previous ``six.iteritems`` call; it behaves
    identically on Python 2.7+ and 3.x and needs no compatibility shim.
    """
    return {str(k): v for k, v in d.items()}
def get_ds_file(name, ext):
    """Return the path of the test data-source file ``<name>/<name>.<ext>``
    inside the TEST_DATA directory."""
    filename = '%s.%s' % (name, ext)
    return os.path.join(TEST_DATA, name, filename)
class TestObj(object):
    """Base testing object; every keyword argument becomes an attribute."""
    def __init__(self, **kwargs):
        # Bulk-assign: equivalent to calling setattr for each pair on a
        # plain object with no properties/slots.
        self.__dict__.update(kwargs)
class TestDS(TestObj):
    """Reference object describing a GDAL data source used in tests."""
    def __init__(self, name, **kwargs):
        # Unless the caller specifies an extension, assume a shapefile.
        extension = kwargs.pop('ext', 'shp')
        self.ds = get_ds_file(name, extension)
        super(TestDS, self).__init__(**kwargs)
class TestGeom(TestObj):
    """Wraps reference geometry data for the GEOS/GDAL tests."""
    def __init__(self, **kwargs):
        # JSON has no tuple type, so coordinate sequences arrive as lists;
        # convert them to (nested) tuples so test comparisons match.
        coords = kwargs.pop('coords', None)
        if coords:
            self.coords = tuplize(coords)
        centroid = kwargs.pop('centroid', None)
        if centroid:
            self.centroid = tuple(centroid)
        # Unlike coords/centroid, ext_ring_cs is always assigned (possibly
        # as None) to mirror the original behavior.
        ext_ring_cs = kwargs.pop('ext_ring_cs', None)
        self.ext_ring_cs = tuplize(ext_ring_cs) if ext_ring_cs else ext_ring_cs
        super(TestGeom, self).__init__(**kwargs)
class TestGeomSet(object):
    """Namespace object whose attributes are lists of `TestGeom` instances."""
    def __init__(self, **kwargs):
        for name, entries in kwargs.items():
            geoms = [TestGeom(**strconvert(entry)) for entry in entries]
            setattr(self, name, geoms)
class TestDataMixin(object):
    """
    Mixin for GEOS/GDAL test cases providing a `geometries` property that
    lazily loads the reference geometry data into the module-level cache.
    """
    @property
    def geometries(self):
        global GEOMETRIES
        if GEOMETRIES is not None:
            return GEOMETRIES
        # First access: parse the JSON fixture and cache the result so
        # every subsequent test reuses the same object.
        fixture_path = os.path.join(TEST_DATA, 'geometries.json')
        with open(fixture_path) as fixture:
            raw = json.load(fixture)
        GEOMETRIES = TestGeomSet(**strconvert(raw))
        return GEOMETRIES
| apache-2.0 |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/wire.py | 63 | 2359 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Implement standard (and unused) TCP protocols.
These protocols are either provided by inetd, or are not provided at all.
"""
# system imports
import time, struct
from zope.interface import implements
# twisted import
from twisted.internet import protocol, interfaces
class Echo(protocol.Protocol):
    """As soon as any data is received, write it back (RFC 862)"""
    def dataReceived(self, data):
        # RFC 862 echo service: mirror every received byte back to the peer.
        self.transport.write(data)
class Discard(protocol.Protocol):
    """Discard any received data (RFC 863)"""
    def dataReceived(self, data):
        # RFC 863 discard service: accept input and deliberately drop it.
        # I'm ignoring you, nyah-nyah
        pass
class Chargen(protocol.Protocol):
    """Generate repeating noise (RFC 864)"""
    # The repeating character pattern written to the connection.
    noise = r'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ !"#$%&?'
    implements(interfaces.IProducer)
    def connectionMade(self):
        # Register as a pull (non-streaming) producer: the transport calls
        # resumeProducing() whenever it is ready for more data.
        self.transport.registerProducer(self, 0)
    def resumeProducing(self):
        # Emit one chunk of the pattern each time the transport asks.
        self.transport.write(self.noise)
    def pauseProducing(self):
        # Pull producers only produce on resume, so there is nothing to stop.
        pass
    def stopProducing(self):
        pass
class QOTD(protocol.Protocol):
    """Return a quote of the day (RFC 865)"""
    def connectionMade(self):
        # QOTD carries no request data: send the quote immediately on
        # connect, then close the connection.
        self.transport.write(self.getQuote())
        self.transport.loseConnection()
    def getQuote(self):
        """Return a quote. May be overridden in subclasses."""
        return "An apple a day keeps the doctor away.\r\n"
class Who(protocol.Protocol):
    """Return list of active users (RFC 866)"""
    def connectionMade(self):
        # Write the user list on connect and hang up; no request is read.
        self.transport.write(self.getUsers())
        self.transport.loseConnection()
    def getUsers(self):
        """Return active users. Override in subclasses."""
        return "root\r\n"
class Daytime(protocol.Protocol):
    """Send back the daytime in ASCII form (RFC 867)"""
    def connectionMade(self):
        # Human-readable UTC timestamp terminated by CRLF, then close.
        self.transport.write(time.asctime(time.gmtime(time.time())) + '\r\n')
        self.transport.loseConnection()
class Time(protocol.Protocol):
    """Send back the time in machine readable form (RFC 868)"""

    # RFC 868 counts seconds since 00:00 1 January 1900 GMT (the NTP era),
    # not the POSIX epoch; this constant is the offset between the two.
    EPOCH_OFFSET = 2208988800

    def connectionMade(self):
        """On connect, write the current time as an unsigned 32-bit
        big-endian count of seconds since 1900, then close the connection.

        The previous implementation sent the signed Unix time, which both
        disagrees with the RFC 868 epoch and would overflow ``"!i"`` in
        2038; masking to 32 bits keeps the pack valid past the era wrap.
        """
        seconds = int(time.time()) + self.EPOCH_OFFSET
        result = struct.pack("!I", seconds & 0xFFFFFFFF)
        self.transport.write(result)
        self.transport.loseConnection()
| gpl-2.0 |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/idlelib/HyperParser.py | 58 | 10293 | """
HyperParser
===========
This module defines the HyperParser class, which provides advanced parsing
abilities for the ParenMatch and other extensions.
The HyperParser uses PyParser. PyParser is intended mostly to give information
on the proper indentation of code. HyperParser gives some information on the
structure of code, used by extensions to help the user.
"""
import string
import keyword
import PyParse
class HyperParser:
    def __init__(self, editwin, index):
        """Initialize the HyperParser to analyze the surroundings of the given
        index.
        """
        self.editwin = editwin
        self.text = text = editwin.text
        parser = PyParse.Parser(editwin.indentwidth, editwin.tabwidth)
        def index2line(index):
            # Tk text indices look like "line.col"; the integer part of the
            # float form is the line number.
            return int(float(index))
        lno = index2line(text.index(index))
        if not editwin.context_use_ps1:
            # Editor window: search backwards with widening context for a
            # good place to start parsing.
            for context in editwin.num_context_lines:
                startat = max(lno - context, 1)
                startatindex = `startat` + ".0"
                stopatindex = "%d.end" % lno
                # We add the newline because PyParse requires a newline at end.
                # We add a space so that index won't be at end of line, so that
                # its status will be the same as the char before it, if should.
                parser.set_str(text.get(startatindex, stopatindex)+' \n')
                bod = parser.find_good_parse_start(
                          editwin._build_char_in_string_func(startatindex))
                if bod is not None or startat == 1:
                    break
            parser.set_lo(bod or 0)
        else:
            # Shell window: start after the previous prompt ("console" tag).
            r = text.tag_prevrange("console", index)
            if r:
                startatindex = r[1]
            else:
                startatindex = "1.0"
            stopatindex = "%d.end" % lno
            # We add the newline because PyParse requires a newline at end.
            # We add a space so that index won't be at end of line, so that
            # its status will be the same as the char before it, if should.
            parser.set_str(text.get(startatindex, stopatindex)+' \n')
            parser.set_lo(0)
        # We want what the parser has, except for the last newline and space.
        self.rawtext = parser.str[:-2]
        # As far as I can see, parser.str preserves the statement we are in,
        # so that stopatindex can be used to synchronize the string with the
        # text box indices.
        self.stopatindex = stopatindex
        self.bracketing = parser.get_last_stmt_bracketing()
        # find which pairs of bracketing are openers. These always correspond
        # to a character of rawtext.
        self.isopener = [i>0 and self.bracketing[i][1] > self.bracketing[i-1][1]
                         for i in range(len(self.bracketing))]
        self.set_index(index)
    def set_index(self, index):
        """Set the index to which the functions relate. Note that it must be
        in the same statement.
        """
        indexinrawtext = \
            len(self.rawtext) - len(self.text.get(index, self.stopatindex))
        if indexinrawtext < 0:
            raise ValueError("The index given is before the analyzed statement")
        self.indexinrawtext = indexinrawtext
        # find the rightmost bracket to which index belongs
        self.indexbracket = 0
        while self.indexbracket < len(self.bracketing)-1 and \
              self.bracketing[self.indexbracket+1][0] < self.indexinrawtext:
            self.indexbracket += 1
        if self.indexbracket < len(self.bracketing)-1 and \
           self.bracketing[self.indexbracket+1][0] == self.indexinrawtext and \
           not self.isopener[self.indexbracket+1]:
            self.indexbracket += 1
    def is_in_string(self):
        """Is the index given to the HyperParser in a string?"""
        # The bracket to which we belong should be an opener.
        # If it's an opener, it has to have a character.
        return self.isopener[self.indexbracket] and \
               self.rawtext[self.bracketing[self.indexbracket][0]] in ('"', "'")
    def is_in_code(self):
        """Is the index given to the HyperParser in normal code?"""
        return not self.isopener[self.indexbracket] or \
               self.rawtext[self.bracketing[self.indexbracket][0]] not in \
                                                                ('#', '"', "'")
    def get_surrounding_brackets(self, openers='([{', mustclose=False):
        """If the index given to the HyperParser is surrounded by a bracket
        defined in openers (or at least has one before it), return the
        indices of the opening bracket and the closing bracket (or the
        end of line, whichever comes first).
        If it is not surrounded by brackets, or the end of line comes before
        the closing bracket and mustclose is True, returns None.
        """
        bracketinglevel = self.bracketing[self.indexbracket][1]
        before = self.indexbracket
        # Walk left until we find an opener from `openers` at or below our
        # bracketing level.
        while not self.isopener[before] or \
              self.rawtext[self.bracketing[before][0]] not in openers or \
              self.bracketing[before][1] > bracketinglevel:
            before -= 1
            if before < 0:
                return None
            bracketinglevel = min(bracketinglevel, self.bracketing[before][1])
        # Walk right until the bracketing level drops below the opener's.
        after = self.indexbracket + 1
        while after < len(self.bracketing) and \
              self.bracketing[after][1] >= bracketinglevel:
            after += 1
        beforeindex = self.text.index("%s-%dc" %
            (self.stopatindex, len(self.rawtext)-self.bracketing[before][0]))
        if after >= len(self.bracketing) or \
           self.bracketing[after][0] > len(self.rawtext):
            if mustclose:
                return None
            afterindex = self.stopatindex
        else:
            # We are after a real char, so it is a ')' and we give the index
            # before it.
            afterindex = self.text.index("%s-%dc" %
                (self.stopatindex,
                 len(self.rawtext)-(self.bracketing[after][0]-1)))
        return beforeindex, afterindex
    # This string includes all chars that may be in a white space
    _whitespace_chars = " \t\n\\"
    # This string includes all chars that may be in an identifier
    _id_chars = string.ascii_letters + string.digits + "_"
    # This string includes all chars that may be the first char of an identifier
    _id_first_chars = string.ascii_letters + "_"
    # Given a string and pos, return the number of chars in the identifier
    # which ends at pos, or 0 if there is no such one. Saved words are not
    # identifiers.
    def _eat_identifier(self, str, limit, pos):
        i = pos
        while i > limit and str[i-1] in self._id_chars:
            i -= 1
        # Reject matches that don't start with a valid first character or
        # that are Python keywords.
        if i < pos and (str[i] not in self._id_first_chars or \
                        keyword.iskeyword(str[i:pos])):
            i = pos
        return pos - i
    def get_expression(self):
        """Return a string with the Python expression which ends at the given
        index, which is empty if there is no real one.
        """
        if not self.is_in_code():
            raise ValueError("get_expression should only be called if index "\
                             "is inside a code.")
        rawtext = self.rawtext
        bracketing = self.bracketing
        brck_index = self.indexbracket
        brck_limit = bracketing[brck_index][0]
        pos = self.indexinrawtext
        last_identifier_pos = pos
        postdot_phase = True
        # Scan leftwards, alternating between identifiers and dots, so that
        # e.g. "a.b.c" is collected as one expression.
        while 1:
            # Eat whitespaces, comments, and if postdot_phase is False - one dot
            while 1:
                if pos>brck_limit and rawtext[pos-1] in self._whitespace_chars:
                    # Eat a whitespace
                    pos -= 1
                elif not postdot_phase and \
                     pos > brck_limit and rawtext[pos-1] == '.':
                    # Eat a dot
                    pos -= 1
                    postdot_phase = True
                # The next line will fail if we are *inside* a comment, but we
                # shouldn't be.
                elif pos == brck_limit and brck_index > 0 and \
                     rawtext[bracketing[brck_index-1][0]] == '#':
                    # Eat a comment
                    brck_index -= 2
                    brck_limit = bracketing[brck_index][0]
                    pos = bracketing[brck_index+1][0]
                else:
                    # If we didn't eat anything, quit.
                    break
            if not postdot_phase:
                # We didn't find a dot, so the expression end at the last
                # identifier pos.
                break
            ret = self._eat_identifier(rawtext, brck_limit, pos)
            if ret:
                # There is an identifier to eat
                pos = pos - ret
                last_identifier_pos = pos
                # Now, in order to continue the search, we must find a dot.
                postdot_phase = False
                # (the loop continues now)
            elif pos == brck_limit:
                # We are at a bracketing limit. If it is a closing bracket,
                # eat the bracket, otherwise, stop the search.
                level = bracketing[brck_index][1]
                while brck_index > 0 and bracketing[brck_index-1][1] > level:
                    brck_index -= 1
                if bracketing[brck_index][0] == brck_limit:
                    # We were not at the end of a closing bracket
                    break
                pos = bracketing[brck_index][0]
                brck_index -= 1
                brck_limit = bracketing[brck_index][0]
                last_identifier_pos = pos
                if rawtext[pos] in "([":
                    # [] and () may be used after an identifier, so we
                    # continue. postdot_phase is True, so we don't allow a dot.
                    pass
                else:
                    # We can't continue after other types of brackets
                    break
            else:
                # We've found an operator or something.
                break
        return rawtext[last_identifier_pos:self.indexinrawtext]
| apache-2.0 |
jjmiranda/edx-platform | common/djangoapps/student/tests/test_certificates.py | 22 | 9277 | """Tests for display of certificates on the student dashboard. """
import unittest
import ddt
import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from mock import patch
from django.test.utils import override_settings
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from certificates.tests.factories import GeneratedCertificateFactory # pylint: disable=import-error
from certificates.api import get_certificate_url # pylint: disable=import-error
from certificates.models import CertificateStatuses # pylint: disable=import-error
from course_modes.models import CourseMode
from student.models import LinkedInAddToProfileConfiguration
# pylint: disable=no-member
class CertificateDisplayTestBase(SharedModuleStoreTestCase):
    """Tests display of certificates on the student dashboard. """
    USERNAME = "test_user"
    PASSWORD = "password"
    DOWNLOAD_URL = "http://www.example.com/certificate.pdf"
    @classmethod
    def setUpClass(cls):
        """Create the shared test course once for the whole test class."""
        super(CertificateDisplayTestBase, cls).setUpClass()
        cls.course = CourseFactory()
        # Show certificate info on the dashboard as soon as it exists.
        cls.course.certificates_display_behavior = "early_with_info"
        with cls.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, cls.course.id):
            cls.store.update_item(cls.course, cls.USERNAME)
    def setUp(self):
        """Create the test user and log the test client in."""
        super(CertificateDisplayTestBase, self).setUp()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.assertTrue(result, msg="Could not log in")
    def _check_linkedin_visibility(self, is_visible):
        """
        Assert whether the "Add Certificate to LinkedIn Profile" button is
        shown on the dashboard.
        """
        response = self.client.get(reverse('dashboard'))
        if is_visible:
            self.assertContains(response, u'Add Certificate to LinkedIn Profile')
        else:
            self.assertNotContains(response, u'Add Certificate to LinkedIn Profile')
    def _create_certificate(self, enrollment_mode):
        """Simulate that the user has a generated certificate. """
        # Enroll first; a certificate is tied to an enrollment mode.
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, mode=enrollment_mode)
        return GeneratedCertificateFactory(
            user=self.user,
            course_id=self.course.id,
            mode=enrollment_mode,
            download_url=self.DOWNLOAD_URL,
            status="downloadable",
            grade=0.98,
        )
    def _check_can_download_certificate(self):
        """
        Inspect the dashboard to see if a certificate can be downloaded.
        """
        response = self.client.get(reverse('dashboard'))
        self.assertContains(response, u'Download Your ID Verified')
        self.assertContains(response, self.DOWNLOAD_URL)
    def _check_can_download_certificate_no_id(self):
        """
        Inspects the dashboard to see if a certificate for a non verified course enrollment
        is present
        """
        response = self.client.get(reverse('dashboard'))
        self.assertContains(response, u'Download')
        self.assertContains(response, u'(PDF)')
        self.assertContains(response, self.DOWNLOAD_URL)
    def _check_can_not_download_certificate(self):
        """
        Make sure response does not have any of the download certificate buttons
        """
        response = self.client.get(reverse('dashboard'))
        self.assertNotContains(response, u'View Test_Certificate')
        self.assertNotContains(response, u'Download Your Test_Certificate (PDF)')
        self.assertNotContains(response, u'Download Test_Certificate (PDF)')
        self.assertNotContains(response, self.DOWNLOAD_URL)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTest(CertificateDisplayTestBase):
    """
    Tests of certificate display.
    """
    @ddt.data('verified', 'professional')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
    def test_display_verified_certificate(self, enrollment_mode):
        # With HTML certificates disabled, the PDF download button must appear
        # for verified/professional enrollments.
        self._create_certificate(enrollment_mode)
        self._check_can_download_certificate()
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
    def test_display_verified_certificate_no_id(self):
        """
        Confirm that if we get a certificate with a no-id-professional mode
        we still can download our certificate
        """
        self._create_certificate(CourseMode.NO_ID_PROFESSIONAL_MODE)
        self._check_can_download_certificate_no_id()
    @ddt.data('verified', 'honor', 'professional')
    def test_unverified_certificate_message(self, enrollment_mode):
        # A certificate in "unverified" status should surface the identity
        # verification warning instead of a download link.
        cert = self._create_certificate(enrollment_mode)
        cert.status = CertificateStatuses.unverified
        cert.save()
        response = self.client.get(reverse('dashboard'))
        self.assertContains(
            response,
            u'do not have a current verified identity with {platform_name}'
            .format(platform_name=settings.PLATFORM_NAME))
    def test_post_to_linkedin_invisibility(self):
        """
        Verifies that the post certificate to linked button
        does not appear by default (when config is not set)
        """
        self._create_certificate('honor')
        # until we set up the configuration, the LinkedIn action
        # button should not be visible
        self._check_linkedin_visibility(False)
    def test_post_to_linkedin_visibility(self):
        """
        Verifies that the post certificate to linked button appears
        as expected
        """
        self._create_certificate('honor')
        # Enabling the config model is what turns the feature on.
        config = LinkedInAddToProfileConfiguration(
            company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
            enabled=True
        )
        config.save()
        # now we should see it
        self._check_linkedin_visibility(True)
    @mock.patch("openedx.core.djangoapps.theming.helpers.is_request_in_themed_site", mock.Mock(return_value=True))
    def test_post_to_linkedin_site_specific(self):
        """
        Verifies behavior for themed sites which disables the post to LinkedIn
        feature (for now)
        """
        self._create_certificate('honor')
        config = LinkedInAddToProfileConfiguration(
            company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
            enabled=True
        )
        config.save()
        # now we should not see it because we are in a themed site
        self._check_linkedin_visibility(False)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTestHtmlView(CertificateDisplayTestBase):
    """
    Tests of webview certificate display
    """
    @classmethod
    def setUpClass(cls):
        super(CertificateDisplayTestHtmlView, cls).setUpClass()
        # Turn on web (HTML) certificates for the shared course, but do NOT
        # define any certificate configuration -- that is the scenario under test.
        cls.course.cert_html_view_enabled = True
        cls.course.save()
        cls.store.update_item(cls.course, cls.USERNAME)
    @ddt.data('verified', 'honor')
    @override_settings(CERT_NAME_SHORT='Test_Certificate')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    def test_display_download_certificate_button(self, enrollment_mode):
        """
        Tests if CERTIFICATES_HTML_VIEW is True
        and course has enabled web certificates via cert_html_view_enabled setting
        and no active certificate configuration available
        then any of the Download certificate button should not be visible.
        """
        self._create_certificate(enrollment_mode)
        self._check_can_not_download_certificate()
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTestLinkedHtmlView(CertificateDisplayTestBase):
    """
    Tests of linked student certificates.
    """
    @classmethod
    def setUpClass(cls):
        super(CertificateDisplayTestLinkedHtmlView, cls).setUpClass()
        cls.course.cert_html_view_enabled = True
        # Unlike CertificateDisplayTestHtmlView, an *active* certificate
        # configuration is attached here, so the web view link should render.
        certificates = [
            {
                'id': 0,
                'name': 'Test Name',
                'description': 'Test Description',
                'is_active': True,
                'signatories': [],
                'version': 1
            }
        ]
        cls.course.certificates = {'certificates': certificates}
        cls.course.save()
        cls.store.update_item(cls.course, cls.USERNAME)
    @ddt.data('verified')
    @override_settings(CERT_NAME_SHORT='Test_Certificate')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    def test_linked_student_to_web_view_credential(self, enrollment_mode):
        # The dashboard should link to the web certificate URL for this cert's uuid.
        cert = self._create_certificate(enrollment_mode)
        test_url = get_certificate_url(course_id=self.course.id, uuid=cert.verify_uuid)
        response = self.client.get(reverse('dashboard'))
        self.assertContains(response, u'View Test_Certificate')
        self.assertContains(response, test_url)
| agpl-3.0 |
krasin/omim | 3party/protobuf/python/google/protobuf/internal/descriptor_database_test.py | 73 | 2924 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_database."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
from google.apputils import basetest
from google.protobuf import descriptor_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor_database
class DescriptorDatabaseTest(basetest.TestCase):
  """Exercises DescriptorDatabase.Add and its file/symbol lookups."""

  def testAdd(self):
    """A registered file proto is findable by name and by every symbol it declares."""
    db = descriptor_database.DescriptorDatabase()
    file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
        factory_test2_pb2.DESCRIPTOR.serialized_pb)
    db.Add(file_desc_proto)

    # assertEquals is a deprecated unittest alias (removed in Python 3.12);
    # assertEqual is the supported spelling and behaves identically.
    # Lookup by file name.
    self.assertEqual(file_desc_proto, db.FindFileByName(
        'google/protobuf/internal/factory_test2.proto'))
    # Lookup by top-level message symbol.
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message'))
    # Lookup by nested message symbol.
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message.NestedFactory2Message'))
    # Lookup by top-level enum symbol.
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Enum'))
    # Lookup by nested enum symbol.
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum'))
# Allow running this test module directly via Google's basetest runner.
if __name__ == '__main__':
  basetest.main()
| apache-2.0 |
elena/django | django/template/defaultfilters.py | 19 | 26556 | """Default variable filters."""
import random as random_module
import re
import types
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from operator import itemgetter
from pprint import pformat
from urllib.parse import quote
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.encoding import iri_to_uri
from django.utils.html import (
avoid_wrapping, conditional_escape, escape, escapejs,
json_script as _json_script, linebreaks, strip_tags, urlize as _urlize,
)
from django.utils.safestring import SafeData, mark_safe
from django.utils.text import (
Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap,
)
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import gettext, ngettext
from .base import Variable, VariableDoesNotExist
from .library import Library
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
    """
    Decorator for filters which should only receive strings. The object
    passed as the first positional argument will be converted to a string.
    """
    def _dec(*args, **kwargs):
        args = list(args)
        args[0] = str(args[0])
        # NOTE(review): the SafeData check runs *after* the str() conversion;
        # this relies on str() of a SafeData instance keeping the safe marker
        # (SafeText returns itself) -- confirm against the safestring module.
        if (isinstance(args[0], SafeData) and
                getattr(_dec._decorated_function, 'is_safe', False)):
            # Safe in, is_safe filter: re-mark the result as safe.
            return mark_safe(func(*args, **kwargs))
        return func(*args, **kwargs)
    # Include a reference to the real function (used to check original
    # arguments by the template parser, and to bear the 'is_safe' attribute
    # when multiple decorators are applied).
    _dec._decorated_function = getattr(func, '_decorated_function', func)
    return wraps(func)(_dec)
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
    """
    Backslash-escape backslashes and both quote characters. Useful for
    escaping strings in CSV, for example. Less useful for escaping
    JavaScript; use the ``escapejs`` filter instead.
    """
    # Escape the backslash first so already-inserted escapes are not doubled.
    for special in ('\\', '"', "'"):
        value = value.replace(special, '\\' + special)
    return value
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
    """Capitalize the first character of the value."""
    if not value:
        # Empty string: nothing to capitalize.
        return value
    return value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encode characters for use in JavaScript strings."""
return escapejs(value)
@register.filter(is_safe=True)
def json_script(value, element_id):
"""
Output value JSON-encoded, wrapped in a <script type="application/json">
tag.
"""
return _json_script(value, element_id)
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
    """
    Display a float to a specified number of decimal places.

    If called without an argument, display the floating point number with one
    decimal place -- but only if there's a decimal place to be displayed:

    * num1 = 34.23234
    * num2 = 34.00000
    * num3 = 34.26000
    * {{ num1|floatformat }} displays "34.2"
    * {{ num2|floatformat }} displays "34"
    * {{ num3|floatformat }} displays "34.3"

    If arg is positive, always display exactly arg number of decimal places:

    * {{ num1|floatformat:3 }} displays "34.232"
    * {{ num2|floatformat:3 }} displays "34.000"
    * {{ num3|floatformat:3 }} displays "34.260"

    If arg is negative, display arg number of decimal places -- but only if
    there are places to be displayed:

    * {{ num1|floatformat:"-3" }} displays "34.232"
    * {{ num2|floatformat:"-3" }} displays "34"
    * {{ num3|floatformat:"-3" }} displays "34.260"

    If arg has the 'g' suffix, force the result to be grouped by the
    THOUSAND_SEPARATOR for the active locale. When the active locale is
    en (English):

    * {{ 6666.6666|floatformat:"2g" }} displays "6,666.67"
    * {{ 10000|floatformat:"g" }} displays "10,000"

    If the input float is infinity or NaN, display the string representation
    of that value.
    """
    # A trailing 'g' in the argument requests locale-aware digit grouping;
    # strip it before the numeric parse below ('g' alone means the default -1).
    force_grouping = False
    if isinstance(arg, str) and arg.endswith('g'):
        force_grouping = True
        arg = arg[:-1] or -1
    try:
        input_val = repr(text)
        d = Decimal(input_val)
    except InvalidOperation:
        # repr() wasn't a valid Decimal literal (e.g. text is a plain str);
        # retry through float() and bail out silently on failure.
        try:
            d = Decimal(str(float(text)))
        except (ValueError, InvalidOperation, TypeError):
            return ''
    try:
        p = int(arg)
    except ValueError:
        # Non-integer precision argument: return the value unformatted.
        return input_val
    try:
        # m is the negated fractional part; it is falsy exactly when d is
        # integral. int() of inf/NaN raises, hence the broad except.
        m = int(d) - d
    except (ValueError, OverflowError, InvalidOperation):
        return input_val
    if not m and p < 0:
        # Integral value + negative precision: show no decimal places at all.
        return mark_safe(
            formats.number_format('%d' % (int(d)), 0, force_grouping=force_grouping),
        )
    exp = Decimal(1).scaleb(-abs(p))
    # Set the precision high enough to avoid an exception (#15789).
    tupl = d.as_tuple()
    units = len(tupl[1])
    units += -tupl[2] if m else tupl[2]
    prec = abs(p) + units + 1
    # Avoid conversion to scientific notation by accessing `sign`, `digits`,
    # and `exponent` from Decimal.as_tuple() directly.
    rounded_d = d.quantize(exp, ROUND_HALF_UP, Context(prec=prec))
    sign, digits, exponent = rounded_d.as_tuple()
    digits = [str(digit) for digit in reversed(digits)]
    while len(digits) <= abs(exponent):
        digits.append('0')
    digits.insert(-exponent, '.')
    if sign and rounded_d:
        # Only emit a minus sign for a nonzero result (avoids "-0").
        digits.append('-')
    number = ''.join(reversed(digits))
    return mark_safe(
        formats.number_format(number, abs(p), force_grouping=force_grouping),
    )
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escape an IRI value for use in a URL."""
return iri_to_uri(value)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
    """Display text with line numbers."""
    lines = value.split('\n')
    # Zero-pad each line number to the width of the largest one.
    width = len(str(len(lines)))
    if autoescape and not isinstance(value, SafeData):
        prepare = escape
    else:
        def prepare(line):
            return line
    numbered = [
        '%0*d. %s' % (width, number, prepare(line))
        for number, line in enumerate(lines, start=1)
    ]
    return mark_safe('\n'.join(numbered))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Convert a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Return the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Convert to ASCII. Convert spaces to hyphens. Remove characters that aren't
alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip
leading and trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
    """
    Format the variable according to the arg, a string formatting specifier.
    This specifier uses Python string formatting syntax, with the exception
    that the leading "%" is dropped.

    See https://docs.python.org/library/stdtypes.html#printf-style-string-formatting
    for documentation of Python string formatting.
    """
    if isinstance(value, tuple):
        # A tuple would be unpacked as multiple %-format arguments; render it
        # as its literal string representation instead.
        value = str(value)
    try:
        return ("%" + str(arg)) % value
    except (ValueError, TypeError):
        # Invalid format spec or incompatible value: fail silently.
        return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
    """Convert a string into titlecase."""
    # str.title() wrongly uppercases the letter after an apostrophe
    # ("don'T"); re-lowercase the whole <letter>'<letter> match.
    t = re.sub("([a-z])'([A-Z])", lambda m: m[0].lower(), value.title())
    # Likewise undo capitalization right after a digit ("1St" -> "1st").
    return re.sub(r'\d([A-Z])', lambda m: m[0].lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
    """Truncate a string after `arg` number of characters."""
    try:
        limit = int(arg)
    except ValueError:
        # Non-integer argument: leave the value untouched (fail silently).
        return value
    return Truncator(value).chars(limit)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncate HTML after `arg` number of chars.
Preserve newlines in the HTML.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
    """
    Truncate a string after `arg` number of words.
    Remove newlines within the string.
    """
    try:
        word_limit = int(arg)
    except ValueError:
        # Non-integer argument: leave the value untouched (fail silently).
        return value
    return Truncator(value).words(word_limit, truncate=' …')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncate HTML after `arg` number of words.
Preserve newlines in the HTML.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' …')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Convert a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
    """
    Escape a value for use in a URL.

    The ``safe`` parameter determines the characters which should not be
    escaped by Python's quote() function. If not provided, use the default safe
    characters (but an empty string can be provided when *all* characters
    should be escaped).
    """
    if safe is None:
        # Defer entirely to quote()'s default set of safe characters.
        return quote(value)
    return quote(value, safe=safe)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
"""Convert URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
"""
Convert URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Return the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""Wrap words at `arg` line length."""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""Left-align the value in a field of a given width."""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""Right-align the value in a field of a given width."""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Center the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
    """Remove all values of arg from the given string."""
    safe = isinstance(value, SafeData)
    value = value.replace(arg, '')
    # Removing ";" from an already-escaped string could truncate HTML
    # entities (e.g. "&amp;" -> "&amp"), so drop the safe marker in that case.
    if safe and arg != ';':
        return mark_safe(value)
    return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""Mark the value as a string that should be auto-escaped."""
return conditional_escape(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escape a string's HTML. Return a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
"""
Replace line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br>``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
"""
Convert all newlines in a piece of plain text to HTML line breaks
(``<br>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br>'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""Mark the value as a string that should not be auto-escaped."""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Mark each element in the sequence,
individually, as safe, after converting them to strings. Return a list
with the results.
"""
return [mark_safe(obj) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strip all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
def _property_resolver(arg):
    """
    When arg is convertible to float, behave like operator.itemgetter(arg)
    Otherwise, behave like Variable(arg).resolve

    >>> _property_resolver(1)('abc')
    'b'
    >>> _property_resolver('1')('abc')
    Traceback (most recent call last):
    ...
    TypeError: string indices must be integers
    >>> class Foo:
    ...     a = 42
    ...     b = 3.14
    ...     c = 'Hey!'
    >>> _property_resolver('b')(Foo())
    3.14
    """
    try:
        # Numeric-looking arguments index into the item (itemgetter).
        float(arg)
    except ValueError:
        # Anything else is treated as a template-variable lookup path.
        return Variable(arg).resolve
    else:
        return itemgetter(arg)
@register.filter(is_safe=False)
def dictsort(value, arg):
    """
    Given a list of dicts, return that list sorted by the property given in
    the argument.
    """
    try:
        # Keep the resolver construction inside the try: a non-string arg can
        # raise TypeError there, which must also fail silently.
        key_func = _property_resolver(arg)
        return sorted(value, key=key_func, reverse=False)
    except (TypeError, VariableDoesNotExist):
        return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Given a list of dicts, return that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=_property_resolver(arg), reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Return the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
"""Join a list with a string, like Python's ``str.join(list)``."""
try:
if autoescape:
value = [conditional_escape(v) for v in value]
data = conditional_escape(arg).join(value)
except TypeError: # Fail silently if arg isn't iterable.
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"""Return the last item in a list."""
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=False)
def length(value):
"""Return the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return 0
@register.filter(is_safe=False)
def length_is(value, arg):
"""Return a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Return a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
    """
    Return a slice of the list using the same syntax as Python's list slicing.
    """
    try:
        # Empty segments ("::2", "1:") become None, matching Python slices.
        parts = [int(chunk) if chunk else None for chunk in str(arg).split(':')]
        return value[slice(*parts)]
    except (ValueError, TypeError):
        return value  # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
    """
    Recursively take a self-nested list and return an HTML unordered list --
    WITHOUT opening and closing <ul> tags.

    Assume the list is in the proper format. For example, if ``var`` contains:
    ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``, then
    ``{{ var|unordered_list }}`` returns::

        <li>States
        <ul>
                <li>Kansas
                <ul>
                        <li>Lawrence</li>
                        <li>Topeka</li>
                </ul>
                </li>
                <li>Illinois</li>
        </ul>
        </li>
    """
    if autoescape:
        escaper = conditional_escape
    else:
        # No-op escaper keeps the code path below uniform.
        def escaper(x):
            return x
    def walk_items(item_list):
        # Yield (item, children) pairs: children is the next element when it
        # is itself a list/tuple/generator (i.e. a sublist), otherwise None.
        item_iterator = iter(item_list)
        try:
            item = next(item_iterator)
            while True:
                try:
                    next_item = next(item_iterator)
                except StopIteration:
                    # Last item has no potential sublist following it.
                    yield item, None
                    break
                if isinstance(next_item, (list, tuple, types.GeneratorType)):
                    try:
                        iter(next_item)
                    except TypeError:
                        pass
                    else:
                        # next_item is the sublist of the current item;
                        # consume one more element to continue the walk.
                        yield item, next_item
                        item = next(item_iterator)
                        continue
                yield item, None
                item = next_item
        except StopIteration:
            pass
    def list_formatter(item_list, tabs=1):
        # Render one nesting level; `tabs` controls the indentation depth.
        indent = '\t' * tabs
        output = []
        for item, children in walk_items(item_list):
            sublist = ''
            if children:
                sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
                    indent, list_formatter(children, tabs + 1), indent, indent)
            output.append('%s<li>%s%s</li>' % (
                indent, escaper(item), sublist))
        return '\n'.join(output)
    return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
    """Add the arg to the value."""
    try:
        # Prefer integer addition when both operands coerce cleanly.
        return int(value) + int(arg)
    except (ValueError, TypeError):
        try:
            # Fall back to the operands' own "+" (string/list concatenation, ...).
            return value + arg
        except Exception:
            return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
    """
    Given a whole number, return the requested digit of it, where 1 is the
    right-most digit, 2 is the second-right-most digit, etc. Return the
    original value for invalid input (if input or argument is not an integer,
    or if argument is less than 1). Otherwise, output is always an integer.
    """
    try:
        arg, value = int(arg), int(value)
    except ValueError:
        return value  # Fail silently for an invalid argument
    if arg < 1:
        return value
    digits = str(value)
    if arg > len(digits):
        # Position beyond the number's width: treat the digit as 0.
        return 0
    return int(digits[-arg])
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Format a date according to the given format."""
if value in (None, ''):
return ''
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Format a time according to the given format."""
if value in (None, ''):
return ''
try:
return formats.time_format(value, arg)
except (AttributeError, TypeError):
try:
return time_format(value, arg)
except (AttributeError, TypeError):
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
    """Format a date as the time since that date (i.e. "4 days, 6 hours")."""
    if not value:
        return ''
    try:
        # timesince() treats a None comparison point as "now", so passing arg
        # unconditionally is equivalent to the old two-branch form and keeps
        # this consistent with timeuntil_filter below.
        return timesince(value, arg)
    except (ValueError, TypeError):
        return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Format a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Return True if the value is divisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
    """
    Map ``value`` to one of the strings in ``arg``, a comma-separated
    "true,false[,none]" mapping:

    * a truthy value renders the first string,
    * a falsy value renders the second,
    * ``None`` renders the third when given, otherwise the second.

    For example, with ``arg="yeah,no,maybe"``: ``True`` -> ``"yeah"``,
    ``False`` -> ``"no"``, ``None`` -> ``"maybe"``. With ``arg="yeah,no"``,
    ``None`` falls back to ``"no"``.
    """
    if arg is None:
        # Translators: Please do not add spaces around commas.
        arg = gettext('yes,no,maybe')
    bits = arg.split(',')
    if len(bits) < 2:
        return value  # Invalid arg.
    if len(bits) == 3:
        yes, no, maybe = bits
    else:
        # No distinct "maybe" text supplied: reuse the "no" text for None.
        yes, no, maybe = bits[0], bits[1], bits[1]
    if value is None:
        return maybe
    return yes if value else no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes_):
"""
Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc.).
"""
try:
bytes_ = int(bytes_)
except (TypeError, ValueError, UnicodeDecodeError):
value = ngettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
return avoid_wrapping(value)
def filesize_number_format(value):
return formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
negative = bytes_ < 0
if negative:
bytes_ = -bytes_ # Allow formatting of negative numbers.
if bytes_ < KB:
value = ngettext("%(size)d byte", "%(size)d bytes", bytes_) % {'size': bytes_}
elif bytes_ < MB:
value = gettext("%s KB") % filesize_number_format(bytes_ / KB)
elif bytes_ < GB:
value = gettext("%s MB") % filesize_number_format(bytes_ / MB)
elif bytes_ < TB:
value = gettext("%s GB") % filesize_number_format(bytes_ / GB)
elif bytes_ < PB:
value = gettext("%s TB") % filesize_number_format(bytes_ / TB)
else:
value = gettext("%s PB") % filesize_number_format(bytes_ / PB)
if negative:
value = "-%s" % value
return avoid_wrapping(value)
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
    """
    Return a plural suffix if the value is not 1, '1', or an object of
    length 1. By default, use 's' as the suffix:

    * If value is 0, vote{{ value|pluralize }} display "votes".
    * If value is 1, vote{{ value|pluralize }} display "vote".
    * If value is 2, vote{{ value|pluralize }} display "votes".

    If an argument is provided, use that string instead:

    * If value is 0, class{{ value|pluralize:"es" }} display "classes".
    * If value is 1, class{{ value|pluralize:"es" }} display "class".
    * If value is 2, class{{ value|pluralize:"es" }} display "classes".

    If the provided argument contains a comma, use the text before the comma
    for the singular case and the text after the comma for the plural case:

    * If value is 0, cand{{ value|pluralize:"y,ies" }} display "candies".
    * If value is 1, cand{{ value|pluralize:"y,ies" }} display "candy".
    * If value is 2, cand{{ value|pluralize:"y,ies" }} display "candies".
    """
    if ',' not in arg:
        # Single suffix given: singular form is the empty string.
        arg = ',' + arg
    bits = arg.split(',')
    if len(bits) > 2:
        # More than two comma-separated parts is invalid input.
        return ''
    singular_suffix, plural_suffix = bits[:2]
    try:
        # Numbers (and numeric strings) pluralize on != 1.
        return singular_suffix if float(value) == 1 else plural_suffix
    except ValueError:  # Invalid string that's not a number.
        pass
    except TypeError:  # Value isn't a string or a number; maybe it's a list?
        try:
            return singular_suffix if len(value) == 1 else plural_suffix
        except TypeError:  # len() of unsized object.
            pass
    return ''
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Take a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, e)
| bsd-3-clause |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.7.42-1/roles/openshift_health_checker/test/action_plugin_test.py | 33 | 12200 | import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
from openshift_checks import OpenShiftCheckException, FileToSave
def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
               run_logs=None, run_files=None, changed=False, get_var_return=None):
    """Returns a new class that is compatible with OpenShiftCheck for testing."""
    # Rebind so the class body can use the factory arguments without the
    # class attributes shadowing them.
    _name, _tags = name, tags
    class FakeCheck(object):
        name = _name
        tags = _tags or []
        def __init__(self, **_):
            self.changed = False
            self.failures = []
            # NOTE(review): when run_logs/run_files are provided, every
            # instance shares the same list object -- acceptable for tests.
            self.logs = run_logs or []
            self.files_to_save = run_files or []
        def is_active(self):
            # Passing an Exception lets tests simulate a crash during
            # the is_active() probe itself.
            if isinstance(is_active, Exception):
                raise is_active
            return is_active
        def run(self):
            self.changed = changed
            if run_exception is not None:
                raise run_exception
            return run_return
        def get_var(*args, **_):
            # *args absorbs self, so this still works as an instance method.
            return get_var_return
        def register_failure(self, exc):
            self.failures.append(OpenShiftCheckException(str(exc)))
            return
    return FakeCheck
# Fixtures
@pytest.fixture
def plugin():
    # ActionModule wired to a fake task that requests a single check; the
    # remaining constructor arguments are unused by these tests.
    task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
    plugin = ActionModule(task, None, PlayContext(), None, None, None)
    return plugin
class FakeTask(object):
    """Minimal stand-in for an Ansible Task object as read by ActionModule.

    The attribute is still called ``async`` (older Ansible API), but
    ``async`` became a reserved keyword in Python 3.7, so a literal
    ``self.async = 0`` is a SyntaxError there; assign it via setattr().
    """

    def __init__(self, action, args):
        self.action = action  # action/module name, e.g. 'openshift_health_check'
        self.args = args      # the task's argument dict
        setattr(self, 'async', 0)
@pytest.fixture
def task_vars():
    # Minimal task vars: the presence of the 'openshift' facts key is what
    # most tests need to get past the openshift_facts guard.
    return dict(openshift=dict(), ansible_host='unit-test-host')
# Assertion helpers
def failed(result, msg_has=None):
    """Return the 'failed' flag of ``result`` (False when absent).

    When ``msg_has`` is given, additionally assert that every term occurs
    case-insensitively in the result's 'msg' field.
    """
    if msg_has is not None:
        assert 'msg' in result
        message = result['msg'].lower()
        for term in msg_has:
            assert term.lower() in message
    return result.get('failed', False)
def changed(result):
    """Return the 'changed' flag of a task or check result (default False)."""
    return result.get('changed', False)
# tests whether task is skipped, not individual checks
def skipped(result):
    """Return the 'skipped' flag of a task result (default False)."""
    return result.get('skipped', False)
# Tests
@pytest.mark.parametrize('task_vars', [
    None,
    {},
])
def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch):
    # With task vars missing or lacking the 'openshift' facts key, the
    # plugin must fail with a message mentioning openshift_facts.
    monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
    monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
    result = plugin.run(tmp=None, task_vars=task_vars)
    assert failed(result, msg_has=['openshift_facts'])
def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars, monkeypatch):
FakeCheck1 = fake_check('duplicate_name')
FakeCheck2 = fake_check('duplicate_name')
checks = [FakeCheck1, FakeCheck2]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
@pytest.mark.parametrize('is_active, skipped_reason', [
(False, "Not active for this host"),
(Exception("borked"), "exception"),
])
def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'].get('skipped')
assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
@pytest.mark.parametrize('to_disable', [
'fake_check',
['fake_check', 'spam'],
'*,spam,eggs',
])
def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
checks = [fake_check('fake_check', is_active=True)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
task_vars['openshift_disable_check'] = to_disable
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
assert not failed(result)
assert not changed(result)
assert not skipped(result)
def test_action_plugin_run_list_checks(monkeypatch):
task = FakeTask('openshift_health_check', {'checks': []})
plugin = ActionModule(task, None, PlayContext(), None, None, None)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run()
assert failed(result, msg_has="Available checks")
assert not changed(result)
assert not skipped(result)
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, run_files=[None])
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == check_return_value
assert not failed(result)
assert not changed(result)
assert not skipped(result)
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == check_return_value
assert changed(result['checks']['fake_check'])
assert not failed(result)
assert changed(result)
assert not skipped(result)
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == check_return_value
assert failed(result, msg_has=['failed'])
assert not changed(result)
assert not skipped(result)
@pytest.mark.parametrize('exc_class, expect_traceback', [
(OpenShiftCheckException, False),
(Exception, True),
])
def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
run_exception = exc_class(exception_msg)
check_class = fake_check(run_exception=run_exception, changed=True)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)
def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
check_class = fake_check(
run_return={},
run_logs=[('thing', 'note')],
run_files=[
FileToSave('save.file', 'contents', None),
FileToSave('save.file', 'duplicate', None),
FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
],
)
task_vars['openshift_checks_output_dir'] = str(tmpdir)
check_class.get_var = lambda self, name, **_: task_vars.get(name)
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
plugin.run(tmp=None, task_vars=task_vars)
assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
assert any(path.basename == 'save.file' for path in tmpdir.visit())
assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result, msg_has=['unknown', 'name'])
assert not changed(result)
assert not skipped(result)
@pytest.mark.parametrize('names,all_checks,expected', [
([], [], set()),
(
['a', 'b'],
[
fake_check('a'),
fake_check('b'),
],
set(['a', 'b']),
),
(
['a', 'b', '@group'],
[
fake_check('from_group_1', ['group', 'another_group']),
fake_check('not_in_group', ['another_group']),
fake_check('from_group_2', ['preflight', 'group']),
fake_check('a'),
fake_check('b'),
],
set(['a', 'b', 'from_group_1', 'from_group_2']),
),
])
def test_resolve_checks_ok(names, all_checks, expected):
assert resolve_checks(names, all_checks) == expected
@pytest.mark.parametrize('names,all_checks,words_in_exception', [
(
['testA', 'testB'],
[],
['check', 'name', 'testA', 'testB'],
),
(
['@group'],
[],
['tag', 'name', 'group'],
),
(
['testA', 'testB', '@group'],
[],
['check', 'name', 'testA', 'testB', 'tag', 'group'],
),
(
['testA', 'testB', '@group'],
[
fake_check('from_group_1', ['group', 'another_group']),
fake_check('not_in_group', ['another_group']),
fake_check('from_group_2', ['preflight', 'group']),
],
['check', 'name', 'testA', 'testB'],
),
])
def test_resolve_checks_failure(names, all_checks, words_in_exception):
with pytest.raises(Exception) as excinfo:
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
@pytest.mark.parametrize('give_output_dir, result, expect_file', [
    # No output dir configured -> nothing should be written.
    (False, None, False),
    # Valid base64 slurp result -> local file is written.
    (True, dict(content="c3BhbQo=", encoding="base64"), True),
    # Broken base64 payload -> decode fails, no file expected.
    (True, dict(content="encoding error", encoding="base64"), False),
    # Result without a recognized encoding key -> file still expected.
    (True, dict(content="spam", no_encoding=None), True),
    # Module-level failure (slurp failed) -> no file expected.
    (True, dict(failed=True, msg="could not slurp"), False),
])
def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
    check = fake_check()()
    # Stub the remote slurp call to return the canned module result.
    check.execute_module = lambda *args, **_: result
    copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
    assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):

    class Spam(object):
        # __str__ that raises, to break any serialization attempt.
        def __str__(self):
            raise Exception("break str")

    # Unserializable values plus a self-referencing dict must not crash the
    # writer; the error is recorded in the result instead.
    test = {1: object(), 2: Spam()}
    test[3] = test
    write_result_to_output_dir(str(tmpdir), test)
    assert "Error writing" in test["output_files"]

    # Writing into a path that is a file (not a directory) reports on stderr.
    output_dir = tmpdir.join("eggs")
    output_dir.write("spam")  # so now it's not a dir
    write_to_output_file(str(output_dir), "somefile", "somedata")
    assert "Could not write" in capsys.readouterr()[1]

    # If the output dir cannot be prepared at all, the error is recorded.
    monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
    write_result_to_output_dir(str(tmpdir), test)
    assert "Error creating" in test["output_files"]
| apache-2.0 |
slightstone/SickRage | autoProcessTV/lib/requests/auth.py | 331 | 6123 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
class AuthBase(object):
    """Base class that all auth implementations derive from"""

    def __call__(self, r):
        # Subclasses must override this hook to attach credentials to the
        # outgoing request ``r`` and return it.
        raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        # Header value is computed at request time from the stored pair.
        auth_header = _basic_auth_str(self.username, self.password)
        r.headers['Authorization'] = auth_header
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same basic-auth credentials, but sent in the proxy header.
        auth_header = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = auth_header
        return r
class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # Nonce-count state carried across requests (RFC 2617).
        self.last_nonce = ''
        self.nonce_count = 0
        self.chal = {}
        # Saved body position so a consumed body can be rewound on a 401.
        self.pos = None

    def build_digest_header(self, method, url):
        """Build an Authorization header value from the stored challenge
        (``self.chal``) per RFC 2617.

        Returns None for unsupported algorithm or qop values so the caller
        can skip digest authentication.
        """
        realm = self.chal['realm']
        nonce = self.chal['nonce']
        qop = self.chal.get('qop')
        algorithm = self.chal.get('algorithm')
        opaque = self.chal.get('opaque')

        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # BUGFIX: initialize so an unrecognized algorithm reaches the
        # "return None" guard below instead of raising UnboundLocalError.
        hash_utf8 = None
        # lambdas assume digest modules are imported at the top level
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8

        # KD(secret, data) as defined by RFC 2617.
        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        path = p_parsed.path
        if p_parsed.query:
            path += '?' + p_parsed.query

        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        HA1 = hash_utf8(A1)
        HA2 = hash_utf8(A2)

        if nonce == self.last_nonce:
            self.nonce_count += 1
        else:
            self.nonce_count = 1
        ncvalue = '%08x' % self.nonce_count
        # Client nonce: derived from count, server nonce, time and random
        # bytes, then SHA-1 hashed and truncated.
        s = str(self.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)

        cnonce = (hashlib.sha1(s).hexdigest()[:16])
        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
        if _algorithm == 'MD5-SESS':
            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

        if qop is None:
            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
        elif qop == 'auth' or 'auth' in qop.split(','):
            respdig = KD(HA1, noncebit)
        else:
            # XXX handle auth-int.
            return None

        self.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_401(self, r, **kwargs):
        """Takes the given response and tries digest-auth, if needed."""
        if self.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            r.request.body.seek(self.pos)
        num_401_calls = getattr(self, 'num_401_calls', 1)
        s_auth = r.headers.get('www-authenticate', '')

        # Retry at most once per request to avoid a 401 loop.
        if 'digest' in s_auth.lower() and num_401_calls < 2:

            setattr(self, 'num_401_calls', num_401_calls + 1)
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()
            prep = r.request.copy()
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            prep.headers['Authorization'] = self.build_digest_header(
                prep.method, prep.url)
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        setattr(self, 'num_401_calls', 1)
        return r

    def __call__(self, r):
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        try:
            self.pos = r.body.tell()
        except AttributeError:
            # Body is not seekable (string/None); nothing to rewind later.
            pass
        r.register_hook('response', self.handle_401)
        return r
| gpl-3.0 |
zarafagroupware/zarafa-zsm | fab/ldap/ldif.py | 1 | 8267 | # Copyright 2012 - 2013 Zarafa B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation with the following additional
# term according to sec. 7:
#
# According to sec. 7 of the GNU Affero General Public License, version
# 3, the terms of the AGPL are supplemented with the following terms:
#
# "Zarafa" is a registered trademark of Zarafa B.V. The licensing of
# the Program under the AGPL does not imply a trademark license.
# Therefore any rights, title and interest in our trademarks remain
# entirely with us.
#
# However, if you propagate an unmodified version of the Program you are
# allowed to use the term "Zarafa" to indicate that you distribute the
# Program. Furthermore you may use our trademarks where it is necessary
# to indicate the intended purpose of a product or service provided you
# use it in accordance with honest practices in industrial or commercial
# matters. If you want to propagate modified versions of the Program
# under the name "Zarafa" or "Zarafa Server", you may only do so if you
# have a written permission by Zarafa B.V. (to acquire a permission
# please contact Zarafa at trademark@zarafa.com).
#
# The interactive user interface of the software displays an attribution
# notice containing the term "Zarafa" and/or the logo of Zarafa.
# Interactive user interfaces of unmodified and modified versions must
# display Appropriate Legal Notices according to sec. 5 of the GNU
# Affero General Public License, version 3, when you propagate
# unmodified or modified versions of the Program. In accordance with
# sec. 7 b) of the GNU Affero General Public License, version 3, these
# Appropriate Legal Notices must retain the logo of Zarafa or display
# the words "Initial Development by Zarafa" if the display of the logo
# is not reasonably feasible for technical reasons. The use of the logo
# of Zarafa in Legal Notices is allowed for unmodified and modified
# versions of the software.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import join
import shutil
from fabric.api import env
from libzsm import text
from detect import detect_database_dn
from fab import fs
from fab.models.ldapenv import LdapEnv
def get_ldap_tmpl(template_name, context, **kwargs):
    """Load an LDAP template ('<template_name>.in' in the template dir),
    apply ``context`` substitutions via text.patch() and return the result.

    Extra keyword arguments are forwarded to text.patch().
    """
    filepath = join(
        env.ldap_template_dir, 'ldap', '{0}.in'.format(template_name))
    # Close the handle deterministically (the original leaked it until GC).
    with open(filepath, 'rt') as f:
        content = f.read()
    content = text.patch(context, content, **kwargs)
    return content
def get_zarafa_schema():
    """Convert zarafa.schema to LDIF with slaptest and return the cleaned
    schema entry, renamed to the configured custom-schema DN/CN."""
    ldap_env = LdapEnv.get()
    with fs.mkdtemp() as d:
        shutil.copyfile(
            join(env.ldap_schema_dir, 'zarafa.schema'),
            join(d, 'zarafa.schema'))
        # Point the conversion config's include line at the temp copy.
        context = {
            'include\s+zarafa\.schema': 'include {0}'.format(join(d, 'zarafa.schema')),
            '__LDAP_ETC_PATH__': ldap_env.etc_path_orig,
        }
        conv_fp = join(d, 'schema_convert.conf')
        text.patch_file(context,
            join(env.ldap_schema_dir, 'schema_convert.conf.in'),
            dest=conv_fp, literal=False)
        # debian6: fails to find slaptest without abs path
        env.run('/usr/sbin/slaptest -f {0} -F {1}'.format(conv_fp, d))
        ldif_file = join(d, 'cn=config', 'cn=schema', 'cn={4}zarafa.ldif')
        # Strip operational attributes slaptest adds, and rename the entry
        # to our custom schema DN/CN.
        context = {
            '(?m)^structuralObjectClass: .*$': '',
            '(?m)^entryUUID: .*$': '',
            '(?m)^creatorsName: .*$': '',
            '(?m)^createTimestamp: .*$': '',
            '(?m)^entryCSN: .*$': '',
            '(?m)^modifiersName: .*$': '',
            '(?m)^modifyTimestamp: .*$': '',
            '(?m)^dn: .*': 'dn: {0}'.format(env.ldap_custom_schema_dn),
            '(?m)^cn: .*': 'cn: {0}'.format(env.ldap_custom_schema_cn),
        }
        text.patch_file(context, ldif_file, dest=ldif_file, literal=False)
        # NOTE(review): handle is left to GC here; consider a with-block.
        content = open(ldif_file, 'rt').read()
        return content.strip()
def get_database():
    """Render the LDIF defining the main database entry (paths, root DN,
    hashed admin credentials and attribute indexes)."""
    ldap_env = LdapEnv.get()
    indexes = ''
    # One "olcDbIndex: <attr> <index types>" line per configured index.
    for fname, vals in sorted(env.ldap_indexes.items()):
        indexes += 'olcDbIndex: {0} {1}\n'.format(fname, vals)
    # Hash the admin password with slappasswd so the LDIF holds no cleartext.
    pw_hash = env.sudo('slappasswd -s {0}'.format(env.ldap_admin_pw), capture=True)
    context = {
        '__DB_PATH__': ldap_env.database_path,
        '__DIT_DN__': env.ldap_dit_dn,
        '__DIT_ADMIN_DN__': env.ldap_admin_dn,
        '__ADMIN_PW__': pw_hash,
        '__LDAP_INDEXES__': indexes,
    }
    return get_ldap_tmpl('database.ldif', context)
def get_mod_memberof():
    """Render the LDIF that loads the memberof overlay module."""
    return get_ldap_tmpl('mod_memberof.ldif', {})
def get_mod_memberof_config():
    """Return (keyword, value) pairs configuring the memberof overlay
    for slapd.conf."""
    return [
        ('overlay', 'memberof'),
        ('memberof-dangling', 'ignore'),
        ('memberof-refint', 'TRUE'),
        ('memberof-group-oc', env.ldap_group_objectclass),
        ('memberof-member-ad', 'member'),
        ('memberof-memberof-ad', 'memberOf'),
    ]
def get_overlay_memberof():
    """Render the LDIF attaching the memberof overlay to the database."""
    db_entry = detect_database_dn()
    context = {
        '__DB_RDN__': db_entry,
        '__GROUP_OBJECTCLASS__': env.ldap_group_objectclass,
    }
    return get_ldap_tmpl('overlay_memberof.ldif', context)
def get_mod_refint():
    """Render the LDIF that loads the referential-integrity overlay module."""
    return get_ldap_tmpl('mod_refint.ldif', {})
def get_mod_refint_config():
    """Return (keyword, value) pairs configuring the refint overlay
    for slapd.conf."""
    return [
        ('overlay', 'refint'),
        ('refint_attributes', ' '.join(env.ldap_ref_attributes)),
    ]
def get_overlay_refint():
    """Render the LDIF attaching the refint overlay to the database."""
    db_entry = detect_database_dn()
    atts = ' '.join(env.ldap_ref_attributes)
    context = {
        '__DB_RDN__': db_entry,
        '__REF_ATTRIBUTES__': atts,
    }
    return get_ldap_tmpl('overlay_refint.ldif', context)
def get_base_structure():
    """Render the LDIF creating the base DIT structure for the database."""
    context = {
        '__DB_NAME__': env.ldap_database_name,
        '__DIT_DN__': env.ldap_dit_dn,
    }
    return get_ldap_tmpl('base-structure.ldif', context)
def get_slapd_conf(distro):
    """Render slapd.conf for the given distro: modules, overlay config,
    indexes, admin credentials and filesystem paths."""
    ldap_env = LdapEnv(distro)

    def tabbed_line_join(data, prefix='', sort=False):
        # Format (name, props) pairs as aligned "name  props" lines; accepts
        # either a dict or a sequence of pairs.
        tuples = data
        # isinstance instead of "type(data) == dict": idiomatic and also
        # accepts dict subclasses.
        if isinstance(data, dict):
            tuples = data.items()
        if sort:
            tuples = sorted(tuples)
        return '\n'.join(['{0}{1:30} {2}'.format(prefix, name, props)
                          for (name, props) in tuples])

    confs = [get_mod_memberof_config(), get_mod_refint_config()]
    moduleconf = '\n\n'.join([tabbed_line_join(c) for c in confs])
    modules = '\n'.join(['moduleload\t{0}'.format(name) for name in env.ldap_modules])
    indexes = tabbed_line_join(env.ldap_indexes, prefix='index\t', sort=True)
    context = {
        '__DIT_DN__': env.ldap_dit_dn,
        '__DIT_ADMIN_DN__': env.ldap_admin_dn,
        '__ADMIN_PW__': env.ldap_admin_pw,
        '__INDEXES__': indexes,
        '__MODULES__': modules,
        '__MODULECONF__': moduleconf,
        '__ARGSFILE__': ldap_env.argsfile,
        '__ETC_LDAP__': ldap_env.etc_path,
        '__PIDFILE__': ldap_env.pidfile,
        '__VAR_LIB_LDAP__': ldap_env.database_rootdir,
    }
    return get_ldap_tmpl('slapd.conf', context)
def get_ldap_bootstrap_script(distro):
    """Render the bootstrap-ldap.sh script for the given distro, filling in
    admin credentials, ownership and the distro-specific LDAP paths."""
    ldap_env = LdapEnv(distro)
    context = {
        '__DIT_ADMIN_DN__': env.ldap_admin_dn,
        '__ADMIN_PW__': env.ldap_admin_pw,
        '__LDAP_GROUP__': ldap_env.ldap_group,
        '__LDAP_USER__': ldap_env.ldap_user,
        '__ETC_LDAP__': ldap_env.etc_path,
        '__ETC_LDAP_SCHEMA__': ldap_env.schema_path,
        '__ETC_LDAP_SLAPDD__': ldap_env.slapdd_path,
        '__USR_LIB_LDAP_LOCS__': ' '.join(ldap_env.module_path_locs),
        '__VAR_LIB_LDAP__': ldap_env.database_rootdir,
    }
    return get_ldap_tmpl('bootstrap-ldap.sh', context)
def get_zarafa_schema_raw():
    """Return the raw contents of zarafa.schema, stripped of surrounding
    whitespace."""
    filepath = join(env.ldap_template_dir, 'schema', 'zarafa.schema')
    # Close the handle deterministically (the original leaked it until GC).
    with open(filepath, 'rt') as f:
        content = f.read()
    return content.strip()
| agpl-3.0 |
keto/askbot-devel | askbot/deps/livesettings/templatetags/config_tags.py | 22 | 2226 | from django import template
from django.contrib.sites.models import Site
from django.core import urlresolvers
from askbot.deps.livesettings import config_value
from askbot.deps.livesettings.utils import url_join
import logging
log = logging.getLogger('configuration.config_tags')
register = template.Library()
def force_space(value, chars=40):
    """Forces spaces every `chars` in value"""
    chars = int(chars)
    if len(value) < chars:
        return value
    # Slice into `chars`-sized chunks and join them with single spaces.
    chunks = [value[i:i + chars] for i in range(0, len(value), chars)]
    return ' '.join(chunks)
def break_at(value, chars=40):
    """Force spaces into long lines which don't have spaces"""
    #todo: EF - lazy patch
    # NOTE(review): this early return disables the filter entirely; the
    # word-splitting code below is currently dead and kept only for when
    # the patch is reverted.
    return value
    chars = int(chars)
    value = unicode(value)  # NOTE(review): py2-only builtin (dead code)
    if len(value) < chars:
        return value
    else:
        out = []
        line = value.split(' ')
        for word in line:
            # Only long space-less words get the forced spacing.
            if len(word) > chars:
                out.append(force_space(word, chars))
            else:
                out.append(word)
        return " ".join(out)
register.filter('break_at', break_at)
def config_boolean(option):
    """Looks up the configuration option, returning true or false.

    ``option`` is a dotted "group.key" path; any lookup failure is treated
    as False (the template-friendly empty string).
    """
    args = option.split('.')
    try:
        val = config_value(*args)
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the intended "any lookup
        # failure means False" behaviour without masking interpreter exits.
        log.warn('config_boolean tag: Tried to look up config setting "%s", got SettingNotSet, returning False', option)
        val = False
    if val:
        return "true"
    else:
        return ""
register.filter('config_boolean', config_boolean)
def admin_site_views(view):
    """Returns a formatted list of sites, rendering for view, if any"""
    if view:
        # Resolve the named view to a URL path once, reused for every site.
        path = urlresolvers.reverse(view)
    else:
        path = None
    links = []
    # One (site name, absolute URL) pair per configured Django Site.
    for site in Site.objects.all():
        paths = ["http://", site.domain]
        if path:
            paths.append(path)
        links.append((site.name, url_join(paths)))
    ret = {
        'links' : links,
    }
    return ret
register.inclusion_tag('askbot.deps.livesettings/_admin_site_views.html')(admin_site_views)
| gpl-3.0 |
kobejean/tensorflow | tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py | 17 | 22170 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.beam_search_decoder."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
from tensorflow.contrib.seq2seq.python.ops import beam_search_decoder
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
class TestGatherTree(test.TestCase):
"""Tests the gather_tree function."""
def test_gather_tree(self):
# (max_time = 3, batch_size = 2, beam_width = 3)
# create (batch_size, max_time, beam_width) matrix and transpose it
predicted_ids = np.array(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[2, 3, 4], [5, 6, 7], [8, 9, 10]]],
dtype=np.int32).transpose([1, 0, 2])
parent_ids = np.array(
[[[0, 0, 0], [0, 1, 1], [2, 1, 2]], [[0, 0, 0], [1, 2, 0], [2, 1, 1]]],
dtype=np.int32).transpose([1, 0, 2])
# sequence_lengths is shaped (batch_size = 3)
max_sequence_lengths = [3, 3]
expected_result = np.array([[[2, 2, 2], [6, 5, 6], [7, 8, 9]],
[[2, 4, 4], [7, 6, 6],
[8, 9, 10]]]).transpose([1, 0, 2])
res = beam_search_ops.gather_tree(
predicted_ids,
parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=11)
with self.cached_session() as sess:
res_ = sess.run(res)
self.assertAllEqual(expected_result, res_)
def _test_gather_tree_from_array(self,
depth_ndims=0,
merged_batch_beam=False):
array = np.array(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 0, 0]],
[[2, 3, 4], [5, 6, 7], [8, 9, 10], [11, 12, 0]]]).transpose([1, 0, 2])
parent_ids = np.array(
[[[0, 0, 0], [0, 1, 1], [2, 1, 2], [-1, -1, -1]],
[[0, 0, 0], [1, 1, 0], [2, 0, 1], [0, 1, 0]]]).transpose([1, 0, 2])
expected_array = np.array(
[[[2, 2, 2], [6, 5, 6], [7, 8, 9], [0, 0, 0]],
[[2, 3, 2], [7, 5, 7], [8, 9, 8], [11, 12, 0]]]).transpose([1, 0, 2])
sequence_length = [[3, 3, 3], [4, 4, 3]]
array = ops.convert_to_tensor(
array, dtype=dtypes.float32)
parent_ids = ops.convert_to_tensor(
parent_ids, dtype=dtypes.int32)
expected_array = ops.convert_to_tensor(
expected_array, dtype=dtypes.float32)
max_time = array_ops.shape(array)[0]
batch_size = array_ops.shape(array)[1]
beam_width = array_ops.shape(array)[2]
def _tile_in_depth(tensor):
# Generate higher rank tensors by concatenating tensor and tensor + 1.
for _ in range(depth_ndims):
tensor = array_ops.stack([tensor, tensor + 1], -1)
return tensor
if merged_batch_beam:
array = array_ops.reshape(
array, [max_time, batch_size * beam_width])
expected_array = array_ops.reshape(
expected_array, [max_time, batch_size * beam_width])
if depth_ndims > 0:
array = _tile_in_depth(array)
expected_array = _tile_in_depth(expected_array)
sorted_array = beam_search_decoder.gather_tree_from_array(
array, parent_ids, sequence_length)
with self.cached_session() as sess:
sorted_array = sess.run(sorted_array)
expected_array = sess.run(expected_array)
self.assertAllEqual(expected_array, sorted_array)
def test_gather_tree_from_array_scalar(self):
self._test_gather_tree_from_array()
def test_gather_tree_from_array_1d(self):
self._test_gather_tree_from_array(depth_ndims=1)
def test_gather_tree_from_array_1d_with_merged_batch_beam(self):
self._test_gather_tree_from_array(depth_ndims=1, merged_batch_beam=True)
def test_gather_tree_from_array_2d(self):
self._test_gather_tree_from_array(depth_ndims=2)
def test_gather_tree_from_array_complex_trajectory(self):
# Max. time = 7, batch = 1, beam = 5.
array = np.expand_dims(np.array(
[[[25, 12, 114, 89, 97]],
[[9, 91, 64, 11, 162]],
[[34, 34, 34, 34, 34]],
[[2, 4, 2, 2, 4]],
[[2, 3, 6, 2, 2]],
[[2, 2, 2, 3, 2]],
[[2, 2, 2, 2, 2]]]), -1)
parent_ids = np.array(
[[[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0]],
[[0, 1, 2, 3, 4]],
[[0, 0, 1, 2, 1]],
[[0, 1, 1, 2, 3]],
[[0, 1, 3, 1, 2]],
[[0, 1, 2, 3, 4]]])
expected_array = np.expand_dims(np.array(
[[[25, 25, 25, 25, 25]],
[[9, 9, 91, 9, 9]],
[[34, 34, 34, 34, 34]],
[[2, 4, 2, 4, 4]],
[[2, 3, 6, 3, 6]],
[[2, 2, 2, 3, 2]],
[[2, 2, 2, 2, 2]]]), -1)
sequence_length = [[4, 6, 4, 7, 6]]
array = ops.convert_to_tensor(
array, dtype=dtypes.float32)
parent_ids = ops.convert_to_tensor(
parent_ids, dtype=dtypes.int32)
expected_array = ops.convert_to_tensor(
expected_array, dtype=dtypes.float32)
sorted_array = beam_search_decoder.gather_tree_from_array(
array, parent_ids, sequence_length)
with self.cached_session() as sess:
sorted_array, expected_array = sess.run([sorted_array, expected_array])
self.assertAllEqual(expected_array, sorted_array)
class TestArrayShapeChecks(test.TestCase):
  """Tests the runtime batch/beam shape validation op."""

  def _test_array_shape_dynamic_checks(self, static_shape, dynamic_shape,
                                       batch_size, beam_width, is_valid=True):
    # Feed a tensor whose static shape is partially unknown and verify that
    # _check_batch_beam accepts or rejects it when the graph is run.
    t = array_ops.placeholder_with_default(
        np.random.randn(*static_shape).astype(np.float32),
        shape=dynamic_shape)
    batch_size = array_ops.constant(batch_size)
    check_op = beam_search_decoder._check_batch_beam(t, batch_size, beam_width)  # pylint: disable=protected-access
    with self.cached_session() as sess:
      if is_valid:
        sess.run(check_op)
      else:
        with self.assertRaises(errors.InvalidArgumentError):
          sess.run(check_op)

  def test_array_shape_dynamic_checks(self):
    # Valid: dims 1,2 equal (batch, beam), or dim 1 equals batch * beam.
    self._test_array_shape_dynamic_checks(
        (8, 4, 5, 10), (None, None, 5, 10), 4, 5, is_valid=True)
    self._test_array_shape_dynamic_checks(
        (8, 20, 10), (None, None, 10), 4, 5, is_valid=True)
    # Invalid: shapes inconsistent with batch_size=4, beam_width=5.
    self._test_array_shape_dynamic_checks(
        (8, 21, 10), (None, None, 10), 4, 5, is_valid=False)
    self._test_array_shape_dynamic_checks(
        (8, 4, 6, 10), (None, None, None, 10), 4, 5, is_valid=False)
    self._test_array_shape_dynamic_checks(
        (8, 4), (None, None), 4, 5, is_valid=False)
class TestEosMasking(test.TestCase):
  """Tests EOS masking used in beam search."""

  def test_eos_masking(self):
    # (batch=2, beam=3, vocab=5) log-probability tensor.
    probs = constant_op.constant([
        [[-.2, -.2, -.2, -.2, -.2], [-.3, -.3, -.3, 3, 0], [5, 6, 0, 0, 0]],
        [[-.2, -.2, -.2, -.2, 0], [-.3, -.3, -.1, 3, 0], [5, 6, 3, 0, 0]],
    ])
    eos_token = 0
    # Beams [0][1], [1][1] and [1][2] are marked finished.
    previously_finished = np.array([[0, 1, 0], [0, 1, 1]], dtype=bool)
    masked = beam_search_decoder._mask_probs(probs, eos_token,
                                             previously_finished)
    with self.cached_session() as sess:
      probs = sess.run(probs)
      masked = sess.run(masked)
      # Unfinished beams pass through unchanged.
      self.assertAllEqual(probs[0][0], masked[0][0])
      self.assertAllEqual(probs[0][2], masked[0][2])
      self.assertAllEqual(probs[1][0], masked[1][0])
      # Finished beams keep only EOS: its log prob becomes 0...
      self.assertEqual(masked[0][1][0], 0)
      self.assertEqual(masked[1][1][0], 0)
      self.assertEqual(masked[1][2][0], 0)
      # ...and every other vocab entry is pushed to float32 min.
      for i in range(1, 5):
        self.assertAllClose(masked[0][1][i], np.finfo('float32').min)
        self.assertAllClose(masked[1][1][i], np.finfo('float32').min)
        self.assertAllClose(masked[1][2][i], np.finfo('float32').min)
class TestBeamStep(test.TestCase):
  """Tests a single step of beam search."""

  def setUp(self):
    super(TestBeamStep, self).setUp()
    # Small, hand-checkable configuration: 2 batches, 3 beams, 5 tokens.
    self.batch_size = 2
    self.beam_width = 3
    self.vocab_size = 5
    self.end_token = 0
    self.length_penalty_weight = 0.6
    self.coverage_penalty_weight = 0.0

  def test_step(self):
    """One step with no beam finished: the top-3 continuations win."""
    dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])
    beam_state = beam_search_decoder.BeamSearchDecoderState(
        cell_state=dummy_cell_state,
        log_probs=nn_ops.log_softmax(
            array_ops.ones([self.batch_size, self.beam_width])),
        lengths=constant_op.constant(
            2, shape=[self.batch_size, self.beam_width], dtype=dtypes.int64),
        finished=array_ops.zeros(
            [self.batch_size, self.beam_width], dtype=dtypes.bool),
        accumulated_attention_probs=())

    # Mostly-flat logits with a few dominant (beam, token) entries so the
    # expected winners asserted below are unambiguous.
    logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
                      0.0001)
    logits_[0, 0, 2] = 1.9
    logits_[0, 0, 3] = 2.1
    logits_[0, 1, 3] = 3.1
    logits_[0, 1, 4] = 0.9
    logits_[1, 0, 1] = 0.5
    logits_[1, 1, 2] = 2.7
    logits_[1, 2, 2] = 10.0
    logits_[1, 2, 3] = 0.2
    logits = ops.convert_to_tensor(logits_, dtype=dtypes.float32)
    log_probs = nn_ops.log_softmax(logits)

    outputs, next_beam_state = beam_search_decoder._beam_search_step(
        time=2,
        logits=logits,
        next_cell_state=dummy_cell_state,
        beam_state=beam_state,
        batch_size=ops.convert_to_tensor(self.batch_size),
        beam_width=self.beam_width,
        end_token=self.end_token,
        length_penalty_weight=self.length_penalty_weight,
        coverage_penalty_weight=self.coverage_penalty_weight)

    with self.cached_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    self.assertAllEqual(outputs_.predicted_ids, [[3, 3, 2], [2, 2, 1]])
    self.assertAllEqual(outputs_.parent_ids, [[1, 0, 0], [2, 1, 0]])
    # No beam emitted EOS, so every length grows from 2 to 3.
    self.assertAllEqual(next_state_.lengths, [[3, 3, 3], [3, 3, 3]])
    self.assertAllEqual(next_state_.finished,
                        [[False, False, False], [False, False, False]])

    # Expected log probs: parent beam's log prob plus the chosen token's.
    expected_log_probs = []
    expected_log_probs.append(state_.log_probs[0][[1, 0, 0]])
    expected_log_probs.append(state_.log_probs[1][[2, 1, 0]])  # 0 --> 1
    expected_log_probs[0][0] += log_probs_[0, 1, 3]
    expected_log_probs[0][1] += log_probs_[0, 0, 3]
    expected_log_probs[0][2] += log_probs_[0, 0, 2]
    expected_log_probs[1][0] += log_probs_[1, 2, 2]
    expected_log_probs[1][1] += log_probs_[1, 1, 2]
    expected_log_probs[1][2] += log_probs_[1, 0, 1]
    self.assertAllEqual(next_state_.log_probs, expected_log_probs)

  def test_step_with_eos(self):
    """One step where some beams have already emitted EOS."""
    dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])
    beam_state = beam_search_decoder.BeamSearchDecoderState(
        cell_state=dummy_cell_state,
        log_probs=nn_ops.log_softmax(
            array_ops.ones([self.batch_size, self.beam_width])),
        lengths=ops.convert_to_tensor(
            [[2, 1, 2], [2, 2, 1]], dtype=dtypes.int64),
        finished=ops.convert_to_tensor(
            [[False, True, False], [False, False, True]], dtype=dtypes.bool),
        accumulated_attention_probs=())

    logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
                      0.0001)
    logits_[0, 0, 2] = 1.9
    logits_[0, 0, 3] = 2.1
    logits_[0, 1, 3] = 3.1
    logits_[0, 1, 4] = 0.9
    logits_[1, 0, 1] = 0.5
    # NOTE(review): the original comment asked "why does this not work when
    # it's 2.7?" -- the value must dominate for the expected ordering below;
    # the exact threshold has not been verified.
    logits_[1, 1, 2] = 5.7
    logits_[1, 2, 2] = 1.0
    logits_[1, 2, 3] = 0.2
    logits = ops.convert_to_tensor(logits_, dtype=dtypes.float32)
    log_probs = nn_ops.log_softmax(logits)

    outputs, next_beam_state = beam_search_decoder._beam_search_step(
        time=2,
        logits=logits,
        next_cell_state=dummy_cell_state,
        beam_state=beam_state,
        batch_size=ops.convert_to_tensor(self.batch_size),
        beam_width=self.beam_width,
        end_token=self.end_token,
        length_penalty_weight=self.length_penalty_weight,
        coverage_penalty_weight=self.coverage_penalty_weight)

    with self.cached_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    self.assertAllEqual(outputs_.parent_ids, [[1, 0, 0], [1, 2, 0]])
    self.assertAllEqual(outputs_.predicted_ids, [[0, 3, 2], [2, 0, 1]])
    # Finished beams keep their (shorter) length; active beams grow to 3.
    self.assertAllEqual(next_state_.lengths, [[1, 3, 3], [3, 1, 3]])
    self.assertAllEqual(next_state_.finished,
                        [[True, False, False], [False, True, False]])

    expected_log_probs = []
    expected_log_probs.append(state_.log_probs[0][[1, 0, 0]])
    expected_log_probs.append(state_.log_probs[1][[1, 2, 0]])
    # Finished beams keep their accumulated log prob; the remaining beams
    # add the chosen token's log prob.
    expected_log_probs[0][1] += log_probs_[0, 0, 3]
    expected_log_probs[0][2] += log_probs_[0, 0, 2]
    expected_log_probs[1][0] += log_probs_[1, 1, 2]
    expected_log_probs[1][2] += log_probs_[1, 0, 1]
    self.assertAllEqual(next_state_.log_probs, expected_log_probs)
class TestLargeBeamStep(test.TestCase):
  """Tests large beam step.

  Tests a single step of beam search in such case that beam size is larger than
  vocabulary size.
  """

  def setUp(self):
    super(TestLargeBeamStep, self).setUp()
    # beam_width (8) deliberately exceeds vocab_size (5).
    self.batch_size = 2
    self.beam_width = 8
    self.vocab_size = 5
    self.end_token = 0
    self.length_penalty_weight = 0.6
    self.coverage_penalty_weight = 0.0

  def test_step(self):

    def get_probs():
      """this simulates the initialize method in BeamSearchDecoder."""
      # Only beam 0 of each batch starts live (log prob 0); the rest are
      # dead beams at -inf.
      log_prob_mask = array_ops.one_hot(
          array_ops.zeros([self.batch_size], dtype=dtypes.int32),
          depth=self.beam_width,
          on_value=True,
          off_value=False,
          dtype=dtypes.bool)

      log_prob_zeros = array_ops.zeros(
          [self.batch_size, self.beam_width], dtype=dtypes.float32)
      log_prob_neg_inf = array_ops.ones(
          [self.batch_size, self.beam_width], dtype=dtypes.float32) * -np.Inf

      log_probs = array_ops.where(log_prob_mask, log_prob_zeros,
                                  log_prob_neg_inf)
      return log_probs

    log_probs = get_probs()
    dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])

    # pylint: disable=invalid-name
    # Beam 0 is unfinished, every other beam starts out "finished".
    _finished = array_ops.one_hot(
        array_ops.zeros([self.batch_size], dtype=dtypes.int32),
        depth=self.beam_width,
        on_value=False,
        off_value=True,
        dtype=dtypes.bool)
    _lengths = np.zeros([self.batch_size, self.beam_width], dtype=np.int64)
    _lengths[:, 0] = 2
    _lengths = constant_op.constant(_lengths, dtype=dtypes.int64)

    beam_state = beam_search_decoder.BeamSearchDecoderState(
        cell_state=dummy_cell_state,
        log_probs=log_probs,
        lengths=_lengths,
        finished=_finished,
        accumulated_attention_probs=())

    logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
                      0.0001)
    logits_[0, 0, 2] = 1.9
    logits_[0, 0, 3] = 2.1
    logits_[0, 1, 3] = 3.1
    logits_[0, 1, 4] = 0.9
    logits_[1, 0, 1] = 0.5
    logits_[1, 1, 2] = 2.7
    logits_[1, 2, 2] = 10.0
    logits_[1, 2, 3] = 0.2
    logits = constant_op.constant(logits_, dtype=dtypes.float32)
    log_probs = nn_ops.log_softmax(logits)

    outputs, next_beam_state = beam_search_decoder._beam_search_step(
        time=2,
        logits=logits,
        next_cell_state=dummy_cell_state,
        beam_state=beam_state,
        batch_size=ops.convert_to_tensor(self.batch_size),
        beam_width=self.beam_width,
        end_token=self.end_token,
        length_penalty_weight=self.length_penalty_weight,
        coverage_penalty_weight=self.coverage_penalty_weight)

    with self.cached_session() as sess:
      outputs_, next_state_, _, _ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    self.assertEqual(outputs_.predicted_ids[0, 0], 3)
    self.assertEqual(outputs_.predicted_ids[0, 1], 2)
    self.assertEqual(outputs_.predicted_ids[1, 0], 1)
    neg_inf = -np.Inf
    # With only one live parent beam and 5 tokens, at most 5 of the 8 output
    # beams can be live: the last 3 must stay dead (-inf, length 0).
    self.assertAllEqual(
        next_state_.log_probs[:, -3:],
        [[neg_inf, neg_inf, neg_inf], [neg_inf, neg_inf, neg_inf]])
    self.assertEqual((next_state_.log_probs[:, :-3] > neg_inf).all(), True)
    self.assertEqual((next_state_.lengths[:, :-3] > 0).all(), True)
    self.assertAllEqual(next_state_.lengths[:, -3:], [[0, 0, 0], [0, 0, 0]])
class BeamSearchDecoderTest(test.TestCase):
  """End-to-end dynamic_decode smoke tests for BeamSearchDecoder."""

  def _testDynamicDecodeRNN(self, time_major, has_attention,
                            with_alignment_history=False):
    """Builds an LSTM (optionally attention-wrapped) beam-search decoder.

    Runs decoder.dynamic_decode and checks the static and runtime shapes of
    the decoder output.

    Args:
      time_major: Whether outputs are time-major (otherwise batch-major).
      has_attention: Whether to wrap the cell with BahdanauAttention.
      with_alignment_history: Whether the attention wrapper also keeps its
        alignment history.
    """
    encoder_sequence_length = np.array([3, 2, 3, 1, 1])
    decoder_sequence_length = np.array([2, 0, 1, 2, 3])
    batch_size = 5
    decoder_max_time = 4
    input_depth = 7
    cell_depth = 9
    attention_depth = 6
    vocab_size = 20
    end_token = vocab_size - 1
    start_token = 0
    embedding_dim = 50
    max_out = max(decoder_sequence_length)
    output_layer = layers_core.Dense(vocab_size, use_bias=True, activation=None)
    beam_width = 3

    with self.cached_session() as sess:
      batch_size_tensor = constant_op.constant(batch_size)
      embedding = np.random.randn(vocab_size, embedding_dim).astype(np.float32)
      cell = rnn_cell.LSTMCell(cell_depth)
      initial_state = cell.zero_state(batch_size, dtypes.float32)
      coverage_penalty_weight = 0.0
      if has_attention:
        coverage_penalty_weight = 0.2
        inputs = array_ops.placeholder_with_default(
            np.random.randn(batch_size, decoder_max_time, input_depth).astype(
                np.float32),
            shape=(None, None, input_depth))
        # Memory and encoder state are tiled beam_width times so each beam
        # attends over its own copy of the memory.
        tiled_inputs = beam_search_decoder.tile_batch(
            inputs, multiplier=beam_width)
        tiled_sequence_length = beam_search_decoder.tile_batch(
            encoder_sequence_length, multiplier=beam_width)
        attention_mechanism = attention_wrapper.BahdanauAttention(
            num_units=attention_depth,
            memory=tiled_inputs,
            memory_sequence_length=tiled_sequence_length)
        initial_state = beam_search_decoder.tile_batch(
            initial_state, multiplier=beam_width)
        cell = attention_wrapper.AttentionWrapper(
            cell=cell,
            attention_mechanism=attention_mechanism,
            attention_layer_size=attention_depth,
            alignment_history=with_alignment_history)
      cell_state = cell.zero_state(
          dtype=dtypes.float32, batch_size=batch_size_tensor * beam_width)
      if has_attention:
        cell_state = cell_state.clone(cell_state=initial_state)
      bsd = beam_search_decoder.BeamSearchDecoder(
          cell=cell,
          embedding=embedding,
          start_tokens=array_ops.fill([batch_size_tensor], start_token),
          end_token=end_token,
          initial_state=cell_state,
          beam_width=beam_width,
          output_layer=output_layer,
          length_penalty_weight=0.0,
          coverage_penalty_weight=coverage_penalty_weight)
      final_outputs, final_state, final_sequence_lengths = (
          decoder.dynamic_decode(
              bsd, output_time_major=time_major, maximum_iterations=max_out))

      def _t(shape):
        # Swaps the first two dims of an expected shape when time_major.
        if time_major:
          return (shape[1], shape[0]) + shape[2:]
        return shape

      self.assertTrue(
          isinstance(final_outputs,
                     beam_search_decoder.FinalBeamSearchDecoderOutput))
      self.assertTrue(
          isinstance(final_state, beam_search_decoder.BeamSearchDecoderState))

      # Static shapes: the time dimension is unknown (None) before running.
      beam_search_decoder_output = final_outputs.beam_search_decoder_output
      self.assertEqual(
          _t((batch_size, None, beam_width)),
          tuple(beam_search_decoder_output.scores.get_shape().as_list()))
      self.assertEqual(
          _t((batch_size, None, beam_width)),
          tuple(final_outputs.predicted_ids.get_shape().as_list()))

      sess.run(variables.global_variables_initializer())
      sess_results = sess.run({
          'final_outputs': final_outputs,
          'final_state': final_state,
          'final_sequence_lengths': final_sequence_lengths
      })

      max_sequence_length = np.max(sess_results['final_sequence_lengths'])

      # A smoke test
      self.assertEqual(
          _t((batch_size, max_sequence_length, beam_width)),
          sess_results['final_outputs'].beam_search_decoder_output.scores.shape)
      self.assertEqual(
          _t((batch_size, max_sequence_length, beam_width)), sess_results[
              'final_outputs'].beam_search_decoder_output.predicted_ids.shape)

  def testDynamicDecodeRNNBatchMajorNoAttention(self):
    self._testDynamicDecodeRNN(time_major=False, has_attention=False)

  def testDynamicDecodeRNNBatchMajorYesAttention(self):
    self._testDynamicDecodeRNN(time_major=False, has_attention=True)

  def testDynamicDecodeRNNBatchMajorYesAttentionWithAlignmentHistory(self):
    self._testDynamicDecodeRNN(
        time_major=False,
        has_attention=True,
        with_alignment_history=True)
if __name__ == '__main__':
  # Run this module's beam-search decoder tests directly.
  test.main()
| apache-2.0 |
Krolov18/Languages | xSampa2api/xSampa2api.py | 1 | 1532 | # coding: utf-8
import codecs
import yaml
import itertools
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
import argparse
# Command-line interface: read X-SAMPA text from --infile and write the IPA
# translation to --outfile.
parser = argparse.ArgumentParser(description="Translate a word in sampa to a word in API", prog="sampa2apiTranslater")
parser.add_argument('-i','--infile', type=argparse.FileType('r'), nargs='?', help="file to be copied")
parser.add_argument('-o', '--outfile', type=argparse.FileType('w+'), nargs="?", help="%(prog)s writes your translation into a file")
args = parser.parse_args()
def grouperElements(liste, function=len):
    """Group the items of *liste* into buckets of equal key.

    The list is first sorted with *function* as the key (groupby only groups
    consecutive runs, so sorting on the same key is required), then split
    into one sub-list per distinct key value.

    Args:
        liste: iterable of items to group.
        function: key function used both for sorting and grouping
            (defaults to len, e.g. to bucket strings by length).

    Returns:
        A list of lists, one per key value, in ascending key order.
    """
    data = sorted(liste, key=function)
    # The group key itself is not needed, only the grouped items.
    return [list(groupe) for _, groupe in itertools.groupby(data, function)]
def traduire_sampa2api(mot, sampa="xSampa2api.yaml"):
    """Convert a word written in X-SAMPA into real IPA characters.

    Loads the X-SAMPA -> IPA mapping from the YAML file *sampa*, then
    replaces symbols longest-first so that multi-character X-SAMPA symbols
    are not shadowed by their single-character prefixes.

    Args:
        mot: the word to translate (X-SAMPA string).
        sampa: path to the YAML mapping file; each key maps to a dict
            containing at least an "api" entry.

    Returns:
        The translated word.
    """
    # Bug fix: the file handle returned by open() was never closed;
    # use a context manager so it is released deterministically.
    with open(sampa, 'r') as source:
        convertisseur = yaml.load(source, Loader=Loader)
    # Buckets of keys grouped by length, longest group first.
    triCles = sorted(grouperElements(convertisseur.keys()), reverse=True)
    for groupe in triCles:
        for element in groupe:
            if element in mot:
                mot = mot.replace(element, convertisseur[element]["api"])
    return mot
# Script entry: translate every whitespace-separated token of each input
# line from X-SAMPA to IPA, writing one output line per input line.
if args.infile:
    for element in args.infile.readlines():
        element = element.strip().split(' ')
        temp = []
        for mot in element:
            temp.append(traduire_sampa2api(mot))
        # NOTE(review): assumes --outfile was also supplied; args.outfile is
        # None otherwise and this write would fail -- confirm intended usage.
        args.outfile.write(" ".join(temp)+"\n")
| apache-2.0 |
mathstuf/offlineimap | test/tests/test_00_globals.py | 18 | 1173 | #!/usr/bin/env python
# Copyright 2013 Eygene A. Ryabinkin
from offlineimap import globals
import unittest
class Opt:
    """Minimal stand-in options object carrying three preset attributes."""

    def __init__(self):
        # Seed the attributes the globals tests inspect and mutate.
        self.one, self.two, self.three = "baz", 42, True
class TestOfflineimapGlobals(unittest.TestCase):
    """Checks that offlineimap.globals.options mirrors the object passed to
    set_options() and is read-only afterwards."""

    @classmethod
    def setUpClass(klass):
        # Register a single Opt instance once for the whole test class.
        klass.o = Opt()
        globals.set_options (klass.o)

    def test_initial_state(self):
        # Every attribute of the registered object must be visible, with the
        # same value, through globals.options.
        for k in self.o.__dict__.keys():
            self.assertTrue(getattr(self.o, k) ==
                            getattr(globals.options, k))

    def test_object_changes(self):
        # Mutating the registered object is reflected through globals.options
        # (both names refer to the same underlying instance).
        self.o.one = "one"
        self.o.two = 119
        self.o.three = False
        return self.test_initial_state()

    def test_modification(self):
        # Assigning through globals.options must be rejected.
        with self.assertRaises(AttributeError):
            globals.options.two = True

    def test_deletion(self):
        # Deleting an attribute through globals.options must be rejected.
        with self.assertRaises(RuntimeError):
            del globals.options.three

    def test_nonexistent_key(self):
        # Reading an unknown attribute raises like a normal object would.
        with self.assertRaises(AttributeError):
            a = globals.options.nosuchoption

    def test_double_init(self):
        # set_options() may only be called once per process.
        with self.assertRaises(ValueError):
            globals.set_options (True)
if __name__ == "__main__":
    # Run this module's tests directly with verbose output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestOfflineimapGlobals)
    unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-2.0 |
msiedlarek/qtwebkit | Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py | 122 | 4562 | #!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models import test_run_results_unittest
from webkitpy.layout_tests.views import buildbot_results
class BuildBotPrinterTests(unittest.TestCase):
    """Tests BuildBotPrinter's summary output for various run results."""

    def assertEmpty(self, stream):
        # Helper: the captured stream produced no output.
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        # Helper: the captured stream produced some output.
        self.assertTrue(stream.getvalue())

    def get_printer(self):
        # Returns a printer writing into an in-memory stream plus the stream
        # itself so tests can inspect what was printed.
        stream = StringIO.StringIO()
        printer = buildbot_results.BuildBotPrinter(stream, debug_logging=True)
        return printer, stream

    def test_print_unexpected_results(self):
        port = MockHost().port_factory.get('test')
        printer, out = self.get_printer()

        # test everything running as expected: only the dashed separator line
        # is printed.
        DASHED_LINE = "-" * 78 + "\n"
        summary = test_run_results_unittest.summarized_results(port, expected=True, passing=False, flaky=False)
        printer.print_unexpected_results(summary)
        self.assertEqual(out.getvalue(), DASHED_LINE)

        # test failures
        printer, out = self.get_printer()
        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
        printer.print_unexpected_results(summary)
        self.assertNotEmpty(out)

        # test unexpected flaky
        printer, out = self.get_printer()
        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=True)
        printer.print_unexpected_results(summary)
        self.assertNotEmpty(out)

        # NOTE(review): the next two sections repeat the "test failures" case
        # above with identical arguments -- they look like copy-paste
        # duplication and add no coverage; confirm before removing.
        printer, out = self.get_printer()
        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
        printer.print_unexpected_results(summary)
        self.assertNotEmpty(out)

        printer, out = self.get_printer()
        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
        printer.print_unexpected_results(summary)
        self.assertNotEmpty(out)

        # unexpected passes also produce output
        printer, out = self.get_printer()
        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
        printer.print_unexpected_results(summary)
        self.assertNotEmpty(out)

    def test_print_results(self):
        port = MockHost().port_factory.get('test')
        printer, out = self.get_printer()
        initial_results = test_run_results_unittest.run_results(port)
        summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
        details = test_run_results.RunDetails(summary['num_regressions'], summary, initial_results, None)
        printer.print_results(details)
        self.assertNotEmpty(out)
| lgpl-3.0 |
gamebooster/dota2-lua-engine | protobuf-2.5.0/python/google/protobuf/internal/message_cpp_test.py | 215 | 1929 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.internal.message_cpp."""
__author__ = 'shahms@google.com (Shahms King)'
import os
# Select the C++ protobuf backend. The assignment is deliberately placed
# before the protobuf import below -- presumably the variable is read at
# import time, so the order of these statements matters; do not regroup.
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'

import unittest
# Re-run every test case from message_test, now backed by the C++
# implementation selected above.
from google.protobuf.internal.message_test import *

if __name__ == '__main__':
  unittest.main()
| gpl-3.0 |
mtagle/airflow | airflow/contrib/sensors/pubsub_sensor.py | 5 | 1162 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.sensors.pubsub`."""
import warnings

# pylint: disable=unused-import
# Re-exported so existing `from ...contrib.sensors.pubsub_sensor import
# PubSubPullSensor` imports keep working.
from airflow.providers.google.cloud.sensors.pubsub import PubSubPullSensor  # noqa

# stacklevel=2 attributes the warning to the module importing this shim
# rather than to the shim itself.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.sensors.pubsub`.",
    DeprecationWarning, stacklevel=2
)
| apache-2.0 |
Xeralux/tensorflow | tensorflow/contrib/image/python/kernel_tests/interpolate_spline_test.py | 16 | 10269 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for interpolate_spline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate as sc_interpolate
from tensorflow.contrib.image.python.ops import interpolate_spline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import momentum
class _InterpolationProblem(object):
  """Abstract class for interpolation problem descriptions.

  Subclasses must define DATA_DIM plus np_function/tf_function (the same
  test function evaluated with numpy and with TensorFlow ops).
  """

  def get_problem(self, optimizable=False, extrapolate=True, dtype='float32'):
    """Make data for an interpolation problem where all x vectors are n-d.

    Args:
      optimizable: If True, then make train_points a tf.Variable.
      extrapolate: If False, then clamp the query_points values to be within
        the max and min of train_points.
      dtype: The data type to use.

    Returns:
      query_points, query_values, train_points, train_values: training and
      test tensors for interpolation problem
    """
    # The values generated here depend on a seed of 0.
    np.random.seed(0)

    batch_size = 1
    num_training_points = 10
    num_query_points = 4

    init_points = np.random.uniform(
        size=[batch_size, num_training_points, self.DATA_DIM])

    init_points = init_points.astype(dtype)
    # A Variable when the training points themselves are to be optimized
    # (see the gradient test), otherwise a constant.
    train_points = (
        variables.Variable(init_points)
        if optimizable else constant_op.constant(init_points))
    train_values = self.tf_function(train_points)

    query_points_np = np.random.uniform(
        size=[batch_size, num_query_points, self.DATA_DIM])
    query_points_np = query_points_np.astype(dtype)
    if not extrapolate:
      query_points_np = np.clip(query_points_np, np.min(init_points),
                                np.max(init_points))

    query_points = constant_op.constant(query_points_np)
    # Ground-truth values computed with numpy at the query locations.
    query_values = self.np_function(query_points_np)

    return query_points, query_values, train_points, train_values
class _QuadraticPlusSinProblem1D(_InterpolationProblem):
  """1D interpolation problem used for regression testing."""
  DATA_DIM = 1
  # Regression values keyed by (interpolation order, regularization weight);
  # the four floats are the expected interpolants at the four query points.
  HARDCODED_QUERY_VALUES = {
      (1.0, 0.0): [6.2647187603, -7.84362604077, -5.63690142322,
                   1.42928896387],
      (1.0, 0.01): [6.77688289946, -8.02163669853, -5.79491157027,
                    1.4063285693],
      (2.0, 0.0): [8.67110264937, -8.41281390883, -5.80190044693,
                   1.50155606059],
      (2.0, 0.01): [6.70797816797, -7.49709587663, -5.28965776238,
                    1.52284731741],
      (3.0, 0.0): [9.37691802935, -8.50390141515, -5.80786417426,
                   1.63467762122],
      (3.0, 0.01): [4.47106304758, -5.71266128361, -3.92529303296,
                    1.86755293857],
      (4.0, 0.0): [9.58172461111, -8.51432104771, -5.80967675388,
                   1.63361164256],
      (4.0, 0.01): [
          -3.87902711352, -0.0253462273846, 1.79857618022, -0.769339675725
      ]
  }

  def np_function(self, x):
    """Takes np array, evaluates the test function, and returns np array."""
    return np.sum(
        np.power((x - 0.5), 3) - 0.25 * x + 10 * np.sin(x * 10),
        axis=2,
        keepdims=True)

  def tf_function(self, x):
    """Takes tf tensor, evaluates the test function, and returns tf tensor."""
    # NOTE(review): uses reduce_mean where np_function uses np.sum; with
    # DATA_DIM == 1 the reduced axis has size 1, so mean == sum here.
    return math_ops.reduce_mean(
        math_ops.pow((x - 0.5), 3) - 0.25 * x + 10 * math_ops.sin(x * 10),
        2,
        keepdims=True)
class _QuadraticPlusSinProblemND(_InterpolationProblem):
  """3D interpolation problem used for regression testing."""

  DATA_DIM = 3
  # Regression values keyed by (interpolation order, regularization weight).
  HARDCODED_QUERY_VALUES = {
      (1.0, 0.0): [1.06609663962, 1.28894849357, 1.10882405595, 1.63966936885],
      (1.0, 0.01): [1.03123780748, 1.2952930985, 1.10366822954, 1.65265118569],
      (2.0, 0.0): [0.627787735064, 1.43802857251, 1.00194632358,
                   1.91667538215],
      (2.0, 0.01): [0.730159985046, 1.41702471595, 1.0065827217,
                    1.85758519312],
      (3.0, 0.0): [0.350460417862, 1.67223539464, 1.00475331246,
                   2.31580322491],
      (3.0, 0.01): [0.624557250556, 1.63138876667, 0.976588193162,
                    2.12511237866],
      (4.0, 0.0): [0.898129669986, 1.24434133638, -0.938056116931,
                   1.59910338833],
      (4.0, 0.01): [0.0930360338179, -3.38791305538, -1.00969032567,
                    0.745535080382],
  }

  def np_function(self, x):
    """Takes np array, evaluates the test function, and returns np array."""
    return np.sum(
        np.square(x - 0.5) + 0.25 * x + 1 * np.sin(x * 15),
        axis=2,
        keepdims=True)

  def tf_function(self, x):
    """Takes tf tensor, evaluates the test function, and returns tf tensor."""
    return math_ops.reduce_sum(
        math_ops.square(x - 0.5) + 0.25 * x + 1 * math_ops.sin(x * 15),
        2,
        keepdims=True)
class InterpolateSplineTest(test_util.TensorFlowTestCase):
  """Tests for contrib.image.interpolate_spline."""

  def test_1d_linear_interpolation(self):
    """For 1d linear interpolation, we can compare directly to scipy."""

    tp = _QuadraticPlusSinProblem1D()
    (query_points, _, train_points, train_values) = tp.get_problem(
        extrapolate=False, dtype='float64')
    interpolation_order = 1

    with ops.name_scope('interpolator'):
      interpolator = interpolate_spline.interpolate_spline(
          train_points, train_values, query_points, interpolation_order)
      with self.test_session() as sess:
        fetches = [query_points, train_points, train_values, interpolator]
        query_points_, train_points_, train_values_, interp_ = sess.run(fetches)

        # Just look at the first element of the minibatch.
        # Also, trim the final singleton dimension.
        interp_ = interp_[0, :, 0]
        query_points_ = query_points_[0, :, 0]
        train_points_ = train_points_[0, :, 0]
        train_values_ = train_values_[0, :, 0]

        # Compute scipy interpolation.
        scipy_interp_function = sc_interpolate.interp1d(
            train_points_, train_values_, kind='linear')

        scipy_interpolation = scipy_interp_function(query_points_)
        scipy_interpolation_on_train = scipy_interp_function(train_points_)

        # Even with float64 precision, the interpolants disagree with scipy a
        # bit due to the fact that we add the EPSILON to prevent sqrt(0), etc.
        tol = 1e-3

        self.assertAllClose(
            train_values_, scipy_interpolation_on_train, atol=tol, rtol=tol)
        self.assertAllClose(interp_, scipy_interpolation, atol=tol, rtol=tol)

  def test_1d_interpolation(self):
    """Regression test for interpolation with 1-D points."""

    tp = _QuadraticPlusSinProblem1D()
    (query_points, _, train_points,
     train_values) = tp.get_problem(dtype='float64')

    for order in (1, 2, 3):
      for reg_weight in (0, 0.01):
        interpolator = interpolate_spline.interpolate_spline(
            train_points, train_values, query_points, order, reg_weight)

        # Compare against the pre-recorded regression values.
        target_interpolation = tp.HARDCODED_QUERY_VALUES[(order, reg_weight)]
        target_interpolation = np.array(target_interpolation)
        with self.test_session() as sess:
          interp_val = sess.run(interpolator)
          self.assertAllClose(interp_val[0, :, 0], target_interpolation)

  def test_nd_linear_interpolation(self):
    """Regression test for interpolation with N-D points."""

    tp = _QuadraticPlusSinProblemND()
    (query_points, _, train_points,
     train_values) = tp.get_problem(dtype='float64')

    for order in (1, 2, 3):
      for reg_weight in (0, 0.01):
        interpolator = interpolate_spline.interpolate_spline(
            train_points, train_values, query_points, order, reg_weight)

        target_interpolation = tp.HARDCODED_QUERY_VALUES[(order, reg_weight)]
        target_interpolation = np.array(target_interpolation)
        with self.test_session() as sess:
          interp_val = sess.run(interpolator)
          self.assertAllClose(interp_val[0, :, 0], target_interpolation)

  def test_interpolation_gradient(self):
    """Make sure that backprop can run. Correctness of gradients is assumed.

    Here, we create a use a small 'training' set and a more densely-sampled
    set of query points, for which we know the true value in advance. The goal
    is to choose x locations for the training data such that interpolating using
    this training data yields the best reconstruction for the function
    values at the query points. The training data locations are optimized
    iteratively using gradient descent.
    """
    tp = _QuadraticPlusSinProblemND()
    (query_points, query_values, train_points,
     train_values) = tp.get_problem(optimizable=True)

    regularization = 0.001
    for interpolation_order in (1, 2, 3, 4):
      interpolator = interpolate_spline.interpolate_spline(
          train_points, train_values, query_points, interpolation_order,
          regularization)

      loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))

      optimizer = momentum.MomentumOptimizer(0.001, 0.9)
      grad = gradients.gradients(loss, [train_points])
      grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)
      opt_func = optimizer.apply_gradients(zip(grad, [train_points]))
      init_op = variables.global_variables_initializer()

      with self.test_session() as sess:
        sess.run(init_op)
        # Only checks that optimization steps run without error.
        for _ in range(100):
          sess.run([loss, opt_func])
if __name__ == '__main__':
  # Run the interpolate_spline tests directly.
  googletest.main()
| apache-2.0 |
iustinam90/vm | ui.py | 1 | 28878 | #!/usr/bin/python -u
from optparse import OptionParser
import commands
import ConfigParser
import os
import re
import controller
conf_file="/opt/ncit-cloud/vm.ini"
debug=1
class VMUserInterface:
def __init__(self):
    """Set up the CLI with an option parser and no parsed options yet."""
    # parseOpts() is expected to populate self.opts later.
    self.opts = None
    self.parser = OptionParser()
def getConf(self):
    """Loads and validates the [conf] section of conf_file into self.conf.

    Exits the process (status 1) if a required option is missing or any
    configured path does not exist.

    NOTE(review): reads ConfigParser's private _sections attribute instead of
    the public items()/get() API -- fragile across library versions; confirm
    before changing, since _sections returns raw (unprocessed) values.
    """
    config = ConfigParser.ConfigParser()
    config.read(conf_file)
    conf = config._sections['conf']
    errs = ""
    # Every option listed here must be present in the [conf] section.
    needed_opts = "default_db default_ip_range default_admin_uid default_admin_home dhcp_keyname dhcp_secret dhcp_server dhcp_port id_range_limit base_domain_location base_disc_location vmoutdir separator_len watcher_path starter_path server_ip"
    for opt in needed_opts.split(' '):
        if(not opt in conf.keys()):
            print "Missing option: {0}".format(opt);
            exit(1)
    # These options must point at existing directories ...
    dirs = "base_disc_location base_domain_location default_admin_home"
    for d in dirs.split(' '):
        if(not os.path.isdir(conf[d])):
            errs += "Path does not exist: {0}".format(conf[d])
    # ... and these at existing files.
    files = "watcher_path starter_path"
    for f in files.split(' '):
        if(not os.path.exists(conf[f])):
            errs += "File does not exist: {0}".format(conf[f])
    # The database itself may not exist yet, but its directory must.
    if(not os.path.exists(os.path.dirname(conf['default_db']))):
        errs += "Database path does not exist: {0}".format(conf['default_db'])
    if(errs):
        # NOTE(review): errs concatenates messages without separators, so
        # multiple errors print as one run-on line -- confirm if intended.
        print "Invalid configuration file: {0}".format(conf_file)
        print errs
        exit(1)
    self.conf = conf
def parseOpts(self):
    """Register every supported command-line option and parse sys.argv.

    Action flags (--ugadd, --vmrun, ...) are booleans; exactly one must be
    set per invocation (enforced later by extractCommand()). The remaining
    options carry the action's arguments. Parsed options land in self.opts.
    """
    # First thing to do
    self.parser.add_option("--ruid",dest='ruid',help="real user id")
    self.parser.add_option("--init",dest="init",action='store_true',help="to initialize the db when first used",default=False)
    self.parser.add_option("--dbpath",dest="dbpath",help="path to database")
    # multipurpose opts
    self.parser.add_option("--name",dest="name",help="name (for vm,user,user group or vm group) (<50 chars)")
    self.parser.add_option("--user",dest="user",help= "user id or name")
    self.parser.add_option("--vm",dest="vm",help="vm id or name")
    self.parser.add_option("--ug",dest="user_group_s",help="(list of) user group id or name")
    self.parser.add_option("--vmg",dest="vm_group_s",help="(list of) vm group id or name")
    self.parser.add_option("--uid",dest="user_id",help="user id")
    self.parser.add_option("--uuid",dest="uuid",help="identifier used with domains in libvirt #unused")
    self.parser.add_option("--ugid",dest="user_group_id",help="user group id")
    self.parser.add_option("--vmgid",dest="vm_group_id",help="vm group id")
    self.parser.add_option("--iprange",dest="ip_range",help="example: 192.168.10.1-192.168.10.254",default="")
    # User Groups
    self.parser.add_option("--ugadd",dest="user_group_add",action='store_true',help="add user group",default=False)
    self.parser.add_option("--ugdel",dest="user_group_del",action='store_true',help="delete user group",default=False)
    self.parser.add_option("--ugmod",dest="user_group_mod",action='store_true',help="modify user group",default=False)
    self.parser.add_option("--uglist",dest="list_user_groups",action='store_true',help="list user groups",default=False)
    # VM Groups
    self.parser.add_option("--vmgadd",dest="vm_group_add",action='store_true',help="add user group",default=False)
    self.parser.add_option("--vmgdel",dest="vm_group_del",action='store_true',help="delete user group",default=False)
    self.parser.add_option("--vmgmod",dest="vm_group_mod",action='store_true',help="modify user group",default=False)
    self.parser.add_option("--vmglist",dest="list_vm_groups",action='store_true',help="list vm groups",default=False)
    # Users
    self.parser.add_option("--uadd",dest="user_add",action='store_true',help="add user ",default=False)
    self.parser.add_option("--udel",dest="user_del",action='store_true',help="delete user ",default=False)
    self.parser.add_option("--umod",dest="user_mod",action='store_true',help="modify user ",default=False)
    self.parser.add_option("--maxrun",dest="maxrun",help="maximum number of running vms allowed",default=2)
    self.parser.add_option("--maxstor",dest="maxstor",help="maximum storage size allowed (eg. 100G)",default="133G")
    self.parser.add_option("--stordir",dest="storage_dir",help="storage directory (in user home maybe)")
    self.parser.add_option("--ulist",dest="user_list",action='store_true',default=False,help="list users")
    self.parser.add_option("--str",dest="string_to_find",help="#unused")
    # VMs
    self.parser.add_option("--vmadd",dest="vm_add",action='store_true',help="add vm ",default=False)
    self.parser.add_option("--vmdel",dest="vm_del",action='store_true',help="delete vm ",default=False)
    self.parser.add_option("--vmmod",dest="vm_mod",action='store_true',help="modify vm ",default=False)
    self.parser.add_option("--vmrun",dest="vm_run",action='store_true',help="modify vm ",default=False)
    self.parser.add_option("--usediscs",dest="use_discs",help="list of paths separated by , (first is used to boot)")
    self.parser.add_option("--stor",dest="storage",help="eg. '5G,500M (creates 2 discs), '3G' (one disc) or wtv.")
    self.parser.add_option("--smp",dest="smp",help="# processors, default 1",default=1)
    self.parser.add_option("--mem",dest="mem",help="memory in megabytes (eg '512' or '512M','1G'), default 512",default="1G")
    self.parser.add_option("--isolate",dest="isolate",action='store_true',help="Isolates vm from the others",default=False)
    self.parser.add_option("--base",dest="base",help="template vm id or name",default="")
    self.parser.add_option("--desc",dest="desc",help="description (<50 chars)",default="")
    self.parser.add_option("--derivable",dest="derivable",action='store_true',default=False,help="make it template")
    self.parser.add_option("--noderivable",dest="noderivable",action='store_true',default=False,help="")
    self.parser.add_option("--install",dest="install",action='store_true',help="if specified, the vm will boot from the cdrom (which must be specified also)",default=False)
    self.parser.add_option("--cdrom",dest="cdrom",help="specify .iso installation file")
    self.parser.add_option("--owner",dest="owner",help="")
    self.parser.add_option("--vmlist",dest="vm_list",action='store_true',help="find vm ",default=False)
    self.parser.add_option("--perm",dest="perm",help="#unused")
    self.parser.add_option("--running",dest="running",action='store_true',default=False,help="#unused")
    # Permissions
    self.parser.add_option("--permset",dest="permset",help="set permissions: any combination of +i,+d,+r,+m,-i,-d,-r,-m")
    self.parser.add_option("--permdel",dest="permdel",help="delete permissions",action='store_true',default=False)
    self.parser.add_option("--permlist",dest="permlist",action='store_true',help="list permissions",default=False)
    # Mappings
    self.parser.add_option("--maplist",dest="maplist",action='store_true',help="list existing mappings in db",default=False)
    self.parser.add_option("--mapdel",dest="mapdel",action='store_true',help="delete mappings in db",default=False)
    (self.opts,args)=self.parser.parse_args()
##############################################################################################
def name_ok(self, name):
    """Return True when name is a non-empty [A-Za-z0-9_]+ string of at most 50 chars."""
    valid = re.compile(r"^[a-zA-Z_0-9]+$")
    if name and valid.match(name) and len(name) <= 50:
        return True
    if debug:
        print("nok name")
    return False
def id_number_ok(self, number, comparison):
    """Check that number is a numeric id (max 6 digits) on the correct side
    of controller.id_range_limit; comparison is '<' or '>'."""
    digits_only = re.compile(r"^[0-9]+$")
    text = str(number)
    if not number or not digits_only.match(text) or len(text) > 6:
        if debug:
            print("nok id")
        return False
    value = int(number)
    limit = controller.id_range_limit
    if comparison == '>':
        if value < limit:
            return False
    elif comparison == '<':
        if value > limit:
            return False
    else:
        print("comparison sign incorrect")
        exit(1)
    return True
def list_ok(self, mixed_list, comparison):
    """Split a comma-separated mix of numeric ids and names.

    Input like "name1,12,name2"; returns {'ids': [...], 'names': [...]}
    or False when nothing in the list validates. Tokens that are neither
    a valid id (per comparison) nor a valid name are silently dropped.
    """
    if not mixed_list:
        return False
    parsed = {'ids': [], 'names': []}
    for token in mixed_list.split(','):
        if self.id_number_ok(token, comparison):
            parsed['ids'].append(int(token))
        elif self.name_ok(token):
            parsed['names'].append(token)
    if not parsed['ids'] and not parsed['names']:
        print("Nothing valid in your list")
        return False
    return parsed
def size_ok(self, size):
    """Validate a memory/disc size string such as '1G' or '100M'.

    Returns the size as an int number of megabytes (G is decimal: 1G ->
    1000M, matching the rest of the tool), or False when the string is
    not a well-formed size.
    """
    # BUG FIX: the pattern was unanchored, so strings like '1GG' matched
    # and then crashed with ValueError on int('1G000'). Anchoring rejects
    # anything with trailing garbage.
    rex1 = re.compile(r"^[1-9][0-9]*[MG]$")
    if rex1.match(size):
        if size[-1] == 'G':
            return int(size[:-1] + "000")
        # anchored pattern guarantees the only other suffix is 'M'
        return int(size[:-1])
    return False
def path_ok(self,path,is_qcow):
if(not path or not os.path.exists(path)):
print "Invalid path: missing/does not exist: ",path
return False
# returns "data" for new images. leave this for now
# if(is_qcow):
# if(not commands.getstatusoutput("file {0} |grep 'Qemu Image,'".format(path))[1]):
# print "Invalid path: not a qemu image"
# return False
return True
def ip_range_ok(self, iprange):
    """Validate an IP range of the form 'A.B.C.D-A.B.C.D'.

    Returns True only for exactly two dash-separated IPv4 addresses, each
    with four numeric octets in 0..255. Resolves the old '#todo check
    format' — previously a bare token like '5' or a single IP passed.
    """
    if not iprange:
        return False
    ips = iprange.split('-')
    if len(ips) != 2:
        # a range must be exactly "start-end"
        return False
    for ip in ips:
        octets = ip.split('.')
        if len(octets) != 4:
            return False
        for n in octets:
            if not n.isdigit():
                return False
            if int(n) > 255:
                return False
    return True
##############################################################################################
def check_init(self):
    """Validate --init: the directory part of --dbpath must already exist."""
    db_dir = os.path.dirname(self.opts_dict['dbpath'])
    if self.path_ok(db_dir, 0):
        return True
    print("Invalid action. Usage: --init --dbpath _ (please provide a valid path (including filename) where database will be created)")
    return False
def check_user_group_add(self):
    """Validate --ugadd; on success coerces user_group_id to int in opts_dict."""
    if self.name_ok(self.opts_dict['name']) and self.id_number_ok(self.opts_dict['user_group_id'], '<'):
        self.opts_dict['user_group_id'] = int(self.opts_dict['user_group_id'])
        return True
    print("Invalid action. Usage: --ugadd --name _ --ugid _ (name allowed characters: 0-9a-zA-Z_)")
    return False
def check_user_group_del(self):
    """Validate --ugdel; replaces opts_dict['user_group_s'] with the parsed
    {'ids': [...], 'names': [...]} dict on success."""
    parsed = self.list_ok(self.opts_dict['user_group_s'], '<')
    if parsed:
        self.opts_dict['user_group_s'] = parsed
        return True
    print("Invalid action. Usage: --ugdel --ug _id/name[,..]")
    return False
def check_user_group_mod(self):
    """Validate --ugmod: a valid --ug selector plus a valid new --name."""
    selector = self.opts_dict['user_group_s']
    selector_ok = self.name_ok(selector) or self.id_number_ok(selector, '<')
    if selector_ok and self.name_ok(self.opts_dict['name']):
        return True
    print("Invalid action. Usage: --ugmod --ug _id/name --name _ (name allowed characters: 0-9a-zA-Z_)")
    return False
##############################################################################################
def check_vm_group_add(self):
    """Validate --vmgadd; on success coerces vm_group_id to int in opts_dict."""
    if (self.name_ok(self.opts_dict['name'])
            and self.id_number_ok(self.opts_dict['vm_group_id'], '<')
            and self.ip_range_ok(self.opts_dict['ip_range'])):
        self.opts_dict['vm_group_id'] = int(self.opts_dict['vm_group_id'])
        return True
    print("Invalid action. Usage: --vmgadd --name _ --vmgid _ --iprange _ (name allowed characters: 0-9a-zA-Z_, iprange x.x.x.x-x.x.x.x)")
    return False
def check_vm_group_del(self):
    """Validate --vmgdel; replaces opts_dict['vm_group_s'] with the parsed
    {'ids': [...], 'names': [...]} dict on success."""
    parsed = self.list_ok(self.opts_dict['vm_group_s'], '<')
    if parsed:
        self.opts_dict['vm_group_s'] = parsed
        return True
    print("Invalid action. Usage: --vmgdel --vmg _id/name[,..]")
    return False
def check_vm_group_mod(self):
    """Validate --vmgmod: a valid --vmg selector plus at least one of a
    valid --name or a valid --iprange."""
    selector = self.opts_dict['vm_group_s']
    if self.name_ok(selector) or self.id_number_ok(selector, '<'):
        if self.name_ok(self.opts_dict['name']) or self.ip_range_ok(self.opts_dict['ip_range']):
            return True
    print("Invalid action. Usage: --vmgmod --vmg _id/name[,..] --name _ --iprange _ (name allowed characters: 0-9a-zA-Z_, iprange x.x.x.x-x.x.x.x)")
    return False
##############################################################################################
def check_user_add(self):
    """Validate --uadd; normalizes user_group_s and maxstor in opts_dict."""
    if not (self.name_ok(self.opts_dict['name'])
            and self.id_number_ok(self.opts_dict['user_id'], '>')
            and self.path_ok(self.opts_dict['storage_dir'], 0)):
        print("Invalid action. Usage: --uadd --name _ --uid _ --stordir _ [-ug _id1/name1,.. --maxrun _(def=2) --maxstor _(def=133G) ]")
        return False
    # optional parameters
    if self.opts_dict['user_group_s']:
        parsed = self.list_ok(self.opts_dict['user_group_s'], '<')
        if not parsed:
            print("Invalid user group list")
            return False
        self.opts_dict['user_group_s'] = parsed
    if self.opts_dict['maxstor']:
        megs = self.size_ok(self.opts_dict['maxstor'])
        if not megs:
            print("Invalid size for maximum storage")
            return False
        self.opts_dict['maxstor'] = megs
    if self.opts_dict['maxrun'] and not self.id_number_ok(self.opts_dict['maxrun'], '<'):
        print("Invalid number for maximum running VMs")
        return False
    return True
def check_user_del(self):
    """Validate --udel; replaces opts_dict['user'] with the parsed
    {'ids': [...], 'names': [...]} dict on success."""
    parsed = self.list_ok(self.opts_dict['user'], '<')
    if parsed:
        self.opts_dict['user'] = parsed
        return True
    print("Invalid action. Usage:--udel --user _id/name[,..]")
    return False
def check_user_mod(self):
    """Validate --umod and normalize its optional arguments in opts_dict.

    user_group_s is rewritten from a '+grp,-grp' string into
    {'add': {'ids':..,'names':..}, 'del': {...}}; maxstor is converted
    to int megabytes.
    """
    if not (self.name_ok(self.opts_dict['user']) or self.id_number_ok(self.opts_dict['user'], '>')):
        print("Invalid action. Usage: --umod --user _id/name [--name _ --ug +|-_,_ --maxrun _ --maxstor _ --stordir _ ]")
        return False
    if self.opts_dict['name']:
        if not self.name_ok(self.opts_dict['name']):
            print("Invalid name")
            return False
    if self.opts_dict['storage_dir']:
        if not self.path_ok(self.opts_dict['storage_dir'], 0):
            print("Invalid storage_dir")
            # BUG FIX: previously fell through without returning, so an
            # invalid --stordir was still accepted.
            return False
    if self.opts_dict['user_group_s']:
        toadd = []
        todel = []
        for item in self.opts_dict['user_group_s'].split(','):
            # guard empty tokens (e.g. a trailing comma) before item[0]
            if not item:
                continue
            if item[0] == '+':
                toadd.append(item[1:])  # strip the leading +
            if item[0] == '-':
                todel.append(item[1:])  # strip the leading -
        groups = {
            'add': self.list_ok(','.join(toadd), '<'),
            'del': self.list_ok(','.join(todel), '<'),
        }
        self.opts_dict['user_group_s'] = groups
        if debug: print(groups)
        if not groups['add'] and not groups['del']:
            print("Invalid user group list")
            return False
        # list_ok returns False for an empty side; normalize to empty lists
        if not groups['add']:
            groups['add'] = {'names': [], 'ids': []}
        if not groups['del']:
            groups['del'] = {'names': [], 'ids': []}
    if self.opts_dict['maxstor']:
        size = self.size_ok(self.opts_dict['maxstor'])
        if not size:
            print("Invalid size for maximum storage")
            return False
        self.opts_dict['maxstor'] = size
    if self.opts_dict['maxrun'] and not self.id_number_ok(self.opts_dict['maxrun'], '<'):
        print("Invalid number for maximum running VMs")
        return False
    return True
def check_user_list(self):
    # --ulist takes no extra arguments, so there is nothing to validate.
    return True
##############################################################################################
def check_vm_add(self):
    """Validate --vmadd and normalize its arguments in opts_dict.

    Exactly one disc source must be given: --usediscs, --stor or --base.
    use_discs becomes a list of paths, storage a list of int megabytes,
    vm_group_s a {'ids': [...], 'names': [...]} dict.
    """
    usage = "Usage: --vmadd --name _ [ --vmg _id1/name (!unul) --desc _ --derivable] --stor 1G,500M | --base _id/name | --usediscs path1,path2.."
    # name is mandatory
    if not self.name_ok(self.opts_dict['name']):
        print("Invalid name")
        print(usage)
        return False
    sources = 0  # how many of --usediscs / --stor / --base were specified
    if self.opts_dict['use_discs']:
        # TODO: also verify these are valid qcow2 images
        sources = 1
        paths = self.opts_dict['use_discs'].split(',')
        self.opts_dict['use_discs'] = []
        # (a stray no-op expression statement was removed here)
        for path in paths:
            if not self.path_ok(path, 1):
                print("Invalid path in --usediscs")
                print(usage)
                return False
            self.opts_dict['use_discs'].append(path)
    if self.opts_dict['storage']:
        if sources:
            # BUG FIX: previously only warned and kept validating, so two
            # conflicting disc sources could both be accepted.
            print("Please specify only one of these: --base --stor --usediscs")
            return False
        sources = 1
        # validate each size and convert to int megabytes
        sizes = []
        for size_str in self.opts_dict['storage'].split(','):
            megs = self.size_ok(size_str)
            if megs:
                sizes.append(megs)
        if len(sizes) < 1:
            print("Invalid storage")
            print(usage)
            return False
        self.opts_dict['storage'] = sizes
    if self.opts_dict['base']:
        if sources:
            # BUG FIX: see above — reject conflicting disc sources.
            print("Please specify only one of these: --base --stor --usediscs")
            return False
        sources = 1
        if not (self.id_number_ok(self.opts_dict['base'], '>') or self.name_ok(self.opts_dict['base'])):
            print("Invalid base VM")
            print(usage)
            return False
    # optional arguments
    if self.opts_dict['vm_group_s']:
        groups = {'ids': [], 'names': []}
        if self.id_number_ok(self.opts_dict['vm_group_s'], '<'):
            groups['ids'].append(self.opts_dict['vm_group_s'])
        elif self.name_ok(self.opts_dict['vm_group_s']):
            groups['names'].append(self.opts_dict['vm_group_s'])
        else:
            print("Invalid VM group.")
            print(usage)
            return False
        self.opts_dict['vm_group_s'] = groups
    if self.opts_dict['desc']:
        if not self.name_ok(self.opts_dict['desc']):
            print("Invalid description.(maximum lenght 50 characters, [a-zA-Z0-9_]+)")
            print(usage)
            return False
    if self.opts_dict['derivable'] and self.opts_dict['base']:
        print("Invalid option combination: --derivable and --base ")
        print(usage)
        return False
    if self.opts_dict['derivable']:
        self.opts_dict['derivable'] = 1
    return True
def check_vm_run(self):
    """Validate --vmrun: a VM selector plus optional --mem/--smp/--install."""
    target = self.opts_dict['vm']
    if not (self.name_ok(target) or self.id_number_ok(target, '>')):
        print("Invalid action. Usage: --vmrun --vm _id/name [--mem _ --smp _ --install --cdrom _ --isolate]")
        return False
    if self.opts_dict['mem'] and not self.size_ok(self.opts_dict['mem']):
        print("Invalid size for --mem")
        return False
    if self.opts_dict['smp'] and not self.id_number_ok(self.opts_dict['smp'], '<'):
        print("Invalid size for --smp")
        return False
    if self.opts_dict['install'] and not self.path_ok(self.opts_dict['cdrom'], 0):
        print("Invalid option combination: --install requires a valid --cdrom path")
        return False
    return True
def check_vm_del(self):
    """Validate --vmdel; replaces opts_dict['vm'] with the parsed
    {'ids': [...], 'names': [...]} dict on success."""
    parsed = self.list_ok(self.opts_dict['vm'], '>')
    if parsed:
        self.opts_dict['vm'] = parsed
        return True
    print("Invalid action. Usage: --vmdel --vm _id/name[,..]")
    return False
def check_vm_mod(self):
    """Validate --vmmod: a VM selector plus optional --name/--owner flags."""
    target = self.opts_dict['vm']
    if not (self.name_ok(target) or self.id_number_ok(target, '>')):
        print("Invalid action. Usage: --vmmod --vm _id/name [--name _ --owner _ --derivable | --noderivable]")
        return False
    if self.opts_dict['name'] and not self.name_ok(self.opts_dict['name']):
        print("Invalid name")
        return False
    owner = self.opts_dict['owner']
    if owner and not self.name_ok(owner) and not self.id_number_ok(owner, '>'):
        print("Invalid owner")
        return False
    if self.opts_dict['derivable'] and self.opts_dict['noderivable']:
        print("..make up your mind")
        return False
    return True
##############################################################################################
def check_permset(self):
    """Validate --permset and normalize the permission string.

    Requires a user or user-group selector and a vm or vm-group selector.
    opts_dict['permset'] ("+m,-d,...") is rewritten into a dict such as
    {'modify': 1, 'derive': 0, ...} (1 = grant, 0 = revoke).
    """
    if not (self.name_ok(self.opts_dict['user']) or self.id_number_ok(self.opts_dict['user'], '>')
            or self.name_ok(self.opts_dict['user_group_s']) or self.id_number_ok(self.opts_dict['user_group_s'], '<')):
        print("Invalid action. Usage: --permset [+m,-d,+r,-i](at least one) --user|--ug _id/name --vm|--vmg _id/name ")
        return False
    if not (self.name_ok(self.opts_dict['vm']) or self.id_number_ok(self.opts_dict['vm'], '>')
            or self.name_ok(self.opts_dict['vm_group_s']) or self.id_number_ok(self.opts_dict['vm_group_s'], '<')):
        print("Invalid action. Usage: --permset [+m,-d,+r,-i](at least one) --user|--ug _id/name --vm|--vmg _id/name")
        return False
    perm_err = "invalid permissions: should be a list from +m,+d,+r,+i,-m,-d,-r,-i"
    flag_names = {'m': 'modify', 'd': 'derive', 'r': 'run', 'i': 'force_isolated'}
    permdict = {}
    for perm in self.opts_dict['permset'].split(","):
        if len(perm) != 2:
            print(perm_err)
            return False
        if perm[0] == "+":
            add_del = 1
        elif perm[0] == "-":
            add_del = 0
        else:
            print(perm_err)
            return False
        if perm[1] not in flag_names:
            print(perm_err)
            # BUG FIX: previously only printed and kept going, so an
            # unknown permission letter still validated as success.
            return False
        permdict[flag_names[perm[1]]] = add_del
    self.opts_dict['permset'] = permdict
    if debug: print(permdict)
    return True
def check_permdel(self):
    """Validate --permdel: needs a user/user-group selector and a
    vm/vm-group selector."""
    user_sel = (self.name_ok(self.opts_dict['user']) or self.id_number_ok(self.opts_dict['user'], '>')
                or self.name_ok(self.opts_dict['user_group_s']) or self.id_number_ok(self.opts_dict['user_group_s'], '<'))
    if not user_sel:
        print("Invalid action. Usage: --permdel --user|--ug _id/name --vm|--vmg _id/name ")
        return False
    vm_sel = (self.name_ok(self.opts_dict['vm']) or self.id_number_ok(self.opts_dict['vm'], '>')
              or self.name_ok(self.opts_dict['vm_group_s']) or self.id_number_ok(self.opts_dict['vm_group_s'], '<'))
    if not vm_sel:
        print("Invalid action. Usage: --permdel --user|--ug _id/name --vm|--vmg _id/name")
        return False
    return True
def check_mapdel(self):
    """Validate --mapdel: needs a user selector and a vm selector."""
    if not (self.name_ok(self.opts_dict['user']) or self.id_number_ok(self.opts_dict['user'], '>')):
        print("Invalid action. Usage: --mapdel --user _id/name --vm _id/name ")
        return False
    if not (self.name_ok(self.opts_dict['vm']) or self.id_number_ok(self.opts_dict['vm'], '>')):
        print("Invalid action. Usage: --mapdel --user _id/name --vm _id/name")
        return False
    return True
##############################################################################################
def validateArgs(self, action):
    """Dispatch to the per-action validator; actions without a validator
    (the list/query actions) are accepted as-is."""
    checkers = {
        'init': self.check_init,
        'user_group_add': self.check_user_group_add,
        'user_group_del': self.check_user_group_del,
        'user_group_mod': self.check_user_group_mod,
        'vm_group_add': self.check_vm_group_add,
        'vm_group_del': self.check_vm_group_del,
        'vm_group_mod': self.check_vm_group_mod,
        'user_add': self.check_user_add,
        'user_del': self.check_user_del,
        'user_mod': self.check_user_mod,
        'user_list': self.check_user_list,
        'vm_add': self.check_vm_add,
        'vm_run': self.check_vm_run,
        'vm_del': self.check_vm_del,
        'vm_mod': self.check_vm_mod,
        'permset': self.check_permset,
        'permdel': self.check_permdel,
        'mapdel': self.check_mapdel,
    }
    checker = checkers.get(action)
    if checker:
        return checker()
    return True  # False #todo!! (list actions currently have no validator)
##############################################################################################
# vrfy command arguments and call the controller
def extractCommand(self, vmcontroller):
    """Pick the single requested action, validate its arguments, and run it.

    Exactly one of the action flags must be set; exits with status 1 on
    a bad invocation. On success calls vmcontroller.execute() and prints
    any error message it returns.
    """
    # these are exclusive, there should be only one:
    # don't forget the space at the end, in case you add more !
    acts = "init user_group_add user_group_del user_group_mod list_user_groups "+\
        "vm_group_add vm_group_del vm_group_mod list_vm_groups "+\
        "user_add user_del user_mod user_list "+\
        "vm_add vm_del vm_mod vm_run vm_list "+\
        "permset permlist permdel maplist mapdel"
    action_list = acts.split(' ')
    self.opts_dict = vars(self.opts)
    chosen = 0  # number of action flags set; must end up exactly 1
    action = None
    for opt in action_list:
        if self.opts_dict[opt]:
            action = opt
            chosen += 1
    if chosen != 1:
        print("err: Too many actions / No action specified")
        exit(1)
    if not self.validateArgs(action):
        # BUG FIX: the original called .format(action) on a string with no
        # placeholder, so the failing action name was never displayed.
        print("err: Validating action {0}".format(action))
        exit(1)
    error = vmcontroller.execute(self.real_uid, action, self.opts_dict)
    if error:
        print(error)
################################################################################# test area
def main():
    """Entry point: load the config file, parse CLI options, and run the
    requested action through controller.VMController."""
    ## if(os.getuid()!=0): print "err";exit(1)
    # real_uid=commands.getstatusoutput("echo $SUDO_USER | xargs -I name id -u name ")[1]
    # real_uid=os.getuid()
    ui=VMUserInterface()
    ui.getConf()
    ui.parseOpts()
    #print "you ",os.getuid()
    #os.setuid(int(ui.opts.ruid))
    #print "you ",os.getuid()
    # the invoking user's real uid is passed in via --ruid (presumably by a
    # sudo wrapper) and is required for the controller's permission checks
    ui.real_uid=ui.opts.ruid
    if(not ui.real_uid):
        print "--ruid (real uid) missing"
        exit(1)
    vmcontroller=controller.VMController(ui.conf)
    vmcontroller.setRUID(ui.real_uid)
    ui.extractCommand(vmcontroller)
if __name__ == "__main__": main() | gpl-3.0 |
debata/test_session_recorder | tests/test_report_generator.py | 1 | 3002 | import pytest
import os
from modules.report_generator import SessionReportGenerator
from modules.session import Session
@pytest.fixture(scope='session')
def generator(tmpdir_factory):
    """Create the report generator object and pass in a temporary directory for testing"""
    # NOTE(review): the temp dir is published through a module-level global
    # because the test functions below read tmp_dir directly to locate the
    # generated reports; refactoring this away would touch every test.
    global tmp_dir
    tmp_dir = tmpdir_factory.mktemp('test')
    generator = SessionReportGenerator(str(tmp_dir), 'test_session_recorder')
    return generator
def test_generate_report(generator):
    """Test to generate an empty report with just the test session name"""
    session_file_name = 'test_report'
    generator.generate_report(session_name=session_file_name)
    expected = os.path.join(str(tmp_dir), session_file_name + '.html')
    assert os.path.isfile(expected), 'Report file was not created'
def test_invalid_report_name(generator):
    """Test to validate that a report cannot be generated with the session name contains invalid characters"""
    result = generator.generate_report(session_name='/////')
    assert not result, 'Generator should have returned false'
def test_generate_all_values_report(generator):
    """Test to generate a report with all report values set"""
    session_file_name = 'All Value Report'
    report_data = {
        Session.SESSION_NAME_KEY: session_file_name,
        Session.MISSION_KEY: 'Test Mession',
        Session.AREAS_KEY: ['Area 1', 'Area 2'],
        Session.TIMEBOX_KEY: '00:30:00',
        Session.DURATION_KEY: '00:20:00',
        Session.LOG_KEY: ['Entry 1'],
        Session.BUG_KEY: ['Bug 1', 'Bug 2'],
        Session.DEBRIEF_KEY: 'Test Debrief',
    }
    generator.generate_report(**report_data)
    expected = os.path.join(str(tmp_dir), session_file_name + '.html')
    assert os.path.isfile(expected), 'Report file was not created'
def test_generate_report_new_filename(generator):
    """Test to generate a report with all report values set"""
    session_filename = 'alternate_file'
    session_name = 'Session1'
    report_data = {
        Session.SESSION_NAME_KEY: session_name,
        Session.MISSION_KEY: 'Test Mession',
        Session.AREAS_KEY: ['Area 1', 'Area 2'],
        Session.TIMEBOX_KEY: '00:30:00',
        Session.DURATION_KEY: '00:20:00',
        Session.LOG_KEY: ['Entry 1'],
        Session.BUG_KEY: ['Bug 1', 'Bug 2'],
        Session.DEBRIEF_KEY: 'Test Debrief',
    }
    generator.generate_report(filename=session_filename, **report_data)
    expected = os.path.join(str(tmp_dir), session_filename + '.html')
    assert os.path.isfile(expected), 'Report file was not created'
def test_generate_report_new_filename_with_space(generator):
    """Test to generate a report with all report values set"""
    session_filename = 'alternate file with spaces'
    session_name = 'Session22'
    report_data = {
        Session.SESSION_NAME_KEY: session_name,
        Session.MISSION_KEY: 'Test Mession',
        Session.AREAS_KEY: ['Area 1', 'Area 2'],
        Session.TIMEBOX_KEY: '00:30:00',
        Session.DURATION_KEY: '00:20:00',
        Session.LOG_KEY: ['Entry 1'],
        Session.BUG_KEY: ['Bug 1', 'Bug 2'],
        Session.DEBRIEF_KEY: 'Test Debrief',
    }
    generator.generate_report(filename=session_filename, **report_data)
    expected = os.path.join(str(tmp_dir), session_filename + '.html')
    assert os.path.isfile(expected), 'Report file was not created'
| apache-2.0 |
appliedx/edx-platform | common/djangoapps/student/migrations/0042_grant_sales_admin_roles.py | 109 | 13747 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models, IntegrityError
class Migration(DataMigration):
def forwards(self, orm):
    """Map all Finance Admins to Sales Admins."""
    role_model = orm['student.courseaccessrole']
    for finance_admin in role_model.objects.filter(role='finance_admin'):
        sales_admin = role_model(
            role='sales_admin',
            user=finance_admin.user,
            org=finance_admin.org,
            course_id=finance_admin.course_id,
        )
        try:
            sales_admin.save()
        except IntegrityError:
            # An equivalent sales_admin row already exists; skip it.
            pass
def backwards(self, orm):
    """Remove all sales administrators, as they did not exist before this migration."""
    orm['student.courseaccessrole'].objects.filter(role='sales_admin').delete()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.dashboardconfiguration': {
'Meta': {'object_name': 'DashboardConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recent_enrollment_time_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
symmetrical = True
| agpl-3.0 |
microdee/IronHydra | src/IronHydra/Lib/atexit.py | 336 | 1705 | """
atexit.py - allow programmer to define multiple exit functions to be executed
upon normal program termination.
One public function, register, is defined.
"""
__all__ = ["register"]
import sys
_exithandlers = []
def _run_exitfuncs():
    """run any registered exit functions

    _exithandlers is traversed in reverse order so functions are executed
    last in, first out.
    """
    # Remember the most recent exception so every remaining handler still
    # gets a chance to run before we re-raise at the end.
    exc_info = None
    while _exithandlers:
        func, targs, kargs = _exithandlers.pop()
        try:
            func(*targs, **kargs)
        except SystemExit:
            # SystemExit is propagated, but only after all handlers ran.
            exc_info = sys.exc_info()
        except:
            # Any other error is printed but does not stop the remaining
            # handlers from running (Python 2 print-to-file syntax).
            import traceback
            print >> sys.stderr, "Error in atexit._run_exitfuncs:"
            traceback.print_exc()
            exc_info = sys.exc_info()

    if exc_info is not None:
        # Python 2 three-argument raise: re-raise with the original traceback.
        raise exc_info[0], exc_info[1], exc_info[2]
def register(func, *targs, **kargs):
    """Schedule *func* to run at normal interpreter shutdown.

    Extra positional (*targs*) and keyword (*kargs*) arguments are saved
    and passed to *func* when it is eventually invoked.  The function is
    returned unchanged, which makes ``register`` usable as a decorator.
    """
    handler = (func, targs, kargs)
    _exithandlers.append(handler)
    return func
# Hook into the interpreter's shutdown machinery.  If other code already
# installed a sys.exitfunc, keep it by registering it as a normal handler
# before taking over the slot ourselves.
if hasattr(sys, "exitfunc"):
    # Assume it's another registered exit function - append it to our list
    register(sys.exitfunc)
sys.exitfunc = _run_exitfuncs

if __name__ == "__main__":
    # Manual self-test: on exit the handlers should run in LIFO order
    # (Python 2 print-statement syntax).
    def x1():
        print "running x1"
    def x2(n):
        print "running x2(%r)" % (n,)
    def x3(n, kwd=None):
        print "running x3(%r, kwd=%r)" % (n, kwd)

    register(x1)
    register(x2, 12)
    register(x3, 5, "bar")
    register(x3, "no kwd args")
| mit |
pekeler/arangodb | 3rdParty/V8-4.3.61/build/gyp/test/gyp-defines/gyptest-regyp.py | 268 | 1260 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies that when the same value is repeated for a gyp define, duplicates are
stripped from the regeneration rule.
"""

import os

import TestGyp  # gyp's test harness module - presumably put on sys.path by the test runner (TODO confirm)

# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])

os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value'
test.run_gyp('defines.gyp')
test.build('defines.gyp')

# The last occurrence of a repeated set should take precedence over other
# values. See gyptest-multiple-values.py.
test.must_contain('action.txt', 'repeated_value')

# So the regeneration rule needs to use the correct order.
test.must_not_contain(
    'Makefile', '"-Dkey=repeated_value" "-Dkey=value1" "-Dkey=repeated_value"')
test.must_contain('Makefile', '"-Dkey=value1" "-Dkey=repeated_value"')

# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
# Touch the gyp file so the build considers it changed and regenerates.
os.utime("defines.gyp", None)
test.build('defines.gyp')
test.must_contain('action.txt', 'repeated_value')

test.pass_test()
| apache-2.0 |
slohse/ansible | lib/ansible/utils/module_docs_fragments/sros.py | 58 | 2847 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # Reusable Ansible documentation fragment: modules for Nokia SR OS
    # devices pull this YAML into their own DOCUMENTATION via
    # extends_documentation_fragment.

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  provider:
    description:
      - A dict object containing connection details.
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport. The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.
        default: 22
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
      timeout:
        description:
          - Specifies the timeout in seconds for communicating with the network device
            for either connecting or sending commands. If the timeout is
            exceeded before the operation is completed, the module will error.
        default: 10
      ssh_keyfile:
        description:
          - Specifies the SSH key to use to authenticate the connection to
            the remote device. This value is the path to the
            key used to authenticate the SSH session. If the value is not specified
            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
            will be used instead.
notes:
  - For more information on using Ansible to manage Nokia SR OS Network devices see U(https://www.ansible.com/ansible-nokia).
"""
| gpl-3.0 |
thebonzitree/django-guardian | extras.py | 85 | 2934 | import _ast
import os
import sys
from setuptools import Command
#from pyflakes.scripts import pyflakes as flakes
def check(filename):
    """Run pyflakes over *filename* and report its problems.

    Returns the number of reported warnings that were not suppressed with
    an inline ``# pyflakes:ignore`` marker, or 1 when the file cannot be
    parsed (syntax error) or decoded.
    """
    from pyflakes import reporter as mod_reporter
    from pyflakes.checker import Checker
    # Close the file deterministically instead of relying on GC; the
    # previous ``open(filename).read()`` leaked the file handle.
    with open(filename) as source_file:
        codeString = source_file.read()
    reporter = mod_reporter._makeDefaultReporter()
    # First, compile into an AST and handle syntax errors.
    try:
        tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
    except SyntaxError:
        value = sys.exc_info()[1]
        msg = value.args[0]
        (lineno, offset, text) = value.lineno, value.offset, value.text
        # If there's an encoding problem with the file, the text is None.
        if text is None:
            # Avoid using msg, since for the only known case, it contains a
            # bogus message that claims the encoding the file declared was
            # unknown.
            reporter.unexpectedError(filename, 'problem decoding source')
        else:
            reporter.syntaxError(filename, msg, lineno, offset, text)
        return 1
    except Exception:
        reporter.unexpectedError(filename, 'problem decoding source')
        return 1
    else:
        # Okay, it's syntactically valid.  Now check it.
        lines = codeString.splitlines()
        warnings = Checker(tree, filename)
        warnings.messages.sort(key=lambda m: m.lineno)
        real_messages = []
        for m in warnings.messages:
            line = lines[m.lineno - 1]
            if 'pyflakes:ignore' in line.rsplit('#', 1)[-1]:
                # ignore lines with pyflakes:ignore
                pass
            else:
                real_messages.append(m)
                reporter.flake(m)
        return len(real_messages)
class RunFlakesCommand(Command):
    """
    Runs pyflakes against guardian codebase.
    """
    description = "Check sources with pyflakes"
    user_options = []

    def initialize_options(self):
        # Required by the setuptools Command interface; nothing to set up.
        pass

    def finalize_options(self):
        # Required by the setuptools Command interface; nothing to finalize.
        pass

    def run(self):
        # Bail out early with a clear message when pyflakes is missing.
        try:
            import pyflakes  # pyflakes:ignore
        except ImportError:
            sys.stderr.write("No pyflakes installed!\n")
            sys.exit(-1)

        thisdir = os.path.dirname(__file__)
        guardiandir = os.path.join(thisdir, 'guardian')
        warns = 0
        # Define top-level directories
        for topdir, dirnames, filenames in os.walk(guardiandir):
            paths = (os.path.join(topdir, f) for f in filenames if f.endswith('.py'))
            for path in paths:
                if path.endswith('tests/__init__.py'):
                    # ignore that module (it should only gather test cases with *)
                    continue
                warns += check(path)
        # Non-zero warning count fails the command so CI can gate on it.
        if warns > 0:
            sys.stderr.write("ERROR: Finished with total %d warnings.\n" % warns)
            sys.exit(1)
        else:
            print("No problems found in source codes.")
| bsd-2-clause |
cjds/cron | lib/flask/testing.py | 783 | 5003 | # -*- coding: utf-8 -*-
"""
flask.testing
~~~~~~~~~~~~~
Implements test support helpers. This module is lazily imported
and usually not used in production environments.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
try:
from werkzeug.urls import url_parse
except ImportError:
from urlparse import urlsplit as url_parse
def make_test_environ_builder(app, path='/', base_url=None, *args, **kwargs):
    """Creates a new test builder with some application defaults thrown in."""
    if base_url is None:
        parsed = url_parse(path)
        server_name = app.config.get('SERVER_NAME')
        base_url = 'http://%s/' % (parsed.netloc or server_name or 'localhost')
        app_root = app.config.get('APPLICATION_ROOT')
        if app_root:
            base_url += app_root.lstrip('/')
        if parsed.netloc:
            path = parsed.path
    return EnvironBuilder(path, base_url, *args, **kwargs)
class FlaskClient(Client):
    """Works like a regular Werkzeug test client but has some knowledge about
    how Flask works to defer the cleanup of the request context stack to the
    end of a with body when used in a with statement.  For general information
    about how to use this class refer to :class:`werkzeug.test.Client`.

    Basic usage is outlined in the :ref:`testing` chapter.
    """

    # Set to True while inside a `with client:` block; checked by open().
    preserve_context = False

    @contextmanager
    def session_transaction(self, *args, **kwargs):
        """When used in combination with a with statement this opens a
        session transaction.  This can be used to modify the session that
        the test client uses.  Once the with block is left the session is
        stored back.

            with client.session_transaction() as session:
                session['value'] = 42

        Internally this is implemented by going through a temporary test
        request context and since session handling could depend on
        request variables this function accepts the same arguments as
        :meth:`~flask.Flask.test_request_context` which are directly
        passed through.
        """
        if self.cookie_jar is None:
            raise RuntimeError('Session transactions only make sense '
                               'with cookies enabled.')
        app = self.application
        environ_overrides = kwargs.setdefault('environ_overrides', {})
        # Feed the client's stored cookies into the temporary request so the
        # opened session matches what the server would see.
        self.cookie_jar.inject_wsgi(environ_overrides)
        outer_reqctx = _request_ctx_stack.top
        with app.test_request_context(*args, **kwargs) as c:
            sess = app.open_session(c.request)
            if sess is None:
                raise RuntimeError('Session backend did not open a session. '
                                   'Check the configuration')

            # Since we have to open a new request context for the session
            # handling we want to make sure that we hide out own context
            # from the caller.  By pushing the original request context
            # (or None) on top of this and popping it we get exactly that
            # behavior.  It's important to not use the push and pop
            # methods of the actual request context object since that would
            # mean that cleanup handlers are called
            _request_ctx_stack.push(outer_reqctx)
            try:
                yield sess
            finally:
                _request_ctx_stack.pop()

            # Persist the (possibly modified) session back into the client's
            # cookie jar via a throwaway response object.
            resp = app.response_class()
            if not app.session_interface.is_null_session(sess):
                app.save_session(sess, resp)
            headers = resp.get_wsgi_headers(c.request.environ)
            self.cookie_jar.extract_wsgi(c.request.environ, headers)

    def open(self, *args, **kwargs):
        # Tell the app (via the WSGI environ) whether to preserve the request
        # context after the request, mirroring `with client:` usage.
        kwargs.setdefault('environ_overrides', {}) \
            ['flask._preserve_context'] = self.preserve_context

        as_tuple = kwargs.pop('as_tuple', False)
        buffered = kwargs.pop('buffered', False)
        follow_redirects = kwargs.pop('follow_redirects', False)
        builder = make_test_environ_builder(self.application, *args, **kwargs)

        return Client.open(self, builder,
                           as_tuple=as_tuple,
                           buffered=buffered,
                           follow_redirects=follow_redirects)

    def __enter__(self):
        if self.preserve_context:
            raise RuntimeError('Cannot nest client invocations')
        self.preserve_context = True
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.preserve_context = False

        # on exit we want to clean up earlier.  Normally the request context
        # stays preserved until the next request in the same thread comes
        # in.  See RequestGlobals.push() for the general behavior.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop()
| apache-2.0 |
ujenmr/ansible | lib/ansible/modules/cloud/amazon/rds_param_group.py | 39 | 14496 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rds_param_group
version_added: "1.5"
short_description: manage RDS parameter groups
description:
- Creates, modifies, and deletes RDS parameter groups. This module has a dependency on python-boto >= 2.5.
requirements: [ boto3 ]
options:
state:
description:
- Specifies whether the group should be present or absent.
required: true
default: present
choices: [ 'present' , 'absent' ]
name:
description:
- Database parameter group identifier.
required: true
description:
description:
- Database parameter group description. Only set when a new group is added.
engine:
description:
- The type of database for this group. Required for state=present.
- Please use following command to get list of all supported db engines and their respective versions.
- '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
immediate:
description:
- Whether to apply the changes immediately, or after the next reboot of any associated instances.
aliases:
- apply_immediately
type: bool
params:
description:
- Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3),
or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
aliases: [parameters]
tags:
description:
- Dictionary of tags to attach to the parameter group
version_added: "2.4"
purge_tags:
description:
- Whether or not to remove tags that do not appear in the I(tags) list.
version_added: "2.4"
type: bool
default: False
author:
- "Scott Anderson (@tastychutney)"
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- rds_param_group:
state: present
name: norwegian-blue
description: 'My Fancy Ex Parrot Group'
engine: 'mysql5.6'
params:
auto_increment_increment: "42K"
tags:
Environment: production
Application: parrot
# Remove a parameter group
- rds_param_group:
state: absent
name: norwegian-blue
'''
RETURN = '''
db_parameter_group_name:
description: Name of DB parameter group
type: str
returned: when state is present
db_parameter_group_family:
description: DB parameter group family that this DB parameter group is compatible with.
type: str
returned: when state is present
db_parameter_group_arn:
description: ARN of the DB parameter group
type: str
returned: when state is present
description:
description: description of the DB parameter group
type: str
returned: when state is present
errors:
description: list of errors from attempting to modify parameters that are not modifiable
type: list
returned: when state is present
tags:
description: dictionary of tags
type: dict
returned: when state is present
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, HAS_BOTO3, compare_aws_tags
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
INT_MODIFIERS = {
'K': 1024,
'M': pow(1024, 2),
'G': pow(1024, 3),
'T': pow(1024, 4),
}
def convert_parameter(param, value):
    """
    Allows setting parameters with 10M = 10* 1024 * 1024 and so on.
    """
    converted = value
    data_type = param['DataType']

    if data_type == 'integer':
        if isinstance(value, string_types):
            try:
                for suffix, factor in INT_MODIFIERS.items():
                    if value.endswith(suffix):
                        converted = int(value[:-1]) * factor
            except ValueError:
                # may be based on a variable (ie. {foo*3/4}) so
                # just pass it on through to boto
                pass
        elif isinstance(value, bool):
            converted = 1 if value else 0
    elif data_type == 'boolean':
        if isinstance(value, string_types):
            converted = to_native(value) in BOOLEANS_TRUE
        # convert True/False to 1/0
        converted = 1 if converted else 0

    return str(converted)
def update_parameters(module, connection):
    """Push desired parameter values to the group.

    Returns a ``(changed, errors)`` tuple where *errors* lists parameters
    that are unknown for the engine or not modifiable.
    """
    groupname = module.params['name']
    desired = module.params['params']
    apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot'
    errors = []
    modify_list = []
    # Paginate so groups with more parameters than one API page still work.
    parameters_paginator = connection.get_paginator('describe_db_parameters')
    existing = parameters_paginator.paginate(DBParameterGroupName=groupname).build_full_result()['Parameters']
    lookup = dict((param['ParameterName'], param) for param in existing)
    for param_key, param_value in desired.items():
        if param_key not in lookup:
            errors.append("Parameter %s is not an available parameter for the %s engine" %
                          (param_key, module.params.get('engine')))
        else:
            converted_value = convert_parameter(lookup[param_key], param_value)
            # engine-default parameters do not have a ParameterValue, so we'll always override those.
            if converted_value != lookup[param_key].get('ParameterValue'):
                if lookup[param_key]['IsModifiable']:
                    modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method))
                else:
                    errors.append("Parameter %s is not modifiable" % param_key)

    # modify_db_parameters takes at most 20 parameters
    if modify_list:
        try:
            from itertools import izip_longest as zip_longest  # python 2
        except ImportError:
            from itertools import zip_longest  # python 3
        # Send the modifications in chunks of 20 (API limit); zip_longest
        # pads the final chunk with None, which is filtered out below.
        for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None):
            non_empty_slice = [item for item in modify_slice if item]
            try:
                connection.modify_db_parameter_group(DBParameterGroupName=groupname, Parameters=non_empty_slice)
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Couldn't update parameters: %s" % str(e),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
        return True, errors
    return False, errors
def update_tags(module, connection, group, tags):
    """Reconcile the group's AWS tags with the desired *tags* dict.

    Honors the module's ``purge_tags`` option; returns True when any tag
    was added, updated or removed.
    """
    changed = False
    existing_tags = connection.list_tags_for_resource(ResourceName=group['DBParameterGroupArn'])['TagList']
    to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags),
                                            tags, module.params['purge_tags'])
    if to_update:
        try:
            connection.add_tags_to_resource(ResourceName=group['DBParameterGroupArn'],
                                            Tags=ansible_dict_to_boto3_tag_list(to_update))
            changed = True
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't add tags to parameter group: %s" % str(e),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        except botocore.exceptions.ParamValidationError as e:
            # Usually a tag value has been passed as an int or bool, needs to be a string
            # The AWS exception message is reasonably ok for this purpose
            module.fail_json(msg="Couldn't add tags to parameter group: %s." % str(e),
                             exception=traceback.format_exc())
    if to_delete:
        try:
            connection.remove_tags_from_resource(ResourceName=group['DBParameterGroupArn'],
                                                 TagKeys=to_delete)
            changed = True
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't remove tags from parameter group: %s" % str(e),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    return changed
def ensure_present(module, connection):
    """Create the parameter group if missing, then sync tags and parameters.

    Ends the module run via ``exit_json`` with the resulting group facts.
    """
    groupname = module.params['name']
    tags = module.params.get('tags')
    changed = False
    errors = []
    try:
        response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'DBParameterGroupNotFound':
            # Group does not exist yet; fall through to create it below.
            response = None
        else:
            module.fail_json(msg="Couldn't access parameter group information: %s" % str(e),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    if not response:
        params = dict(DBParameterGroupName=groupname,
                      DBParameterGroupFamily=module.params['engine'],
                      Description=module.params['description'])
        if tags:
            params['Tags'] = ansible_dict_to_boto3_tag_list(tags)
        try:
            response = connection.create_db_parameter_group(**params)
            changed = True
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't create parameter group: %s" % str(e),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    else:
        # Group already exists: only tags may need reconciling here.
        group = response['DBParameterGroups'][0]
        if tags:
            changed = update_tags(module, connection, group, tags)
    if module.params.get('params'):
        params_changed, errors = update_parameters(module, connection)
        changed = changed or params_changed
    # Re-describe to report the group's final state back to the caller.
    try:
        response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname)
        group = camel_dict_to_snake_dict(response['DBParameterGroups'][0])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't obtain parameter group information: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    try:
        tags = connection.list_tags_for_resource(ResourceName=group['db_parameter_group_arn'])['TagList']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't obtain parameter group tags: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    group['tags'] = boto3_tag_list_to_ansible_dict(tags)
    module.exit_json(changed=changed, errors=errors, **group)
def ensure_absent(module, connection):
    """Delete the named DB parameter group, exiting unchanged if it is
    already gone (idempotent delete)."""
    group = module.params['name']
    try:
        response = connection.describe_db_parameter_groups(DBParameterGroupName=group)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'DBParameterGroupNotFound':
            # Nothing to delete - report no change.
            module.exit_json(changed=False)
        else:
            module.fail_json(msg="Couldn't access parameter group information: %s" % str(e),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    try:
        response = connection.delete_db_parameter_group(DBParameterGroupName=group)
        module.exit_json(changed=True)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't delete parameter group: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
def main():
    """Module entry point: parse arguments, connect to RDS, dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True),
            engine=dict(),
            description=dict(),
            params=dict(aliases=['parameters'], type='dict'),
            immediate=dict(type='bool', aliases=['apply_immediately']),
            tags=dict(type='dict', default={}),
            purge_tags=dict(type='bool', default=False)
        )
    )
    # description and engine are only mandatory when creating (state=present).
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=[['state', 'present', ['description', 'engine']]])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if not region:
        module.fail_json(msg="Region must be present")

    try:
        conn = boto3_conn(module, conn_type='client', resource='rds', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Couldn't connect to AWS: %s" % str(e))

    state = module.params.get('state')
    if state == 'present':
        ensure_present(module, conn)
    if state == 'absent':
        ensure_absent(module, conn)


if __name__ == '__main__':
    main()
| gpl-3.0 |
edwardbadboy/tuned-ubuntu | tuned/storage/pickle_provider.py | 5 | 1403 | import interfaces
import tuned.logs
import pickle
import os
import tuned.consts as consts
log = tuned.logs.get()
class PickleProvider(interfaces.Provider):
    """Persists per-namespace key/value options in a pickle file on disk."""

    __slots__ = ["_path", "_data"]

    def __init__(self, path=None):
        # Fall back to the daemon-wide default storage location.
        if path is None:
            path = consts.DEFAULT_STORAGE_FILE
        self._path = path
        self._data = {}

    def set(self, namespace, option, value):
        """Store *value* under *option* within *namespace* (in memory only)."""
        self._data.setdefault(namespace, {})
        self._data[namespace][option] = value

    def get(self, namespace, option, default=None):
        """Return the stored value for *option*, or *default* when unset."""
        self._data.setdefault(namespace, {})
        return self._data[namespace].get(option, default)

    def unset(self, namespace, option):
        """Remove *option* from *namespace* if present (no error when absent)."""
        self._data.setdefault(namespace, {})
        if option in self._data[namespace]:
            del self._data[namespace][option]

    def save(self):
        """Write the in-memory data to the storage file; errors are logged."""
        try:
            log.debug("Saving %s" % str(self._data))
            # Pickle streams must be written in binary mode; text mode can
            # corrupt the data (fixes the original open(..., "w")).
            with open(self._path, "wb") as f:
                pickle.dump(self._data, f)
        except (OSError, IOError) as e:
            log.error("Error saving storage file '%s': %s" % (self._path, e))

    def load(self):
        """Load data from the storage file; start empty on any failure."""
        try:
            with open(self._path, "rb") as f:
                self._data = pickle.load(f)
        except (OSError, IOError) as e:
            log.debug("Error loading storage file '%s': %s" % (self._path, e))
            self._data = {}
        except EOFError:
            # Empty or truncated storage file - treat as no saved data.
            self._data = {}

    def clear(self):
        """Drop all in-memory data and delete the storage file."""
        self._data.clear()
        try:
            os.unlink(self._path)
        except (OSError, IOError) as e:
            log.debug("Error removing storage file '%s': %s" % (self._path, e))
| gpl-2.0 |
SlimRoms/android_external_chromium | testing/gmock/test/gmock_output_test.py | 986 | 5999 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Mocking Framework.
SYNOPSIS
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'

# Path of the built gmock_output_test_ binary and the flags that keep its
# output deterministic (no stack traces, no timing) for golden comparison.
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']

# Location of the golden (expected output) file, kept next to this script.
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Converts every CRLF (Windows) and lone CR (old Mac) in s to LF."""
  # Collapse two-character CRLF sequences first so each becomes a single
  # '\n'; any '\r' still present is then a bare Mac line ending.
  windows_normalized = s.replace('\r\n', '\n')
  return windows_normalized.replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
  """Strips Google Test result report header/footer lines from the output."""
  # Each pattern removes one category of report boilerplate.  They are
  # applied sequentially, mirroring the order of the original substitutions.
  boilerplate_patterns = (
      r'.*gtest_main.*\n',
      r'\[.*\d+ tests.*\n',
      r'\[.* test environment .*\n',
      r'\[=+\] \d+ tests .* ran.*',
      r'.* FAILED TESTS\n',
  )
  for pattern in boilerplate_patterns:
    output = re.sub(pattern, '', output)
  return output
def RemoveLocations(output):
  """Removes all file location info from a Google Test program's output.

  Args:
       output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE:#: '.
  """
  # Matches both the POSIX 'dir/file:NN:' and Windows 'dir\file(NN):' forms.
  location_pattern = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\:')
  return location_pattern.sub('FILE:#:', output)
def NormalizeErrorMarker(output):
  """Normalizes the error marker, which is different on Windows vs on Linux."""
  # The pattern contains no regex metacharacters, so a plain string
  # replacement is exactly equivalent to the re.sub form.
  return output.replace(' error: ', ' Failure\n')
def RemoveMemoryAddresses(output):
  """Replaces every '@<address>' token in the output with '@0x#'."""
  address_pattern = re.compile(r'@\w+')
  return address_pattern.sub('@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
  """Drops the '(used in test ...) ' annotations of leaked mock objects."""
  annotation = re.compile(r'\(used in test .+\) ')
  return annotation.sub('', output)
def GetLeakyTests(output):
  """Returns a list of test names that leak mock objects."""
  # Every '(used in test FooTest.Bar)' occurrence contributes the captured
  # 'FooTest.Bar' name, in order of appearance.
  leak_annotation = re.compile(r'\(used in test (.+)\)')
  return leak_annotation.findall(output)
def GetNormalizedOutputAndLeakyTests(output):
  """Normalizes the output of gmock_output_test_.

  Args:
    output: The test output.

  Returns:
    A tuple (the normalized test output, the list of test names that have
    leaked mocks).
  """
  # Run the normalization pipeline in the same order as before; each stage
  # rewrites the text produced by the previous one.
  for normalize in (ToUnixLineEnding,
                    RemoveReportHeaderAndFooter,
                    NormalizeErrorMarker,
                    RemoveLocations,
                    RemoveMemoryAddresses):
    output = normalize(output)
  return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
  """Runs a command in a sub-process, and returns its STDOUT in a string."""
  # stderr is deliberately not captured; only stdout is compared to golden.
  process = gmock_test_utils.Subprocess(cmd, capture_stderr=False)
  return process.output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
  """Runs a command and returns its normalized output and a list of leaky tests.

  Args:
    cmd:  the shell command.
  """
  # Disables exception pop-ups on Windows, which would hang the sub-process.
  os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
  raw_output = GetShellCommandOutput(cmd)
  return GetNormalizedOutputAndLeakyTests(raw_output)
class GMockOutputTest(gmock_test_utils.TestCase):
  """Compares gmock_output_test_'s normalized output with the golden file."""

  def testOutput(self):
    output, leaky_tests = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    golden_file = open(GOLDEN_PATH, 'rb')
    try:
      golden = golden_file.read()
    finally:
      golden_file.close()

    # The normalized output should match the golden file.
    self.assertEquals(golden, output)

    # The raw output should contain 2 leaked mock object errors for
    # test GMockOutputTest.CatchesLeakedMocks.
    self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
                       'GMockOutputTest.CatchesLeakedMocks'],
                      leaky_tests)
if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    # --gengolden mode: regenerate the golden file from the current test
    # binary output instead of running the comparison test.
    (output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    golden_file = open(GOLDEN_PATH, 'wb')
    golden_file.write(output)
    golden_file.close()
  else:
    # Normal mode: run the comparison test via the shared test runner.
    gmock_test_utils.Main()
| bsd-3-clause |
openchange/openchange | pyopenchange/tests/mapistore_mgmt.py | 13 | 1573 | #!/usr/bin/python
# NOTE:
#
# For this test, we are running the whole environment on a different
# server: imap, ldap, postgresql, postfix, openchange, samba and sogo.
# We have configured SOGo backend locally and adjusted defaults to
# connect to this remote server.
#
# Finally we are accessing the openchange.ldb file through sshfs and
# map it to expected /usr/local/samba/private folder.
# sshfs openchange@ip_addr:/usr/local/samba/private /usr/local/samba/private
# We have also adjusted the permissions to allow openchange user to
# read/write openchange.ldb file remotely.
#
# Do not forget to run memcached with the user account running the
# script.
#
import os
import sys
import time
sys.path.append("python")
import openchange.mapistore as mapistore
# Ensure the mapping directory exists before pointing mapistore at it.
dirname = "/usr/local/samba/private/mapistore"
if not os.path.exists(dirname):
    os.mkdir("/usr/local/samba/private/mapistore")
mapistore.set_mapping_path(dirname)

# Open the samba private directory (sshfs-mounted per the header note) and
# obtain a management context for the registration queries below.
MAPIStore = mapistore.mapistore(syspath="/usr/local/samba/private")
mgmt = MAPIStore.management()

# Optional polling loop kept for reference while debugging registrations.
#while 1:
#    time.sleep(5)
#    d = mgmt.registered_users("SOGo", "Administrator")
#    print d

# Exercise the management API: backend, message and user registration checks.
print "Is SOGo backend registered: %s" % mgmt.registered_backend("SOGo")
print "Is NonExistent backend registered: %s" % mgmt.registered_backend("NonExistent")
print "Registered message: %s" % mgmt.registered_message("SOGo", "Administrator", "Administrator", "inbox", "61")
print "Registered message: %s" % mgmt.registered_message("SOGo", "Administrator", "Administrator", "inbox", "74")
mgmt.existing_users("SOGo", "Administrator", "inbox")
| gpl-3.0 |
craftytrickster/servo | tests/wpt/web-platform-tests/mixed-content/generic/expect.py | 95 | 4179 | import json, os, urllib, urlparse
def redirect(url, response):
    """Writes a raw 301 redirect to `url` on `response` (CORS-enabled)."""
    # Bypass wptserve's automatic header generation so only the headers
    # written below are emitted.
    response.add_required_headers = False
    writer = response.writer
    writer.write_status(301)
    writer.write_header("access-control-allow-origin", "*")
    writer.write_header("location", url)
    writer.end_headers()
    writer.write("")
def create_redirect_url(request, swap_scheme = False):
    """Builds the URL this request should redirect to.

    Keeps the original host, path and query, optionally swapping
    http<->https (picking the matching configured port), and always drops
    the 'redirection' query parameter so the redirect cannot loop.
    """
    parsed = urlparse.urlsplit(request.url)
    destination_netloc = parsed.netloc
    scheme = parsed.scheme
    if swap_scheme:
        scheme = "http" if parsed.scheme == "https" else "https"
        hostname = parsed.netloc.split(':')[0]
        # Look up the server-configured port for the opposite scheme.
        port = request.server.config["ports"][scheme][0]
        destination_netloc = ":".join([hostname, str(port)])

    # Remove "redirection" from query to avoid redirect loops.
    parsed_query = dict(urlparse.parse_qsl(parsed.query))
    assert "redirection" in parsed_query
    del parsed_query["redirection"]

    destination_url = urlparse.urlunsplit(urlparse.SplitResult(
        scheme = scheme,
        netloc = destination_netloc,
        path = parsed.path,
        query = urllib.urlencode(parsed_query),
        fragment = None))
    return destination_url
def main(request, response):
    """wptserve handler for the mixed-content tests.

    Query-driven behaviors:
      - redirection=no-redirect|keep-scheme-redirect|swap-scheme-redirect:
        optionally answer with a 301 before anything else.
      - action=put|purge|take: record, consume-and-serve, or query a stash
        entry so the harness can tell whether a resource was fetched.
      - otherwise: serve an empty text/plain body.
    """
    if "redirection" in request.GET:
        redirection = request.GET["redirection"]
        if redirection == "no-redirect":
            pass
        elif redirection == "keep-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=False), response)
            return
        elif redirection == "swap-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=True), response)
            return
        else:
            raise ValueError ("Invalid redirect type: %s" % redirection)

    content_type = "text/plain"
    response_data = ""

    if "action" in request.GET:
        action = request.GET["action"]

        if "content_type" in request.GET:
            content_type = request.GET["content_type"]

        key = request.GET["key"]
        stash = request.server.stash
        # Bug fix: the default must be the URL's path component,
        # request.url.split('?')[0].  Previously the [0] subscript applied to
        # the .get() result, so an explicit ?path=... value was truncated to
        # its first character, making unrelated stash entries collide.
        path = request.GET.get("path", request.url.split('?')[0])

        if action == "put":
            value = request.GET["value"]
            # take() first so a stale entry from a previous run is discarded.
            stash.take(key=key, path=path)
            stash.put(key=key, value=value, path=path)
            response_data = json.dumps({"status": "success", "result": key})
        elif action == "purge":
            value = stash.take(key=key, path=path)
            response_data = _purge_payload(request, content_type)
        elif action == "take":
            value = stash.take(key=key, path=path)
            if value is None:
                status = "allowed"
            else:
                status = "blocked"
            response_data = json.dumps({"status": status, "result": value})

    response.add_required_headers = False
    response.writer.write_status(200)
    response.writer.write_header("content-type", content_type)
    response.writer.write_header("cache-control", "no-cache; must-revalidate")
    response.writer.end_headers()
    response.writer.write(response_data)


def _purge_payload(request, content_type):
    """Returns the small canned body served for a purge of `content_type`."""
    payload_files = {
        "image/png": ("images", "smiley.png"),
        "audio/mpeg": ("media", "sound_5.oga"),
        "video/mp4": ("media", "movie_5.mp4"),
        "application/javascript": ("mixed-content", "generic", "worker.js"),
    }
    if content_type not in payload_files:
        return "/* purged */"
    payload_path = os.path.join(request.doc_root, *payload_files[content_type])
    # Close the handle explicitly; the original leaked it via open(...).read().
    payload_file = open(payload_path, "rb")
    try:
        return payload_file.read()
    finally:
        payload_file.close()
| mpl-2.0 |
ircncl/linux-grsec-incremental | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    # Self-referential default_factory: every missing key materializes
    # another autodict, so d[a][b][c] works without any prior setup.
    return defaultdict(autodict)

# Per-event lookup tables populated by the define_* callbacks below.
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Registers the delimiter used when joining this field's flag names.
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    # Maps one bit value of a flag field to its human-readable name.
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    # Maps one exact value of a symbolic field to its human-readable name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Renders a bit-flag field value as delimiter-joined flag names."""
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # Python 2 idiom: keys() returns a list, sorted in place.
        keys = flag_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            # A zero value only matches a flag registered for 0, if any.
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear bits already rendered; bits with no registered name
                # are simply ignored.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Renders an exact-match symbolic field value as its registered name."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        keys = symbolic_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            # A zero value only matches a symbol registered for 0, if any.
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Bit values of a trace record's flags field, mapped to their names.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Renders trace flag bits as 'A | B | ...'; a zero value renders as 'NONE'."""
    if not value:
        return trace_flags[0x00]
    # Collect the name of every known bit that is set; unknown bits are
    # silently ignored, matching the original behavior.
    set_bits = [name for bit, name in trace_flags.items()
                if bit and value & bit == bit]
    return " | ".join(set_bits)
def taskState(state):
    """Maps a scheduler task state code to its short name ('Unknown' if unmapped)."""
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Common header fields carried by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Returns the event timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Returns the timestamp formatted as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
eslimaf/random_codes | capture.py | 1 | 1973 | from netlib.odict import ODictCaseless
from libmproxy.protocol.http import HTTPResponse
import cgi
import re
from gzip import GzipFile
import StringIO
import time
# Canned Apple plist "OK" payload returned for every intercepted endpoint.
XML_OK_RESPONSE = '''<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"><plist version="1.0"><dict><key>iPhone6,2</key><array><string>powerDiagnostics</string></array></dict></plist>'''
def request(context, flow):
    # mitmproxy inline-script hook: log every outgoing request path.
    print 'Requesting -> %s' % flow.request.path
def saveContent(flow, prefix):
    """Decode the request's multipart body and dump its 'log_archive' part
    to a timestamped <prefix>-YYYYmmdd-HHMMSS.tar.gz file."""
    print "Saving data with prefix -> ", prefix
    decodedData = StringIO.StringIO()
    decodedData.write(flow.request.get_decoded_content())
    contentType = flow.request.headers['Content-Type'][0]
    # assumes Content-Type is always 'multipart/form-data; boundary=...';
    # matches.group(1) raises AttributeError otherwise — TODO confirm the
    # clients always send that header.
    multipart_boundary_re = re.compile('^multipart/form-data; boundary=(.*)$')
    matches = multipart_boundary_re.match(contentType)
    decodedData.seek(0)
    query = cgi.parse_multipart( decodedData, {"boundary" : matches.group(1)})
    with open("%s-%s.tar.gz" % (prefix, time.strftime("%Y%m%d-%H%M%S")), "w") as logs:
        logs.write(query['log_archive'][0])
def set_response(flow, content):
    """Replace the upstream response with a 200 OK carrying `content` as text/xml."""
    print 'Response from server -> ', flow.response
    flow.response.code = 200
    flow.response.msg = "OK"
    # Overwrite all headers; only Content-Type is kept for the faked reply.
    flow.response.headers = ODictCaseless([["Content-Type","text/xml"]])
    flow.response.content = content
    print "Setting new response -> ", flow.response
    print "Body -> ", content
def response(context, flow):
    """mitmproxy hook: for known endpoints, optionally dump the uploaded
    archive and answer with the canned OK plist; other paths pass through."""
    # Known paths mapped to the dump-file prefix to use before replying;
    # None means reply only, without saving anything.
    capture_prefixes = {
        '/ios/TestConfiguration/1.2': None,
        '/MR3Server/ValidateTicket?ticket_number=123456': None,
        '/MR3Server/MR3Post': 'general',
        '/ios/log/extendedUpload': 'power',
    }
    path = flow.request.path
    if path not in capture_prefixes:
        return
    prefix = capture_prefixes[path]
    if prefix is not None:
        saveContent(flow, prefix)
    set_response(flow, XML_OK_RESPONSE)
| mit |
lpantano/cloudbiolinux | fabfile.py | 1 | 19010 | """Main Fabric deployment file for CloudBioLinux distribution.
This installs a standard set of useful biological applications on a remote
server. It is designed for bootstrapping a machine from scratch, as with new
Amazon EC2 instances.
Usage:
fab -H hostname -i private_key_file install_biolinux
which will call into the 'install_biolinux' method below. See the README for
more examples. hostname can be a named host in ~/.ssh/config
Requires:
Fabric http://docs.fabfile.org
PyYAML http://pyyaml.org/wiki/PyYAMLDocumentation
"""
import os
import sys
from datetime import datetime
from fabric.api import *
from fabric.contrib.files import *
import yaml
# use local cloudbio directory
for to_remove in [p for p in sys.path if p.find("cloudbiolinux-") > 0]:
sys.path.remove(to_remove)
sys.path.append(os.path.dirname(__file__))
import cloudbio
from cloudbio import libraries
from cloudbio.utils import _setup_logging, _configure_fabric_environment
from cloudbio.cloudman import _cleanup_ec2, _configure_cloudman
from cloudbio.cloudbiolinux import _cleanup_space, _freenx_scripts
from cloudbio.custom import shared
from cloudbio.package.shared import _yaml_to_packages
from cloudbio.package import brew
from cloudbio.package import (_configure_and_install_native_packages,
_connect_native_packages, _print_shell_exports)
from cloudbio.package.nix import _setup_nix_sources, _nix_packages
from cloudbio.flavor.config import get_config_file
from cloudbio.config_management.puppet import _puppet_provision
from cloudbio.config_management.chef import _chef_provision, chef, _configure_chef
# ### Shared installation targets for all platforms
def install_biolinux(target=None, flavor=None):
    """Main entry point for installing BioLinux on a remote server.

    `flavor` allows customization of CloudBioLinux behavior. It can either
    be a flavor name that maps to a corresponding directory in contrib/flavor
    or the path to a custom directory. This can contain:

      - alternative package lists (main.yaml, packages.yaml, custom.yaml)
      - custom python code (nameflavor.py) that hooks into the build machinery

    `target` allows running only particular parts of the build process. Valid choices are:

      - packages     Install distro packages
      - custom       Install custom packages
      - chef_recipes Provision chef recipes
      - libraries    Install programming language libraries
      - post_install Setup CloudMan, FreeNX and other system services
      - cleanup      Remove downloaded files and prepare images for AMI builds
    """
    _setup_logging(env)
    time_start = _print_time_stats("Config", "start")
    _check_fabric_version()
    # Honor the user's ~/.ssh/config host aliases when the file exists.
    if env.ssh_config_path and os.path.isfile(os.path.expanduser(env.ssh_config_path)):
        env.use_ssh_config = True
    # Library/custom-only runs may happen on unsupported distributions, so
    # the distribution check is skipped for those targets.
    _configure_fabric_environment(env, flavor,
                                  ignore_distcheck=(target is not None
                                                    and target in ["libraries", "custom"]))
    env.logger.debug("Target is '%s'" % target)
    env.logger.debug("Flavor is '%s'" % flavor)
    _perform_install(target, flavor)
    _print_time_stats("Config", "end", time_start)
    # Isolated installs need their shell exports printed so the user can
    # activate the self-contained installation.
    if hasattr(env, "keep_isolated") and env.keep_isolated:
        _print_shell_exports(env)
def _perform_install(target=None, flavor=None, more_custom_add=None):
    """
    Once CBL/fabric environment is setup, this method actually
    runs the required installation procedures.

    See `install_biolinux` for full details on arguments
    `target` and `flavor`.  `more_custom_add` is an optional
    {group: [package, ...]} mapping merged into the custom-package
    additions read from the main config.
    """
    pkg_install, lib_install, custom_ignore, custom_add = _read_main_config()
    # Merge caller-supplied custom additions into those from main.yaml.
    if more_custom_add:
        if custom_add is None:
            custom_add = {}
        for k, vs in more_custom_add.iteritems():
            if k in custom_add:
                custom_add[k].extend(vs)
            else:
                custom_add[k] = vs
    if target is None or target == "packages":
        env.keep_isolated = getattr(env, "keep_isolated", "false").lower() in ["true", "yes"]
        # Only touch system information if we're not an isolated installation
        if not env.keep_isolated:
            # can only install native packages if we have sudo access or are root
            if env.use_sudo or env.safe_run_output("whoami").strip() == "root":
                _configure_and_install_native_packages(env, pkg_install)
            else:
                _connect_native_packages(env, pkg_install, lib_install)
        if env.nixpkgs:  # ./doc/nixpkgs.md
            _setup_nix_sources()
            _nix_packages(pkg_install)
    if target is None or target == "custom":
        _custom_installs(pkg_install, custom_ignore, custom_add)
    if target is None or target == "chef_recipes":
        _provision_chef_recipes(pkg_install, custom_ignore)
    if target is None or target == "puppet_classes":
        _provision_puppet_classes(pkg_install, custom_ignore)
    if target is None or target == "brew":
        install_brew(flavor=flavor, automated=True)
    if target is None or target == "libraries":
        _do_library_installs(lib_install)
    if target is None or target == "post_install":
        env.flavor.post_install()
        if "is_ec2_image" in env and env.is_ec2_image.upper() in ["TRUE", "YES"]:
            # Bug fix: this is a module-level function, so there is no
            # `self`; the original `self.env` raised NameError here.
            _freenx_scripts(env)
        if pkg_install is not None and 'cloudman' in pkg_install:
            _configure_cloudman(env)
    if target is None or target == "cleanup":
        _cleanup_space(env)
        if "is_ec2_image" in env and env.is_ec2_image.upper() in ["TRUE", "YES"]:
            _cleanup_ec2(env)
def _print_time_stats(action, event, prev_time=None):
    """Logs a timestamped progress line and returns the current UTC time.

    :type action: string
    :param action: Indicates type of action (eg, Config, Lib install, Pkg install)
    :type event: string
    :param event: The monitoring event (eg, start, stop)
    :type prev_time: datetime
    :param prev_time: A timestamp of a previous event. If provided, the
                      duration between that timestamp and now is included
                      in the printout.
    :rtype: datetime
    :return: A datetime timestamp of when the method was called
    """
    now = datetime.utcnow()
    message = "{0} {1} time: {2}".format(action, event, now)
    if prev_time:
        message += "; duration: {0}".format(str(now - prev_time))
    env.logger.info(message)
    return now
def _check_fabric_version():
    """Aborts unless the running Fabric is version 1 or newer."""
    major_version = int(env.version.split(".")[0])
    if major_version < 1:
        raise NotImplementedError("Please install fabric version 1 or higher")
def _custom_installs(to_install, ignore=None, add=None):
    """Install every custom package selected by the configured groups,
    minus `ignore`, plus the extra {group: [package]} entries in `add`."""
    # Ensure the scratch directory used by custom installers exists.
    if not env.safe_exists(env.local_install) and env.local_install:
        env.safe_run("mkdir -p %s" % env.local_install)
    pkg_config = get_config_file(env, "custom.yaml").base
    packages, pkg_to_group = _yaml_to_packages(pkg_config, to_install)
    packages = [p for p in packages if ignore is None or p not in ignore]
    # Register the additional packages under their group before installing.
    if add is not None:
        for key, vals in add.iteritems():
            for v in vals:
                pkg_to_group[v] = key
                packages.append(v)
    # The flavor gets a final chance to rewrite the package list.
    for p in env.flavor.rewrite_config_items("custom", packages):
        install_custom(p, True, pkg_to_group)
def _provision_chef_recipes(to_install, ignore=None):
    """
    Much like _custom_installs, read config file, determine what to install,
    and install it.
    """
    pkg_config = get_config_file(env, "chef_recipes.yaml").base
    packages, _ = _yaml_to_packages(pkg_config, to_install)
    packages = [p for p in packages if ignore is None or p not in ignore]
    # Let the flavor rewrite the recipe list before provisioning.
    recipes = [recipe for recipe in env.flavor.rewrite_config_items("chef_recipes", packages)]
    if recipes:  # Don't bother running chef if nothing to configure
        install_chef_recipe(recipes, True)
def _provision_puppet_classes(to_install, ignore=None):
    """
    Much like _custom_installs, read config file, determine what to install,
    and install it.
    """
    pkg_config = get_config_file(env, "puppet_classes.yaml").base
    packages, _ = _yaml_to_packages(pkg_config, to_install)
    packages = [p for p in packages if ignore is None or p not in ignore]
    # Let the flavor rewrite the class list before provisioning.
    classes = [recipe for recipe in env.flavor.rewrite_config_items("puppet_classes", packages)]
    if classes:  # Don't bother running puppet if nothing to configure
        install_puppet_class(classes, True)
def install_chef_recipe(recipe, automated=False, flavor=None):
    """Install one or more chef recipes by name.

    Usage: fab [-i key] [-u user] -H host install_chef_recipe:recipe

    :type recipe: string or list
    :param recipe: A single recipe name or a list of recipe names to provision.

    :type automated: bool
    :param automated: If set to True, the environment is not loaded.
    """
    _setup_logging(env)
    if not automated:
        _configure_fabric_environment(env, flavor)
    time_start = _print_time_stats("Chef provision for recipe(s) '{0}'".format(recipe), "start")
    _configure_chef(env, chef)
    # Normalize to a list so single-recipe calls work the same as lists.
    recipes = recipe if isinstance(recipe, list) else [recipe]
    for recipe_to_add in recipes:
        chef.add_recipe(recipe_to_add)
    _chef_provision(env, recipes)
    _print_time_stats("Chef provision for recipe(s) '%s'" % recipe, "end", time_start)
def install_puppet_class(classes, automated=False, flavor=None):
    """Install one or more puppet classes by name.

    Usage: fab [-i key] [-u user] -H host install_puppet_class:class

    :type classes: string or list
    :param classes: A single class name or a list of class names to provision.

    :type automated: bool
    :param automated: If set to True, the environment is not loaded.
    """
    _setup_logging(env)
    if not automated:
        _configure_fabric_environment(env, flavor)
    time_start = _print_time_stats("Puppet provision for class(es) '{0}'".format(classes), "start")
    # Normalize to a list so single-class calls work the same as lists.
    classes = classes if isinstance(classes, list) else [classes]
    _puppet_provision(env, classes)
    _print_time_stats("Puppet provision for classes(s) '%s'" % classes, "end", time_start)
def install_custom(p, automated=False, pkg_to_group=None, flavor=None):
    """
    Install a single custom program or package by name.

    This method fetches program name from ``config/custom.yaml`` and delegates
    to a method in ``custom/*name*.py`` to proceed with the installation.
    Alternatively, if a program install method is defined in the appropriate
    package, it will be called directly (see param ``p``).

    Usage: fab [-i key] [-u user] -H host install_custom:program_name

    :type p:  string
    :param p: A name of the custom program to install. This has to be either a name
              that is listed in ``custom.yaml`` as a subordinate to a group name or a
              program name whose install method is defined in either ``cloudbio`` or
              ``custom`` packages
              (e.g., ``cloudbio/custom/cloudman.py -> install_cloudman``).

    :type automated: bool
    :param automated: If set to True, the environment is not loaded and reading of
                      the ``custom.yaml`` is skipped.
    """
    p = p.lower() # All packages listed in custom.yaml are in lower case
    if not automated:
        _setup_logging(env)
        _configure_fabric_environment(env, flavor, ignore_distcheck=True)
        pkg_config = get_config_file(env, "custom.yaml").base
        packages, pkg_to_group = _yaml_to_packages(pkg_config, None)

    time_start = _print_time_stats("Custom install for '{0}'".format(p), "start")
    fn = _custom_install_function(env, p, pkg_to_group)
    fn(env)
    ## TODO: Replace the previous 4 lines with the following one, barring
    ## objections. Slightly different behavior because pkg_to_group will be
    ## loaded regardless of automated if it is None, but IMO this shouldn't
    ## matter because the following steps look like they would fail if
    ## automated is True and pkg_to_group is None.
    # _install_custom(p, pkg_to_group)
    _print_time_stats("Custom install for '%s'" % p, "end", time_start)
def _install_custom(p, pkg_to_group=None):
    # Like install_custom, but never touches logging/environment setup and
    # loads the package->group mapping on demand.
    if pkg_to_group is None:
        pkg_config = get_config_file(env, "custom.yaml").base
        packages, pkg_to_group = _yaml_to_packages(pkg_config, None)
    fn = _custom_install_function(env, p, pkg_to_group)
    fn(env)
def install_brew(p=None, version=None, flavor=None, automated=False):
    """Top level access to homebrew/linuxbrew packages.
    p is a package name to install, or all configured packages if not specified.
    """
    if not automated:
        _setup_logging(env)
        _configure_fabric_environment(env, flavor, ignore_distcheck=True)
    if p is not None:
        # Pin a specific version using the "pkg==version" convention.
        if version:
            p = "%s==%s" % (p, version)
        brew.install_packages(env, packages=[p])
    else:
        # No package given: install everything selected by main.yaml.
        pkg_install = _read_main_config()[0]
        brew.install_packages(env, to_install=pkg_install)
def _custom_install_function(env, p, pkg_to_group):
    """
    Find the custom install function to execute, based on the package name
    and the package->group mapping.

    Returns the ``install_<p>`` callable from ``cloudbio.custom.<group>``
    (or ``cloudbio.custom.<p>`` when ``p`` is not a key of ``pkg_to_group``,
    which supports calling a program's install method directly).
    Raises ImportError when the module or the function does not exist.
    """
    # Allow direct calling of a program install method, even if the program
    # is not listed in the custom list (ie, not contained as a key value in
    # pkg_to_group). For an example, see 'install_cloudman' or use p=cloudman.
    mod_name = pkg_to_group[p] if p in pkg_to_group else p
    env.logger.debug("Importing module cloudbio.custom.%s" % mod_name)
    try:
        mod = __import__("cloudbio.custom.%s" % mod_name,
                         fromlist=["cloudbio", "custom"])
    except ImportError:
        # Bug fix: use mod_name here.  The original interpolated
        # pkg_to_group[p], which raised KeyError (masking the ImportError)
        # whenever p was not listed in pkg_to_group -- exactly the
        # direct-call case this function is meant to support.
        raise ImportError("Need to write module cloudbio.custom.%s" % mod_name)
    replace_chars = ["-"]
    # Normalize the package name into a valid Python identifier suffix.
    for to_replace in replace_chars:
        p = p.replace(to_replace, "_")
    env.logger.debug("Looking for custom install function %s.install_%s"
                     % (mod.__name__, p))
    try:
        fn = getattr(mod, "install_%s" % p)
    except AttributeError:
        # Same fix as above: pkg_to_group may not contain the (rewritten) p.
        raise ImportError("Need to write a install_%s function in custom.%s"
                          % (p, mod_name))
    return fn
def _read_main_config():
    """Pull a list of groups to install based on our main configuration YAML.

    Reads 'main.yaml' and returns a 4-tuple:
    (packages, sorted libraries, custom_ignore, custom_add).
    """
    yaml_file = get_config_file(env, "main.yaml").base
    with open(yaml_file) as in_handle:
        # NOTE(review): yaml.load without an explicit Loader accepts the full
        # YAML language; acceptable here for trusted local config files.
        full_data = yaml.load(in_handle)
    packages = full_data.get('packages', [])
    packages = env.flavor.rewrite_config_items("main_packages", packages)
    libraries = full_data.get('libraries', [])
    custom_ignore = full_data.get('custom_ignore', [])
    custom_add = full_data.get("custom_additional")
    # Normalize YAML nulls (empty sections) to empty lists.
    if packages is None: packages = []
    if libraries is None: libraries = []
    if custom_ignore is None: custom_ignore = []
    env.logger.info("Meta-package information from {2}\n- Packages: {0}\n- Libraries: "
                    "{1}".format(",".join(packages), ",".join(libraries), yaml_file))
    return packages, sorted(libraries), custom_ignore, custom_add
# ### Library specific installation code
def _python_library_installer(config):
    """Install python specific libraries using pip, conda and easy_install.

    Handles using isolated anaconda environments.
    """
    if shared._is_anaconda(env):
        # Anaconda environment: install through conda, then refresh
        # distribute with the easy_install next to the conda binary.
        conda_bin = shared._conda_cmd(env)
        for pname in env.flavor.rewrite_config_items("python", config.get("conda", [])):
            env.safe_run("{0} install --yes {1}".format(conda_bin, pname))
        cmd = env.safe_run
        with settings(warn_only=True):
            cmd("%s -U distribute" % os.path.join(os.path.dirname(conda_bin), "easy_install"))
    else:
        # System python: bootstrap pip/distribute first, with sudo.
        pip_bin = shared._pip_cmd(env)
        ei_bin = pip_bin.replace("pip", "easy_install")
        env.safe_sudo("%s -U pip" % ei_bin)
        with settings(warn_only=True):
            env.safe_sudo("%s -U distribute" % ei_bin)
        cmd = env.safe_sudo
    # pypi installs run with whichever command (run/sudo) was chosen above.
    for pname in env.flavor.rewrite_config_items("python", config['pypi']):
        cmd("{0} install --upgrade {1} --allow-unverified {1} --allow-external {1}".format(shared._pip_cmd(env), pname)) # fixes problem with packages not being in pypi
def _ruby_library_installer(config):
    """Install ruby specific gems.
    """
    # Optional suffix selecting a specific ruby/gem version (e.g. 'gem1.9').
    gem_ext = getattr(env, "ruby_version_ext", "")
    def _cur_gems():
        # Query the remote gem list quietly; one gem name per line.
        with settings(
                hide('warnings', 'running', 'stdout', 'stderr')):
            gem_info = env.safe_run_output("gem%s list --no-versions" % gem_ext)
        return [l.rstrip("\r") for l in gem_info.split("\n") if l.rstrip("\r")]
    installed = _cur_gems()
    for gem in env.flavor.rewrite_config_items("ruby", config['gems']):
        # update current gems only to check for new installs
        if gem not in installed:
            installed = _cur_gems()
        if gem in installed:
            env.safe_sudo("gem%s update %s" % (gem_ext, gem))
        else:
            env.safe_sudo("gem%s install %s" % (gem_ext, gem))
def _perl_library_installer(config):
    """Install perl libraries from CPAN with cpanminus.
    """
    # Bootstrap cpanminus into the system install prefix first.
    with shared._make_tmp_dir() as tmp_dir:
        with cd(tmp_dir):
            env.safe_run("wget --no-check-certificate -O cpanm "
                         "https://raw.github.com/miyagawa/cpanminus/master/cpanm")
            env.safe_run("chmod a+rwx cpanm")
            env.safe_sudo("mv cpanm %s/bin" % env.system_install)
    sudo_str = "--sudo" if env.use_sudo else ""
    for lib in env.flavor.rewrite_config_items("perl", config['cpan']):
        # Need to hack stdin because of some problem with cpanminus script that
        # causes fabric to hang
        # http://agiletesting.blogspot.com/2010/03/getting-past-hung-remote-processes-in.html
        env.safe_run("cpanm %s --skip-installed --notest %s < /dev/null" % (sudo_str, lib))
def _haskell_library_installer(config):
    """Install haskell libraries using cabal.
    """
    # Consistency fix: use the flavor-aware env.safe_run wrapper like every
    # other installer in this file, instead of bare fabric `run`.
    env.safe_run("cabal update")
    # --root-cmd lets cabal elevate for the --global install when needed.
    # Hoisted out of the loop: env.use_sudo does not change per library.
    sudo_str = "--root-cmd=sudo" if env.use_sudo else ""
    for lib in config["cabal"]:
        env.safe_run("cabal install %s --global %s" % (sudo_str, lib))
# Dispatch table mapping library meta-package names (the 'libraries' entries
# from main.yaml) to their installer functions.
lib_installers = {
    "r-libs" : libraries.r_library_installer,
    "python-libs" : _python_library_installer,
    "ruby-libs" : _ruby_library_installer,
    "perl-libs" : _perl_library_installer,
    "haskell-libs": _haskell_library_installer,
}
def install_libraries(language):
    """High level target to install libraries for a specific language.

    Example: ``fab -H host install_libraries:python`` installs the
    'python-libs' entry from lib_installers.
    """
    _setup_logging(env)
    _check_fabric_version()
    _configure_fabric_environment(env, ignore_distcheck=True)
    _do_library_installs(["%s-libs" % language])
def _do_library_installs(to_install):
    # Each entry names a '<lang>-libs' YAML config file; load it and hand
    # the parsed config to the matching installer from lib_installers.
    for iname in to_install:
        yaml_file = get_config_file(env, "%s.yaml" % iname).base
        with open(yaml_file) as in_handle:
            config = yaml.load(in_handle)
        lib_installers[iname](config)
| mit |
Keleir/django-basic-apps | basic/invitations/models.py | 10 | 2238 | import random
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.hashcompat import sha_constructor
INVITATION_ALLOTMENT = getattr(settings, 'INVITATION_ALLOTMENT', 5)
# Invitation lifecycle states.
INVITATION_STATUS_SENT = 0
INVITATION_STATUS_ACCEPTED = 1
INVITATION_STATUS_DECLINED = 2

INVITATION_STATUS_CHOICES = (
    (INVITATION_STATUS_SENT, 'Sent'),
    # Bug fix: 'Accepted' was previously paired with INVITATION_STATUS_DECLINED,
    # so both 'Accepted' and 'Declined' mapped to status 2 and the ACCEPTED
    # status (1) could never be displayed.
    (INVITATION_STATUS_ACCEPTED, 'Accepted'),
    (INVITATION_STATUS_DECLINED, 'Declined'),
)
class InvitationManager(models.Manager):
    def get_invitation(self, token):
        """Return the still-pending (sent) invitation for `token`, or False
        if no such invitation exists."""
        try:
            return self.get(token=token, status=INVITATION_STATUS_SENT)
        except self.model.DoesNotExist:
            return False

    def create_token(self, email):
        """Create an unpredictable 40-char hex token tied to `email`."""
        # A short random salt ensures repeated invitations to the same
        # address produce different tokens.
        salt = sha_constructor(str(random.random())).hexdigest()[:5]
        token = sha_constructor(salt+email).hexdigest()
        return token
class Invitation(models.Model):
    """ Invitation model """
    from_user = models.ForeignKey(User)
    # 40-char hex digest produced by InvitationManager.create_token.
    token = models.CharField(max_length=40)
    name = models.CharField(blank=True, max_length=100)
    email = models.EmailField()
    message = models.TextField(blank=True)
    # default=0 corresponds to INVITATION_STATUS_SENT.
    status = models.PositiveSmallIntegerField(choices=INVITATION_STATUS_CHOICES, default=0)
    site = models.ForeignKey(Site, default=settings.SITE_ID)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    objects = InvitationManager()

    def __unicode__(self):
        return '<Invite>'

    @models.permalink
    def get_absolute_url(self):
        # Resolved through the namespaced 'invitations:invitation' URL pattern.
        return ('invitations:invitation', [self.token])
class InvitationAllotment(models.Model):
    """ InvitationAllotment model """
    user = models.OneToOneField(User, related_name='invitation_allotment')
    # Remaining invitations; starts at the configurable INVITATION_ALLOTMENT.
    amount = models.IntegerField(default=INVITATION_ALLOTMENT)
    site = models.ForeignKey(Site, default=settings.SITE_ID)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return '<Invitation Allotment>'

    def decrement(self, amount=1):
        """Consume `amount` invitations from this allotment.

        NOTE(review): this read-modify-write is not safe under concurrent
        requests; an F('amount') expression update would be — confirm
        whether concurrent decrements matter for this app.
        """
        self.amount = self.amount - amount
        self.save()
| bsd-3-clause |
steventimberman/masterDebater | env/lib/python2.7/site-packages/django/http/request.py | 44 | 21383 | from __future__ import unicode_literals
import copy
import re
import sys
from io import BytesIO
from itertools import chain
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost, ImproperlyConfigured, RequestDataTooBig,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import ImmutableList, MultiValueDict
from django.utils.encoding import (
escape_uri_path, force_bytes, force_str, force_text, iri_to_uri,
)
from django.utils.http import is_same_domain, limited_parse_qsl
from django.utils.six.moves.urllib.parse import (
quote, urlencode, urljoin, urlsplit,
)
# Sentinel distinguishing "no default supplied" from an explicit default of
# None in HttpRequest.get_signed_cookie().
RAISE_ERROR = object()
# Matches "domain" or "domain:port", where domain is either a dotted name or
# a bracketed IPv6 literal.
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
    """Raised when reading the request's data stream fails; wraps the
    original IOError's args (see HttpRequest.read/readline/body)."""
    pass
# Raised by HttpRequest.body once the underlying stream has been read.
class RawPostDataException(Exception):
    """
    You cannot access raw_post_data from a request that has
    multipart/* POST data if it has been accessed via POST,
    FILES, etc..
    """
    pass
class HttpRequest(object):
    """A basic HTTP request."""
    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []
    def __init__(self):
        # WARNING: The `WSGIRequest` subclass doesn't call `super`.
        # Any variable assignment made here should also happen in
        # `WSGIRequest.__init__()`.
        self.GET = QueryDict(mutable=True)
        self.POST = QueryDict(mutable=True)
        self.COOKIES = {}
        self.META = {}
        self.FILES = MultiValueDict()
        self.path = ''
        self.path_info = ''
        self.method = None
        self.resolver_match = None
        self._post_parse_error = False
        self.content_type = None
        self.content_params = None
    def __repr__(self):
        # Include method and full path when available, else just the class name.
        if self.method is None or not self.get_full_path():
            return force_str('<%s>' % self.__class__.__name__)
        return force_str(
            '<%s: %s %r>' % (self.__class__.__name__, self.method, force_str(self.get_full_path()))
        )
    def _get_raw_host(self):
        """
        Return the HTTP host using the environment or request headers. Skip
        allowed hosts protection, so may return an insecure host.
        """
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
                'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = self.get_port()
            # Only append the port when it is not the scheme's default.
            if server_port != ('443' if self.is_secure() else '80'):
                host = '%s:%s' % (host, server_port)
        return host
    def get_host(self):
        """Return the HTTP host using the environment or request headers."""
        host = self._get_raw_host()
        # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
        allowed_hosts = settings.ALLOWED_HOSTS
        if settings.DEBUG and not allowed_hosts:
            allowed_hosts = ['localhost', '127.0.0.1', '[::1]']
        domain, port = split_domain_port(host)
        if domain and validate_host(domain, allowed_hosts):
            return host
        else:
            msg = "Invalid HTTP_HOST header: %r." % host
            if domain:
                msg += " You may need to add %r to ALLOWED_HOSTS." % domain
            else:
                msg += " The domain name provided is not valid according to RFC 1034/1035."
            raise DisallowedHost(msg)
    def get_port(self):
        """Return the port number for the request as a string."""
        if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:
            port = self.META['HTTP_X_FORWARDED_PORT']
        else:
            port = self.META['SERVER_PORT']
        return str(port)
    def get_full_path(self, force_append_slash=False):
        """Return the escaped path, optionally slash-terminated, plus the query string."""
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s%s' % (
            escape_uri_path(self.path),
            '/' if force_append_slash and not self.path.endswith('/') else '',
            ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
        )
    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
        """
        Attempts to return a signed cookie. If the signature fails or the
        cookie has expired, raises an exception... unless you provide the
        default argument in which case that value will be returned instead.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value
    def get_raw_uri(self):
        """
        Return an absolute URI from variables available in this request. Skip
        allowed hosts protection, so may return insecure URI.
        """
        return '{scheme}://{host}{path}'.format(
            scheme=self.scheme,
            host=self._get_raw_host(),
            path=self.get_full_path(),
        )
    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no ``location`` is specified, the absolute URI is
        built on ``request.get_full_path()``. Anyway, if the location is
        absolute, it is simply converted to an RFC 3987 compliant URI and
        returned and if location is relative or is scheme-relative (i.e.,
        ``//example.com/``), it is urljoined to a base URL constructed from the
        request variables.
        """
        if location is None:
            # Make it an absolute url (but schemeless and domainless) for the
            # edge case that the path starts with '//'.
            location = '//%s' % self.get_full_path()
        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme,
                                                           host=self.get_host(),
                                                           path=self.path)
            # Join the constructed URL with the provided location, which will
            # allow the provided ``location`` to apply query strings to the
            # base path as well as override the host, if it begins with //
            location = urljoin(current_uri, location)
        return iri_to_uri(location)
    def _get_scheme(self):
        """
        Hook for subclasses like WSGIRequest to implement. Returns 'http' by
        default.
        """
        return 'http'
    @property
    def scheme(self):
        # Honour SECURE_PROXY_SSL_HEADER (a (header, value) pair) when set;
        # otherwise defer to the subclass hook.
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured(
                    'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
                )
            if self.META.get(header) == value:
                return 'https'
        return self._get_scheme()
    def is_secure(self):
        """Return True when the request scheme is https."""
        return self.scheme == 'https'
    def is_ajax(self):
        """Return True when the X-Requested-With header marks an XMLHttpRequest."""
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
    @property
    def encoding(self):
        # Encoding used to decode GET/POST; None means DEFAULT_CHARSET.
        return self._encoding
    @encoding.setter
    def encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, 'GET'):
            del self.GET
        if hasattr(self, '_post'):
            del self._post
    def _initialize_handlers(self):
        # Build the upload handler chain from the FILE_UPLOAD_HANDLERS setting.
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]
    @property
    def upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers
    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        # Handlers are frozen once a file upload has been parsed (see _files).
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers
    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()
    @property
    def body(self):
        # Raw request body as bytes; read once and cached in self._body.
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException("You cannot access body after reading from request's data stream")
            # Limit the maximum request data size that will be handled in-memory.
            if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                    int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
            try:
                self._body = self.read()
            except IOError as e:
                six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
            # Re-point the stream at the cached body so later reads still work.
            self._stream = BytesIO(self._body)
        return self._body
    def _mark_post_parse_error(self):
        # Leave POST/FILES empty and remember that parsing failed.
        self._post = QueryDict()
        self._files = MultiValueDict()
        self._post_parse_error = True
    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        if self.method != 'POST':
            self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return
        if self.content_type == 'multipart/form-data':
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occurred. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                self._mark_post_parse_error()
                raise
        elif self.content_type == 'application/x-www-form-urlencoded':
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
        else:
            self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
    def close(self):
        # Close any uploaded files produced by the multipart parser.
        if hasattr(self, '_files'):
            for f in chain.from_iterable(l[1] for l in self._files.lists()):
                f.close()
    # File-like and iterator interface.
    #
    # Expects self._stream to be set to an appropriate source of bytes by
    # a corresponding request subclass (e.g. WSGIRequest).
    # Also when request data has already been read by request.POST or
    # request.body, self._stream points to a BytesIO instance
    # containing that data.
    def read(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.read(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
    def readline(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
    def xreadlines(self):
        # Generator yielding one line at a time until the stream is exhausted.
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf
    __iter__ = xreadlines
    def readlines(self):
        return list(iter(self))
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict which represents a query string.
    A QueryDict can be used to represent GET or POST data. It subclasses
    MultiValueDict since keys in such data can be repeated, for instance
    in the data from a form with a <select multiple> field.
    By default QueryDicts are immutable, though the copy() method
    will always return a mutable copy.
    Both keys and values set on this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to unicode.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string=None, mutable=False, encoding=None):
        super(QueryDict, self).__init__()
        if not encoding:
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        query_string = query_string or ''
        parse_qsl_kwargs = {
            'keep_blank_values': True,
            'fields_limit': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
            'encoding': encoding,
        }
        if six.PY3:
            if isinstance(query_string, bytes):
                # query_string normally contains URL-encoded data, a subset of ASCII.
                try:
                    query_string = query_string.decode(encoding)
                except UnicodeDecodeError:
                    # ... but some user agents are misbehaving :-(
                    query_string = query_string.decode('iso-8859-1')
            for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
                self.appendlist(key, value)
        else:
            # Python 2: decode each value individually, falling back to latin-1.
            for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
                try:
                    value = value.decode(encoding)
                except UnicodeDecodeError:
                    value = value.decode('iso-8859-1')
                self.appendlist(force_text(key, encoding, errors='replace'),
                                value)
        # Only flip mutability at the end, after the initial population.
        self._mutable = mutable
    @classmethod
    def fromkeys(cls, iterable, value='', mutable=False, encoding=None):
        """
        Return a new QueryDict with keys (may be repeated) from an iterable and
        values from value.
        """
        q = cls('', mutable=True, encoding=encoding)
        for key in iterable:
            q.appendlist(key, value)
        if not mutable:
            q._mutable = False
        return q
    @property
    def encoding(self):
        # Lazily fall back to the project default charset.
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    @encoding.setter
    def encoding(self, value):
        self._encoding = value
    def _assert_mutable(self):
        # Every mutator calls this first; immutable instances refuse changes.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).__setitem__(key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)
    def __copy__(self):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in six.iterlists(self):
            result.setlist(key, value)
        return result
    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in six.iterlists(self):
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super(QueryDict, self).setlist(key, list_)
    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super(QueryDict, self).setlistdefault(key, default_list)
    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).appendlist(key, value)
    def pop(self, key, *args):
        self._assert_mutable()
        return super(QueryDict, self).pop(key, *args)
    def popitem(self):
        self._assert_mutable()
        return super(QueryDict, self).popitem()
    def clear(self):
        self._assert_mutable()
        super(QueryDict, self).clear()
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super(QueryDict, self).setdefault(key, default)
    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.
        :arg safe: Used to specify characters which do not require quoting, for
        example::
            >>> q = QueryDict(mutable=True)
            >>> q['next'] = '/a&b/'
            >>> q.urlencode()
            'next=%2Fa%26b%2F'
            >>> q.urlencode(safe='/')
            'next=/a%26b/'
        """
        output = []
        if safe:
            safe = force_bytes(safe, self.encoding)
            def encode(k, v):
                return '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            def encode(k, v):
                return urlencode({k: v})
        for k, list_ in self.lists():
            k = force_bytes(k, self.encoding)
            output.extend(encode(k, force_bytes(v, self.encoding))
                          for v in list_)
        return '&'.join(output)
# It's neither necessary nor appropriate to use
# django.utils.encoding.force_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Converts basestring objects to unicode, using the given encoding. Illegally
    encoded input characters are replaced with Unicode "unknown" codepoint
    (\ufffd).
    Returns any non-basestring objects without change.
    """
    if not isinstance(s, bytes):
        return s
    return six.text_type(s, encoding, 'replace')
def split_domain_port(host):
    """
    Return a (domain, port) tuple from a given host.
    Returned domain is lower-cased. If the host is invalid, the domain will be
    empty.
    """
    lowered = host.lower()
    if host_validation_re.match(lowered) is None:
        return '', ''
    if lowered.endswith(']'):
        # Bracketed IPv6 literal without a port.
        return lowered, ''
    if ':' in lowered:
        domain, _, port = lowered.rpartition(':')
    else:
        domain, port = lowered, ''
    # Strip a single trailing dot (FQDN form) from the domain.
    if domain.endswith('.'):
        domain = domain[:-1]
    return domain, port
def validate_host(host, allowed_hosts):
    """
    Validate the given host for this site.
    Check that the host looks valid and matches a host or host pattern in the
    given list of ``allowed_hosts``. Any pattern beginning with a period
    matches a domain and all its subdomains (e.g. ``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and anything
    else must match exactly.
    Note: This function assumes that the given host is lower-cased and has
    already had the port, if any, stripped off.
    Return ``True`` for a valid host, ``False`` otherwise.
    """
    return any(pattern == '*' or is_same_domain(host, pattern)
               for pattern in allowed_hosts)
| mit |
ChronoMonochrome/android_external_chromium_org | tools/telemetry/telemetry/page/actions/page_action.py | 23 | 2747 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class PageActionNotSupported(Exception):
    """Signals that a requested page action is not supported."""
    pass
class PageActionFailed(Exception):
    """Signals that a page action was attempted but failed."""
    pass
class PageAction(object):
    """Represents an action that a user might try to perform to a page."""
    # Class-wide counter used to hand out unique timeline marker ids.
    _next_timeline_marker_id = 0
    def __init__(self, attributes=None):
        # Copy every entry of `attributes` onto the instance as an attribute.
        # NOTE(review): iteritems() is Python 2 only.
        if attributes:
            for k, v in attributes.iteritems():
                setattr(self, k, v)
        self._timeline_marker_base_name = None
        self._timeline_marker_id = None
    def CustomizeBrowserOptions(self, options):
        """Override to add action-specific options to the BrowserOptions
        object."""
        pass
    def WillRunAction(self, page, tab):
        """Override to do action-specific setup before
        Test.WillRunAction is called."""
        pass
    def RunAction(self, page, tab, previous_action):
        # Subclasses must implement the action itself.
        raise NotImplementedError()
    def RunsPreviousAction(self):
        """Some actions require some initialization to be performed before the
        previous action. For example, wait for href change needs to record the old
        href before the previous action changes it. Therefore, we allow actions to
        run the previous action. An action that does this should override this to
        return True in order to prevent the previous action from being run twice."""
        return False
    def CleanUp(self, page, tab):
        # Override for action-specific teardown; default is a no-op.
        pass
    def CanBeBound(self):
        """If this class implements BindMeasurementJavaScript, override CanBeBound
        to return True so that a test knows it can bind measurements."""
        return False
    def BindMeasurementJavaScript(
            self, tab, start_js, stop_js):  # pylint: disable=W0613
        """Let this action determine when measurements should start and stop.
        A measurement can call this method to provide the action
        with JavaScript code that starts and stops measurements. The action
        determines when to execute the provided JavaScript code, for more accurate
        timings.
        Args:
          tab: The tab to do everything on.
          start_js: JavaScript code that starts measurements.
          stop_js: JavaScript code that stops measurements.
        """
        raise Exception('This action cannot be bound.')
    @staticmethod
    def ResetNextTimelineMarkerId():
        # Reset the shared marker counter (e.g. between test runs).
        PageAction._next_timeline_marker_id = 0
    def _SetTimelineMarkerBaseName(self, name):
        # Assign this action a unique (base name, id) pair for timeline markers.
        self._timeline_marker_base_name = name
        self._timeline_marker_id = PageAction._next_timeline_marker_id
        PageAction._next_timeline_marker_id += 1
    def GetTimelineMarkerName(self):
        # '<base>_<id>' once _SetTimelineMarkerBaseName has been called, else None.
        if self._timeline_marker_base_name:
            return \
                '%s_%d' % (self._timeline_marker_base_name, self._timeline_marker_id)
        else:
            return None
| bsd-3-clause |
gsteenss/Hacking-Team-Sweeper | simplescripts/sweeplinux.py | 1 | 1105 | #!/usr/bin/python2.7
# sweeplinux v0.1: a simple script to look for signs of HackingTeam RCS Linux agent
# gsteenss@riseup.net
#
# based on: https://github.com/0xPoly/Hacking-Team-Sweeper/blob/master/signatures/linux.md
import glob
import sys
from platform import platform,architecture
from os.path import expanduser
# Glob patterns for known artefacts left by the HackingTeam RCS Linux agent
# (see the signature document referenced in the header).
whoopsie=expanduser('~/.whoopsie*')
crashreports='/var/crash/.reports-*-*'
tmpreports='/var/tmp/.reports-*-*'
#print(sys.version,platform(),architecture())
# `ok` stays True only if none of the signature files are found on disk.
ok=True
if glob.glob(whoopsie)!=[]:
    print('WARNING: Detected HT whoopsie file in home directory, Your computer may be infected with a version of HackingTeam RCS Agent!')
    ok=False
if glob.glob(crashreports)!=[]:
    print('WARNING: Detected HT crash reports, Your computer may be infected with a version of HackingTeam RCS Agent!')
    ok=False
if glob.glob(tmpreports)!=[]:
    print('WARNING: Detected HT tmp reports, Your computer may be infected with a version of HackingTeam RCS Agent!')
    ok=False
if ok:
    print('OK: Nothing strange to report.')
else:
    print('Please shutdown your network connection NOW!')
mark-me/Pi-Jukebox | venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/chardet/compat.py | 270 | 1134 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Dan Blanchard
# Ian Cordasco
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
# Version-dependent aliases so callers can branch on PY2/PY3 and use one
# uniform text type / "string-like" tuple regardless of interpreter version.
if sys.version_info < (3, 0):
    PY2 = True
    PY3 = False
    base_str = (str, unicode)
    text_type = unicode
else:
    PY2 = False
    PY3 = True
    base_str = (bytes, str)
    text_type = str
| agpl-3.0 |
13anjou/Research-Internship | Analysis/Dictionnary.py | 1 | 23710 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import csv
from pylab import*
import numpy
import math
import matplotlib.pyplot as plt
import numbers
from allTest import k, s
def main() :
    """Build and return the master data dictionary.

    Keys are (station number, size-class upper bound) pairs; each value is a
    list whose positions are described by the column index below.  Data is
    merged from several CSV exports; missing values are padded with 'nan'
    (and -100 for the power-law distance).
    """
    dicoEchant = corres()
    dicoTotal = dict()
    # Import all the data.  Meaning of each list position:
    #0 echantillon (sample id)
    #1 u
    #2 v
    #3 Okubo Weiss
    #4 Lyapunov exp
    #5 SST_adv
    #6 SST_AMSRE
    #7 grad_SST_adv
    #8 age_from_bathy
    #9 lon_from_bathy
    #10 Shannon_Darwin_mean_all
    #11 Shannon_Darwin_month_all
    #12 Shannon_Darwin_mean_grp
    #13 Shannon_Darwin_month_grp
    #14 Shannon_Darwin_physat_month
    #15 Shannon_Darwin_physat_mean
    #16 Shannon_Darwin_retention
    #17 lon_origin_45d
    #18 lat_origin_45d
    #19 physat_type
    #20 abundance
    #21 richness
    #22 Shannon
    #23 Simpson
    #24 log(alpha)
    #25 Jevenness
    #26 S.obs observed species
    #27 S.chao1 observed chao index
    #28 se.chao1 estimated standard error on the chao index
    #29 S.ACE abundance based coverage estimation
    #30 se.ACE standard error on the abundance based coverage estimation
    #31 distance to the power law
    #32 number of slopes
    #33 slope 1 (last slope, the tail)
    #34 slope 2 (second-to-last slope)
    #35 length of slope 1 (last slope, the tail)
    #36 length of slope 2 (second-to-last slope)
    #37 weight of slope 1
    #38 weight of slope 2
    # Start by importing Tara_stations_multisat_extract.csv: one entry per
    # station, duplicated across the four size classes (5, 20, 180, 2000).
    premier = True
    with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\Tara_stations_multisat_extract.csv', 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row in reader :
            if premier :
                premier = False
                l=len(row)
                liste_param = ['echantillon']+row[1:l]
            elif row ==[] :
                pass
            else :
                dicoTotal[int(float(row[0])),5] = ['nan']+row[1:l]
                dicoTotal[int(float(row[0])),20] = ['nan']+row[1:l]
                dicoTotal[int(float(row[0])),180] = ['nan']+row[1:l]
                dicoTotal[int(float(row[0])),2000] = ['nan']+row[1:l]
    # Station 11 is absent from the extract; create empty placeholder rows.
    dicoTotal[11,5] = ['nan']*l
    dicoTotal[11,20] = ['nan']*l
    dicoTotal[11,180] = ['nan']*l
    dicoTotal[11,2000] = ['nan']*l
    # Then import the sample ids, storing them in position 0.
    # Beware: there are both SUR and DCM samples -- only SUR are kept!
    for clef in dicoEchant :
        if dicoEchant[clef][1] == 'SUR' :
            try :
                dicoTotal[dicoEchant[clef][0],dicoEchant[clef][2]][0]=clef
            except :
                pass
    # Then import the ribotype diversity columns (positions 20-25).
    premier=True
    with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\diversityProtistRibotypesORGNL', 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row in reader :
            l= len(row)
            if premier :
                premier =False
            else :
                try :
                    a=dicoEchant[row[0]]
                    if a[1] == 'SUR' :
                        dicoTotal[a[0],a[2]] = dicoTotal[a[0],a[2]] + row[1:l]
                except :
                    pass
    # At this point every dictionary entry should hold 26 elements;
    # make sure of it so later columns are not misaligned.
    l1 = 26
    for clef in dicoTotal :
        l2 = len(dicoTotal[clef])
        if l2 < 26 :
            dicoTotal[clef] = dicoTotal[clef] + ['nan']*(26-l2)
        elif l2>26 :
            print("entree {} trop longue !".format(clef[0]))
            pass
        else :
            pass
    # Then import the non-parametric richness data (positions 26-30).
    premier = True
    with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\Richness_NonParametrics', 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row in reader :
            if premier :
                premier = False
            else :
                try :
                    row2 = decoupage(row) # split the line's elements on the spaces
                    l=len(row2)
                    if dicoEchant[row2[0]][1] == 'SUR' :
                        a=dicoEchant[row2[0]]
                        dicoTotal[a[0],a[2]] = dicoTotal[a[0],a[2]] + row2[1:l]
                except :
                    pass
    # Every entry should now hold 31 elements; pad/flag as before.
    l1 = 31
    for clef in dicoTotal :
        l2 = len(dicoTotal[clef])
        if l2 < 31 :
            #print("perte de donnees sur l'echantillon' {}".format(clef[0]))
            dicoTotal[clef] = dicoTotal[clef] + ['nan']*(31-l2)
        elif l2>31 :
            print("entree {} trop longue !".format(clef[0]))
            pass
    # Import the distances to the power law (position 31; -100 = missing).
    # NOTE(review): `enlever` is not defined in this file -- presumably a
    # helper imported elsewhere; confirm.
    with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Resultats\\log_log\\10_{}\\toutes\\ecartPowerLaw_{}.csv'.format(k,k), 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=';',quotechar=',', quoting=csv.QUOTE_MINIMAL)
        for row in reader :
            for clef in dicoTotal :
                if float(enlever(row[0])) == clef[0] :
                    try :
                        dicoTotal[clef][31]=float(enlever(row[1]))
                    except :
                        dicoTotal[clef]=dicoTotal[clef]+[float(enlever(row[1]))]
                elif len(dicoTotal[clef])==31 :
                    dicoTotal[clef] = dicoTotal[clef] + [-100]
    # Only the slopes remain to be imported (positions 32-38).
    with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Resultats\\log_log\\10_{}\\toutes\\pentes_{}.csv'.format(k,k), 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=';',quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row in reader :
            pStations = float(remove(row[0],','))
            pPentes1 = float(remove(row[2],','))
            pNb = int(float(remove(row[3],',')))
            diam = int(float(remove(row[4],',')))
            if int(float(remove(row[3],','))) == 1 :
                # Single-slope fit: no second slope/length/weight.
                pPentes2 = 'Null'
                pLong2 = 'Null'
                pLong1 = float(remove(row[5],','))
                poids1 = float(remove(row[6],','))
                poids2 = 'Null'
            else :
                pLong2 = float(remove(row[5],','))
                pLong1 = float(remove(row[6],','))
                pPentes2 = float(remove(row[1],','))
                poids2 = float(remove(row[7],','))
                poids1 = float(remove(row[8],','))
            try :
                dicoTotal[pStations,diam] = dicoTotal[pStations,diam] + [pNb,pPentes1,pPentes2,pLong1,pLong2,poids1,poids2]
            except :
                pass
    # Finally convert every element to a number, when possible (scientific
    # notation is handled by tentative()).
    indice = 0
    for clef in dicoTotal :
        indice = 0
        for i in dicoTotal[clef] :
            try :
                dicoTotal[clef][indice] = float(dicoTotal[clef][indice])
            except :
                (a,b) = tentative(dicoTotal[clef][indice])
                if a :
                    dicoTotal[clef][indice] = b
                pass
            indice = indice + 1
    return(dicoTotal)
def corres() :
    """Read photic_samples.csv and return a dict mapping sample id ->
    (station number, depth layer, size-class upper bound).

    NOTE(review): the column meanings below come from the original inline
    comments; confirm against the CSV header.  If row[3] matches none of the
    four size-class strings, `d` keeps its value from the previous row (or
    raises NameError on the first row) -- confirm that cannot happen.
    """
    with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\photic_samples.csv', 'r') as csvfile:
        echantillons = dict()
        # The file actually uses ';' as the separator (the original comment
        # claimed ',').
        reader = csv.reader(csvfile, delimiter=';', quotechar='|')
        for row in reader:
            a= row[0]
            b=row[2] # station
            c= row[1] # SUR or DCM
            if row[3]=='180-2000' :
                d=2000
            if row[3]=='0.8-5' :
                d=5
            if row[3]=='20-180' :
                d=180
            if row[3]=='5a20' :
                d=20
            echantillons[a] = (int(float(b)),c,d)
    return(echantillons)
def decoupage(row) :
    """Split the first field of *row* on single spaces and return the tokens.

    BUG FIX: the original accumulated characters into a variable initialized
    as ``list()`` instead of ``''``, so the first token came out prefixed with
    the literal characters ``'[]'``; a special-case hack repaired only tokens
    starting with ``'T'``.  A plain split reproduces the intended behaviour
    for every token (consecutive spaces still yield empty tokens, exactly as
    the character-by-character loop did).
    """
    return row[0].split(' ')
def tracer(a, b) :
    """Scatter-plot parameter *b* against parameter *a*, one point per
    (station, size class), coloured by size class and annotated with the
    station number, then save the figure.

    Does not work with the slope columns (use pente() for those).

    Fixes over the original:
    - removed a stray ``print(test)`` debug line that raised NameError as
      soon as a 0.8-5 size-class point was plotted;
    - the 180-2000 branch tested the 0.8-5 "label already emitted" flag, so
      its legend entry could be lost; each class now tracks its own flag;
    - an unused first figure (created then abandoned) is no longer made;
    - the four copy-pasted scatter/annotate branches are driven by one table.
    """
    noms = {0 : 'echantillon',1 : 'u',2:'v',3:'Okubo Weiss',4: 'Lyapunov exp',5 : 'SST_adv',6: 'SST_AMSRE',7 : 'grad_SST_adv',8: 'age_from_bathy',9: 'lon_from_bathy',10 : 'Shannon_Darwin_mean_all',
            11: 'Shannon_Darwin_month_all',12 :'Shannon_Darwin_mean_grp',13: 'Shannon_Darwin_month_grp',14 :'Shannon_Darwin_physat_month',15 : 'Shannon_Darwin_physat_mean',16 :'retention', 17 : 'lon_origin_45d',
            18: 'lat_origin_45d',19 :'physat_type',20 : 'abundance',21: 'richness',22 : 'Shannon',23: 'Simpson',24 :'log(alpha)',25 : 'Jevenness',26 :'S.obs',27 :'S.chao1',28 : 'se.chao1',
            29 :'S.ACE',30 :'se.ACE',31 : 'distance a la loi de puissance', 32 :'nb de pentes',33 :'pente1',34: 'pente2',35 : 'long1',36 :'long2', 37 : 'poids1', 38 : 'poids2'}
    noma = noms[a]
    nomb = noms[b]
    liste = selection(a, b)
    # Distinct, non-NaN x values, sorted: fixes the plotting order.
    abcisse = []
    for (_, _, x, _) in liste:
        if str(x) != 'nan' and x not in abcisse:
            abcisse.append(x)
    abcisse.sort()
    # color and legend label per size-class upper bound
    styles = {
        5: ('red', 'taille 0.8 a 5, queue'),
        20: ('magenta', 'taille 5 a 20, queue'),
        180: ('green', 'taille 20 a 180, queue'),
        2000: ('blue', 'taille 180 a 2000, queue'),
    }
    plt.figure(figsize=(30, 20))
    labelled = set()  # size classes whose legend entry was already emitted
    for dot in abcisse:
        for (station, taille, x, y) in liste:
            if x == dot and taille in styles:
                couleur, etiquette = styles[taille]
                if taille in labelled:
                    scatter(x, y, color=couleur, s=120)
                else:
                    labelled.add(taille)
                    scatter(x, y, color=couleur, s=120, label=etiquette)
                annotate(station, (x, y))
    plt.title("trace de {} en fonction de {}".format(nomb, noma),fontsize=40)
    plt.legend()
    yticks(fontsize=40)
    xticks(fontsize=40)
    plt.xlabel("{}".format(noma), fontsize=40)
    plt.ylabel("{}".format(nomb), fontsize=40)
    savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\{}F{}.png'.format(nomb,noma))
def selection(a, b) :
    """Return (station, size_class, value_a, value_b) tuples for every entry
    of the master dictionary where both requested columns hold usable values.

    The sentinels -1 and -100 and the string 'nan' mark missing data and are
    filtered out.  BUG FIX: the original tested column *a* for 'nan' twice
    and never tested column *b*, letting NaN y-values through.
    """
    dicoTotal = main()
    res = list()
    for clef in dicoTotal :
        va = dicoTotal[clef][a]
        vb = dicoTotal[clef][b]
        if (va != -1 and vb != -1 and va != -100 and vb != -100
                and str(va) != 'nan' and str(vb) != 'nan'):
            res = res + [(clef[0], clef[1], va, vb)]
    return(res)
def remove(l,item) :
    """Return the elements of *l* concatenated into a string, with every
    element equal to *item* dropped (e.g. strips the ',' decoration from the
    CSV fields of the slope files)."""
    return ''.join(element for element in l if element != item)
def tentative(s) :
    """Try to interpret *s* as '<base>e<exponent>'.

    Returns (True, float(base) ** float(exponent)) when an 'e' occurs in
    *s*, otherwise (False, s_without_change) where the second element is the
    accumulated character string.

    NOTE(review): for scientific notation one would expect
    base * 10 ** exponent rather than base ** exponent -- the original
    behaviour is kept as-is; confirm intent before relying on the value.
    """
    mantisse = str()
    exposant = str()
    vu_e = False
    for caractere in s :
        if caractere == 'e' :
            vu_e = True
        elif vu_e :
            exposant = exposant + str(caractere)
        else :
            mantisse = mantisse + str(caractere)
    if not vu_e :
        return(vu_e, mantisse)
    valeur = float(mantisse) ** float(exposant)
    print('tentative')
    return(vu_e, valeur)
def serie(k) :
    """Trace every column from 3 to 18 (inclusive) against column *k*."""
    for indice in range(3, 19) :
        tracer(indice, k)
def _pente_taille(dicoTotal, a, clefs, taille, couleur, etiquette, deux_pentes) :
    """Scatter the slope(s) of every clef whose size class equals *taille*.

    Only the first plotted point carries *etiquette* so the label appears
    once in the legend.  When *deux_pentes* is true the second slope
    (column 34) is plotted and annotated as well.
    """
    premier = True
    for clef in clefs :
        if clef[1] == taille :
            x = dicoTotal[clef][a]
            if premier :
                premier = False
                scatter(x, dicoTotal[clef][33], color=couleur, s=120, label=etiquette)
            else :
                scatter(x, dicoTotal[clef][33], color=couleur, s=120)
            annotate(clef[0], (x, dicoTotal[clef][33]))
            if deux_pentes :
                scatter(x, dicoTotal[clef][34], color=couleur, s=120)
                annotate(clef[0], (x, dicoTotal[clef][34]))

def pente(a) :
    """Plot the power-law slopes (columns 33/34) against column *a*.

    Samples with a single slope (column 32 == 1) and samples with two
    slopes (column 32 == 2) are drawn per size class with distinct colors,
    then the figure is written to disk.  The eight near-identical scatter
    blocks of the original were factored into _pente_taille().
    """
    noms = {0 : 'echantillon',1 : 'u',2:'v',3:'Okubo Weiss',4: 'Lyapunov exp',5 : 'SST_adv',6: 'SST_AMSRE',7 : 'grad_SST_adv',8: 'age_from_bathy',9: 'lon_from_bathy',10 : 'Shannon_Darwin_mean_all',
            11: 'Shannon_Darwin_month_all',12 :'Shannon_Darwin_mean_grp',13: 'Shannon_Darwin_month_grp',14 :'Shannon_Darwin_physat_month',15 : 'Shannon_Darwin_physat_mean',16 :'retention', 17 : 'lon_origin_45d',
            18: 'lat_origin_45d',19 :'physat_type',20 : 'abundance',21: 'richness',22 : 'Shannon',23: 'Simpson',24 :'log(alpha)',25 : 'Jevenness',26 :'S.obs',27 :'S.chao1',28 : 'se.chao1',
            29 :'S.ACE',30 :'se.ACE',31 : 'distance a la loi de puissance', 32 :'nb de pentes',33 :'pente1',34: 'pente2',35 : 'long1',36 :'long2', 37 : 'poids1', 38 : 'poids2'}
    nom = noms[a]
    dicoTotal = main()
    liste1 = list()
    liste2 = list()
    for clef in dicoTotal :
        try :
            if dicoTotal[clef][32] == 1 :
                liste1.append(clef)
            elif dicoTotal[clef][32] == 2 :
                liste2.append(clef)
        except Exception :
            # entries without the slope-count column are simply skipped
            pass
    plt.figure(figsize=(30,20))
    # samples with a single slope ("queue")
    _pente_taille(dicoTotal, a, liste1, 5, 'red', 'taille 0.8 a 5, queue', False)
    _pente_taille(dicoTotal, a, liste1, 20, 'magenta', 'taille 5 a 20, queue', False)
    _pente_taille(dicoTotal, a, liste1, 180, 'green', 'taille 20 a 180, queue', False)
    _pente_taille(dicoTotal, a, liste1, 2000, 'blue', 'taille 180 a 2000, queue', False)
    # samples with two slopes: plot the tail slope and the one before it
    _pente_taille(dicoTotal, a, liste2, 5, 'red', 'taille 0.8 a 5, avant derniere pente', True)
    _pente_taille(dicoTotal, a, liste2, 20, 'magenta', 'taille 5 a 20 avant derniere pente', True)
    _pente_taille(dicoTotal, a, liste2, 180, 'green', 'taille 20 a 180 avant derniere pente', True)
    _pente_taille(dicoTotal, a, liste2, 2000, 'blue', 'taille 180 a 2000 avant derniere pente', True)
    plt.legend()
    plt.title('pentes en fonction de {}'.format(nom),fontsize=40)
    plt.xlabel("{}".format(nom), fontsize=40)
    plt.ylabel("slope", fontsize=40)
    yticks(fontsize=40)
    xticks(fontsize=40)
    savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\pentesF{}.png'.format(nom))
def _penteSans_taille(dicoTotal, a, clefs, taille, couleur, etiquette, deux_pentes) :
    """Scatter the slope(s) of every clef of size class *taille* (helper of
    penteSans); only the first point carries *etiquette* for the legend."""
    premier = True
    for clef in clefs :
        if clef[1] == taille :
            x = dicoTotal[clef][a]
            if premier :
                premier = False
                scatter(x, dicoTotal[clef][33], color=couleur, s=120, label=etiquette)
            else :
                scatter(x, dicoTotal[clef][33], color=couleur, s=120)
            annotate(clef[0], (x, dicoTotal[clef][33]))
            if deux_pentes :
                scatter(x, dicoTotal[clef][34], color=couleur, s=120)
                annotate(clef[0], (x, dicoTotal[clef][34]))

def penteSans(a, s=0.5) :
    """Like pente(), but on the dictionary filtered by effacer(s).

    BUG FIX: the original body called effacer(s) although no variable *s*
    existed in the function scope, so every call raised NameError.  *s* is
    now an explicit weight threshold forwarded to effacer(); the default
    keeps existing single-argument callers working.
    TODO(review): confirm 0.5 is a sensible default weight threshold.
    """
    noms = {0 : 'echantillon',1 : 'u',2:'v',3:'Okubo Weiss',4: 'Lyapunov exp',5 : 'SST_adv',6: 'SST_AMSRE',7 : 'grad_SST_adv',8: 'age_from_bathy',9: 'lon_from_bathy',10 : 'Shannon_Darwin_mean_all',
            11: 'Shannon_Darwin_month_all',12 :'Shannon_Darwin_mean_grp',13: 'Shannon_Darwin_month_grp',14 :'Shannon_Darwin_physat_month',15 : 'Shannon_Darwin_physat_mean',16 :'retention', 17 : 'lon_origin_45d',
            18: 'lat_origin_45d',19 :'physat_type',20 : 'abundance',21: 'richness',22 : 'Shannon',23: 'Simpson',24 :'log(alpha)',25 : 'Jevenness',26 :'S.obs',27 :'S.chao1',28 : 'se.chao1',
            29 :'S.ACE',30 :'se.ACE',31 : 'distance a la loi de puissance', 32 :'nb de pentes',33 :'pente1',34: 'pente2',35 : 'long1',36 :'long2', 37 : 'poids1', 38 : 'poids2'}
    nom = noms[a]
    dicoTotal = effacer(s)
    liste1 = list()
    liste2 = list()
    for clef in dicoTotal :
        try :
            if dicoTotal[clef][32] == 1 :
                liste1.append(clef)
            elif dicoTotal[clef][32] == 2 :
                liste2.append(clef)
        except Exception :
            # entries without the slope-count column are simply skipped
            pass
    plt.figure(figsize=(30,20))
    # samples with a single slope ("queue")
    _penteSans_taille(dicoTotal, a, liste1, 5, 'red', 'taille 0.8 a 5, queue', False)
    _penteSans_taille(dicoTotal, a, liste1, 20, 'magenta', 'taille 5 a 20, queue', False)
    _penteSans_taille(dicoTotal, a, liste1, 180, 'green', 'taille 20 a 180, queue', False)
    _penteSans_taille(dicoTotal, a, liste1, 2000, 'blue', 'taille 180 a 2000, queue', False)
    # samples with two slopes: plot the tail slope and the one before it
    _penteSans_taille(dicoTotal, a, liste2, 5, 'red', 'taille 0.8 a 5, avant derniere pente', True)
    _penteSans_taille(dicoTotal, a, liste2, 20, 'magenta', 'taille 5 a 20 avant derniere pente', True)
    _penteSans_taille(dicoTotal, a, liste2, 180, 'green', 'taille 20 a 180 avant derniere pente', True)
    _penteSans_taille(dicoTotal, a, liste2, 2000, 'blue', 'taille 180 a 2000 avant derniere pente', True)
    plt.legend()
    plt.title('pentes en fonction de {}'.format(nom),fontsize=40)
    plt.xlabel("{}".format(nom), fontsize=40)
    plt.ylabel("slope", fontsize=40)
    yticks(fontsize=40)
    xticks(fontsize=40)
    # NOTE(review): same output file as pente() -- each run overwrites the
    # other's figure; a distinct name (e.g. 'pentesSansF{}.png') was probably
    # intended.  Kept as-is to preserve behaviour.
    savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\pentesF{}.png'.format(nom))
def pentesAvec() :
    """Trace pente() for every column from 3 to 31 (inclusive)."""
    for indice in range(3, 32) :
        pente(indice)
def pentesSans() :
    """Trace penteSans() for every column from 3 to 31 (inclusive)."""
    for indice in range(3, 32) :
        penteSans(indice)
def effacer(s) :
    """Return main()'s dictionary with slopes whose weight exceeds *s* dropped.

    For single-slope samples (column 32 == 1) the slope is discarded when its
    weight (column 37) is above the threshold.  For two-slope samples
    (column 32 == 2) the surviving slope, if any, is shifted into the
    column-33/35/37 slots.  Entries without slope columns are left untouched.

    The unused local name table (noms) of the original was removed and the
    bare except was narrowed to Exception.
    """
    dicoTotal = main()
    for clef in dicoTotal :
        try :
            donnees = dicoTotal[clef]
            if donnees[32] == 1 :
                if donnees[37] > s :
                    donnees[32] = 0
            elif donnees[32] == 2 :
                if donnees[37] > s :
                    if donnees[38] > s :
                        # both weights too large: no usable slope remains
                        donnees[32] = 0
                    else :
                        # keep only the second slope, shifted into slot 1
                        donnees[32] = 1
                        donnees[33] = donnees[34]
                        donnees[37] = donnees[38]
                        donnees[35] = donnees[36]
                # NOTE(review): donnees[37] may already have been overwritten
                # with donnees[38] just above, so this test can compare the
                # second weight against itself -- confirm the intended order.
                if donnees[38] > s and donnees[37] < s :
                    donnees[32] = 1
        except Exception :
            # entries without enough columns are simply skipped
            pass
    return(dicoTotal)
def enlever(s) :
    """Return *s* with every space character removed."""
    return ''.join(caractere for caractere in s if caractere != ' ')
if __name__ == "__main__" :
    # NOTE(review): pentes() is not defined anywhere in this file -- running
    # the script as-is raises NameError.  Probably pentesAvec() or
    # pentesSans() was intended; confirm before relying on this entry point.
    pentes()
| gpl-2.0 |
yfried/ansible | test/units/modules/network/dellos9/test_dellos9_command.py | 68 | 4281 | # (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.dellos9 import dellos9_command
from units.modules.utils import set_module_args
from .dellos9_module import TestDellos9Module, load_fixture
class TestDellos9CommandModule(TestDellos9Module):
    """Unit tests for the dellos9_command Ansible module."""

    module = dellos9_command

    def setUp(self):
        super(TestDellos9CommandModule, self).setUp()
        # Replace run_commands() so no real device connection is needed.
        self.mock_run_commands = patch('ansible.modules.network.dellos9.dellos9_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestDellos9CommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            responses = list()
            for item in commands:
                # A command may arrive either as a plain string or wrapped
                # in a JSON document holding a 'command' key.
                raw = item['command']
                try:
                    raw = json.loads(raw)['command']
                except ValueError:
                    pass
                responses.append(load_fixture(str(raw).replace(' ', '_')))
            return responses

        self.run_commands.side_effect = load_from_file

    def test_dellos9_command_simple(self):
        set_module_args(dict(commands=['show version']))
        stdout = self.execute_module()['stdout']
        self.assertEqual(len(stdout), 1)
        self.assertTrue(stdout[0].startswith('Dell Real Time'))

    def test_dellos9_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        stdout = self.execute_module()['stdout']
        self.assertEqual(len(stdout), 2)
        self.assertTrue(stdout[0].startswith('Dell Real Time'))

    def test_dellos9_command_wait_for(self):
        set_module_args(dict(commands=['show version'],
                             wait_for='result[0] contains "Dell Real"'))
        self.execute_module()

    def test_dellos9_command_wait_for_fails(self):
        set_module_args(dict(commands=['show version'],
                             wait_for='result[0] contains "test string"'))
        self.execute_module(failed=True)
        # Default behaviour: ten retries before the module gives up.
        self.assertEqual(self.run_commands.call_count, 10)

    def test_dellos9_command_retries(self):
        set_module_args(dict(commands=['show version'],
                             wait_for='result[0] contains "test string"',
                             retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_dellos9_command_match_any(self):
        conditions = ['result[0] contains "Dell Real"',
                      'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'],
                             wait_for=conditions, match='any'))
        self.execute_module()

    def test_dellos9_command_match_all(self):
        conditions = ['result[0] contains "Dell Real"',
                      'result[0] contains "Operating System"']
        set_module_args(dict(commands=['show version'],
                             wait_for=conditions, match='all'))
        self.execute_module()

    def test_dellos9_command_match_all_failure(self):
        conditions = ['result[0] contains "Dell Real"',
                      'result[0] contains "test string"']
        set_module_args(dict(commands=['show version', 'show version'],
                             wait_for=conditions, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
redhat-openstack/nova | nova/ipv6/rfc2462.py | 97 | 1712 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RFC2462 style IPv6 address generation."""
import netaddr
from nova.i18n import _
def to_global(prefix, mac, project_id):
    """Build an RFC2462-style global IPv6 address from *prefix* and *mac*.

    The MAC is expanded to a modified EUI-64 host part (universal/local bit
    flipped via the '::0200:0:0:0' mask) and OR-ed into the network prefix.
    *project_id* is accepted for interface compatibility but unused.

    Raises TypeError when the MAC or the prefix cannot be parsed.
    """
    try:
        eui64_words = netaddr.EUI(mac).eui64().words
        host_bits = netaddr.IPAddress(
            int(''.join('%02x' % word for word in eui64_words), 16))
        network = netaddr.IPNetwork(prefix).ip
        return ((host_bits ^ netaddr.IPAddress('::0200:0:0:0')) | network).format()
    except netaddr.AddrFormatError:
        raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
    except TypeError:
        raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
def to_mac(ipv6_address):
    """Recover the MAC address embedded in an RFC2462-style IPv6 address.

    Masks out the 64-bit host part, flips the universal/local bit back, and
    drops the 0xfffe filler bytes of the EUI-64 expansion.
    """
    address = netaddr.IPAddress(ipv6_address)
    host_part = address & netaddr.IPAddress('::ffff:ffff:ffff:ffff')
    eui64_words = netaddr.EUI(
        int(host_part ^ netaddr.IPAddress('::0200:0:0:0'))).words
    return ':'.join('%02x' % word
                    for word in eui64_words[0:3] + eui64_words[5:8])
| apache-2.0 |
i5o/openshot-sugar | openshot/openshot/windows/TreeBlender.py | 3 | 6181 | # OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
import os
import gtk, gobject, pango, mlt
from classes import project, effect
# init the foreign language
from language import Language_Init
class OpenShotTree:
    """GTK tree model/view wrapper listing the available Blender effects.

    Builds a gtk.TreeStore with four columns (thumbnail pixbuf, translated
    title, service string, unique id) and populates it from the project's
    Blender effect list.
    """

    def __init__(self, treeview, project):
        """Attach the store to *treeview* and fill it from *project*."""
        # Add language support
        _ = Language_Init.Translator(project).lang.gettext
        self._ = _

        # init vars
        self.treeview = treeview
        self.project = project

        # create a TreeStore: thumbnail, title, service, unique id
        self.store = gtk.TreeStore(gtk.gdk.Pixbuf, str, str, str)

        # Set the treeview's data model
        self.treeview.set_model(self.store)
        self.treeviewAddGeneralPixbufColumn(self.treeview, _("Thumb"), 0, resizable=False, reorderable=False, project=self.project)
        self.treeviewAddGeneralTextColumn(self.treeview, _("Name"), 1, resizable=False, reorderable=True, editable=False, visible=True, elipses=False, autosize=True, project=self.project)
        # hidden bookkeeping columns
        self.treeviewAddGeneralTextColumn(self.treeview, "blend_file", 2, resizable=True, reorderable=True, editable=False, visible=False, project=self.project)
        self.treeviewAddGeneralTextColumn(self.treeview, "unique_id", 3, resizable=True, reorderable=True, editable=False, visible=False, project=self.project)

        # populate tree
        self.populate_tree()

        # connect signals
        #self.treeview.connect_after('drag_begin', self.on_treeEffects_drag_begin)

    def populate_tree(self, clip_effects=None):
        """Clear the store and re-add one row per Blender effect.

        NOTE(review): unique_ids below is always an empty list, so passing a
        truthy *clip_effects* raises IndexError on the first iteration --
        confirm the intended source of the ids before using that path.
        """
        # get correct gettext method
        _ = self._

        # clear the tree data
        self.store.clear()

        # Init List of Blender Files
        BLENDER_DIR = self.project.BLENDER_DIR
        my_effects = []
        unique_ids = []
        my_effects = self.project.form.blender_list

        # Add effects to dropdown
        counter = 0
        for my_effect in my_effects:

            # get image for filter
            file_path = os.path.join(BLENDER_DIR, "icons", my_effect.icon)

            # get the pixbuf
            pbThumb = gtk.gdk.pixbuf_new_from_file(file_path)

            # resize thumbnail
            pbThumb = pbThumb.scale_simple(80, 62, gtk.gdk.INTERP_BILINEAR)

            # perminately save resized icon
            # pbThumb.save(os.path.join("/home/jonathan/Desktop/thumbnails", my_effect.icon), "png", {})

            # add transition to tree
            item = self.store.append(None)
            self.store.set_value(item, 0, pbThumb)
            self.store.set_value(item, 1, _(my_effect.title))
            # audio effects carry "service:audio_effect", others just the service
            if my_effect.audio_effect:
                self.store.set_value(item, 2, "%s:%s" % (my_effect.service, my_effect.audio_effect))
            else:
                self.store.set_value(item, 2, my_effect.service)
            if clip_effects:
                self.store.set_value(item, 3, unique_ids[counter])
            else:
                self.store.set_value(item, 3, None)

            counter += 1

    def get_real_effect(self, service=None, title=None):
        """ Get the actual effect object from the service name """
        # get correct gettext method
        _ = self._

        # loop through the effects; match either the service string (with or
        # without the ":audio_effect" suffix) or the translated title
        for my_effect in self.project.form.blender_list:
            if service:
                # find matching effect
                if my_effect.service == service or my_effect.service + ":" + my_effect.audio_effect == service:
                    return my_effect
            if title:
                # find matching effect
                if _(my_effect.title) == _(title):
                    return my_effect

        # no match found
        return None

    # def on_treeEffects_drag_begin(self, widget, *args):
    # context = args[0]
    #
    # # update drag type
    # self.project.form.drag_type = "effect"
    #
    # # Get the drag icon
    # play_image = gtk.image_new_from_file(os.path.join(self.project.THEMES_DIR, self.project.theme, "icons", "plus.png"))
    # pixbuf = play_image.get_pixbuf()
    # context.set_icon_pixbuf(pixbuf, 15, 10)

    def treeviewAddGeneralTextColumn(self, treeview, name, pos = 0, resizable=True, reorderable=False, editable=False, visible=True, elipses=False, autosize=False, project=None):
        '''Add a new text column to the model'''
        cell = gtk.CellRendererText()
        cell.set_property('editable', editable)
        if (elipses):
            cell.set_property("ellipsize", pango.ELLIPSIZE_END)
        col = gtk.TreeViewColumn(name, cell, markup = pos)
        if (autosize):
            # let the column grow to its content instead of expanding
            col.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
            col.set_expand(False)
        col.set_resizable(resizable)
        col.set_reorderable(reorderable)
        col.set_property("visible", visible)
        treeview.append_column(col)
        treeview.set_headers_clickable(True)
        if (editable):
            # route cell edits back into cell_edited()
            model = treeview.get_model()
            cell.connect('edited', self.cell_edited,model, project)
        if (reorderable):
            col.set_sort_column_id(pos)
        return cell, col

    def treeviewAddGeneralPixbufColumn(self, treeview, name, pos = 0, resizable=True, reorderable=False, project=None):
        '''Add a new gtk.gdk.Pixbuf column to the model'''
        cell = gtk.CellRendererPixbuf()
        col = gtk.TreeViewColumn(name, cell, pixbuf = pos)
        col.set_resizable(resizable)
        col.set_reorderable(reorderable)
        col.set_alignment(0.0)
        treeview.append_column(col)
        treeview.set_headers_clickable(True)
        if (reorderable):
            col.set_sort_column_id(pos)
        return cell, col

    def cell_edited(self, cell, row, new_text, model, project=None):
        ##Fired when the editable label cell is edited
        # (intentionally disabled; kept for reference)
        #get the row that was edited
        #iter = model.get_iter_from_string(row)
        #column = cell.get_data(_("Label"))
        #set the edit in the model
        #model.set(iter,3,new_text)
        #update the file object with the label edit
        #filename = model.get_value(iter, 1)
        #project.project_folder.UpdateFileLabel(filename, new_text, 0)
        pass
| gpl-3.0 |
yanboliang/spark | examples/src/main/python/als.py | 121 | 3267 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is an example implementation of ALS for learning how to use Spark. Please refer to
pyspark.ml.recommendation.ALS for more conventional use.
This example requires numpy (http://www.numpy.org/)
"""
from __future__ import print_function
import sys
import numpy as np
from numpy.random import rand
from numpy import matrix
from pyspark.sql import SparkSession
LAMBDA = 0.01 # regularization
np.random.seed(42)
def rmse(R, ms, us):
    """Root-mean-square error between the ratings matrix *R* and ms * us.T.

    GENERALIZATION: the matrix dimensions are derived from R.shape instead
    of the module-level globals M and U -- identical result when R is M x U,
    but the function is now usable on its own with any matrix size.
    """
    diff = R - ms * us.T
    rows, cols = R.shape
    return np.sqrt(np.sum(np.power(diff, 2)) / (rows * cols))
def update(i, mat, ratings):
    """Solve the regularized least-squares problem for row *i* of *ratings*,
    holding the other side's factor matrix *mat* fixed.

    Returns the new factor vector obtained from the normal equations with
    Tikhonov regularization (LAMBDA scaled by the number of rows of *mat*).
    """
    n_rows, n_features = mat.shape
    XtX = mat.T * mat
    Xty = mat.T * ratings[i, :].T

    # add the regularization term to the diagonal of the normal matrix
    for j in range(n_features):
        XtX[j, j] += LAMBDA * n_rows

    return np.linalg.solve(XtX, Xty)
if __name__ == "__main__":

    """
    Usage: als [M] [U] [F] [iterations] [partitions]"
    """

    print("""WARN: This is a naive implementation of ALS and is given as an
      example. Please use pyspark.ml.recommendation.ALS for more
      conventional use.""", file=sys.stderr)

    spark = SparkSession\
        .builder\
        .appName("PythonALS")\
        .getOrCreate()

    sc = spark.sparkContext

    # Problem size: M movies x U users with F latent features; all taken
    # from the command line with small defaults for a quick demo run.
    M = int(sys.argv[1]) if len(sys.argv) > 1 else 100
    U = int(sys.argv[2]) if len(sys.argv) > 2 else 500
    F = int(sys.argv[3]) if len(sys.argv) > 3 else 10
    ITERATIONS = int(sys.argv[4]) if len(sys.argv) > 4 else 5
    partitions = int(sys.argv[5]) if len(sys.argv) > 5 else 2

    print("Running ALS with M=%d, U=%d, F=%d, iters=%d, partitions=%d\n" %
          (M, U, F, ITERATIONS, partitions))

    # Synthetic rank-F ratings matrix and random initial factor matrices.
    R = matrix(rand(M, F)) * matrix(rand(U, F).T)
    ms = matrix(rand(M, F))
    us = matrix(rand(U, F))

    # Broadcast the (read-only) matrices to the workers.
    Rb = sc.broadcast(R)
    msb = sc.broadcast(ms)
    usb = sc.broadcast(us)

    # Alternating least squares: fix one factor matrix, solve for the
    # other row-by-row in parallel, then swap.
    for i in range(ITERATIONS):
        ms = sc.parallelize(range(M), partitions) \
               .map(lambda x: update(x, usb.value, Rb.value)) \
               .collect()
        # collect() returns a list, so array ends up being
        # a 3-d array, we take the first 2 dims for the matrix
        ms = matrix(np.array(ms)[:, :, 0])
        msb = sc.broadcast(ms)

        us = sc.parallelize(range(U), partitions) \
               .map(lambda x: update(x, msb.value, Rb.value.T)) \
               .collect()
        us = matrix(np.array(us)[:, :, 0])
        usb = sc.broadcast(us)

        error = rmse(R, ms, us)
        print("Iteration %d:" % i)
        print("\nRMSE: %5.4f\n" % error)

    spark.stop()
| apache-2.0 |
wfxiang08/django185 | tests/responses/tests.py | 23 | 3713 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.http import HttpResponse
from django.http.response import HttpResponseBase
from django.test import SimpleTestCase
UTF8 = 'utf-8'
ISO88591 = 'iso-8859-1'
class HttpResponseBaseTests(SimpleTestCase):
    """Behaviour of the low-level HttpResponseBase stream interface."""

    def test_closed(self):
        response = HttpResponseBase()
        self.assertIs(response.closed, False)

        response.close()
        self.assertIs(response.closed, True)

    def test_write(self):
        response = HttpResponseBase()
        self.assertIs(response.writable(), False)

        not_writable = 'This HttpResponseBase instance is not writable'
        with self.assertRaisesMessage(IOError, not_writable):
            response.write('asdf')
        with self.assertRaisesMessage(IOError, not_writable):
            response.writelines(['asdf\n', 'qwer\n'])

    def test_tell(self):
        response = HttpResponseBase()
        with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance cannot tell its position'):
            response.tell()

    def test_setdefault(self):
        """
        HttpResponseBase.setdefault() should not change an existing header
        and should be case insensitive.
        """
        response = HttpResponseBase()

        response['Header'] = 'Value'
        response.setdefault('header', 'changed')
        self.assertEqual(response['header'], 'Value')

        response.setdefault('x-header', 'DefaultValue')
        self.assertEqual(response['X-Header'], 'DefaultValue')
class HttpResponseTests(SimpleTestCase):
    """Status, reason phrase and charset handling of HttpResponse."""

    def test_status_code(self):
        response = HttpResponse(status=418)
        self.assertEqual(response.status_code, 418)
        self.assertEqual(response.reason_phrase, "I'M A TEAPOT")

    def test_reason_phrase(self):
        reason = "I'm an anarchist coffee pot on crack."
        response = HttpResponse(status=814, reason=reason)
        self.assertEqual(response.status_code, 814)
        self.assertEqual(response.reason_phrase, reason)

    def test_charset_detection(self):
        """ HttpResponse should parse charset from content_type."""
        self.assertEqual(HttpResponse('ok').charset, settings.DEFAULT_CHARSET)

        response = HttpResponse(charset=ISO88591)
        self.assertEqual(response.charset, ISO88591)
        self.assertEqual(response['Content-Type'], 'text/html; charset=%s' % ISO88591)

        # An explicit charset argument wins over the content_type header.
        mixed = HttpResponse(content_type='text/plain; charset=%s' % UTF8, charset=ISO88591)
        self.assertEqual(mixed.charset, ISO88591)

        # Quoted and unquoted charset parameters are both honoured.
        for content_type in ('text/plain; charset=%s' % ISO88591,
                             'text/plain; charset="%s"' % ISO88591):
            self.assertEqual(HttpResponse(content_type=content_type).charset, ISO88591)

        # Missing or empty charset falls back to the configured default.
        for content_type in ('text/plain; charset=', 'text/plain'):
            self.assertEqual(HttpResponse(content_type=content_type).charset,
                             settings.DEFAULT_CHARSET)

    def test_response_content_charset(self):
        """HttpResponse should encode based on charset."""
        content = "Café :)"
        utf8_content = content.encode(UTF8)
        iso_content = content.encode(ISO88591)

        self.assertContains(HttpResponse(utf8_content), utf8_content)

        # Latin-1 payloads round-trip whether the charset is explicit,
        # omitted entirely, or only the content type is given.
        for kwargs in ({'content_type': 'text/plain; charset=%s' % ISO88591},
                       {},
                       {'content_type': 'text/plain'}):
            self.assertContains(HttpResponse(iso_content, **kwargs), iso_content)
| bsd-3-clause |
kingvuplus/ts-gui-3 | lib/python/Components/Sources/FrontendStatus.py | 25 | 1592 | from Source import Source
from enigma import eTimer
class FrontendStatus(Source):
    """Source that periodically polls a tuner frontend and exposes its
    signal statistics (SNR, AGC, BER, lock state) to the GUI layer."""

    def __init__(self, service_source = None, frontend_source = None, update_interval = 1000):
        Source.__init__(self)
        self.update_interval = update_interval  # poll period in milliseconds
        self.service_source = service_source
        self.frontend_source = frontend_source
        self.invalidate()
        # single-shot timer; updateFrontendStatus() re-arms it after each poll
        self.poll_timer = eTimer()
        self.poll_timer.callback.append(self.updateFrontendStatus)
        self.poll_timer.start(update_interval, True)

    def invalidate(self):
        # reset all published values to "unknown"
        self.snr = self.agc = self.ber = self.lock = self.snr_db = None

    def updateFrontendStatus(self):
        """Poll the frontend once, publish the change, and re-arm the timer."""
        status = self.getFrontendStatus()
        if not status:
            self.invalidate()
        else:
            self.snr = status.get("tuner_signal_quality")
            self.snr_db = status.get("tuner_signal_quality_db")
            self.agc = status.get("tuner_signal_power")
            self.ber = status.get("tuner_bit_error_rate")
            self.lock = status.get("tuner_locked")
        # notify listeners even when the poll failed (values were invalidated)
        self.changed((self.CHANGED_ALL, ))
        self.poll_timer.start(self.update_interval, True)

    def getFrontendStatus(self):
        """Return a status dict from whichever source is configured, else None."""
        if self.frontend_source:
            # direct frontend access: let it fill a fresh dict
            frontend = self.frontend_source()
            dict = { }
            if frontend:
                frontend.getFrontendStatus(dict)
            return dict
        elif self.service_source:
            # indirect access through the currently played service
            service = self.service_source()
            feinfo = service and service.frontendInfo()
            return feinfo and feinfo.getFrontendStatus()
        else:
            return None

    def doSuspend(self, suspended):
        # stop polling while suspended; resume with an immediate poll
        if suspended:
            self.poll_timer.stop()
        else:
            self.updateFrontendStatus()

    def destroy(self):
        self.poll_timer.callback.remove(self.updateFrontendStatus)
        Source.destroy(self)
| gpl-2.0 |
Clever/Diamond | src/collectors/dropwizard/dropwizard.py | 16 | 3333 | # coding=utf-8
"""
Collect [dropwizard](http://dropwizard.codahale.com/) stats for the local node
"""
import urllib2
try:
import json
except ImportError:
import simplejson as json
import diamond.collector
class DropwizardCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(DropwizardCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
})
return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(DropwizardCollector, self).get_default_config()
        config.update({
            # Dropwizard admin interface to query for /metrics
            'host': '127.0.0.1',
            'port': 8081,
            # metric path prefix used by the diamond framework
            'path': 'dropwizard',
        })
        return config
def collect(self):
if json is None:
self.log.error('Unable to import json')
return {}
url = 'http://%s:%i/metrics' % (
self.config['host'], int(self.config['port']))
try:
response = urllib2.urlopen(url)
except urllib2.HTTPError, err:
self.log.error("%s: %s", url, err)
return
try:
result = json.load(response)
except (TypeError, ValueError):
self.log.error("Unable to parse response from elasticsearch as" +
" a json object")
return
metrics = {}
memory = result['jvm']['memory']
mempool = memory['memory_pool_usages']
jvm = result['jvm']
thread_st = jvm['thread-states']
metrics['jvm.memory.totalInit'] = memory['totalInit']
metrics['jvm.memory.totalUsed'] = memory['totalUsed']
metrics['jvm.memory.totalMax'] = memory['totalMax']
metrics['jvm.memory.totalCommitted'] = memory['totalCommitted']
metrics['jvm.memory.heapInit'] = memory['heapInit']
metrics['jvm.memory.heapUsed'] = memory['heapUsed']
metrics['jvm.memory.heapMax'] = memory['heapMax']
metrics['jvm.memory.heapCommitted'] = memory['heapCommitted']
metrics['jvm.memory.heap_usage'] = memory['heap_usage']
metrics['jvm.memory.non_heap_usage'] = memory['non_heap_usage']
metrics['jvm.memory.code_cache'] = mempool['Code Cache']
metrics['jvm.memory.eden_space'] = mempool['PS Eden Space']
metrics['jvm.memory.old_gen'] = mempool['PS Old Gen']
metrics['jvm.memory.perm_gen'] = mempool['PS Perm Gen']
metrics['jvm.memory.survivor_space'] = mempool['PS Survivor Space']
metrics['jvm.daemon_thread_count'] = jvm['daemon_thread_count']
metrics['jvm.thread_count'] = jvm['thread_count']
metrics['jvm.fd_usage'] = jvm['fd_usage']
metrics['jvm.thread_states.timed_waiting'] = thread_st['timed_waiting']
metrics['jvm.thread_states.runnable'] = thread_st['runnable']
metrics['jvm.thread_states.blocked'] = thread_st['blocked']
metrics['jvm.thread_states.waiting'] = thread_st['waiting']
metrics['jvm.thread_states.new'] = thread_st['new']
metrics['jvm.thread_states.terminated'] = thread_st['terminated']
for key in metrics:
self.publish(key, metrics[key])
| mit |
Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/disks/flags.py | 6 | 2470 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and helpers for the compute disks commands."""
import argparse
from googlecloudsdk.command_lib.compute import flags as compute_flags
_DETAILED_SOURCE_SNAPSHOT_HELP = """\
A source snapshot used to create the disks. It is safe to
delete a snapshot after a disk has been created from the
snapshot. In such cases, the disks will no longer reference
the deleted snapshot. To get a list of snapshots in your
current project, run `gcloud compute snapshots list`. A
snapshot from an existing disk can be created using the
`gcloud compute disks snapshot` command. This flag is mutually
exclusive with *--image*.
When using this option, the size of the disks must be at least
as large as the snapshot size. Use *--size* to adjust the
size of the disks.
"""
def MakeDiskArg(plural):
  """Returns the (possibly plural) zonal DISK_NAME resource argument."""
  return compute_flags.ResourceArgument(
      name='DISK_NAME',
      resource_name='disk',
      plural=plural,
      completion_resource_id='compute.disks',
      zonal_collection='compute.disks',
      zone_explanation=compute_flags.ZONE_PROPERTY_EXPLANATION)
def MakeDiskArgZonalOrRegional(plural):
  """Returns a DISK_NAME resource argument accepting zonal or regional disks.

  The regional flavor is kept out of the visible help (region_explanation is
  suppressed) while still being parseable.
  """
  return compute_flags.ResourceArgument(
      name='DISK_NAME',
      resource_name='disk',
      plural=plural,
      completion_resource_id='compute.disks',
      zonal_collection='compute.disks',
      regional_collection='compute.regionDisks',
      zone_explanation=compute_flags.ZONE_PROPERTY_EXPLANATION,
      region_explanation=argparse.SUPPRESS)
# Shared, optional --source-snapshot flag (global collection: snapshots are
# not zonal/regional resources).
SOURCE_SNAPSHOT_ARG = compute_flags.ResourceArgument(
    resource_name='snapshot',
    completion_resource_id='compute.snapshots',
    name='--source-snapshot',
    plural=False,
    required=False,
    global_collection='compute.snapshots',
    short_help='A source snapshot used to create the disks.',
    detailed_help=_DETAILED_SOURCE_SNAPSHOT_HELP,)
| mit |
gencer/sentry | src/sentry/middleware/locale.py | 1 | 2028 | """
sentry.middleware.locale
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import pytz
from django.conf import settings
from django.middleware.locale import LocaleMiddleware
from sentry.models import UserOption
from sentry.utils.safe import safe_execute
class SentryLocaleMiddleware(LocaleMiddleware):
    """LocaleMiddleware variant that applies each user's stored language and
    timezone preferences, while skipping locale handling entirely for
    anonymous static assets so they stay HTTP-cacheable.
    """

    def process_request(self, request):
        # No locale for static media
        # This avoids touching user session, which means we avoid
        # setting `Vary: Cookie` as a response header which will
        # break HTTP caching entirely.
        # NOTE: the double underscore name-mangles this attribute to
        # _SentryLocaleMiddleware__skip_caching on the instance.
        self.__skip_caching = request.path_info.startswith(settings.ANONYMOUS_STATIC_PREFIXES)
        if self.__skip_caching:
            return
        # Best effort: a failure loading preferences must not break the request.
        safe_execute(self.load_user_conf, request, _with_transaction=False)
        super(SentryLocaleMiddleware, self).process_request(request)

    def load_user_conf(self, request):
        # Copy the authenticated user's saved language onto the session and
        # the saved timezone onto the request (consumed elsewhere in Sentry).
        if not request.user.is_authenticated():
            return
        language = UserOption.objects.get_value(user=request.user, key='language')
        if language:
            request.session['django_language'] = language
        timezone = UserOption.objects.get_value(user=request.user, key='timezone')
        if timezone:
            request.timezone = pytz.timezone(timezone)

    def process_response(self, request, response):
        # If static bound, we don't want to run the normal process_response since this
        # adds an extra `Vary: Accept-Language`. Static files don't need this and is
        # less effective for caching.
        try:
            if self.__skip_caching:
                return response
        except AttributeError:
            # catch ourselves in case __skip_caching never got set.
            # It's possible that process_request never ran.
            pass
        return super(SentryLocaleMiddleware, self).process_response(request, response)
| bsd-3-clause |
ychen820/microblog | src/lib/wtforms/fields/simple.py | 51 | 1464 | from .. import widgets
from .core import StringField, BooleanField
# Public API of this module: the "simple" convenience field types.
__all__ = (
    'BooleanField', 'TextAreaField', 'PasswordField', 'FileField',
    'HiddenField', 'SubmitField', 'TextField'
)
class TextField(StringField):
    """
    Legacy alias for StringField

    Retained for backwards compatibility; prefer StringField in new code.
    """
class TextAreaField(TextField):
    """
    This field represents an HTML ``<textarea>`` and can be used to take
    multi-line input.
    """
    # Same coercion as TextField; only the rendered widget differs.
    widget = widgets.TextArea()
class PasswordField(TextField):
    """
    A StringField, except renders an ``<input type="password">``.

    Also, whatever value is accepted by this field will not be rendered back
    to the browser like normal fields.
    """
    widget = widgets.PasswordInput()
class FileField(TextField):
    """
    Can render a file-upload field. Will take any passed filename value, if
    any is sent by the browser in the post params. This field will NOT
    actually handle the file upload portion, as wtforms does not deal with
    individual frameworks' file handling capabilities.
    """
    # Renders <input type="file">; reading the uploaded bytes is left to the
    # web framework.
    widget = widgets.FileInput()
class HiddenField(TextField):
    """
    HiddenField is a convenience for a StringField with a HiddenInput widget.

    It will render as an ``<input type="hidden">`` but otherwise coerce to a string.
    """
    widget = widgets.HiddenInput()
class SubmitField(BooleanField):
    """
    Represents an ``<input type="submit">``.  This allows checking if a given
    submit button has been pressed.
    """
    # Inherits BooleanField's coercion: the field's data is truthy iff the
    # button's value was present in the submitted form.
    widget = widgets.SubmitInput()
| bsd-3-clause |
40423117/2017springcd_hw | plugin/liquid_tags/giphy.py | 273 | 2433 | """
Giphy Tag
---------
This implements a Liquid-style Giphy tag for Pelican.
IMPORTANT: You have to request a production API key from Giphy `here <https://api.giphy.com/submit>`_.
For initial testing you can also use the public beta key available `here <https://github.com/giphy/GiphyAPI>`_.
Syntax
------
{% giphy gif_id ["alt text"|'alt text'] %}
Example
-------
{% giphy aMSJFS6oFX0fC 'ive had some free time' %}
Output
------
<a href="http://giphy.com/gifs/veronica-mars-aMSJFS6oFX0fC"><img src="http://media4.giphy.com/media/aMSJFS6oFX0fC/giphy.gif" alt="ive had some free time"></a>
"""
import json
import re
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from .mdx_liquid_tags import LiquidTags
SYNTAX = '''{% giphy gif_id ["alt text"|'alt text'] %}'''
# gif_id: one or more non-whitespace characters; alt: optional, in matching
# single or double quotes (group 2 is back-referenced by \2).
# Simplified from the original character class [\S+]+ — the '+' inside the
# class was redundant ('+' is already non-whitespace), so \S+ matches the
# exact same strings.
GIPHY = re.compile(r'''(?P<gif_id>\S+)(?:\s+(['"]{0,1})(?P<alt>.+)(\2))?''')
def get_gif(api_key, gif_id):
    '''Return the Giphy API response for *gif_id* as a dict.

    Raises an HTTP error from urllib on request failure and ValueError if
    the body is not valid JSON.
    '''
    url = 'http://api.giphy.com/v1/gifs/{}?api_key={}'.format(gif_id, api_key)
    r = urlopen(url)
    try:
        return json.loads(r.read().decode('utf-8'))
    finally:
        # urlopen handles are not context managers on Python 2, so close
        # explicitly instead of using ``with`` (fixes a socket leak).
        r.close()
def create_html(api_key, attrs):
    '''Render the ``<a><img></a>`` snippet for the gif described by *attrs*.'''
    gif = get_gif(api_key, attrs['gif_id'])
    if 'alt' not in attrs:
        # No caption supplied: credit the gif's original source instead.
        attrs['alt'] = 'source: {}'.format(gif['data']['source'])
    pieces = [
        '<a href="{}">'.format(gif['data']['url']),
        '<img src="{}" alt="{}">'.format(
            gif['data']['images']['original']['url'], attrs['alt']),
        '</a>',
    ]
    return ''.join(pieces)
def main(api_key, markup):
    '''Parse *markup* with the GIPHY regex and return the rendered HTML.

    Raises ValueError when the markup does not match the expected syntax.
    '''
    match = GIPHY.search(markup)
    if not match:
        # Guard clause replaces the old else-branch; also drops the dead
        # ``attrs = None`` initialization the raise made unreachable.
        raise ValueError('Error processing input. '
                         'Expected syntax: {}'.format(SYNTAX))
    # Keep only the named groups that actually matched ('alt' is optional).
    attrs = dict(
        [(key, value.strip())
         for (key, value) in match.groupdict().items() if value])
    return create_html(api_key, attrs)
@LiquidTags.register('giphy')
def giphy(preprocessor, tag, markup):
    # Liquid-tag entry point: pull GIPHY_API_KEY from the Pelican config and
    # delegate parsing/rendering to main().
    api_key = preprocessor.configs.getConfig('GIPHY_API_KEY')
    if api_key is None:
        raise ValueError('Please set GIPHY_API_KEY.')
    return main(api_key, markup)
# ---------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
| agpl-3.0 |
timoschwarzer/blendworks | BlendWorks Server/python/Lib/test/test_property.py | 109 | 7815 | # Test case for property
# more tests are in test_descr
import sys
import unittest
from test.support import run_unittest
class PropertyBase(Exception):
    # Root of the marker-exception hierarchy used by SubClass to prove which
    # property accessor actually ran.
    pass
class PropertyGet(PropertyBase):
    # Raised by SubClass's getter.
    pass
class PropertySet(PropertyBase):
    # Raised by SubClass's setter.
    pass
class PropertyDel(PropertyBase):
    # Raised by SubClass's deleter.
    pass
class BaseClass(object):
    # Fixture: a classic read/write/delete property built with the
    # @property / @spam.setter / @spam.deleter decorators.  The getter
    # docstring "BaseClass.getter" is asserted verbatim by the tests.
    def __init__(self):
        self._spam = 5

    @property
    def spam(self):
        """BaseClass.getter"""
        return self._spam

    @spam.setter
    def spam(self, value):
        self._spam = value

    @spam.deleter
    def spam(self):
        del self._spam
class SubClass(BaseClass):
    # Fixture: rebinds all three accessors of the inherited property so each
    # access raises a distinct marker exception carrying the current value.
    @BaseClass.spam.getter
    def spam(self):
        """SubClass.getter"""
        raise PropertyGet(self._spam)

    @spam.setter
    def spam(self, value):
        raise PropertySet(self._spam)

    @spam.deleter
    def spam(self):
        raise PropertyDel(self._spam)
class PropertyDocBase(object):
    # Fixture: property created with an explicit doc= argument rather than a
    # getter docstring.
    _spam = 1
    def _get_spam(self):
        return self._spam
    spam = property(_get_spam, doc="spam spam spam")
class PropertyDocSub(PropertyDocBase):
    # Fixture: when the base property has an explicit doc=, rebinding the
    # getter does NOT replace that docstring with the new getter's.
    @PropertyDocBase.spam.getter
    def spam(self):
        """The decorator does not use this doc string"""
        return self._spam
class PropertySubNewGetter(BaseClass):
    # Fixture: overriding only the getter of an inherited property; the new
    # getter's docstring replaces BaseClass's.
    @BaseClass.spam.getter
    def spam(self):
        """new docstring"""
        return 5
class PropertyNewGetter(object):
    # Fixture: a second @spam.getter replaces both the getter and the
    # property docstring ("new docstring" wins over "original docstring").
    @property
    def spam(self):
        """original docstring"""
        return 1

    @spam.getter
    def spam(self):
        """new docstring"""
        return 8
class PropertyTests(unittest.TestCase):
    """Exercises the property decorator protocol: getter/setter/deleter
    rebinding, docstring propagation, and the __isabstractmethod__
    descriptor, using the fixture classes defined above."""

    def test_property_decorator_baseclass(self):
        # see #1620
        base = BaseClass()
        self.assertEqual(base.spam, 5)
        self.assertEqual(base._spam, 5)
        base.spam = 10
        self.assertEqual(base.spam, 10)
        self.assertEqual(base._spam, 10)
        delattr(base, "spam")
        self.assertTrue(not hasattr(base, "spam"))
        self.assertTrue(not hasattr(base, "_spam"))
        base.spam = 20
        self.assertEqual(base.spam, 20)
        self.assertEqual(base._spam, 20)

    def test_property_decorator_subclass(self):
        # see #1620
        sub = SubClass()
        self.assertRaises(PropertyGet, getattr, sub, "spam")
        self.assertRaises(PropertySet, setattr, sub, "spam", None)
        self.assertRaises(PropertyDel, delattr, sub, "spam")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_decorator_subclass_doc(self):
        sub = SubClass()
        self.assertEqual(sub.__class__.spam.__doc__, "SubClass.getter")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_decorator_baseclass_doc(self):
        base = BaseClass()
        self.assertEqual(base.__class__.spam.__doc__, "BaseClass.getter")

    def test_property_decorator_doc(self):
        # An explicit doc= argument survives getter rebinding in a subclass.
        base = PropertyDocBase()
        sub = PropertyDocSub()
        self.assertEqual(base.__class__.spam.__doc__, "spam spam spam")
        self.assertEqual(sub.__class__.spam.__doc__, "spam spam spam")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_getter_doc_override(self):
        newgettersub = PropertySubNewGetter()
        self.assertEqual(newgettersub.spam, 5)
        self.assertEqual(newgettersub.__class__.spam.__doc__, "new docstring")
        newgetter = PropertyNewGetter()
        self.assertEqual(newgetter.spam, 8)
        self.assertEqual(newgetter.__class__.spam.__doc__, "new docstring")

    def test_property___isabstractmethod__descriptor(self):
        # The truth value of the getter's __isabstractmethod__ determines the
        # property's abstractness.
        for val in (True, False, [], [1], '', '1'):
            class C(object):
                def foo(self):
                    pass
                foo.__isabstractmethod__ = val
                foo = property(foo)
            self.assertIs(C.foo.__isabstractmethod__, bool(val))

        # check that the property's __isabstractmethod__ descriptor does the
        # right thing when presented with a value that fails truth testing:
        # NOTE(review): __nonzero__ is the Python 2 truth hook; on Python 3
        # the ValueError still fires through the __len__ alias below.
        class NotBool(object):
            def __nonzero__(self):
                raise ValueError()
            __len__ = __nonzero__
        with self.assertRaises(ValueError):
            class C(object):
                def foo(self):
                    pass
                foo.__isabstractmethod__ = NotBool()
                foo = property(foo)
            C.foo.__isabstractmethod__
# Issue 5890: subclasses of property do not preserve method __doc__ strings
class PropertySub(property):
    """This is a subclass of property"""


class PropertySubSlots(property):
    """This is a subclass of property that defines __slots__"""
    __slots__ = ()


class PropertySubclassTests(unittest.TestCase):
    """Checks that property subclasses copy getter docstrings correctly."""

    def test_slots_docstring_copy_exception(self):
        # A property subclass with __slots__ has no writable __doc__, so
        # copying the getter docstring must raise AttributeError.
        try:
            class Foo(object):
                @PropertySubSlots
                def spam(self):
                    """Trying to copy this docstring will raise an exception"""
                    return 1
        except AttributeError:
            pass
        else:
            raise Exception("AttributeError not raised")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_docstring_copy(self):
        class Foo(object):
            @PropertySub
            def spam(self):
                """spam wrapped in property subclass"""
                return 1
        self.assertEqual(
            Foo.spam.__doc__,
            "spam wrapped in property subclass")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_setter_copies_getter_docstring(self):
        class Foo(object):
            def __init__(self): self._spam = 1
            @PropertySub
            def spam(self):
                """spam wrapped in property subclass"""
                return self._spam
            @spam.setter
            def spam(self, value):
                """this docstring is ignored"""
                self._spam = value
        foo = Foo()
        self.assertEqual(foo.spam, 1)
        foo.spam = 2
        self.assertEqual(foo.spam, 2)
        self.assertEqual(
            Foo.spam.__doc__,
            "spam wrapped in property subclass")
        class FooSub(Foo):
            @Foo.spam.setter
            def spam(self, value):
                """another ignored docstring"""
                self._spam = 'eggs'
        foosub = FooSub()
        self.assertEqual(foosub.spam, 1)
        foosub.spam = 7
        self.assertEqual(foosub.spam, 'eggs')
        self.assertEqual(
            FooSub.spam.__doc__,
            "spam wrapped in property subclass")

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_property_new_getter_new_docstring(self):
        class Foo(object):
            @PropertySub
            def spam(self):
                """a docstring"""
                return 1
            @spam.getter
            def spam(self):
                """a new docstring"""
                return 2
        self.assertEqual(Foo.spam.__doc__, "a new docstring")
        class FooBase(object):
            @PropertySub
            def spam(self):
                """a docstring"""
                return 1
        class Foo2(FooBase):
            @FooBase.spam.getter
            def spam(self):
                """a new docstring"""
                return 2
        # BUG FIX: the original re-checked Foo.spam.__doc__ here; the point
        # of this second half is the derived class Foo2 (matches the later
        # upstream CPython fix).
        self.assertEqual(Foo2.spam.__doc__, "a new docstring")
def test_main():
    # regrtest-style entry point: run both test groups in one pass.
    run_unittest(PropertyTests, PropertySubclassTests)

if __name__ == '__main__':
    test_main()
| gpl-2.0 |
OSSystems/jenkins-job-builder | tests/views/test_views.py | 9 | 1147 | # Copyright 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.import os
import os
from jenkins_jobs.modules import view_list
from jenkins_jobs.modules import view_pipeline
from tests import base
class TestCaseModuleViewList(base.BaseScenariosTestCase):
    # Data-driven tests for the "list" view module: every fixture pair found
    # under fixtures/ is expanded into one scenario by base.get_scenarios().
    fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
    scenarios = base.get_scenarios(fixtures_path)
    klass = view_list.List
class TestCaseModuleViewPipeline(base.BaseScenariosTestCase):
    # Same scenario expansion as above, but exercising the "pipeline" view
    # module against the shared fixtures directory.
    fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
    scenarios = base.get_scenarios(fixtures_path)
    klass = view_pipeline.Pipeline
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.