repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
MER-GROUP/intellij-community
python/helpers/py2only/docutils/languages/en.py
246
1848
# $Id: en.py 4564 2006-05-21 20:44:42Z wiemann $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ English-language mappings for language-dependent features of Docutils. """ __docformat__ = 'reStructuredText' labels = { # fixed: language-dependent 'author': 'Author', 'authors': 'Authors', 'organization': 'Organization', 'address': 'Address', 'contact': 'Contact', 'version': 'Version', 'revision': 'Revision', 'status': 'Status', 'date': 'Date', 'copyright': 'Copyright', 'dedication': 'Dedication', 'abstract': 'Abstract', 'attention': 'Attention!', 'caution': 'Caution!', 'danger': '!DANGER!', 'error': 'Error', 'hint': 'Hint', 'important': 'Important', 'note': 'Note', 'tip': 'Tip', 'warning': 'Warning', 'contents': 'Contents'} """Mapping of node class name to label text.""" bibliographic_fields = { # language-dependent: fixed 'author': 'author', 'authors': 'authors', 'organization': 'organization', 'address': 'address', 'contact': 'contact', 'version': 'version', 'revision': 'revision', 'status': 'status', 'date': 'date', 'copyright': 'copyright', 'dedication': 'dedication', 'abstract': 'abstract'} """English (lowcased) to canonical name mapping for bibliographic fields.""" author_separators = [';', ','] """List of separator strings for the 'Authors' bibliographic field. Tried in order."""
apache-2.0
yasserglez/tagfs
packages/tagfs/contrib/django/utils/datastructures.py
4
14127
class MergeDict(object): """ A simple class for creating new "virtual" dictionaries that actually look up values in more than one dictionary, passed in the constructor. If a key appears in more than one of the given dictionaries, only the first occurrence will be used. """ def __init__(self, *dicts): self.dicts = dicts def __getitem__(self, key): for dict_ in self.dicts: try: return dict_[key] except KeyError: pass raise KeyError def __copy__(self): return self.__class__(*self.dicts) def get(self, key, default=None): try: return self[key] except KeyError: return default def getlist(self, key): for dict_ in self.dicts: if key in dict_.keys(): return dict_.getlist(key) return [] def items(self): item_list = [] for dict_ in self.dicts: item_list.extend(dict_.items()) return item_list def has_key(self, key): for dict_ in self.dicts: if key in dict_: return True return False __contains__ = has_key def copy(self): """Returns a copy of this object.""" return self.__copy__() class SortedDict(dict): """ A dictionary that keeps its keys in the order in which they're inserted. 
""" def __new__(cls, *args, **kwargs): instance = super(SortedDict, cls).__new__(cls, *args, **kwargs) instance.keyOrder = [] return instance def __init__(self, data=None): if data is None: data = {} super(SortedDict, self).__init__(data) if isinstance(data, dict): self.keyOrder = data.keys() else: self.keyOrder = [] for key, value in data: if key not in self.keyOrder: self.keyOrder.append(key) def __deepcopy__(self, memo): from copy import deepcopy return self.__class__([(key, deepcopy(value, memo)) for key, value in self.iteritems()]) def __setitem__(self, key, value): super(SortedDict, self).__setitem__(key, value) if key not in self.keyOrder: self.keyOrder.append(key) def __delitem__(self, key): super(SortedDict, self).__delitem__(key) self.keyOrder.remove(key) def __iter__(self): for k in self.keyOrder: yield k def pop(self, k, *args): result = super(SortedDict, self).pop(k, *args) try: self.keyOrder.remove(k) except ValueError: # Key wasn't in the dictionary in the first place. No problem. 
pass return result def popitem(self): result = super(SortedDict, self).popitem() self.keyOrder.remove(result[0]) return result def items(self): return zip(self.keyOrder, self.values()) def iteritems(self): for key in self.keyOrder: yield key, super(SortedDict, self).__getitem__(key) def keys(self): return self.keyOrder[:] def iterkeys(self): return iter(self.keyOrder) def values(self): return map(super(SortedDict, self).__getitem__, self.keyOrder) def itervalues(self): for key in self.keyOrder: yield super(SortedDict, self).__getitem__(key) def update(self, dict_): for k, v in dict_.items(): self.__setitem__(k, v) def setdefault(self, key, default): if key not in self.keyOrder: self.keyOrder.append(key) return super(SortedDict, self).setdefault(key, default) def value_for_index(self, index): """Returns the value of the item at the given zero-based index.""" return self[self.keyOrder[index]] def insert(self, index, key, value): """Inserts the key, value pair before the item with the given index.""" if key in self.keyOrder: n = self.keyOrder.index(key) del self.keyOrder[n] if n < index: index -= 1 self.keyOrder.insert(index, key) super(SortedDict, self).__setitem__(key, value) def copy(self): """Returns a copy of this object.""" # This way of initializing the copy means it works for subclasses, too. obj = self.__class__(self) obj.keyOrder = self.keyOrder[:] return obj def __repr__(self): """ Replaces the normal dict.__repr__ with a version that returns the keys in their sorted order. """ return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()]) def clear(self): super(SortedDict, self).clear() self.keyOrder = [] class MultiValueDictKeyError(KeyError): pass class MultiValueDict(dict): """ A subclass of dictionary customized to handle multiple values for the same key. 
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']}) >>> d['name'] 'Simon' >>> d.getlist('name') ['Adrian', 'Simon'] >>> d.get('lastname', 'nonexistent') 'nonexistent' >>> d.setlist('lastname', ['Holovaty', 'Willison']) This class exists to solve the irritating problem raised by cgi.parse_qs, which returns a list for every key, even though most Web forms submit single name-value pairs. """ def __init__(self, key_to_list_mapping=()): super(MultiValueDict, self).__init__(key_to_list_mapping) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, super(MultiValueDict, self).__repr__()) def __getitem__(self, key): """ Returns the last data value for this key, or [] if it's an empty list; raises KeyError if not found. """ try: list_ = super(MultiValueDict, self).__getitem__(key) except KeyError: raise MultiValueDictKeyError, "Key %r not found in %r" % (key, self) try: return list_[-1] except IndexError: return [] def __setitem__(self, key, value): super(MultiValueDict, self).__setitem__(key, [value]) def __copy__(self): return self.__class__(super(MultiValueDict, self).items()) def __deepcopy__(self, memo=None): import copy if memo is None: memo = {} result = self.__class__() memo[id(self)] = result for key, value in dict.items(self): dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def __getstate__(self): obj_dict = self.__dict__.copy() obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self]) return obj_dict def __setstate__(self, obj_dict): data = obj_dict.pop('_data', {}) for k, v in data.items(): self.setlist(k, v) self.__dict__.update(obj_dict) def get(self, key, default=None): """ Returns the last data value for the passed key. If key doesn't exist or value is an empty list, then default is returned. """ try: val = self[key] except KeyError: return default if val == []: return default return val def getlist(self, key): """ Returns the list of values for the passed key. 
If key doesn't exist, then an empty list is returned. """ try: return super(MultiValueDict, self).__getitem__(key) except KeyError: return [] def setlist(self, key, list_): super(MultiValueDict, self).__setitem__(key, list_) def setdefault(self, key, default=None): if key not in self: self[key] = default return self[key] def setlistdefault(self, key, default_list=()): if key not in self: self.setlist(key, default_list) return self.getlist(key) def appendlist(self, key, value): """Appends an item to the internal list associated with key.""" self.setlistdefault(key, []) super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value]) def items(self): """ Returns a list of (key, value) pairs, where value is the last item in the list associated with the key. """ return [(key, self[key]) for key in self.keys()] def iteritems(self): """ Yields (key, value) pairs, where value is the last item in the list associated with the key. """ for key in self.keys(): yield (key, self[key]) def lists(self): """Returns a list of (key, list) pairs.""" return super(MultiValueDict, self).items() def iterlists(self): """Yields (key, list) pairs.""" return super(MultiValueDict, self).iteritems() def values(self): """Returns a list of the last value on every key list.""" return [self[key] for key in self.keys()] def itervalues(self): """Yield the last value on every key list.""" for key in self.iterkeys(): yield self[key] def copy(self): """Returns a copy of this object.""" return self.__deepcopy__() def update(self, *args, **kwargs): """ update() extends rather than replaces existing key lists. Also accepts keyword args. 
""" if len(args) > 1: raise TypeError, "update expected at most 1 arguments, got %d" % len(args) if args: other_dict = args[0] if isinstance(other_dict, MultiValueDict): for key, value_list in other_dict.lists(): self.setlistdefault(key, []).extend(value_list) else: try: for key, value in other_dict.items(): self.setlistdefault(key, []).append(value) except TypeError: raise ValueError, "MultiValueDict.update() takes either a MultiValueDict or dictionary" for key, value in kwargs.iteritems(): self.setlistdefault(key, []).append(value) class DotExpandedDict(dict): """ A special dictionary constructor that takes a dictionary in which the keys may contain dots to specify inner dictionaries. It's confusing, but this example should make sense. >>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \ 'person.1.lastname': ['Willison'], \ 'person.2.firstname': ['Adrian'], \ 'person.2.lastname': ['Holovaty']}) >>> d {'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}} >>> d['person'] {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}} >>> d['person']['1'] {'lastname': ['Willison'], 'firstname': ['Simon']} # Gotcha: Results are unpredictable if the dots are "uneven": >>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1}) {'c': 1} """ def __init__(self, key_to_list_mapping): for k, v in key_to_list_mapping.items(): current = self bits = k.split('.') for bit in bits[:-1]: current = current.setdefault(bit, {}) # Now assign value to current position try: current[bits[-1]] = v except TypeError: # Special-case if current isn't a dict. current = {bits[-1]: v} class ImmutableList(tuple): """ A tuple-like object that raises useful errors when it is asked to mutate. Example:: >>> a = ImmutableList(range(5), warning="You cannot mutate this.") >>> a[3] = '4' Traceback (most recent call last): ... AttributeError: You cannot mutate this. 
""" def __new__(cls, *args, **kwargs): if 'warning' in kwargs: warning = kwargs['warning'] del kwargs['warning'] else: warning = 'ImmutableList object is immutable.' self = tuple.__new__(cls, *args, **kwargs) self.warning = warning return self def complain(self, *wargs, **kwargs): if isinstance(self.warning, Exception): raise self.warning else: raise AttributeError, self.warning # All list mutation functions complain. __delitem__ = complain __delslice__ = complain __iadd__ = complain __imul__ = complain __setitem__ = complain __setslice__ = complain append = complain extend = complain insert = complain pop = complain remove = complain sort = complain reverse = complain class DictWrapper(dict): """ Wraps accesses to a dictionary so that certain values (those starting with the specified prefix) are passed through a function before being returned. The prefix is removed before looking up the real value. Used by the SQL construction code to ensure that values are correctly quoted before being used. """ def __init__(self, data, func, prefix): super(DictWrapper, self).__init__(data) self.func = func self.prefix = prefix def __getitem__(self, key): """ Retrieves the real value after stripping the prefix string (if present). If the prefix is present, pass the value through self.func before returning, otherwise return the raw value. """ if key.startswith(self.prefix): use_func = True key = key[len(self.prefix):] else: use_func = False value = super(DictWrapper, self).__getitem__(key) if use_func: return self.func(value) return value
mit
verse/verse-entities
vrsent/verse_node.py
1
21666
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### """ This module includes class VerseNode representing verse node """ import verse as vrs from . import verse_entity def find_node_subclass(cls, custom_type): """ This method tries to find subclass of class with specified custom_type in __subclasses__ """ sub_cls = cls for sub_cls_it in cls.__subclasses__(): # Try to get attribute custom_type sub_cls_custom_type = getattr(sub_cls_it, 'custom_type', None) # Raise error, when developer created subclass without custom_type if sub_cls_custom_type is None: raise AttributeError( 'Subclass of VerseNode: ' + str(sub_cls_it) + ' does not have attribute custom_type') elif sub_cls_custom_type == custom_type: # When subclass is found, then store it in dictionary of subclasses sub_cls = cls.subclasses[custom_type] = verse_entity.last_subclass(sub_cls_it) break return sub_cls def custom_type_subclass(custom_type): """ This method tries to return VerseNode subclass with specified custom type. Otherwise it returns VerseNode class. 
""" # Default class is VerseNode and it is returned, when there is not any # subclass with this custom_type try: sub_cls = VerseNode.subclasses[custom_type] except KeyError: sub_cls = find_node_subclass(VerseNode, custom_type) else: # Try to find last subclass with specified custom_type sub_cls = verse_entity.last_subclass(sub_cls) return sub_cls class VerseNode(verse_entity.VerseEntity): """ Class representing Verse node """ # The dictionary of subclasses. When subclass of VerseNode is created, # then this subclass has to have unique custom_type subclasses = {} # This is used in subclasses of VerseNode custom_type = None def __new__(cls, *args, **kwargs): """ Pre-constructor of VerseNode. It can return class defined by custom_type of received command or corresponding node. """ if len(cls.__subclasses__()) == 0: return super(VerseNode, cls).__new__(cls) else: try: custom_type = kwargs['custom_type'] except KeyError: # Return class of object, when VerseNode() was # called without custom_type if cls.__name__ == 'VerseNode': return super(VerseNode, cls).__new__(cls) else: # When subclass is called without custom_type, then # generate custom type from class name custom_type = verse_entity.name_to_custom_type(cls.__name__) # Add generated custom_type to keyed arguments kwargs['custom_type'] = custom_type # Assign generated cutom_type to class member cls.custom_type = custom_type try: sub_cls = cls.subclasses[custom_type] except KeyError: # When instance of this class has never been created, then try # to find corresponding subclass. 
sub_cls = find_node_subclass(cls, custom_type) return super(VerseNode, sub_cls).__new__(sub_cls) def __init__(self, session, node_id=None, parent=None, user_id=None, custom_type=None): """ Constructor of VerseNode """ # Check if this object is created with right custom_type if self.__class__.custom_type is not None and \ custom_type is not None: assert self.__class__.custom_type == custom_type super(VerseNode, self).__init__(custom_type=custom_type) self.session = session self.id = node_id # When parent node is set, then it has to be subclass of VerseNode if parent is not None: if issubclass(parent.__class__, VerseNode) is not True: raise TypeError("Node is not subclass of model.VerseNode") self._parent_node = parent self.user_id = user_id self.child_nodes = {} self.tag_groups = {} self.tg_queue = {} self.layers = {} self.layer_queue = {} self._prio = vrs.DEFAULT_PRIORITY self.perms = {} self._lock_state = 'UNLOCKED' self.locker_id = None # Change state and send commands self._create() # When node_id is not set, then: if node_id is None: # Try to find queue of custom_type of node or create new one try: node_queue = self.session.my_node_queues[custom_type] except KeyError: node_queue = [] self.session.my_node_queues[self.custom_type] = node_queue # Add this object to the queue node_queue.insert(0, self) else: self.session.nodes[node_id] = self if self._parent_node is not None: self._parent_node.child_nodes[node_id] = self def __str__(self): """ This method print content of VerseNode """ parent_id = str(self._parent_node.id) if self._parent_node is not None else 'None' return 'VerseNode, id: ' + \ str(self.id) + \ ', owner_id: ' + \ str(self.user_id) + \ ', parent_id: ' + \ parent_id + \ ', prio: ' + \ str(self._prio) + \ ', locked: ' + \ str(self.locked) + \ ', custom_type: ' + \ str(self.custom_type) def destroy(self, send_destroy_cmd=True): """ This method try to send destroy command """ # Change state and send commands self._destroy() def clean(self): """ This 
method try to destroy all data in this object """ # Delete all child nodes, but do not send destroy command # for these nodes for child_node in self.child_nodes.values(): child_node.parent = None child_node.clean() self.child_nodes.clear() # Remove reference on this node if self.id is not None: # Remove this node from dictionary of nodes self.session.nodes.pop(self.id) # Remove this node from dictionary of child nodes if self._parent_node is not None: try: self._parent_node.child_nodes.pop(self.id) except KeyError: pass self._parent_node = None # Clear tag groups self.tag_groups.clear() self.tg_queue.clear() # Clear layers self.layers.clear() self.layer_queue.clear() def _send_create(self): """ This method send node create command to Verse server """ if self.session.state == 'CONNECTED' and self.id is None: self.session.send_node_create(self._prio, self.custom_type) def _send_destroy(self): """ This method send destroy command to Verse server """ if self.session.state == 'CONNECTED' and self.id is not None: self.session.send_node_destroy(self._prio, self.id) def subscribe(self): """ This method tries to send node_subscribe command to Verse server """ if self.session.state == 'CONNECTED' and \ self.id is not None: self.session.send_node_subscribe(self._prio, self.id, self.version, self.crc32) self.subscribed = True return self.subscribed def unsubscribe(self): """ This method tries to send node_unsubscribe command to Verse server """ if self.session.state == 'CONNECTED' and \ self.id is not None: # TODO: Add request for versioning, when verse will support it self.session.send_node_unsubscribe(self._prio, self.id, 0) # This value will be false in all situations self.subscribed = False return self.subscribed @property def parent(self): """ This is getter of parent node """ return self._parent_node @parent.setter def parent(self, parent): """ This is setter of parent node """ self._parent_node = parent if self.session.state == 'CONNECTED' and self.id is not None: 
self.session.send_node_link(self._prio, parent.id, self.id) @property def prio(self): """ This is getter of node priority """ return self._prio @prio.setter def prio(self, new_prio): """ This is setter of node priority """ self._prio = new_prio if self.id is not None: self.session.send_node_link(self._prio, self.id, self._prio) @property def locker(self): """ This is getter of current locker of this node """ if self.locker_id is None: return None else: try: locker = self.session.avatars[self.locker_id] except KeyError: # Verse node of this avatar hasn't been received yet return None else: return locker @property def locked(self): """ Getter of lock state. """ if self._lock_state == 'LOCKED': return True else: return False @property def locked_by_me(self): """ :return: True, when this node is locked by current client """ if self.locker_id == self.session.avatar_id: return True else: return False def lock(self): """ This method tries to lock this node """ self._lock_state = 'LOCKING' if self.session.state == 'CONNECTED' and self.id is not None: self.session.send_node_lock(self._prio, self.id) def unlock(self): """ This method tries to unlock this node """ if self.locker_id != self.session.avatar_id: raise TypeError('Node locked by other user can not be unlocked') self._lock_state = 'UNLOCKING' if self.session.state == 'CONNECTED' and \ self.id is not None and \ self.locker_id == self.session.avatar_id: self.session.send_node_unlock(self._prio, self.id) @property def owner(self): """ This getter of current owner of this node """ try: owner = self.session.users[self.user_id] except KeyError: return None else: return owner @property def owned_by_me(self): """ :param self: :return: """ if self.session.user_id == self.user_id: return True else: return False def can_read(self, user_id=None): """ This method returns True, when user with user_id can can read this node. Otherwise it returns False. 
""" if user_id is None and self.owned_by_me is True: return True elif user_id == self.user_id: return True else: try: perm = self.perms[user_id] except KeyError: try: perm = self.perms[vrs.OTHER_USERS_UID] except KeyError: return False return True if perm & vrs.PERM_NODE_READ else False def can_write(self, user_id=None): """ This method returns True, when user with user_id can can write to this node. Otherwise it returns False. """ if user_id is None and self.owned_by_me is True: return True elif user_id == self.user_id: return True else: try: perm = self.perms[user_id] except KeyError: try: perm = self.perms[vrs.OTHER_USERS_UID] except KeyError: return False return True if perm & vrs.PERM_NODE_WRITE else False @owner.setter def owner(self, owner): """ This is setter of current owner of this node. This setter will have effect, when client is owner of this node. In this situation corresponding verse command will be sent to Verse server. """ if self.user_id == self.session.user_id: # Set new ID fo owner self.user_id = owner.id # Send command self.session.send_node_owner(self._prio, self.id, self.user_id) @classmethod def cb_receive_node_create(cls, session, node_id, parent_id, user_id, custom_type): """ Static method of class that should be called, when corresponding callback method of class is called. This method moves node from queue to the dictionary of nodes and send pending commands. """ send_pending_data = False # Try to find parent node try: parent_node = session.nodes[parent_id] except KeyError: parent_node = None # Is it node created by this client? 
if parent_id == session.avatar_id and user_id == session.user_id: node_queue = session.my_node_queues[custom_type] # If this is node created by this client, then remove it from # the queue of nodes and add it to the dictionary of nodes node = node_queue.pop() session.nodes[node_id] = node # Set node ID, when it is known node.id = node_id # Set user id if node.user_id is None: node.user_id = user_id # Set parent node if node.parent is None: node.parent = parent_node # And add node to the dictionary of child nodes parent_node.child_nodes[node_id] = node # Send pending data (tag groups, layers, new paren) send_pending_data = True else: # Was this node already created? # e.g.: avatar node, user node, parent of scene node, etc. try: node = session.nodes[node_id] except KeyError: node = VerseNode(session=session, node_id=node_id, parent=parent_node, user_id=user_id, custom_type=custom_type) else: send_pending_data = True # Change state of node node.cb_receive_create() # When this node was created by this client, then it is necessary to send # create/set command for node priority, tag_groups and layers if send_pending_data is True: # When node priority is different from default node priority if node.prio != vrs.DEFAULT_PRIORITY: session.send_node_prio(node.prio, node.id, node.prio) # When parent node is different then current parent, then send node_link # command to Verse server if node.parent is not None and parent_id != node.parent.id: session.send_node_link(node.prio, node.parent.id, node.id) # Add reference to list of child nodes to parent node now, # because it is possible to do now (node id is known) node.parent.child_nodes[node.id] = node # Try to lock node, when client requested locking of node if node._lock_state == 'LOCKING': session.send_node_lock(node.prio, node.id) # Send tag_group_create command for pending tag groups for custom_type in node.tg_queue.keys(): session.send_taggroup_create(node.prio, node.id, custom_type) # Send layer_create command for pending 
layers without parent layer # This module will send automatically layer_create command for layers # with parent layers, when layer_create command of their parent layers # will be received for layer in node.layer_queue.values(): if layer.parent_layer is None: session.send_layer_create( node.prio, node.id, -1, layer.data_type, layer.count, layer.custom_type) # Return reference at node return node @classmethod def cb_receive_node_destroy(cls, session, node_id): """ Static method of class that should be called, when destroy_node callback method of Session class is called. This method removes node from dictionary and node will be destroyed. """ # Try to find node try: node = session.nodes[node_id] except KeyError: return # Set entity state and clean data in this node node.cb_receive_destroy() # Return reference at this node return node @classmethod def cb_receive_node_link(cls, session, parent_node_id, child_node_id): """ Static method of class that should be called, when node_link callback method of Session class is called. This method change links between nodes. 
""" # Try to find parent node try: parent_node = session.nodes[parent_node_id] except KeyError: return # Try to find child node try: child_node = session.nodes[child_node_id] except KeyError: return # When current link between nodes is different, then # set new link between nodes if child_node.parent.id != parent_node.id: # First remove child node from list of child nodes # of current parent node child_node.parent.child_nodes.pop(child_node_id) # Set new parent of child node child_node.parent = parent_node # Add child node to the list of child node of new # parent node parent_node.child_nodes[child_node_id] = child_node # Return reference at child node return child_node @classmethod def cb_receive_node_lock(cls, session, node_id, avatar_id): """ Static method of class that is called, when client received information about locking of the node """ try: node = session.nodes[node_id] except KeyError: return node._lock_state = 'LOCKED' node.locker_id = avatar_id return node @classmethod def cb_receive_node_unlock(cls, session, node_id, avatar_id): """ Static method of class that is called, when client received information about unlocking of the node """ try: node = session.nodes[node_id] except KeyError: return node._lock_state = 'UNLOCKED' node.locker_id = None return node @classmethod def cb_receive_node_owner(cls, session, node_id, user_id): """ Static method of class that is called, when client received information about new owner of the node """ try: node = session.nodes[node_id] except KeyError: return else: node.user_id = user_id return None @classmethod def cb_receive_node_perm(cls, session, node_id, user_id, perm): """ Static method of class that is called, when client received information about permission for specific user """ try: node = session.nodes[node_id] except KeyError: return # Store information about this permissions node.perms[user_id] = perm # Return reference at this node return node @classmethod def cb_receive_node_subscribe(cls, session, 
node_id, version, crc32): """ Static method of class that should be called when node subscribe command is received from Verse server """ # TODO: implement this method, then this will be supported by Verse server pass @classmethod def cb_receive_node_unsubscribe(cls, session, node_id, version, crc32): """ Static method of class that should be called when node unsubscribe command is received from Verse server """ # TODO: implement this method, then this will be supported by Verse server pass
gpl-2.0
chongtianfeiyu/kbengine
kbe/res/scripts/common/Lib/sched.py
88
6354
"""A generally useful event scheduler class. Each instance of this class manages its own queue. No multi-threading is implied; you are supposed to hack that yourself, or use a single instance per application. Each instance is parametrized with two functions, one that is supposed to return the current time, one that is supposed to implement a delay. You can implement real-time scheduling by substituting time and sleep from built-in module time, or you can implement simulated time by writing your own functions. This can also be used to integrate scheduling with STDWIN events; the delay function is allowed to modify the queue. Time can be expressed as integers or floating point numbers, as long as it is consistent. Events are specified by tuples (time, priority, action, argument, kwargs). As in UNIX, lower priority numbers mean higher priority; in this way the queue can be maintained as a priority queue. Execution of the event means calling the action function, passing it the argument sequence in "argument" (remember that in Python, multiple function arguments are be packed in a sequence) and keyword parameters in "kwargs". The action function may be an instance method so it has another way to reference private data (besides global variables). """ # XXX The timefunc and delayfunc should have been defined as methods # XXX so you can define new kinds of schedulers using subclassing # XXX instead of having to define a module or class just to hold # XXX the global state of your particular time and delay functions. 
import time import heapq from collections import namedtuple try: import threading except ImportError: import dummy_threading as threading try: from time import monotonic as _time except ImportError: from time import time as _time __all__ = ["scheduler"] class Event(namedtuple('Event', 'time, priority, action, argument, kwargs')): def __eq__(s, o): return (s.time, s.priority) == (o.time, o.priority) def __ne__(s, o): return (s.time, s.priority) != (o.time, o.priority) def __lt__(s, o): return (s.time, s.priority) < (o.time, o.priority) def __le__(s, o): return (s.time, s.priority) <= (o.time, o.priority) def __gt__(s, o): return (s.time, s.priority) > (o.time, o.priority) def __ge__(s, o): return (s.time, s.priority) >= (o.time, o.priority) _sentinel = object() class scheduler: def __init__(self, timefunc=_time, delayfunc=time.sleep): """Initialize a new instance, passing the time and delay functions""" self._queue = [] self._lock = threading.RLock() self.timefunc = timefunc self.delayfunc = delayfunc def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel): """Enter a new event in the queue at an absolute time. Returns an ID for the event which can be used to remove it, if necessary. """ if kwargs is _sentinel: kwargs = {} event = Event(time, priority, action, argument, kwargs) with self._lock: heapq.heappush(self._queue, event) return event # The ID def enter(self, delay, priority, action, argument=(), kwargs=_sentinel): """A variant that specifies the time as a relative time. This is actually the more commonly used interface. """ time = self.timefunc() + delay return self.enterabs(time, priority, action, argument, kwargs) def cancel(self, event): """Remove an event from the queue. This must be presented the ID as returned by enter(). If the event is not in the queue, this raises ValueError. 
""" with self._lock: self._queue.remove(event) heapq.heapify(self._queue) def empty(self): """Check whether the queue is empty.""" with self._lock: return not self._queue def run(self, blocking=True): """Execute events until the queue is empty. If blocking is False executes the scheduled events due to expire soonest (if any) and then return the deadline of the next scheduled call in the scheduler. When there is a positive delay until the first event, the delay function is called and the event is left in the queue; otherwise, the event is removed from the queue and executed (its action function is called, passing it the argument). If the delay function returns prematurely, it is simply restarted. It is legal for both the delay function and the action function to modify the queue or to raise an exception; exceptions are not caught but the scheduler's state remains well-defined so run() may be called again. A questionable hack is added to allow other threads to run: just after an event is executed, a delay of 0 is executed, to avoid monopolizing the CPU when other threads are also runnable. """ # localize variable access to minimize overhead # and to improve thread safety lock = self._lock q = self._queue delayfunc = self.delayfunc timefunc = self.timefunc pop = heapq.heappop while True: with lock: if not q: break time, priority, action, argument, kwargs = q[0] now = timefunc() if time > now: delay = True else: delay = False pop(q) if delay: if not blocking: return time - now delayfunc(time - now) else: action(*argument, **kwargs) delayfunc(0) # Let other threads run @property def queue(self): """An ordered list of upcoming events. Events are named tuples with fields for: time, priority, action, arguments, kwargs """ # Use heapq to sort the queue rather than using 'sorted(self._queue)'. # With heapq, two events scheduled at the same time will show in # the actual order they would be retrieved. 
with self._lock: events = self._queue[:] return list(map(heapq.heappop, [events]*len(events)))
lgpl-3.0
miabbott/UATFramework
steps/vagrant.py
7
5287
'''test methods related to vagrant'''

from behave import *
import ConfigParser

# Shared UAT configuration; the [vagrant] section supplies the remote
# user that Ansible modules run as on the target host.
config = ConfigParser.ConfigParser()
config.read('config/uat.cfg')

remote_user = config.get("vagrant", "user")

# Packages required to build vagrant plugins from source
# (comma-separated, as consumed by the package-install step).
vagrant_build_dependencies = "ruby,ruby-devel,ruby-libvirt,rubygem-ruby-libvirt,libvirt,libvirt-devel,rubygem-bundler,rubygem-bundler-doc,rubygem-nokogiri,libxml2-devel,libxslt-devel,rubygem-rake"


@given(u'vagrant plugin is "{plugin_name}"')
def step_impl(context, plugin_name):
    # Remember the plugin under test so later steps can refer to it.
    context.vagrant_plugin = plugin_name


@given(u'install vagrant plugin')
def step_impl(context):
    assert context.remote_cmd("command",
                              context.target_host,
                              remote_user=remote_user,
                              module_args="vagrant plugin install %s" % context.vagrant_plugin)


@then(u'vagrant plugin is verified as installed')
@given(u'vagrant plugin is verified as installed')
def step_impl(context):
    # The plugin name must appear in `vagrant plugin list` on every host;
    # str.index raises ValueError when the name is absent.
    r = context.remote_cmd("command",
                           context.target_host,
                           remote_user=remote_user,
                           module_args="vagrant plugin list")
    for i in r:
        assert i['stdout'].index(context.vagrant_plugin) >= 0


@given(u'vagrant box "{box}" is already installed')
def step_impl(context, box, host="cihosts"):
    r = context.remote_cmd("command", host,
                           remote_user=remote_user,
                           module_args="vagrant box list")
    for i in r:
        assert i['stdout'].index(box) >= 0


@given(u'source of the plugin is cloned from "{url}"')
def step_impl(context, url):
    context.execute_steps(u"""
    Given clone "{project_name}" from "{url_name}"
    """.format(project_name=context.vagrant_plugin, url_name=url))


@given(u'Clone CDK from "{url}"')
def step_impl(context, url, host="cihosts"):
    assert context.remote_cmd("git", host,
                              remote_user=remote_user,
                              module_args="repo=%s dest=~/cdk" % url)


@when(u'Vagrantfile is linked')
def step_impl(context, host="cihosts"):
    assert context.remote_cmd("file", host,
                              remote_user=remote_user,
                              module_args="src=~/cdk/components/standalone-rhel/Vagrantfile dest=~/Vagrantfile state=link")


@when(u'vagrant up')
def step_impl(context, host="cihosts"):
    assert context.remote_cmd("command", host,
                              remote_user=remote_user,
                              module_args="vagrant up")


@then(u'vagrant connect to "{guest}"')
def step_impl(context, guest, host="cihosts"):
    # BUG FIX: this step previously re-ran "vagrant up"; verify
    # connectivity by actually opening an ssh session to the guest.
    # NOTE(review): like the sibling steps below, the Vagrantfile is
    # assumed to define a single machine, so the guest name is not
    # passed on the CLI -- confirm if multi-machine setups are needed.
    assert context.remote_cmd("command", host,
                              remote_user=remote_user,
                              module_args="vagrant ssh -c 'true'")


@then(u'vagrant "{guest}" is destroyed')
def step_impl(context, guest, host="cihosts"):
    # BUG FIX: this step previously ran "vagrant up"; destroy is forced
    # (-f) so it cannot block on an interactive confirmation prompt.
    assert context.remote_cmd("command", host,
                              remote_user=remote_user,
                              module_args="vagrant destroy -f")


@then(u'vagrant "{guest}" is auto-subscribed')
def step_impl(context, guest, host="cihosts"):
    assert context.remote_cmd("command", host,
                              remote_user=remote_user,
                              module_args="vagrant ssh -c 'sudo subscription-manager status'")

#requires querying the customer portal to find out if the registration was remove (the box it was on is gone)
#@then(u'vagrant "{guest}" is unsubscribed and unregistered')
#def step_impl(context, guest, host="cihosts"):
#    assert False


@given(u'vagrant is installed')
def step_impl(context, host="cihosts"):
    assert context.remote_cmd("command", host,
                              remote_user=remote_user,
                              module_args="which vagrant")

#not sure why this doesn't work
@given(u'vagrant plugin build dependencies are installed')
def step_impl(context, host="cihosts"):
    context.execute_steps(u"""
    given "{package_names}" are already installed on "{vagrant_host}"
    """.format(package_names=vagrant_build_dependencies, vagrant_host=context.target_host))


@given(u'bundler has been used to install ruby dependencies')
def step_impl(context):
    assert context.remote_cmd("command",
                              context.target_host,
                              remote_user=remote_user,
                              module_args="cd ~/%s && bundle config build.nokogiri --use-system-libraries && bundle install" % context.vagrant_plugin)


@when(u'vagrant plugin is built')
def step_impl(context):
    assert context.remote_cmd("command",
                              context.target_host,
                              remote_user=remote_user,
                              module_args="cd ~/%s && rake build" % context.vagrant_plugin)


@then(u'local "vagrant-registration" gem is successfully installed')
def step_impl(context):
    context.execute_steps(u"""
    given vagrant plugin is verified as installed
    """)
gpl-2.0
danigonza/phantomjs
src/breakpad/src/third_party/protobuf/protobuf/python/stubout.py
671
4940
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This file is used for testing.  The original is at:
#   http://code.google.com/p/pymox/

# BUG FIX: this module calls inspect.ismodule/isclass/getmro but never
# imported inspect, so SmartSet() raised NameError on first use.
import inspect


class StubOutForTesting:
  """Replace attributes for the duration of a test and restore them after.

  Sample Usage:
    You want os.path.exists() to always return true during testing.

       stubs = StubOutForTesting()
       stubs.Set(os.path, 'exists', lambda x: 1)
         ...
       stubs.UnsetAll()

    The above changes os.path.exists into a lambda that returns 1.  Once
    the ... part of the code finishes, the UnsetAll() looks up the old
    value of os.path.exists and restores it.
  """

  def __init__(self):
    # Records for Set()/UnsetAll(): (parent, old_child, child_name).
    self.cache = []
    # Records for SmartSet()/SmartUnsetAll(): (orig_obj, attr_name, orig_attr).
    self.stubs = []

  def __del__(self):
    # Best-effort restore so a forgotten Unset*() cannot leak stubs
    # beyond this object's lifetime.
    self.SmartUnsetAll()
    self.UnsetAll()

  def SmartSet(self, obj, attr_name, new_attr):
    """Replace obj.attr_name with new_attr.

    This method is smart and works at the module, class, and instance level
    while preserving proper inheritance. It will not stub out C types however
    unless that has been explicitly allowed by the type.

    This method supports the case where attr_name is a staticmethod or a
    classmethod of obj.

    Notes:
      - If obj is an instance, then it is its class that will actually be
        stubbed. Note that the method Set() does not do that: if obj is
        an instance, it (and not its class) will be stubbed.
      - The stubbing is using the builtin getattr and setattr. So, the
        __get__ and __set__ will be called when stubbing (TODO: A better
        idea would probably be to manipulate obj.__dict__ instead of
        getattr() and setattr()).

    Raises AttributeError if the attribute cannot be found.
    """
    # BUG FIX: obj.__dict__.has_key() is Python-2-only; `in` is
    # equivalent and works on both Python 2 and 3.
    if (inspect.ismodule(obj) or
        (not inspect.isclass(obj) and attr_name in obj.__dict__)):
      orig_obj = obj
      orig_attr = getattr(obj, attr_name)
    else:
      if not inspect.isclass(obj):
        mro = list(inspect.getmro(obj.__class__))
      else:
        mro = list(inspect.getmro(obj))
      mro.reverse()
      orig_attr = None
      for cls in mro:
        try:
          orig_obj = cls
          # NOTE(review): looks up the attribute on *obj* rather than on
          # cls, matching upstream pymox; the recorded orig_obj ends up
          # being the most-derived class for which this line is reached.
          orig_attr = getattr(obj, attr_name)
        except AttributeError:
          continue

    if orig_attr is None:
      raise AttributeError("Attribute not found.")

    # Calling getattr() on a staticmethod transforms it to a 'normal' function.
    # We need to ensure that we put it back as a staticmethod.
    old_attribute = obj.__dict__.get(attr_name)
    if old_attribute is not None and isinstance(old_attribute, staticmethod):
      orig_attr = staticmethod(orig_attr)

    self.stubs.append((orig_obj, attr_name, orig_attr))
    setattr(orig_obj, attr_name, new_attr)

  def SmartUnsetAll(self):
    """Reverses all the SmartSet() calls, restoring things to their original
    definition.  Its okay to call SmartUnsetAll() repeatedly, as later calls
    have no effect if no SmartSet() calls have been made.
    """
    # Undo in reverse order so repeated stubs of the same attribute
    # restore the very first original last.
    self.stubs.reverse()

    for args in self.stubs:
      setattr(*args)

    self.stubs = []

  def Set(self, parent, child_name, new_child):
    """Replace child_name's old definition with new_child, in the context
    of the given parent.  The parent could be a module when the child is a
    function at module scope.  Or the parent could be a class when a class'
    method is being replaced.  The named child is set to new_child, while
    the prior definition is saved away for later, when UnsetAll() is
    called.

    This method supports the case where child_name is a staticmethod or a
    classmethod of parent.
    """
    old_child = getattr(parent, child_name)

    # getattr() unwraps staticmethods; re-wrap so restore is faithful.
    old_attribute = parent.__dict__.get(child_name)
    if old_attribute is not None and isinstance(old_attribute, staticmethod):
      old_child = staticmethod(old_child)

    self.cache.append((parent, old_child, child_name))
    setattr(parent, child_name, new_child)

  def UnsetAll(self):
    """Reverses all the Set() calls, restoring things to their original
    definition.  Its okay to call UnsetAll() repeatedly, as later calls have
    no effect if no Set() calls have been made.
    """
    # Undo calls to Set() in reverse order, in case Set() was called on the
    # same arguments repeatedly (want the original call to be last one undone)
    self.cache.reverse()

    for (parent, old_child, child_name) in self.cache:
      setattr(parent, child_name, old_child)
    self.cache = []
bsd-3-clause
hanlind/nova
nova/virt/ironic/patcher.py
5
4163
# coding=utf-8
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Helper classes for Ironic HTTP PATCH creation.
"""

from oslo_serialization import jsonutils
import six

import nova.conf

CONF = nova.conf.CONF


def create(node):
    """Create an instance of the appropriate DriverFields class.

    :param node: a node object returned from ironicclient
    :returns: A GenericDriverFields instance.
    """
    return GenericDriverFields(node)


class GenericDriverFields(object):

    def __init__(self, node):
        self.node = node

    def get_deploy_patch(self, instance, image_meta, flavor,
                         preserve_ephemeral=None):
        """Build a patch to add the required fields to deploy a node.

        :param instance: the instance object.
        :param image_meta: the nova.objects.ImageMeta object instance
        :param flavor: the flavor object.
        :param preserve_ephemeral: preserve_ephemeral status (bool) to be
            specified during rebuild.
        :returns: a json-patch with the fields that needs to be updated.
        """
        patch = []

        def _add(path, value):
            # Every entry is an 'add' operation on /instance_info.
            patch.append({'path': path, 'op': 'add', 'value': value})

        _add('/instance_info/image_source', image_meta.id)
        _add('/instance_info/root_gb', str(instance.flavor.root_gb))
        _add('/instance_info/swap_mb', str(flavor['swap']))
        _add('/instance_info/display_name', instance.display_name)
        _add('/instance_info/vcpus', str(instance.flavor.vcpus))
        _add('/instance_info/memory_mb', str(instance.flavor.memory_mb))
        _add('/instance_info/local_gb',
             str(self.node.properties.get('local_gb', 0)))

        if instance.flavor.ephemeral_gb:
            _add('/instance_info/ephemeral_gb',
                 str(instance.flavor.ephemeral_gb))
            if CONF.default_ephemeral_format:
                _add('/instance_info/ephemeral_format',
                     CONF.default_ephemeral_format)

        if preserve_ephemeral is not None:
            _add('/instance_info/preserve_ephemeral', str(preserve_ephemeral))

        # Collect node capabilities from the flavor's extra_specs: only
        # keys of the form 'capabilities:<name>' are relevant; everything
        # else is ignored.
        capabilities = {}
        extra_specs = flavor.get('extra_specs')
        for spec_key, spec_val in six.iteritems(extra_specs):
            if not spec_key.startswith('capabilities:'):
                continue
            # Strip the 'capabilities' keyword to get the actual key.
            _prefix, cap_key = spec_key.split(':', 1)
            if cap_key:
                capabilities[cap_key] = spec_val

        if capabilities:
            _add('/instance_info/capabilities', jsonutils.dumps(capabilities))
        return patch
apache-2.0
adam111316/SickGear
lib/sqlalchemy/engine/interfaces.py
75
26652
# engine/interfaces.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Define core interfaces used by the engine system.""" from .. import util, event # backwards compat from ..sql.compiler import Compiled, TypeCompiler class Dialect(object): """Define the behavior of a specific database and DB-API combination. Any aspect of metadata definition, SQL query generation, execution, result-set handling, or anything else which varies between databases is defined under the general category of the Dialect. The Dialect acts as a factory for other database-specific object implementations including ExecutionContext, Compiled, DefaultGenerator, and TypeEngine. All Dialects implement the following attributes: name identifying name for the dialect from a DBAPI-neutral point of view (i.e. 'sqlite') driver identifying name for the dialect's DBAPI positional True if the paramstyle for this Dialect is positional. paramstyle the paramstyle to be used (some DB-APIs support multiple paramstyles). convert_unicode True if Unicode conversion should be applied to all ``str`` types. encoding type of encoding to use for unicode, usually defaults to 'utf-8'. statement_compiler a :class:`.Compiled` class used to compile SQL statements ddl_compiler a :class:`.Compiled` class used to compile DDL statements server_version_info a tuple containing a version number for the DB backend in use. This value is only available for supporting dialects, and is typically populated during the initial connection to the database. default_schema_name the name of the default schema. This value is only available for supporting dialects, and is typically populated during the initial connection to the database. 
execution_ctx_cls a :class:`.ExecutionContext` class used to handle statement execution execute_sequence_format either the 'tuple' or 'list' type, depending on what cursor.execute() accepts for the second argument (they vary). preparer a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to quote identifiers. supports_alter ``True`` if the database supports ``ALTER TABLE``. max_identifier_length The maximum length of identifier names. supports_unicode_statements Indicate whether the DB-API can receive SQL statements as Python unicode strings supports_unicode_binds Indicate whether the DB-API can receive string bind parameters as Python unicode strings supports_sane_rowcount Indicate whether the dialect properly implements rowcount for ``UPDATE`` and ``DELETE`` statements. supports_sane_multi_rowcount Indicate whether the dialect properly implements rowcount for ``UPDATE`` and ``DELETE`` statements when executed via executemany. preexecute_autoincrement_sequences True if 'implicit' primary key functions must be executed separately in order to get their value. This is currently oriented towards Postgresql. implicit_returning use RETURNING or equivalent during INSERT execution in order to load newly generated primary keys and other column defaults in one execution, which are then available via inserted_primary_key. If an insert statement has returning() specified explicitly, the "implicit" functionality is not used and inserted_primary_key will not be available. dbapi_type_map A mapping of DB-API type objects present in this Dialect's DB-API implementation mapped to TypeEngine implementations used by the dialect. This is used to apply types to result sets based on the DB-API types present in cursor.description; it only takes effect for result sets against textual statements where no explicit typemap was present. colspecs A dictionary of TypeEngine classes from sqlalchemy.types mapped to subclasses that are specific to the dialect class. 
This dictionary is class-level only and is not accessed from the dialect instance itself. supports_default_values Indicates if the construct ``INSERT INTO tablename DEFAULT VALUES`` is supported supports_sequences Indicates if the dialect supports CREATE SEQUENCE or similar. sequences_optional If True, indicates if the "optional" flag on the Sequence() construct should signal to not generate a CREATE SEQUENCE. Applies only to dialects that support sequences. Currently used only to allow Postgresql SERIAL to be used on a column that specifies Sequence() for usage on other backends. supports_native_enum Indicates if the dialect supports a native ENUM construct. This will prevent types.Enum from generating a CHECK constraint when that type is used. supports_native_boolean Indicates if the dialect supports a native boolean construct. This will prevent types.Boolean from generating a CHECK constraint when that type is used. """ _has_events = False def create_connect_args(self, url): """Build DB-API compatible connection arguments. Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple consisting of a `*args`/`**kwargs` suitable to send directly to the dbapi's connect function. """ raise NotImplementedError() @classmethod def type_descriptor(cls, typeobj): """Transform a generic type to a dialect-specific type. Dialect classes will usually use the :func:`.types.adapt_type` function in the types module to accomplish this. The returned result is cached *per dialect class* so can contain no dialect-instance state. """ raise NotImplementedError() def initialize(self, connection): """Called during strategized creation of the dialect with a connection. Allows dialects to configure options based on server version info or other properties. The connection passed here is a SQLAlchemy Connection object, with full capabilities. The initalize() method of the base dialect should be called via super(). 
""" pass def reflecttable(self, connection, table, include_columns, exclude_columns): """Load table description from the database. Given a :class:`.Connection` and a :class:`~sqlalchemy.schema.Table` object, reflect its columns and properties from the database. The implementation of this method is provided by :meth:`.DefaultDialect.reflecttable`, which makes use of :class:`.Inspector` to retrieve column information. Dialects should **not** seek to implement this method, and should instead implement individual schema inspection operations such as :meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`, etc. """ raise NotImplementedError() def get_columns(self, connection, table_name, schema=None, **kw): """Return information about columns in `table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return column information as a list of dictionaries with these keys: name the column's name type [sqlalchemy.types#TypeEngine] nullable boolean default the column's default value autoincrement boolean sequence a dictionary of the form {'name' : str, 'start' :int, 'increment': int} Additional column attributes may be present. """ raise NotImplementedError() def get_primary_keys(self, connection, table_name, schema=None, **kw): """Return information about primary keys in `table_name`. Deprecated. This method is only called by the default implementation of :meth:`.Dialect.get_pk_constraint`. Dialects should instead implement the :meth:`.Dialect.get_pk_constraint` method directly. """ raise NotImplementedError() def get_pk_constraint(self, connection, table_name, schema=None, **kw): """Return information about the primary key constraint on table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return primary key information as a dictionary with these keys: constrained_columns a list of column names that make up the primary key name optional name of the primary key constraint. 
""" raise NotImplementedError() def get_foreign_keys(self, connection, table_name, schema=None, **kw): """Return information about foreign_keys in `table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return foreign key information as a list of dicts with these keys: name the constraint's name constrained_columns a list of column names that make up the foreign key referred_schema the name of the referred schema referred_table the name of the referred table referred_columns a list of column names in the referred table that correspond to constrained_columns """ raise NotImplementedError() def get_table_names(self, connection, schema=None, **kw): """Return a list of table names for `schema`.""" raise NotImplementedError def get_view_names(self, connection, schema=None, **kw): """Return a list of all view names available in the database. schema: Optional, retrieve names from a non-default schema. """ raise NotImplementedError() def get_view_definition(self, connection, view_name, schema=None, **kw): """Return view definition. Given a :class:`.Connection`, a string `view_name`, and an optional string `schema`, return the view definition. """ raise NotImplementedError() def get_indexes(self, connection, table_name, schema=None, **kw): """Return information about indexes in `table_name`. Given a :class:`.Connection`, a string `table_name` and an optional string `schema`, return index information as a list of dictionaries with these keys: name the index's name column_names list of column names in order unique boolean """ raise NotImplementedError() def get_unique_constraints(self, connection, table_name, schema=None, **kw): """Return information about unique constraints in `table_name`. 
Given a string `table_name` and an optional string `schema`, return unique constraint information as a list of dicts with these keys: name the unique constraint's name column_names list of column names in order \**kw other options passed to the dialect's get_unique_constraints() method. .. versionadded:: 0.9.0 """ raise NotImplementedError() def normalize_name(self, name): """convert the given name to lowercase if it is detected as case insensitive. this method is only used if the dialect defines requires_name_normalize=True. """ raise NotImplementedError() def denormalize_name(self, name): """convert the given name to a case insensitive identifier for the backend if it is an all-lowercase name. this method is only used if the dialect defines requires_name_normalize=True. """ raise NotImplementedError() def has_table(self, connection, table_name, schema=None): """Check the existence of a particular table in the database. Given a :class:`.Connection` object and a string `table_name`, return True if the given table (possibly within the specified `schema`) exists in the database, False otherwise. """ raise NotImplementedError() def has_sequence(self, connection, sequence_name, schema=None): """Check the existence of a particular sequence in the database. Given a :class:`.Connection` object and a string `sequence_name`, return True if the given sequence exists in the database, False otherwise. """ raise NotImplementedError() def _get_server_version_info(self, connection): """Retrieve the server version info from the given connection. This is used by the default implementation to populate the "server_version_info" attribute and is called exactly once upon first connect. """ raise NotImplementedError() def _get_default_schema_name(self, connection): """Return the string name of the currently selected schema from the given connection. This is used by the default implementation to populate the "default_schema_name" attribute and is called exactly once upon first connect. 
""" raise NotImplementedError() def do_begin(self, dbapi_connection): """Provide an implementation of ``connection.begin()``, given a DB-API connection. The DBAPI has no dedicated "begin" method and it is expected that transactions are implicit. This hook is provided for those DBAPIs that might need additional help in this area. Note that :meth:`.Dialect.do_begin` is not called unless a :class:`.Transaction` object is in use. The :meth:`.Dialect.do_autocommit` hook is provided for DBAPIs that need some extra commands emitted after a commit in order to enter the next transaction, when the SQLAlchemy :class:`.Connection` is used in it's default "autocommit" mode. :param dbapi_connection: a DBAPI connection, typically proxied within a :class:`.ConnectionFairy`. """ raise NotImplementedError() def do_rollback(self, dbapi_connection): """Provide an implementation of ``connection.rollback()``, given a DB-API connection. :param dbapi_connection: a DBAPI connection, typically proxied within a :class:`.ConnectionFairy`. """ raise NotImplementedError() def do_commit(self, dbapi_connection): """Provide an implementation of ``connection.commit()``, given a DB-API connection. :param dbapi_connection: a DBAPI connection, typically proxied within a :class:`.ConnectionFairy`. """ raise NotImplementedError() def do_close(self, dbapi_connection): """Provide an implementation of ``connection.close()``, given a DBAPI connection. This hook is called by the :class:`.Pool` when a connection has been detached from the pool, or is being returned beyond the normal capacity of the pool. .. versionadded:: 0.8 """ raise NotImplementedError() def create_xid(self): """Create a two-phase transaction ID. This id will be passed to do_begin_twophase(), do_rollback_twophase(), do_commit_twophase(). Its format is unspecified. """ raise NotImplementedError() def do_savepoint(self, connection, name): """Create a savepoint with the given name. :param connection: a :class:`.Connection`. 
        :param name: savepoint name.

        """

        raise NotImplementedError()

    def do_rollback_to_savepoint(self, connection, name):
        """Rollback a connection to the named savepoint.

        :param connection: a :class:`.Connection`.
        :param name: savepoint name.

        """

        raise NotImplementedError()

    def do_release_savepoint(self, connection, name):
        """Release the named savepoint on a connection.

        :param connection: a :class:`.Connection`.
        :param name: savepoint name.

        """

        raise NotImplementedError()

    def do_begin_twophase(self, connection, xid):
        """Begin a two phase transaction on the given connection.

        :param connection: a :class:`.Connection`.
        :param xid: xid

        """

        raise NotImplementedError()

    def do_prepare_twophase(self, connection, xid):
        """Prepare a two phase transaction on the given connection.

        :param connection: a :class:`.Connection`.
        :param xid: xid

        """

        raise NotImplementedError()

    def do_rollback_twophase(self, connection, xid, is_prepared=True,
                             recover=False):
        """Rollback a two phase transaction on the given connection.

        :param connection: a :class:`.Connection`.
        :param xid: xid
        :param is_prepared: whether or not
         :meth:`.TwoPhaseTransaction.prepare` was called.
        :param recover: if the recover flag was passed.

        """

        raise NotImplementedError()

    def do_commit_twophase(self, connection, xid, is_prepared=True,
                           recover=False):
        """Commit a two phase transaction on the given connection.

        :param connection: a :class:`.Connection`.
        :param xid: xid
        :param is_prepared: whether or not
         :meth:`.TwoPhaseTransaction.prepare` was called.
        :param recover: if the recover flag was passed.

        """

        raise NotImplementedError()

    def do_recover_twophase(self, connection):
        """Recover list of uncommited prepared two phase transaction
        identifiers on the given connection.

        :param connection: a :class:`.Connection`.

        """

        raise NotImplementedError()

    def do_executemany(self, cursor, statement, parameters, context=None):
        """Provide an implementation of ``cursor.executemany(statement,
        parameters)``."""

        raise NotImplementedError()

    def do_execute(self, cursor, statement, parameters, context=None):
        """Provide an implementation of ``cursor.execute(statement,
        parameters)``."""

        raise NotImplementedError()

    def do_execute_no_params(self, cursor, statement, parameters,
                             context=None):
        """Provide an implementation of ``cursor.execute(statement)``.

        The parameter collection should not be sent.

        """

        raise NotImplementedError()

    def is_disconnect(self, e, connection, cursor):
        """Return True if the given DB-API error indicates an invalid
        connection"""

        raise NotImplementedError()

    def connect(self):
        """return a callable which sets up a newly created DBAPI connection.

        The callable accepts a single argument "conn" which is the
        DBAPI connection itself.  It has no return value.

        This is used to set dialect-wide per-connection options such as
        isolation modes, unicode modes, etc.

        If a callable is returned, it will be assembled into a pool listener
        that receives the direct DBAPI connection, with all wrappers removed.

        If None is returned, no listener will be generated.

        """
        # NOTE: unlike the other hooks this one does NOT raise
        # NotImplementedError -- returning None means "no connect listener".
        return None

    def reset_isolation_level(self, dbapi_conn):
        """Given a DBAPI connection, revert its isolation to the default."""

        raise NotImplementedError()

    def set_isolation_level(self, dbapi_conn, level):
        """Given a DBAPI connection, set its isolation level."""

        raise NotImplementedError()

    def get_isolation_level(self, dbapi_conn):
        """Given a DBAPI connection, return its isolation level."""

        raise NotImplementedError()


class ExecutionContext(object):
    """A messenger object for a Dialect that corresponds to a single
    execution.

    ExecutionContext should have these data members:

    connection
      Connection object which can be freely used by default value
      generators to execute SQL.  This Connection should reference the
      same underlying connection/transactional resources of
      root_connection.

    root_connection
      Connection object which is the source of this ExecutionContext.  This
      Connection may have close_with_result=True set, in which case it can
      only be used once.

    dialect
      dialect which created this ExecutionContext.

    cursor
      DB-API cursor procured from the connection,

    compiled
      if passed to constructor, sqlalchemy.engine.base.Compiled object
      being executed,

    statement
      string version of the statement to be executed.  Is either
      passed to the constructor, or must be created from the
      sql.Compiled object by the time pre_exec() has completed.

    parameters
      bind parameters passed to the execute() method.  For compiled
      statements, this is a dictionary or list of dictionaries.  For
      textual statements, it should be in a format suitable for the
      dialect's paramstyle (i.e. dict or list of dicts for non
      positional, list or list of lists/tuples for positional).

    isinsert
      True if the statement is an INSERT.

    isupdate
      True if the statement is an UPDATE.

    should_autocommit
      True if the statement is a "committable" statement.

    prefetch_cols
      a list of Column objects for which a client-side default
      was fired off.  Applies to inserts and updates.

    postfetch_cols
      a list of Column objects for which a server-side default or
      inline SQL expression value was fired off.  Applies to inserts
      and updates.
    """

    def create_cursor(self):
        """Return a new cursor generated from this ExecutionContext's
        connection.

        Some dialects may wish to change the behavior of
        connection.cursor(), such as postgresql which may return a PG
        "server side" cursor.
        """

        raise NotImplementedError()

    def pre_exec(self):
        """Called before an execution of a compiled statement.

        If a compiled statement was passed to this ExecutionContext,
        the `statement` and `parameters` datamembers must be
        initialized after this statement is complete.
        """

        raise NotImplementedError()

    def post_exec(self):
        """Called after the execution of a compiled statement.

        If a compiled statement was passed to this ExecutionContext,
        the `last_insert_ids`, `last_inserted_params`, etc.
        datamembers should be available after this method completes.
        """

        raise NotImplementedError()

    def result(self):
        """Return a result object corresponding to this ExecutionContext.

        Returns a ResultProxy.
        """

        raise NotImplementedError()

    def handle_dbapi_exception(self, e):
        """Receive a DBAPI exception which occurred upon execute, result
        fetch, etc."""

        raise NotImplementedError()

    def should_autocommit_text(self, statement):
        """Parse the given textual statement and return True if it refers to
        a "committable" statement"""

        raise NotImplementedError()

    def lastrow_has_defaults(self):
        """Return True if the last INSERT or UPDATE row contained
        inlined or database-side defaults.
        """

        raise NotImplementedError()

    def get_rowcount(self):
        """Return the DBAPI ``cursor.rowcount`` value, or in some
        cases an interpreted value.

        See :attr:`.ResultProxy.rowcount` for details on this.

        """

        raise NotImplementedError()


class Connectable(object):
    """Interface for an object which supports execution of SQL constructs.

    The two implementations of :class:`.Connectable` are
    :class:`.Connection` and :class:`.Engine`.

    Connectable must also implement the 'dialect' member which references a
    :class:`.Dialect` instance.

    """

    def connect(self, **kwargs):
        """Return a :class:`.Connection` object.

        Depending on context, this may be ``self`` if this object
        is already an instance of :class:`.Connection`, or a newly
        procured :class:`.Connection` if this object is an instance
        of :class:`.Engine`.

        """
        # NOTE(review): intentionally has no body beyond the docstring in
        # the base class, so it implicitly returns None if not overridden.

    def contextual_connect(self):
        """Return a :class:`.Connection` object which may be part of an
        ongoing context.

        Depending on context, this may be ``self`` if this object
        is already an instance of :class:`.Connection`, or a newly
        procured :class:`.Connection` if this object is an instance
        of :class:`.Engine`.

        """

        raise NotImplementedError()

    @util.deprecated("0.7",
                     "Use the create() method on the given schema "
                     "object directly, i.e. :meth:`.Table.create`, "
                     ":meth:`.Index.create`, :meth:`.MetaData.create_all`")
    def create(self, entity, **kwargs):
        """Emit CREATE statements for the given schema entity.
        """

        raise NotImplementedError()

    @util.deprecated("0.7",
                     "Use the drop() method on the given schema "
                     "object directly, i.e. :meth:`.Table.drop`, "
                     ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`")
    def drop(self, entity, **kwargs):
        """Emit DROP statements for the given schema entity.
        """

        raise NotImplementedError()

    def execute(self, object, *multiparams, **params):
        """Executes the given construct and returns a
        :class:`.ResultProxy`."""
        raise NotImplementedError()

    def scalar(self, object, *multiparams, **params):
        """Executes and returns the first column of the first row.

        The underlying cursor is closed after execution.
        """
        raise NotImplementedError()

    def _run_visitor(self, visitorcallable, element, **kwargs):
        # Internal hook: apply a visitor to *element* using this
        # Connectable's connection resources.  NOTE(review): presumably
        # used for DDL traversal -- confirm against Engine/Connection.
        raise NotImplementedError()

    def _execute_clauseelement(self, elem, multiparams=None, params=None):
        # Internal hook: execute a Core SQL expression construct.
        raise NotImplementedError()
gpl-3.0
cogeorg/BlackRhino
examples/Georg2012/networkx/generators/social.py
41
11395
""" Famous social networks. """ import networkx as nx __author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>', 'Katy Bold <kbold@princeton.edu>', 'Aric Hagberg <aric.hagberg@gmail.com)']) __all__ = ['karate_club_graph','davis_southern_women_graph', 'florentine_families_graph'] def karate_club_graph(): """Return Zachary's Karate club graph. References ---------- .. [1] Zachary W. An information flow model for conflict and fission in small groups. Journal of Anthropological Research, 33, 452-473, (1977). .. [2] Data file from: http://vlado.fmf.uni-lj.si/pub/networks/data/Ucinet/UciData.htm """ G=nx.Graph() G.add_nodes_from(range(34)) G.name="Zachary's Karate Club" zacharydat="""\ 0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0 1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1 0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0""" row=0 for line in zacharydat.split('\n'): thisrow=list(map(int,line.split(' '))) for col in range(0,len(thisrow)): if thisrow[col]==1: G.add_edge(row,col) # col goes from 0,33 row+=1 club1 = 'Mr. 
Hi' club2 = 'Officer' G.node[0]['club'] = club1 G.node[1]['club'] = club1 G.node[2]['club'] = club1 G.node[3]['club'] = club1 G.node[4]['club'] = club1 G.node[5]['club'] = club1 G.node[6]['club'] = club1 G.node[7]['club'] = club1 G.node[8]['club'] = club1 G.node[9]['club'] = club2 G.node[10]['club'] = club1 G.node[11]['club'] = club1 G.node[12]['club'] = club1 G.node[13]['club'] = club1 G.node[14]['club'] = club2 G.node[15]['club'] = club2 G.node[16]['club'] = club1 G.node[17]['club'] = club1 G.node[18]['club'] = club2 G.node[19]['club'] = club1 G.node[20]['club'] = club2 G.node[21]['club'] = club1 G.node[22]['club'] = club2 G.node[23]['club'] = club2 G.node[24]['club'] = club2 G.node[25]['club'] = club2 G.node[26]['club'] = club2 G.node[27]['club'] = club2 G.node[28]['club'] = club2 G.node[29]['club'] = club2 G.node[30]['club'] = club2 G.node[31]['club'] = club2 G.node[32]['club'] = club2 G.node[33]['club'] = club2 return G def davis_southern_women_graph(): """Return Davis Southern women social network. This is a bipartite graph. References ---------- .. [1] A. Davis, Gardner, B. B., Gardner, M. R., 1941. Deep South. University of Chicago Press, Chicago, IL. 
""" G = nx.Graph() # Top nodes G.add_nodes_from(["Evelyn Jefferson", "Laura Mandeville", "Theresa Anderson", "Brenda Rogers", "Charlotte McDowd", "Frances Anderson", "Eleanor Nye", "Pearl Oglethorpe", "Ruth DeSand", "Verne Sanderson", "Myra Liddel", "Katherina Rogers", "Sylvia Avondale", "Nora Fayette", "Helen Lloyd", "Dorothy Murchison", "Olivia Carleton", "Flora Price"], bipartite=0) # Bottom nodes G.add_nodes_from(["E1", "E2", "E3", "E4", "E5", "E6", "E7", "E8", "E9", "E10", "E11", "E12", "E13", "E14"], bipartite=1) G.add_edges_from([("Evelyn Jefferson","E1"), ("Evelyn Jefferson","E2"), ("Evelyn Jefferson","E3"), ("Evelyn Jefferson","E4"), ("Evelyn Jefferson","E5"), ("Evelyn Jefferson","E6"), ("Evelyn Jefferson","E8"), ("Evelyn Jefferson","E9"), ("Laura Mandeville","E1"), ("Laura Mandeville","E2"), ("Laura Mandeville","E3"), ("Laura Mandeville","E5"), ("Laura Mandeville","E6"), ("Laura Mandeville","E7"), ("Laura Mandeville","E8"), ("Theresa Anderson","E2"), ("Theresa Anderson","E3"), ("Theresa Anderson","E4"), ("Theresa Anderson","E5"), ("Theresa Anderson","E6"), ("Theresa Anderson","E7"), ("Theresa Anderson","E8"), ("Theresa Anderson","E9"), ("Brenda Rogers","E1"), ("Brenda Rogers","E3"), ("Brenda Rogers","E4"), ("Brenda Rogers","E5"), ("Brenda Rogers","E6"), ("Brenda Rogers","E7"), ("Brenda Rogers","E8"), ("Charlotte McDowd","E3"), ("Charlotte McDowd","E4"), ("Charlotte McDowd","E5"), ("Charlotte McDowd","E7"), ("Frances Anderson","E3"), ("Frances Anderson","E5"), ("Frances Anderson","E6"), ("Frances Anderson","E8"), ("Eleanor Nye","E5"), ("Eleanor Nye","E6"), ("Eleanor Nye","E7"), ("Eleanor Nye","E8"), ("Pearl Oglethorpe","E6"), ("Pearl Oglethorpe","E8"), ("Pearl Oglethorpe","E9"), ("Ruth DeSand","E5"), ("Ruth DeSand","E7"), ("Ruth DeSand","E8"), ("Ruth DeSand","E9"), ("Verne Sanderson","E7"), ("Verne Sanderson","E8"), ("Verne Sanderson","E9"), ("Verne Sanderson","E12"), ("Myra Liddel","E8"), ("Myra Liddel","E9"), ("Myra Liddel","E10"), ("Myra Liddel","E12"), 
("Katherina Rogers","E8"), ("Katherina Rogers","E9"), ("Katherina Rogers","E10"), ("Katherina Rogers","E12"), ("Katherina Rogers","E13"), ("Katherina Rogers","E14"), ("Sylvia Avondale","E7"), ("Sylvia Avondale","E8"), ("Sylvia Avondale","E9"), ("Sylvia Avondale","E10"), ("Sylvia Avondale","E12"), ("Sylvia Avondale","E13"), ("Sylvia Avondale","E14"), ("Nora Fayette","E6"), ("Nora Fayette","E7"), ("Nora Fayette","E9"), ("Nora Fayette","E10"), ("Nora Fayette","E11"), ("Nora Fayette","E12"), ("Nora Fayette","E13"), ("Nora Fayette","E14"), ("Helen Lloyd","E7"), ("Helen Lloyd","E8"), ("Helen Lloyd","E10"), ("Helen Lloyd","E11"), ("Helen Lloyd","E12"), ("Dorothy Murchison","E8"), ("Dorothy Murchison","E9"), ("Olivia Carleton","E9"), ("Olivia Carleton","E11"), ("Flora Price","E9"), ("Flora Price","E11")]) return G def florentine_families_graph(): """Return Florentine families graph. References ---------- .. [1] Ronald L. Breiger and Philippa E. Pattison Cumulated social roles: The duality of persons and their algebras,1 Social Networks, Volume 8, Issue 3, September 1986, Pages 215-256 """ G=nx.Graph() G.add_edge('Acciaiuoli','Medici') G.add_edge('Castellani','Peruzzi') G.add_edge('Castellani','Strozzi') G.add_edge('Castellani','Barbadori') G.add_edge('Medici','Barbadori') G.add_edge('Medici','Ridolfi') G.add_edge('Medici','Tornabuoni') G.add_edge('Medici','Albizzi') G.add_edge('Medici','Salviati') G.add_edge('Salviati','Pazzi') G.add_edge('Peruzzi','Strozzi') G.add_edge('Peruzzi','Bischeri') G.add_edge('Strozzi','Ridolfi') G.add_edge('Strozzi','Bischeri') G.add_edge('Ridolfi','Tornabuoni') G.add_edge('Tornabuoni','Guadagni') G.add_edge('Albizzi','Ginori') G.add_edge('Albizzi','Guadagni') G.add_edge('Bischeri','Guadagni') G.add_edge('Guadagni','Lamberteschi') return G
gpl-3.0
flyfei/python-for-android
python-build/python-libs/gdata/build/lib/gdata/alt/appengine.py
133
10734
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""Provides HTTP functions for gdata.service to use on Google App Engine

AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
   urlfetch API. Set the http_client member of a GDataService object to an
   instance of an AppEngineHttpClient to allow the gdata library to run on
   Google App Engine.

run_on_appengine: Function which will modify an existing GDataService object
   to allow it to run on App Engine. It works by creating a new instance of
   the AppEngineHttpClient and replacing the GDataService object's
   http_client.
"""


__author__ = 'api.jscudder (Jeff Scudder)'


import StringIO
import pickle
import atom.http_interface
import atom.token_store
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import memcache


def run_on_appengine(gdata_service, store_tokens=True,
    single_user_mode=False):
  """Modifies a GDataService object to allow it to run on App Engine.

  Args:
    gdata_service: An instance of AtomService, GDataService, or any
        of their subclasses which has an http_client member and a
        token_store member.
    store_tokens: Boolean, defaults to True. If True, the gdata_service
        will attempt to add each token to it's token_store when
        SetClientLoginToken or SetAuthSubToken is called. If False
        the tokens will not automatically be added to the token_store.
    single_user_mode: Boolean, defaults to False. If True, the current_token
        member of gdata_service will be set when SetClientLoginToken or
        SetAuthTubToken is called. If set to True, the current_token is set
        in the gdata_service and anyone who accesses the object will use
        the same token.

        Note: If store_tokens is set to False and single_user_mode is set
        to False, all tokens will be ignored, since the library assumes:
        the tokens should not be stored in the datastore and they should
        not be stored in the gdata_service object. This will make it
        impossible to make requests which require authorization.
  """
  gdata_service.http_client = AppEngineHttpClient()
  gdata_service.token_store = AppEngineTokenStore()
  gdata_service.auto_store_tokens = store_tokens
  gdata_service.auto_set_current_token = single_user_mode
  return gdata_service


class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
  """HTTP client which routes all requests through App Engine's urlfetch."""

  def __init__(self, headers=None):
    self.debug = False
    # Headers sent with every request; merged with per-request headers.
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform and HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [_convert_data_part(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = _convert_data_part(data)
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = str(len(data_str))
    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'
    # Lookup the urlfetch operation which corresponds to the desired HTTP
    # verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers, follow_redirects=False))


def _convert_data_part(data):
  # Normalize one payload part to a string: strings (and falsy values) pass
  # through, file-like objects are fully read, anything else uses str().
  if not data or isinstance(data, str):
    return data
  elif hasattr(data, 'read'):
    # data is a file like object, so read it completely.
    return data.read()
  # The data object was not a file.
  # Try to convert to a string and send the data.
  return str(data)


class HttpResponse(object):
  """Translates a urlfetch response to look like an httplib response.

  Used to allow the response from HttpRequest to be usable by gdata.service
  methods.
  """

  def __init__(self, urlfetch_response):
    self.body = StringIO.StringIO(urlfetch_response.content)
    self.headers = urlfetch_response.headers
    self.status = urlfetch_response.status_code
    self.reason = ''

  def read(self, length=None):
    # Mirrors file.read(): no length means read the remainder of the body.
    if not length:
      return self.body.read()
    else:
      return self.body.read(length)

  def getheader(self, name):
    # Falls back to the lower-cased header name when the exact-case name is
    # absent (raises KeyError if neither form is present).
    if not self.headers.has_key(name):
      return self.headers[name.lower()]
    return self.headers[name]


class TokenCollection(db.Model):
  """Datastore Model which associates auth tokens with the current user."""
  user = db.UserProperty()
  pickled_tokens = db.BlobProperty()


class AppEngineTokenStore(atom.token_store.TokenStore):
  """Stores the user's auth tokens in the App Engine datastore.

  Tokens are only written to the datastore if a user is signed in (if
  users.get_current_user() returns a user object).
  """
  def __init__(self):
    self.user = None

  def add_token(self, token):
    """Associates the token with the current user and stores it.

    If there is no current user, the token will not be stored.

    Returns:
      False if the token was not stored.
    """
    tokens = load_auth_tokens(self.user)
    if not hasattr(token, 'scopes') or not token.scopes:
      return False
    for scope in token.scopes:
      tokens[str(scope)] = token
    key = save_auth_tokens(tokens, self.user)
    if key:
      return True
    return False

  def find_token(self, url):
    """Searches the current user's collection of token for a token which can
    be used for a request to the url.

    Returns:
      The stored token which belongs to the current user and is valid for the
      desired URL. If there is no current user, or there is no valid user
      token in the datastore, a atom.http_interface.GenericToken is returned.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    tokens = load_auth_tokens(self.user)
    if url in tokens:
      token = tokens[url]
      if token.valid_for_scope(url):
        return token
      else:
        # Stored token no longer covers this scope; drop it and persist.
        del tokens[url]
        save_auth_tokens(tokens, self.user)
    for scope, token in tokens.iteritems():
      if token.valid_for_scope(url):
        return token
    return atom.http_interface.GenericToken()

  def remove_token(self, token):
    """Removes the token from the current user's collection in the datastore.

    Returns:
      False if the token was not removed, this could be because the token was
      not in the datastore, or because there is no current user.
    """
    token_found = False
    scopes_to_delete = []
    tokens = load_auth_tokens(self.user)
    for scope, stored_token in tokens.iteritems():
      if stored_token == token:
        scopes_to_delete.append(scope)
        token_found = True
    for scope in scopes_to_delete:
      del tokens[scope]
    if token_found:
      save_auth_tokens(tokens, self.user)
    return token_found

  def remove_all_tokens(self):
    """Removes all of the current user's tokens from the datastore."""
    save_auth_tokens({}, self.user)


def save_auth_tokens(token_dict, user):
  """Associates the tokens with the current user and writes to the datastore.

  If there is no current user, the tokens are not written and this function
  returns None.

  Returns:
    The key of the datastore entity containing the user's tokens, or None if
    there was no current user.
  """
  if user is None:
    user = users.get_current_user()
  if user is None:
    return None
  user_tokens = TokenCollection.all().filter('user =', user).get()
  if user_tokens:
    user_tokens.pickled_tokens = pickle.dumps(token_dict)
    return user_tokens.put()
  else:
    user_tokens = TokenCollection(
        user=user,
        pickled_tokens=pickle.dumps(token_dict))
    return user_tokens.put()


def load_auth_tokens(user):
  """Reads a dictionary of the current user's tokens from the datastore.

  If there is no current user (a user is not signed in to the app) or the
  user does not have any tokens, an empty dictionary is returned.
  """
  if user is None:
    user = users.get_current_user()
  if user is None:
    return {}
  user_tokens = TokenCollection.all().filter('user =', user).get()
  if user_tokens:
    return pickle.loads(user_tokens.pickled_tokens)
  return {}
apache-2.0
Sony-Kitakami/android_kernel_sony
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
11088
3246
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

from collections import defaultdict

def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)

# event -> field -> {'delim': str, 'values': {bit: name}}
flag_fields = autodict()
# event -> field -> {'values': {value: name}}
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when joining flag names for a field."""
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    """Map a single flag bit *value* to its symbolic name *field_str*."""
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    """Map an exact field *value* to its symbolic name *field_str*."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str

def flag_str(event_name, field_name, value):
    """Decode a bitmask *value* into delimiter-joined flag names.

    Bits are tested in ascending numeric order; a value of 0 only matches
    a flag registered for 0.  Unregistered bits are silently ignored.
    """
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() works on both Python 2 and 3; the previous
        # keys()/.sort() pattern fails on Python 3 dict views.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx

    return string

def symbol_str(event_name, field_name, value):
    """Decode an exact field *value* into its registered symbolic name.

    Returns an empty string when the value has no registered name.
    """
    string = ""

    if symbolic_fields[event_name][field_name]:
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string

trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }

def trace_flag_str(value):
    """Decode the common trace-flags bitmask into " | "-joined names."""
    string = ""
    print_delim = 0

    # Iterate flags in ascending bit order for deterministic output.
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break

        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx

    return string


def taskState(state):
    """Map a scheduler task-state value to its one-letter code."""
    states = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD"
    }

    if state not in states:
        return "Unknown"

    return states[state]


class EventHeaders:
    """Common per-event header fields shared by every traced event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Return the full timestamp in nanoseconds."""
        return (self.secs * (10 ** 9)) + self.nsecs

    def ts_format(self):
        """Return the timestamp formatted as "seconds.microseconds"."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
gpl-2.0
willingc/oh-mainline
vendor/packages/Django/django/conf/locale/pt_BR/formats.py
107
1315
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = r'j \d\e F \d\e Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i' YEAR_MONTH_FORMAT = r'F \d\e Y' MONTH_DAY_FORMAT = r'j \d\e F' SHORT_DATE_FORMAT = 'd/m/Y' SHORT_DATETIME_FORMAT = 'd/m/Y H:i' FIRST_DAY_OF_WEEK = 0 # Sunday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06' # '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006' # '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006' ) DATETIME_INPUT_FORMATS = ( '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
agpl-3.0
vinegret/youtube-dl
youtube_dl/extractor/odnoklassniki.py
12
9492
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_etree_fromstring,
    compat_parse_qs,
    compat_urllib_parse_unquote,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    unified_strdate,
    int_or_none,
    qualities,
    unescapeHTML,
    urlencode_postdata,
)


class OdnoklassnikiIE(InfoExtractor):
    """Extractor for videos hosted on Odnoklassniki (odnoklassniki.ru / ok.ru)."""

    # Watch/embed pages on desktop and mobile hosts, the legacy
    # moviePlayer endpoint, live streams, and the st.mvId query form.
    _VALID_URL = r'''(?x)
                https?://
                    (?:(?:www|m|mobile)\.)?
                    (?:odnoklassniki|ok)\.ru/
                    (?:
                        video(?:embed)?/|
                        web-api/video/moviePlayer/|
                        live/|
                        dk\?.*?st\.mvId=
                    )
                    (?P<id>[\d-]+)
                '''
    _TESTS = [{
        # metadata in JSON
        'url': 'http://ok.ru/video/20079905452',
        'md5': '0b62089b479e06681abaaca9d204f152',
        'info_dict': {
            'id': '20079905452',
            'ext': 'mp4',
            'title': 'Культура меняет нас (прекрасный ролик!))',
            'duration': 100,
            'upload_date': '20141207',
            'uploader_id': '330537914540',
            'uploader': 'Виталий Добровольский',
            'like_count': int,
            'age_limit': 0,
        },
    }, {
        # metadataUrl
        'url': 'http://ok.ru/video/63567059965189-0?fromTime=5',
        'md5': '6ff470ea2dd51d5d18c295a355b0b6bc',
        'info_dict': {
            'id': '63567059965189-0',
            'ext': 'mp4',
            'title': 'Девушка без комплексов ...',
            'duration': 191,
            'upload_date': '20150518',
            'uploader_id': '534380003155',
            'uploader': '☭ Андрей Мещанинов ☭',
            'like_count': int,
            'age_limit': 0,
            'start_time': 5,
        },
    }, {
        # YouTube embed (metadataUrl, provider == USER_YOUTUBE)
        'url': 'http://ok.ru/video/64211978996595-1',
        'md5': '2f206894ffb5dbfcce2c5a14b909eea5',
        'info_dict': {
            'id': 'V_VztHT5BzY',
            'ext': 'mp4',
            'title': 'Космическая среда от 26 августа 2015',
            'description': 'md5:848eb8b85e5e3471a3a803dae1343ed0',
            'duration': 440,
            'upload_date': '20150826',
            'uploader_id': 'tvroscosmos',
            'uploader': 'Телестудия Роскосмоса',
            'age_limit': 0,
        },
    }, {
        # YouTube embed (metadata, provider == USER_YOUTUBE, no metadata.movie.title field)
        'url': 'http://ok.ru/video/62036049272859-0',
        'info_dict': {
            'id': '62036049272859-0',
            'ext': 'mp4',
            'title': 'МУЗЫКА ДОЖДЯ .',
            'description': 'md5:6f1867132bd96e33bf53eda1091e8ed0',
            'upload_date': '20120106',
            'uploader_id': '473534735899',
            'uploader': 'МARINA D',
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Video has not been found',
    }, {
        'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452',
        'only_matching': True,
    }, {
        'url': 'http://www.ok.ru/video/20648036891',
        'only_matching': True,
    }, {
        'url': 'http://www.ok.ru/videoembed/20648036891',
        'only_matching': True,
    }, {
        'url': 'http://m.ok.ru/video/20079905452',
        'only_matching': True,
    }, {
        'url': 'http://mobile.ok.ru/video/20079905452',
        'only_matching': True,
    }, {
        'url': 'https://www.ok.ru/live/484531969818',
        'only_matching': True,
    }, {
        'url': 'https://m.ok.ru/dk?st.cmd=movieLayer&st.discId=863789452017&st.retLoc=friend&st.rtu=%2Fdk%3Fst.cmd%3DfriendMovies%26st.mode%3Down%26st.mrkId%3D%257B%2522uploadedMovieMarker%2522%253A%257B%2522marker%2522%253A%25221519410114503%2522%252C%2522hasMore%2522%253Atrue%257D%252C%2522sharedMovieMarker%2522%253A%257B%2522marker%2522%253Anull%252C%2522hasMore%2522%253Afalse%257D%257D%26st.friendId%3D561722190321%26st.frwd%3Don%26_prevCmd%3DfriendMovies%26tkn%3D7257&st.discType=MOVIE&st.mvId=863789452017&_prevCmd=friendMovies&tkn=3648#lst#',
        'only_matching': True,
    }, {
        # Paid video
        'url': 'https://ok.ru/video/954886983203',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_url(webpage):
        # Find an ok.ru iframe embed inside a third-party page, if any.
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1', webpage)
        if mobj:
            return mobj.group('url')

    def _real_extract(self, url):
        # Optional ?fromTime= start offset, propagated into the result.
        start_time = int_or_none(compat_parse_qs(
            compat_urllib_parse_urlparse(url).query).get('fromTime', [None])[0])

        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://ok.ru/video/%s' % video_id, video_id)

        # Site-rendered error stub (removed/blocked video) takes priority.
        error = self._search_regex(
            r'[^>]+class="vp_video_stub_txt"[^>]*>([^<]+)<',
            webpage, 'error', default=None)
        if error:
            raise ExtractorError(error, expected=True)

        # Player configuration is embedded as HTML-escaped JSON in
        # the data-options attribute.
        player = self._parse_json(
            unescapeHTML(self._search_regex(
                r'data-options=(?P<quote>["\'])(?P<player>{.+?%s.+?})(?P=quote)' % video_id,
                webpage, 'player', group='player')),
            video_id)

        flashvars = player['flashvars']

        # Metadata is either inlined in flashvars or fetched from a
        # separate metadataUrl endpoint via POST.
        metadata = flashvars.get('metadata')
        if metadata:
            metadata = self._parse_json(metadata, video_id)
        else:
            data = {}
            st_location = flashvars.get('location')
            if st_location:
                data['st.location'] = st_location
            metadata = self._download_json(
                compat_urllib_parse_unquote(flashvars['metadataUrl']),
                video_id, 'Downloading metadata JSON',
                data=urlencode_postdata(data))

        movie = metadata['movie']

        # Some embedded videos may not contain title in movie dict (e.g.
        # http://ok.ru/video/62036049272859-0) thus we allow missing title
        # here and it's going to be extracted later by an extractor that
        # will process the actual embed.
        provider = metadata.get('provider')
        title = movie['title'] if provider == 'UPLOADED_ODKL' else movie.get('title')

        thumbnail = movie.get('poster')
        duration = int_or_none(movie.get('duration'))

        author = metadata.get('author', {})

        uploader_id = author.get('id')
        uploader = author.get('name')

        upload_date = unified_strdate(self._html_search_meta(
            'ya:ovs:upload_date', webpage, 'upload date', default=None))

        age_limit = None
        adult = self._html_search_meta(
            'ya:ovs:adult', webpage, 'age limit', default=None)
        if adult:
            age_limit = 18 if adult == 'true' else 0

        like_count = int_or_none(metadata.get('likeCount'))

        info = {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'like_count': like_count,
            'age_limit': age_limit,
            'start_time': start_time,
        }

        # YouTube re-uploads are delegated to the YouTube extractor.
        if provider == 'USER_YOUTUBE':
            info.update({
                '_type': 'url_transparent',
                'url': movie['contentId'],
            })
            return info

        assert title
        if provider == 'LIVE_TV_APP':
            info['title'] = self._live_title(title)

        # Preference order of the numeric type codes embedded in format URLs.
        quality = qualities(('4', '0', '1', '2', '3', '5'))

        formats = [{
            'url': f['url'],
            'ext': 'mp4',
            'format_id': f['name'],
        } for f in metadata['videos']]

        m3u8_url = metadata.get('hlsManifestUrl')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        dash_manifest = metadata.get('metadataEmbedded')
        if dash_manifest:
            formats.extend(self._parse_mpd_formats(
                compat_etree_fromstring(dash_manifest), 'mpd'))

        for fmt in formats:
            fmt_type = self._search_regex(
                r'\btype[/=](\d)', fmt['url'],
                'format type', default=None)
            if fmt_type:
                fmt['quality'] = quality(fmt_type)

        # Live formats
        m3u8_url = metadata.get('hlsMasterPlaylistUrl')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', entry_protocol='m3u8',
                m3u8_id='hls', fatal=False))
        rtmp_url = metadata.get('rtmpUrl')
        if rtmp_url:
            formats.append({
                'url': rtmp_url,
                'format_id': 'rtmp',
                'ext': 'flv',
            })

        if not formats:
            payment_info = metadata.get('paymentInfo')
            if payment_info:
                raise ExtractorError('This video is paid, subscribe to download it', expected=True)

        self._sort_formats(formats)

        info['formats'] = formats
        return info
unlicense
syhost/android_kernel_pantech_ef50l
tools/perf/scripts/python/sctop.py
11180
1924
# system call top # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Periodically displays system-wide system call totals, broken down by # syscall. If a [comm] arg is specified, only syscalls called by # [comm] are displayed. If an [interval] arg is specified, the display # will be refreshed every [interval] seconds. The default interval is # 3 seconds. import os, sys, thread, time sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s sctop.py [comm] [interval]\n"; for_comm = None default_interval = 3 interval = default_interval if len(sys.argv) > 3: sys.exit(usage) if len(sys.argv) > 2: for_comm = sys.argv[1] interval = int(sys.argv[2]) elif len(sys.argv) > 1: try: interval = int(sys.argv[1]) except ValueError: for_comm = sys.argv[1] interval = default_interval syscalls = autodict() def trace_begin(): thread.start_new_thread(print_syscall_totals, (interval,)) pass def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(interval): while 1: clear_term() if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): try: print "%-40s %10d\n" % (syscall_name(id), val), except TypeError: pass syscalls.clear() time.sleep(interval)
gpl-2.0
gauribhoite/personfinder
env/google_appengine/lib/jinja2-2.6/jinja2/bccache.py
117
10623
# -*- coding: utf-8 -*- """ jinja2.bccache ~~~~~~~~~~~~~~ This module implements the bytecode cache system Jinja is optionally using. This is useful if you have very complex template situations and the compiliation of all those templates slow down your application too much. Situations where this is useful are often forking web applications that are initialized on the first request. :copyright: (c) 2010 by the Jinja Team. :license: BSD. """ from os import path, listdir import sys import marshal import tempfile import cPickle as pickle import fnmatch try: from hashlib import sha1 except ImportError: from sha import new as sha1 from jinja2.utils import open_if_exists # marshal works better on 3.x, one hack less required if sys.version_info > (3, 0): from io import BytesIO marshal_dump = marshal.dump marshal_load = marshal.load else: from cStringIO import StringIO as BytesIO def marshal_dump(code, f): if isinstance(f, file): marshal.dump(code, f) else: f.write(marshal.dumps(code)) def marshal_load(f): if isinstance(f, file): return marshal.load(f) return marshal.loads(f.read()) bc_version = 2 # magic version used to only change with new jinja versions. With 2.6 # we change this to also take Python version changes into account. The # reason for this is that Python tends to segfault if fed earlier bytecode # versions because someone thought it would be a good idea to reuse opcodes # or make Python incompatible with earlier versions. bc_magic = 'j2'.encode('ascii') + \ pickle.dumps(bc_version, 2) + \ pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1]) class Bucket(object): """Buckets are used to store the bytecode for one template. It's created and initialized by the bytecode cache and passed to the loading functions. The buckets get an internal checksum from the cache assigned and use this to automatically reject outdated cache material. Individual bytecode cache subclasses don't have to care about cache invalidation. 
""" def __init__(self, environment, key, checksum): self.environment = environment self.key = key self.checksum = checksum self.reset() def reset(self): """Resets the bucket (unloads the bytecode).""" self.code = None def load_bytecode(self, f): """Loads bytecode from a file or file like object.""" # make sure the magic header is correct magic = f.read(len(bc_magic)) if magic != bc_magic: self.reset() return # the source code of the file changed, we need to reload checksum = pickle.load(f) if self.checksum != checksum: self.reset() return self.code = marshal_load(f) def write_bytecode(self, f): """Dump the bytecode into the file or file like object passed.""" if self.code is None: raise TypeError('can\'t write empty bucket') f.write(bc_magic) pickle.dump(self.checksum, f, 2) marshal_dump(self.code, f) def bytecode_from_string(self, string): """Load bytecode from a string.""" self.load_bytecode(BytesIO(string)) def bytecode_to_string(self): """Return the bytecode as string.""" out = BytesIO() self.write_bytecode(out) return out.getvalue() class BytecodeCache(object): """To implement your own bytecode cache you have to subclass this class and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of these methods are passed a :class:`~jinja2.bccache.Bucket`. A very basic bytecode cache that saves the bytecode on the file system:: from os import path class MyCache(BytecodeCache): def __init__(self, directory): self.directory = directory def load_bytecode(self, bucket): filename = path.join(self.directory, bucket.key) if path.exists(filename): with open(filename, 'rb') as f: bucket.load_bytecode(f) def dump_bytecode(self, bucket): filename = path.join(self.directory, bucket.key) with open(filename, 'wb') as f: bucket.write_bytecode(f) A more advanced version of a filesystem based bytecode cache is part of Jinja2. """ def load_bytecode(self, bucket): """Subclasses have to override this method to load bytecode into a bucket. 
If they are not able to find code in the cache for the bucket, it must not do anything. """ raise NotImplementedError() def dump_bytecode(self, bucket): """Subclasses have to override this method to write the bytecode from a bucket back to the cache. If it unable to do so it must not fail silently but raise an exception. """ raise NotImplementedError() def clear(self): """Clears the cache. This method is not used by Jinja2 but should be implemented to allow applications to clear the bytecode cache used by a particular environment. """ def get_cache_key(self, name, filename=None): """Returns the unique hash key for this template name.""" hash = sha1(name.encode('utf-8')) if filename is not None: filename = '|' + filename if isinstance(filename, unicode): filename = filename.encode('utf-8') hash.update(filename) return hash.hexdigest() def get_source_checksum(self, source): """Returns a checksum for the source.""" return sha1(source.encode('utf-8')).hexdigest() def get_bucket(self, environment, name, filename, source): """Return a cache bucket for the given template. All arguments are mandatory but filename may be `None`. """ key = self.get_cache_key(name, filename) checksum = self.get_source_checksum(source) bucket = Bucket(environment, key, checksum) self.load_bytecode(bucket) return bucket def set_bucket(self, bucket): """Put the bucket into the cache.""" self.dump_bytecode(bucket) class FileSystemBytecodeCache(BytecodeCache): """A bytecode cache that stores bytecode on the filesystem. It accepts two arguments: The directory where the cache items are stored and a pattern string that is used to build the filename. If no directory is specified the system temporary items folder is used. The pattern can be used to have multiple separate caches operate on the same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` is replaced with the cache key. 
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') This bytecode cache supports clearing of the cache using the clear method. """ def __init__(self, directory=None, pattern='__jinja2_%s.cache'): if directory is None: directory = tempfile.gettempdir() self.directory = directory self.pattern = pattern def _get_cache_filename(self, bucket): return path.join(self.directory, self.pattern % bucket.key) def load_bytecode(self, bucket): f = open_if_exists(self._get_cache_filename(bucket), 'rb') if f is not None: try: bucket.load_bytecode(f) finally: f.close() def dump_bytecode(self, bucket): f = open(self._get_cache_filename(bucket), 'wb') try: bucket.write_bytecode(f) finally: f.close() def clear(self): # imported lazily here because google app-engine doesn't support # write access on the file system and the function does not exist # normally. from os import remove files = fnmatch.filter(listdir(self.directory), self.pattern % '*') for filename in files: try: remove(path.join(self.directory, filename)) except OSError: pass class MemcachedBytecodeCache(BytecodeCache): """This class implements a bytecode cache that uses a memcache cache for storing the information. It does not enforce a specific memcache library (tummy's memcache or cmemcache) but will accept any class that provides the minimal interface required. Libraries compatible with this class: - `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache - `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_ - `cmemcache <http://gijsbert.org/cmemcache/>`_ (Unfortunately the django cache interface is not compatible because it does not support storing binary data, only unicode. You can however pass the underlying cache client to the bytecode cache which is available as `django.core.cache.cache._client`.) The minimal interface for the client passed to the constructor is this: .. class:: MinimalClientInterface .. method:: set(key, value[, timeout]) Stores the bytecode in the cache. 
`value` is a string and `timeout` the timeout of the key. If timeout is not provided a default timeout or no timeout should be assumed, if it's provided it's an integer with the number of seconds the cache item should exist. .. method:: get(key) Returns the value for the cache key. If the item does not exist in the cache the return value must be `None`. The other arguments to the constructor are the prefix for all keys that is added before the actual cache key and the timeout for the bytecode in the cache system. We recommend a high (or no) timeout. This bytecode cache does not support clearing of used items in the cache. The clear method is a no-operation function. """ def __init__(self, client, prefix='jinja2/bytecode/', timeout=None): self.client = client self.prefix = prefix self.timeout = timeout def load_bytecode(self, bucket): code = self.client.get(self.prefix + bucket.key) if code is not None: bucket.bytecode_from_string(code) def dump_bytecode(self, bucket): args = (self.prefix + bucket.key, bucket.bytecode_to_string()) if self.timeout is not None: args += (self.timeout,) self.client.set(*args)
apache-2.0
gammalib/gammalib
test/dev/test_model_spatial_const.py
1
1836
#! /usr/bin/env python
# ==========================================================================
# This script tests the GModelSpatialDiffuseConst model.
#
# Copyright (C) 2015 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import gammalib


# ============== #
# Evaluate model #
# ============== #
def test_mc(samples=100000):
    """
    Test the mc() method of GModelSpatialDiffuseConst.

    Draws `samples` random sky directions from an isotropic (constant)
    diffuse model and histograms them into an all-sky Aitoff map, which
    is saved to "test_model_spatial_const.fits" for visual inspection.

    Parameters
    ----------
    samples : int, optional
        Number of Monte Carlo samples to draw.
    """
    # Create isotropic spatial model with unit value
    model = gammalib.GModelSpatialDiffuseConst(1.0)

    # Create all-sky WCS map: Aitoff projection, Galactic coordinates,
    # 1 deg x 1 deg pixels (renamed from `map`/`dir` to avoid shadowing
    # the Python builtins)
    sky_map = gammalib.GSkyMap("AIT", "GAL", 0.0, 0.0, -1.0, 1.0, 360, 180)

    # Fixed energy and time; default-constructed values suffice since the
    # model is purely spatial
    energy = gammalib.GEnergy()
    time = gammalib.GTime()

    # Allocate random number generator
    ran = gammalib.GRan()

    # Fill map: one count per sampled direction
    for _ in range(samples):
        direction = model.mc(energy, time, ran)
        pixel = sky_map.dir2inx(direction)
        sky_map[pixel] += 1.0

    # Save map (True = overwrite an existing file)
    sky_map.save("test_model_spatial_const.fits", True)

    # Return
    return


# ================ #
# Main entry point #
# ================ #
if __name__ == '__main__':
    # Test Monte Carlo method
    test_mc()
gpl-3.0
mahak/neutron
neutron/tests/functional/agent/l3/test_ha_router.py
2
23197
# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from neutron_lib import constants from oslo_utils import netutils import testtools from neutron.agent.common import ovs_lib from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.tests.common import l3_test_common from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework class L3HATestCase(framework.L3AgentTestFramework): def test_ha_router_update_floatingip_statuses(self): self._test_update_floatingip_statuses( self.generate_router_info(enable_ha=True)) def test_keepalived_state_change_notification(self): enqueue_mock = mock.patch.object( self.agent, 'enqueue_state_change', side_effect=self.change_router_state).start() router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router.ha_state == 'primary') self.fail_ha_router(router) common_utils.wait_until_true(lambda: router.ha_state == 'backup') common_utils.wait_until_true(lambda: (enqueue_mock.call_count == 3 or enqueue_mock.call_count == 4)) calls = [args[0] for args in enqueue_mock.call_args_list] self.assertEqual((router.router_id, 'backup'), calls[0]) self.assertEqual((router.router_id, 'primary'), calls[1]) self.assertEqual((router.router_id, 
'backup'), calls[-1]) def _expected_rpc_report(self, expected): calls = (args[0][1] for args in self.agent.plugin_rpc.update_ha_routers_states.call_args_list) # Get the last state reported for each router actual_router_states = {} for call in calls: for router_id, state in call.items(): actual_router_states[router_id] = state return actual_router_states == expected def test_keepalived_state_change_bulk_rpc(self): router_info = self.generate_router_info(enable_ha=True) router1 = self.manage_router(self.agent, router_info) self.fail_ha_router(router1) router_info = self.generate_router_info(enable_ha=True) router2 = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router1.ha_state == 'backup') common_utils.wait_until_true(lambda: router2.ha_state == 'primary') common_utils.wait_until_true( lambda: self._expected_rpc_report( {router1.router_id: 'standby', router2.router_id: 'active'})) def test_ha_router_lifecycle(self): router_info = self._router_lifecycle(enable_ha=True) # ensure everything was cleaned up self._router_lifecycle(enable_ha=True, router_info=router_info) def test_conntrack_disassociate_fip_ha_router(self): self._test_conntrack_disassociate_fip(ha=True) def test_ipv6_ha_router_lifecycle(self): self._router_lifecycle(enable_ha=True, ip_version=constants.IP_VERSION_6) def test_ipv6_ha_router_lifecycle_with_no_gw_subnet(self): self.agent.conf.set_override('ipv6_gateway', 'fe80::f816:3eff:fe2e:1') self._router_lifecycle(enable_ha=True, ip_version=constants.IP_VERSION_6, v6_ext_gw_with_sub=False) def test_ipv6_ha_router_lifecycle_with_no_gw_subnet_for_router_advts(self): # Verify that router gw interface is configured to receive Router # Advts from upstream router when no external gateway is configured. 
self._router_lifecycle(enable_ha=True, dual_stack=True, v6_ext_gw_with_sub=False) def _test_ipv6_router_advts_and_fwd_helper(self, state, enable_v6_gw, expected_ra, expected_forwarding): # Schedule router to l3 agent, and then add router gateway. Verify # that router gw interface is configured to receive Router Advts and # IPv6 forwarding is enabled. router_info = l3_test_common.prepare_router_data( enable_snat=True, enable_ha=True, dual_stack=True, enable_gw=False) router = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router.ha_state == 'primary') if state == 'backup': self.fail_ha_router(router) common_utils.wait_until_true(lambda: router.ha_state == 'backup') _ext_dev_name, ex_port = l3_test_common.prepare_ext_gw_test( mock.Mock(), router, dual_stack=enable_v6_gw) router_info['gw_port'] = ex_port router.process() self._assert_ipv6_accept_ra(router, expected_ra) # As router is going first to primary and than to backup mode, # ipv6_forwarding should be enabled on "all" interface always after # that transition self._assert_ipv6_forwarding(router, expected_forwarding, True) @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled") def test_ipv6_router_advts_and_fwd_after_router_state_change_primary(self): # Check that RA and forwarding are enabled when there's no IPv6 # gateway. self._test_ipv6_router_advts_and_fwd_helper('primary', enable_v6_gw=False, expected_ra=True, expected_forwarding=True) # Check that RA is disabled and forwarding is enabled when an IPv6 # gateway is configured. 
self._test_ipv6_router_advts_and_fwd_helper('primary', enable_v6_gw=True, expected_ra=False, expected_forwarding=True) @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled") def test_ipv6_router_advts_and_fwd_after_router_state_change_backup(self): # Check that both RA and forwarding are disabled on backup instances self._test_ipv6_router_advts_and_fwd_helper('backup', enable_v6_gw=False, expected_ra=False, expected_forwarding=False) self._test_ipv6_router_advts_and_fwd_helper('backup', enable_v6_gw=True, expected_ra=False, expected_forwarding=False) def test_keepalived_configuration(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) expected = self.get_expected_keepalive_configuration(router) self.assertEqual(expected, router.keepalived_manager.get_conf_on_disk()) # Add a new FIP and change the GW IP address router.router = copy.deepcopy(router.router) existing_fip = '19.4.4.2' new_fip = '19.4.4.3' self._add_fip(router, new_fip) subnet_id = framework._uuid() fixed_ips = [{'ip_address': '19.4.4.10', 'prefixlen': 24, 'subnet_id': subnet_id}] subnets = [{'id': subnet_id, 'cidr': '19.4.4.0/24', 'gateway_ip': '19.4.4.5'}] router.router['gw_port']['subnets'] = subnets router.router['gw_port']['fixed_ips'] = fixed_ips router.process() # Get the updated configuration and assert that both FIPs are in, # and that the GW IP address was updated. 
new_config = router.keepalived_manager.config.get_config_str() old_gw = '0.0.0.0/0 via 19.4.4.1' new_gw = '0.0.0.0/0 via 19.4.4.5' old_external_device_ip = '19.4.4.4' new_external_device_ip = '19.4.4.10' self.assertIn(existing_fip, new_config) self.assertIn(new_fip, new_config) self.assertNotIn(old_gw, new_config) self.assertIn(new_gw, new_config) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) self.assertNotIn('%s/24 dev %s' % (old_external_device_ip, external_device_name), new_config) self.assertIn('%s/24 dev %s' % (new_external_device_ip, external_device_name), new_config) def test_ha_router_conf_on_restarted_agent(self): router_info = self.generate_router_info(enable_ha=True) router1 = self.manage_router(self.agent, router_info) self._add_fip(router1, '192.168.111.12') restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) self.manage_router(restarted_agent, router1.router) common_utils.wait_until_true( lambda: self.floating_ips_configured(router1)) self.assertIn( router1._get_primary_vip(), self._get_addresses_on_device( router1.ns_name, router1.get_ha_device_name())) def test_ha_router_ipv6_radvd_status(self): router_info = self.generate_router_info( ip_version=constants.IP_VERSION_6, enable_ha=True) router1 = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router1.ha_state == 'primary') common_utils.wait_until_true(lambda: router1.radvd.enabled) def _check_lla_status(router, expected): internal_devices = router.router[constants.INTERFACE_KEY] for device in internal_devices: lladdr = ip_lib.get_ipv6_lladdr(device['mac_address']) exists = ip_lib.device_exists_with_ips_and_mac( router.get_internal_device_name(device['id']), [lladdr], device['mac_address'], router.ns_name) self.assertEqual(expected, exists) _check_lla_status(router1, True) device_name = router1.get_ha_device_name() ha_device = ip_lib.IPDevice(device_name, 
namespace=router1.ns_name) ha_device.link.set_down() common_utils.wait_until_true(lambda: router1.ha_state == 'backup') common_utils.wait_until_true( lambda: not router1.radvd.enabled, timeout=10) _check_lla_status(router1, False) def test_ha_router_process_ipv6_subnets_to_existing_port(self): router_info = self.generate_router_info(enable_ha=True, ip_version=constants.IP_VERSION_6) router = self.manage_router(self.agent, router_info) def verify_ip_in_keepalived_config(router, iface): config = router.keepalived_manager.config.get_config_str() ip_cidrs = common_utils.fixed_ip_cidrs(iface['fixed_ips']) for ip_addr in ip_cidrs: self.assertIn(ip_addr, config) interface_id = router.router[constants.INTERFACE_KEY][0]['id'] slaac = constants.IPV6_SLAAC slaac_mode = {'ra_mode': slaac, 'address_mode': slaac} # Add a second IPv6 subnet to the router internal interface. self._add_internal_interface_by_subnet(router.router, count=1, ip_version=constants.IP_VERSION_6, ipv6_subnet_modes=[slaac_mode], interface_id=interface_id) router.process() common_utils.wait_until_true(lambda: router.ha_state == 'primary') # Verify that router internal interface is present and is configured # with IP address from both the subnets. internal_iface = router.router[constants.INTERFACE_KEY][0] self.assertEqual(2, len(internal_iface['fixed_ips'])) self._assert_internal_devices(router) # Verify that keepalived config is properly updated. 
verify_ip_in_keepalived_config(router, internal_iface) # Remove one subnet from the router internal iface interfaces = copy.deepcopy(router.router.get( constants.INTERFACE_KEY, [])) fixed_ips, subnets = [], [] fixed_ips.append(interfaces[0]['fixed_ips'][0]) subnets.append(interfaces[0]['subnets'][0]) interfaces[0].update({'fixed_ips': fixed_ips, 'subnets': subnets}) router.router[constants.INTERFACE_KEY] = interfaces router.process() # Verify that router internal interface has a single ipaddress internal_iface = router.router[constants.INTERFACE_KEY][0] self.assertEqual(1, len(internal_iface['fixed_ips'])) self._assert_internal_devices(router) # Verify that keepalived config is properly updated. verify_ip_in_keepalived_config(router, internal_iface) def test_delete_external_gateway_on_standby_router(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) self.fail_ha_router(router) common_utils.wait_until_true(lambda: router.ha_state == 'backup') # The purpose of the test is to simply make sure no exception is raised port = router.get_ex_gw_port() interface_name = router.get_external_device_name(port['id']) router.external_gateway_removed(port, interface_name) def test_removing_floatingip_immediately(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) ex_gw_port = router.get_ex_gw_port() interface_name = router.get_external_device_interface_name(ex_gw_port) common_utils.wait_until_true(lambda: router.ha_state == 'primary') self._add_fip(router, '172.168.1.20', fixed_address='10.0.0.3') router.process() router.router[constants.FLOATINGIP_KEY] = [] # The purpose of the test is to simply make sure no exception is raised # Because router.process will consume the FloatingIpSetupException, # call the configure_fip_addresses directly here router.configure_fip_addresses(interface_name) def test_ha_port_status_update(self): router_info = 
self.generate_router_info(enable_ha=True) router_info[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_DOWN) router1 = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router1.ha_state == 'backup') router1.router[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_ACTIVE) self.agent._process_updated_router(router1.router) common_utils.wait_until_true(lambda: router1.ha_state == 'primary') def test_ha_router_namespace_has_ip_nonlocal_bind_disabled(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) try: ip_nonlocal_bind_value = ip_lib.get_ip_nonlocal_bind( router.router_namespace.name) except RuntimeError as rte: stat_message = 'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind' if stat_message in str(rte): raise self.skipException( "This kernel doesn't support %s in network namespaces." % ( ip_lib.IP_NONLOCAL_BIND)) raise self.assertEqual(0, ip_nonlocal_bind_value) @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled") def test_ha_router_namespace_has_ipv6_forwarding_disabled(self): router_info = self.generate_router_info(enable_ha=True) router_info[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_DOWN) router = self.manage_router(self.agent, router_info) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) common_utils.wait_until_true(lambda: router.ha_state == 'backup') self._wait_until_ipv6_forwarding_has_state( router.ns_name, external_device_name, 0) router.router[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_ACTIVE) self.agent._process_updated_router(router.router) common_utils.wait_until_true(lambda: router.ha_state == 'primary') self._wait_until_ipv6_forwarding_has_state( router.ns_name, external_device_name, 1) @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled") def 
test_ha_router_without_gw_ipv6_forwarding_state(self): router_info = self.generate_router_info( enable_ha=True, enable_gw=False) router_info[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_DOWN) router = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router.ha_state == 'backup') self._wait_until_ipv6_forwarding_has_state(router.ns_name, 'all', 0) router.router[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_ACTIVE) self.agent._process_updated_router(router.router) common_utils.wait_until_true(lambda: router.ha_state == 'primary') self._wait_until_ipv6_forwarding_has_state(router.ns_name, 'all', 1) class L3HATestFailover(framework.L3AgentTestFramework): def setUp(self): super(L3HATestFailover, self).setUp() conf = self._configure_agent('agent2') self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport( 'agent2', conf) br_int_1 = self._get_agent_ovs_integration_bridge(self.agent) br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent) veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports veth1.link.set_up() veth2.link.set_up() br_int_1.add_port(veth1.name) br_int_2.add_port(veth2.name) @staticmethod def fail_gw_router_port(router): # NOTE(slaweq): in HA failover tests there are two integration bridges # connected with veth pair to each other. 
To stop traffic from router's # namespace to gw ip (19.4.4.1) it needs to be blocked by openflow rule # as simple setting ovs_integration_bridge device DOWN will not be # enough because same IP address is also configured on # ovs_integration_bridge device from second router and it will still # respond to ping r_br = ovs_lib.OVSBridge(router.driver.conf.OVS.integration_bridge) external_port = router.get_ex_gw_port() for subnet in external_port['subnets']: r_br.add_flow( proto='ip', nw_dst=subnet['gateway_ip'], actions='drop') @staticmethod def restore_gw_router_port(router): r_br = ovs_lib.OVSBridge(router.driver.conf.OVS.integration_bridge) external_port = router.get_ex_gw_port() for subnet in external_port['subnets']: r_br.delete_flows(proto='ip', nw_dst=subnet['gateway_ip']) def test_ha_router_failover(self): router1, router2 = self.create_ha_routers() primary_router, backup_router = self._get_primary_and_backup_routers( router1, router2) self._assert_ipv6_accept_ra(primary_router, True) self._assert_ipv6_forwarding(primary_router, True, True) self._assert_ipv6_accept_ra(backup_router, False) self._assert_ipv6_forwarding(backup_router, False, False) self.fail_ha_router(router1) # NOTE: passing backup_router as first argument, because we expect # that this router should be the primary new_primary, new_backup = self._get_primary_and_backup_routers( backup_router, primary_router) self.assertEqual(primary_router, new_backup) self.assertEqual(backup_router, new_primary) self._assert_ipv6_accept_ra(new_primary, True) self._assert_ipv6_forwarding(new_primary, True, True) self._assert_ipv6_accept_ra(new_backup, False) # after transition from primary -> backup, 'all' IPv6 forwarding should # be enabled self._assert_ipv6_forwarding(new_backup, False, True) def test_ha_router_lost_gw_connection(self): self.agent.conf.set_override( 'ha_vrrp_health_check_interval', 5) self.failover_agent.conf.set_override( 'ha_vrrp_health_check_interval', 5) router1, router2 = 
self.create_ha_routers() primary_router, backup_router = self._get_primary_and_backup_routers( router1, router2) self.fail_gw_router_port(primary_router) # NOTE: passing backup_router as first argument, because we expect # that this router should be the primary new_primary, new_backup = self._get_primary_and_backup_routers( backup_router, primary_router) self.assertEqual(primary_router, new_backup) self.assertEqual(backup_router, new_primary) def test_both_ha_router_lost_gw_connection(self): self.agent.conf.set_override( 'ha_vrrp_health_check_interval', 5) self.failover_agent.conf.set_override( 'ha_vrrp_health_check_interval', 5) router1, router2 = self.create_ha_routers() primary_router, backup_router = self._get_primary_and_backup_routers( router1, router2) self.fail_gw_router_port(primary_router) self.fail_gw_router_port(backup_router) common_utils.wait_until_true( lambda: primary_router.ha_state == 'primary') common_utils.wait_until_true( lambda: backup_router.ha_state == 'primary') self.restore_gw_router_port(primary_router) new_primary, new_backup = self._get_primary_and_backup_routers( primary_router, backup_router) self.assertEqual(primary_router, new_primary) self.assertEqual(backup_router, new_backup) class LinuxBridgeL3HATestCase(L3HATestCase): INTERFACE_DRIVER = 'neutron.agent.linux.interface.BridgeInterfaceDriver'
apache-2.0
terinjokes/stgit
t/test.py
5
5836
# Run the test suite in parallel.
#
# Each test script runs as a subprocess with its own scratch directory
# under trash/; worker threads pull test names off a shared queue while
# cleaner threads delete leftover trash from previous runs in the
# background.
#
# NOTE(review): this is Python 2 code (print statements, `next` iterator
# method, integer division in main()).

import glob
import itertools as it
import math
import optparse
import os
import os.path
import random
import shutil
import subprocess
import sys
import threading
import time
import traceback

# Number of jobs to run in parallel.
def default_num_jobs():
    try:
        # One job per processor should be about right.
        import multiprocessing
        return 2 * multiprocessing.cpu_count()
    except ImportError:
        # Couldn't determine number of processors (probably because
        # Python version is < 2.6); use a conservative fallback.
        return 4

class TestQueue(object):
    # Thread-safe work queue: iterating over it yields test names, and a
    # parallel stream of "cleaning jobs" (stale trash entries) is handed
    # out via cleaning_jobs().  All shared state is guarded by self.lock.
    def __init__(self, tests, cleanup):
        def cleanup_jobs(top_dirs):
            # Every entry inside each stale trash directory is one
            # independent deletion job.
            for td in top_dirs:
                for e in os.listdir(td):
                    yield os.path.join(td, e)
        # Popped from the end, so reverse-sorting makes tests run in
        # ascending name order.
        self.__remaining = sorted(tests, reverse=True)
        self.__running = set()
        self.__success = set()
        self.__fail = set()
        self.__clean_jobs = set(cleanup)
        self.__clean_todo = set(cleanup_jobs(self.__clean_jobs))
        self.__clean_running = set()
        self.__clean_done = set()
        self.lock = threading.Lock()
        self.__cv = threading.Condition(self.lock)
    def __iter__(self):
        return self
    # True if all jobs have completed.
    def __done(self):
        # Called with self.lock held.
        return (not self.__remaining and not self.__running
                and not len(self.__clean_todo) and not self.__clean_running)
    # Make progress report, and check if we're all done.
    def __report(self):
        # Called with self.lock held.
        cd = len(self.__clean_done) + 1e-3 # clever way to avoid div by zero
        cr = len(self.__clean_running)
        ct = len(self.__clean_todo)
        sys.stdout.write(("\rQueue: %3d, Running: %3d, OK: %3d,"
                          " Failed: %3d, Cleanup: %3d%%")
                         % (len(self.__remaining), len(self.__running),
                            len(self.__success), len(self.__fail),
                            math.floor(100.0 * cd / (cd + cr + ct))))
        sys.stdout.flush()
        if self.__done():
            sys.stdout.write("\n")
            # Wake up anyone blocked in wait().
            self.__cv.notifyAll()
    # Yield free jobs until none are left.
    def next(self):
        # Python 2 iterator protocol (next, not __next__).
        with self.lock:
            if not self.__remaining:
                raise StopIteration
            t = self.__remaining.pop()
            self.__running.add(t)
            self.__report()
            return t
    # Report that a job has completed.
    def finished(self, t, success):
        with self.lock:
            self.__running.remove(t)
            (self.__success if success else self.__fail).add(t)
            self.__report()
    # Yield free cleaning jobs until none are left.
    def cleaning_jobs(self):
        while True:
            with self.lock:
                if not self.__clean_todo:
                    return
                c = self.__clean_todo.pop()
                self.__clean_running.add(c)
            # Yield outside the lock: deleting the entry may take a while.
            yield c
    # Report that a cleaning job has completed.
    def deleted(self, c):
        with self.lock:
            self.__clean_running.remove(c)
            self.__clean_done.add(c)
            self.__report()
    # Wait for all jobs to complete.
    def wait(self):
        with self.lock:
            while not self.__done():
                self.__cv.wait()
        # All cleaning jobs are done, so the (now emptied) top-level
        # trash directories can be removed.
        for c in self.__clean_jobs:
            os.rmdir(c)
        return set(self.__fail)

def start_worker(q):
    # Spawn a thread that runs queued test scripts one at a time.
    def w():
        for t in q:
            try:
                ok = False # assume the worst until proven otherwise
                s = os.path.join("trash", t)
                e = dict(os.environ)
                e["SCRATCHDIR"] = s
                p = subprocess.Popen([os.path.join(os.getcwd(), t), "-v"],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT, env=e)
                (out, err) = p.communicate()
                assert err is None # stderr was redirected into stdout
                with open(os.path.join(s, "output"), "w") as f:
                    f.write(out)
                    f.write("\nExited with code %d\n" % p.returncode)
                if p.returncode == 0:
                    ok = True
            except:
                # Log the traceback. Use the mutex so that we
                # won't write multiple tracebacks to stderr at the
                # same time.
                with q.lock:
                    traceback.print_exc()
            finally:
                q.finished(t, ok)
    threading.Thread(target=w).start()

def start_cleaner(q):
    # Spawn a thread that deletes stale trash entries in the background.
    def w():
        for c in q.cleaning_jobs():
            try:
                (shutil.rmtree if os.path.isdir(c) else os.remove)(c)
            finally:
                q.deleted(c)
    threading.Thread(target=w).start()

def main():
    p = optparse.OptionParser()
    p.add_option("-j", "--jobs", type="int",
                 help="number of tests to run in parallel")
    (opts, tests) = p.parse_args()
    if not tests:
        tests = glob.glob("t[0-9][0-9][0-9][0-9]-*.sh")
    if opts.jobs is None:
        opts.jobs = default_num_jobs()
    print "Running %d tests in parallel" % opts.jobs
    # Renaming the old trash directory lets the new run start right away
    # while cleaner threads delete the old contents concurrently.
    if os.path.exists("trash"):
        os.rename("trash", "trash-being-deleted-%016x" % random.getrandbits(64))
    os.mkdir("trash")
    q = TestQueue(tests, glob.glob("trash-being-deleted-*"))
    w = min(opts.jobs, len(tests))
    for i in range(w):
        start_worker(q)
    # Roughly one cleaner per four workers, but at least one.
    for i in range(max(w / 4, 1)):
        start_cleaner(q)
    failed = q.wait()
    if failed:
        print "Failed:"
        for t in sorted(failed):
            print "  ", t
    print "Done"

if __name__ == "__main__":
    main()
gpl-2.0
taichatha/youtube-dl
youtube_dl/extractor/rtlnl.py
102
5637
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
)


class RtlNlIE(InfoExtractor):
    """Extractor for rtl.nl / rtlxl.nl videos (s4m JSON API)."""
    IE_NAME = 'rtl.nl'
    IE_DESC = 'rtl.nl and rtlxl.nl'
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?
        (?:
            rtlxl\.nl/\#!/[^/]+/|
            rtl\.nl/system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html\b.+?\buuid=
        )
        (?P<id>[0-9a-f-]+)'''

    _TESTS = [{
        'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/6e4203a6-0a5e-3596-8424-c599a59e0677',
        'md5': 'cc16baa36a6c169391f0764fa6b16654',
        'info_dict': {
            'id': '6e4203a6-0a5e-3596-8424-c599a59e0677',
            'ext': 'mp4',
            'title': 'RTL Nieuws - Laat',
            'description': 'md5:6b61f66510c8889923b11f2778c72dc5',
            'timestamp': 1408051800,
            'upload_date': '20140814',
            'duration': 576.880,
        },
    }, {
        'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',
        'md5': 'dea7474214af1271d91ef332fb8be7ea',
        'info_dict': {
            'id': '84ae5571-ac25-4225-ae0c-ef8d9efb2aed',
            'ext': 'mp4',
            'timestamp': 1424039400,
            'title': 'RTL Nieuws - Nieuwe beelden Kopenhagen: chaos direct na aanslag',
            'thumbnail': 're:^https?://screenshots\.rtl\.nl/system/thumb/sz=[0-9]+x[0-9]+/uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed$',
            'upload_date': '20150215',
            'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.',
        }
    }, {
        # empty synopsis and missing episodes (see https://github.com/rg3/youtube-dl/issues/6275)
        'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a/autoplay=false',
        'info_dict': {
            'id': 'f536aac0-1dc3-4314-920e-3bd1c5b3811a',
            'ext': 'mp4',
            'title': 'RTL Nieuws - Meer beelden van overval juwelier',
            'thumbnail': 're:^https?://screenshots\.rtl\.nl/system/thumb/sz=[0-9]+x[0-9]+/uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a$',
            'timestamp': 1437233400,
            'upload_date': '20150718',
            'duration': 30.474,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # encrypted m3u8 streams, georestricted
        'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7',
        'only_matching': True,
    }, {
        'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        uuid = self._match_id(url)
        # s4m is RTL's metadata service; the response carries both the
        # stream locations ('meta') and editorial data ('material'/'abstracts').
        info = self._download_json(
            'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=adaptive/' % uuid,
            uuid)

        material = info['material'][0]
        title = info['abstracts'][0]['name']
        subtitle = material.get('title')
        if subtitle:
            title += ' - %s' % subtitle
        description = material.get('synopsis')

        meta = info.get('meta', {})

        # m3u8 streams are encrypted and may not be handled properly by older ffmpeg/avconv.
        # To workaround this previously adaptive -> flash trick was used to obtain
        # unencrypted m3u8 streams (see https://github.com/rg3/youtube-dl/issues/4118)
        # and bypass georestrictions as well.
        # Currently, unencrypted m3u8 playlists are (intentionally?) invalid and therefore
        # unusable albeit can be fixed by simple string replacement (see
        # https://github.com/rg3/youtube-dl/pull/6337)
        # Since recent ffmpeg and avconv handle encrypted streams just fine encrypted
        # streams are used now.
        videopath = material['videopath']
        m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath

        formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')

        # Progressive MP4 fallbacks derived from the adaptive path.
        video_urlpart = videopath.split('/adaptive/')[1][:-5]
        PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'

        formats.extend([
            {
                'url': PG_URL_TEMPLATE % ('a2m', video_urlpart),
                'format_id': 'pg-sd',
            },
            {
                'url': PG_URL_TEMPLATE % ('a3m', video_urlpart),
                'format_id': 'pg-hd',
                'quality': 0,
            }
        ])

        self._sort_formats(formats)

        thumbnails = []

        # BUGFIX: the second key was written as '"thumb_base_url"' (literal
        # double quotes inside the string), so meta['thumb_base_url'] could
        # never match and those thumbnails were silently dropped.
        for p in ('poster_base_url', 'thumb_base_url'):
            if not meta.get(p):
                continue

            thumbnails.append({
                'url': self._proto_relative_url(meta[p] + uuid),
                'width': int_or_none(self._search_regex(
                    r'/sz=([0-9]+)', meta[p], 'thumbnail width', fatal=False)),
                'height': int_or_none(self._search_regex(
                    r'/sz=[0-9]+x([0-9]+)',
                    meta[p], 'thumbnail height', fatal=False))
            })

        return {
            'id': uuid,
            'title': title,
            'formats': formats,
            'timestamp': material['original_date'],
            'description': description,
            'duration': parse_duration(material.get('duration')),
            'thumbnails': thumbnails,
        }
unlicense
isb-cgc/ISB-CGC-pipelines
lib/pipelines/db.py
1
4172
import os
import sqlite3


class PipelineDatabaseError(Exception):
    """Raised for any failure while reading or writing the pipelines DB."""
    def __init__(self, msg):
        super(PipelineDatabaseError, self).__init__()
        # Human-readable description of the failure.
        self.msg = msg


class DatabaseRecord(object):
    """Thin wrapper exposing a dict's keys as object attributes."""
    def __init__(self, innerDict):
        self.__dict__.update(innerDict)


class PipelineDatabase(object):
    """Small facade over the pipelines database.

    Only the sqlite backend is implemented; the database path comes from
    the PIPELINES_DB environment variable.
    """

    def __init__(self, config):
        if config.db == "mysql":
            pass  # TODO: determine best production grade relational database to use

        elif config.db == "sqlite":
            self._dbConn = sqlite3.connect(os.environ["PIPELINES_DB"])
            self._pipelinesDb = self._dbConn.cursor()

    def __del__(self):
        self._dbConn.close()

    def closeConnection(self):
        # Explicit close for callers that don't want to rely on __del__.
        self._dbConn.close()

    def _parseCriteria(self, c):
        # Translates a criteria dict into (where_clause, substitutions)
        # for a parameterized query.  Two shapes are accepted:
        #   {"operation": "AND"|"OR"|"NOT", "values": [<criteria>, ...]}
        #   {"key": <column>, "value": <literal>} or
        #   {"key": <column>, "value": {"incr": <n>}}
        if "operation" in c.keys() and "values" in c.keys():
            if c["operation"] in ["AND", "OR", "NOT"]:
                opString = " {op} ".format(op=c["operation"])
                assignments = []
                substitutions = []
                for value in c["values"]:
                    # NOTE(review): _parseCriteria returns a 2-tuple
                    # (clause, substitution), so zip(*...) pairs up the
                    # *characters* of the clause string with the value
                    # instead of unpacking clause/substitution.  This
                    # nested-criteria branch looks broken -- confirm
                    # against callers before relying on it.
                    s, v = zip(*self._parseCriteria(value))
                    assignments.append(s)
                    if v is not None:
                        substitutions.append(v)
                return opString.join(assignments), substitutions
            else:
                raise PipelineDatabaseError("{op} not a valid operation!".format(op=c["operation"]))
        elif "key" in c.keys() and "value" in c.keys():
            if type(c["value"]) is dict and "incr" in c["value"].keys():
                # {"incr": n} renders "col = col + n" inline and needs no
                # placeholder, hence the None substitution.
                return "{key} = {key} + {value}".format(key=c["key"], value=c["value"]["incr"]), None
            else:
                return "{key} = ?".format(key=c["key"]), c["value"]
        else:
            raise PipelineDatabaseError("Invalid parameters: {params}".format(params=','.join(c.keys())))

    def select(self, table, *data, **criteria):
        # Returns {"results": [{column: value, ...}, ...]} for rows
        # matching `criteria`.  With no `data` columns "*" is selected,
        # but note the result dicts below are built from the (then empty)
        # `data` tuple, so they come back empty in that case.
        if len(data) > 0:
            dataString = ', '.join(data)
        else:
            dataString = "*"

        where, subs = self._parseCriteria(criteria)

        query = "SELECT {data} from {table} WHERE {where}".format(data=dataString, table=table, where=where)

        # NOTE(review): for the simple key/value shape `subs` is a single
        # value, so tuple(subs) on a multi-character string explodes it
        # into one bind parameter per character; a 1-tuple (subs,) looks
        # like what was intended -- verify.
        records = self._pipelinesDb.execute(query, tuple(subs)).fetchall()

        results = {
            "results": []
        }

        for r in records:
            o = {}
            for i, d in enumerate(data):
                o[d] = r[i]
            results["results"].append(o)

        return results

    def insert(self, table, **record):
        # Inserts `record` (column=value kwargs) and returns the new rowid.
        cols, vals = zip(*record.items())
        valSubs = ','.join(['?' for x in range(0, len(vals))])

        try:
            self._pipelinesDb.execute("INSERT INTO {table} ({cols}) VALUES ({valSubs})".format(table=table, cols=','.join(cols), valSubs=valSubs), tuple(vals))
            self._dbConn.commit()
        except sqlite3.Error as e:
            raise PipelineDatabaseError("Couldn't create record: {reason}".format(reason=e))

        return self._pipelinesDb.lastrowid

    def update(self, table, updates, criteria):
        # Applies `updates` (dict of column -> new value) to rows matching
        # `criteria`; same tuple(subs) caveat as in select() above.
        query = "UPDATE {table} SET {values} WHERE {where}"
        updateCols, updateVals = zip(*updates.items())
        valString = ','.join(["{v} = ?".format(v=v) for v in updateCols])
        where, subs = self._parseCriteria(criteria)

        try:
            self._pipelinesDb.execute(query.format(table=table, values=valString, where=where), updateVals + tuple(subs))
        except sqlite3.Error as e:
            raise PipelineDatabaseError("Couldn't update table: {reason}".format(reason=e))
        else:
            self._dbConn.commit()

    def increment(self, table, column, incr, criteria):
        # Adds `incr` to `column` for rows matching `criteria`; the
        # increment is interpolated into the SQL, not bound.
        query = "UPDATE {table} SET {column} = {column} + {incr} WHERE {where}"
        where, subs = self._parseCriteria(criteria)

        try:
            self._pipelinesDb.execute(query.format(table=table, column=column, incr=str(incr), where=where), tuple(subs))
        except sqlite3.Error as e:
            raise PipelineDatabaseError("Couldn't increment value: {reason}".format(reason=e))
        else:
            self._dbConn.commit()

    def create(self, name, entity, criteria):
        # Creates a table/index (`entity`) called `name` unless it already
        # exists; `criteria` maps column name -> SQL column definition.
        check = 'SELECT name FROM sqlite_master WHERE type="{entity}" AND name="{name}"'.format(entity=entity, name=name)

        if len(self._pipelinesDb.execute(check).fetchall()) == 0:
            # NOTE(review): dict.iteritems() is Python 2 only; this module
            # would need .items() under Python 3.
            columns = ', '.join(["{col} {properties}".format(col=col, properties=properties) for col, properties in criteria.iteritems()])
            create = "CREATE {entity} {name} ({columns})".format(entity=entity, name=name, columns=columns)

            try:
                self._pipelinesDb.execute(create)
            except sqlite3.Error as e:
                raise PipelineDatabaseError("Couldn't create jobs table: {reason}".format(reason=e))
            else:
                self._dbConn.commit()
apache-2.0
sestrella/ansible
test/integration/targets/gathering_facts/cache_plugins/none.py
159
1114
# (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.cache import BaseCacheModule DOCUMENTATION = ''' cache: none short_description: write-only cache (no cache) description: - No caching at all version_added: historical author: core team (@ansible-core) ''' class CacheModule(BaseCacheModule): def __init__(self, *args, **kwargs): self.empty = {} def get(self, key): return self.empty.get(key) def set(self, key, value): return value def keys(self): return self.empty.keys() def contains(self, key): return key in self.empty def delete(self, key): del self.emtpy[key] def flush(self): self.empty = {} def copy(self): return self.empty.copy() def __getstate__(self): return self.copy() def __setstate__(self, data): self.empty = data
gpl-3.0
leonardbinet/navitia_client
test/test_route_schedules.py
1
1235
"""Tests for the route_schedules module.""" import responses # initialize test package __init__, and does not mix up names import test as _test import navitia_client import requests class RouteSchedulesTest(_test.TestCase): def setUp(self): self.user = 'leo' self.core_url = "https://api.navitia.io/v1/" self.client = navitia_client.Client(self.user) self.coords = '2.333333;48.866667' self.datetime = '20161221T000000' def test_no_region(self): # Should fail, needs one region client = self.client with self.assertRaises(ValueError): client.route_schedules(raw="anything") def test_wrong_datafreshness(self): # Should fail, only base_schedule or realtime client = self.client client.set_region("sncf") with self.assertRaises(ValueError): client.route_schedules(raw="anything", data_freshness="lalala") def test_multiple_ressources_arguments(self): # Should fail, only one accepted out of line, raw, route etc.. client = self.client client.set_region("sncf") with self.assertRaises(ValueError): client.route_schedules(raw="anything", line="line")
mit
androidarmv6/android_external_chromium_org
native_client_sdk/src/build_tools/dsc2gyp.py
107
11228
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import StringIO import sys import os import optparse SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.join(os.path.dirname(SCRIPT_DIR), 'tools')) import getos valid_tools = ['newlib', 'glibc', getos.GetPlatform()] def Error(msg): print(msg) sys.exit(1) PREAMBLE = """\ { 'includes': ['%s/build_tools/nacl.gypi'], """ NEXE_TARGET = """\ { 'target_name': '%(NAME)s_x86_32%(EXT)s', 'product_name': '%(NAME)s_x86_32%(EXT)s', 'type': '%(GYP_TYPE)s', 'sources': %(SOURCES)s, 'libraries': %(LIBS)s, 'include_dirs': %(INCLUDES)s, 'cflags': ['-m32', '-pedantic'] + %(CFLAGS)s, 'make_valid_configurations': ['newlib-debug', 'newlib-release', 'glibc-debug', 'glibc-release'], 'ldflags': ['-m32', '-L../../lib/x86_32/<(CONFIGURATION_NAME)'], 'toolset': 'target', %(CONFIGS)s }, { 'target_name': '%(NAME)s_x86_64%(EXT)s', 'product_name': '%(NAME)s_x86_64%(EXT)s', 'type': '%(GYP_TYPE)s', 'sources': %(SOURCES)s, 'libraries': %(LIBS)s, 'include_dirs': %(INCLUDES)s, 'make_valid_configurations': ['newlib-debug', 'newlib-release', 'glibc-debug', 'glibc-release'], 'cflags': ['-m64', '-pedantic'] + %(CFLAGS)s, 'ldflags': ['-m64', '-L../../lib/x86_64/<(CONFIGURATION_NAME)'], 'toolset': 'target', %(CONFIGS)s }, """ NLIB_TARGET = """\ { 'target_name': '%(NAME)s_x86_32%(EXT)s', 'product_name': 'lib%(NAME)s%(EXT)s', 'product_dir': '../../lib/x86_32/<(CONFIGURATION_NAME)', 'type': '%(GYP_TYPE)s', 'sources': %(SOURCES)s, 'libraries': %(LIBS)s, 'include_dirs': %(INCLUDES)s, 'cflags': ['-m32', '-pedantic'] + %(CFLAGS)s, 'make_valid_configurations': ['newlib-debug', 'newlib-release', 'glibc-debug', 'glibc-release'], 'ldflags': ['-m32'], 'toolset': 'target', %(CONFIGS)s }, { 'target_name': '%(NAME)s_x86_64%(EXT)s', 'product_name': 'lib%(NAME)s%(EXT)s', 'product_dir': 
'../../lib/x86_64/<(CONFIGURATION_NAME)', 'type': '%(GYP_TYPE)s', 'sources': %(SOURCES)s, 'libraries': %(LIBS)s, 'include_dirs': %(INCLUDES)s, 'make_valid_configurations': ['newlib-debug', 'newlib-release', 'glibc-debug', 'glibc-release'], 'cflags': ['-m64', '-pedantic'] + %(CFLAGS)s, 'ldflags': ['-m64'], 'toolset': 'target', %(CONFIGS)s }, """ HOST_LIB_TARGET = """\ { 'target_name': '%(NAME)s%(EXT)s', 'type': '%(GYP_TYPE)s', 'toolset': 'host', 'sources': %(SOURCES)s, 'cflags': %(CFLAGS)s, 'cflags_c': ['-std=gnu99'], 'include_dirs': %(INCLUDES)s, 'make_valid_configurations': ['host-debug', 'host-release'], 'product_dir': '../../lib/%(ARCH)s/<(CONFIGURATION_NAME)', 'product_name': '%(NAME)s%(EXT)s', %(CONFIGS)s }, """ HOST_EXE_TARGET = """\ { 'target_name': '%(NAME)s%(EXT)s', 'type': '%(GYP_TYPE)s', 'toolset': 'host', 'sources': %(SOURCES)s, 'cflags': %(CFLAGS)s, 'cflags_c': ['-std=gnu99'], 'ldflags': ['-L../../lib/%(ARCH)s/<(CONFIGURATION_NAME)'], 'libraries': %(LIBS)s, 'include_dirs': %(INCLUDES)s, 'make_valid_configurations': ['host-debug', 'host-release'], 'msvs_settings': { 'VCLinkerTool': { 'AdditionalLibraryDirectories': ['../../lib/%(ARCH)s/<(CONFIGURATION_NAME)'], } }, %(CONFIGS)s }, """ NMF_TARGET = """\ { 'target_name': '%(NAME)s_%(TOOLCHAIN)s.nmf', 'product_name': '%(NAME)s.nmf', 'product_dir': '<(PRODUCT_DIR)/%(TOOLCHAIN)s', 'type': 'none', 'make_valid_configurations': ['%(TOOLCHAIN)s-debug', '%(TOOLCHAIN)s-release'], 'actions': [ { 'action_name': 'nmf', 'inputs': ['<(PRODUCT_DIR)/%(NAME)s_x86_32.nexe', '<(PRODUCT_DIR)/%(NAME)s_x86_64.nexe'] + %(SODEPS)s, 'outputs': ['<(PRODUCT_DIR)/%(NAME)s.nmf'], 'action': ['../../tools/create_nmf.py', '-t', '%(TOOLCHAIN)s', '-s', '<(PRODUCT_DIR)'] + %(NMFACTION)s, }, ] }, """ TOOLCHAIN_CONFIG = """\ '%(toolchain)s-release' : { 'cflags' : ['-O2'], }, '%(toolchain)s-debug' : { 'cflags' : ['-g', '-O0'], }, """ NEXE_CONFIG = """\ '%(toolchain)s-release' : { 'cflags' : ['--%(toolchain)s', '-O2', '-idirafter', 
'../../include'], 'ldflags' : ['--%(toolchain)s'], 'arflags' : ['--%(toolchain)s'], }, '%(toolchain)s-debug' : { 'cflags' : ['--%(toolchain)s', '-g', '-O0', '-idirafter', '../../include'], 'ldflags' : ['--%(toolchain)s'], 'arflags' : ['--%(toolchain)s'], }, """ WIN32_CONFIGS = """\ 'target_defaults': { 'default_configuration': 'Debug_PPAPI', 'configurations': { 'Debug_PPAPI': { 'msvs_configuration_platform': 'PPAPI', 'msbuild_configuration_attributes': { 'ConfigurationType': 'DynamicLibrary' }, 'include_dirs': ['../../include/win'], 'defines': ['_WINDOWS', '_DEBUG', 'WIN32'], }, 'Release_PPAPI': { 'msvs_configuration_platform': 'PPAPI', 'msbuild_configuration_attributes': { 'ConfigurationType': 'DynamicLibrary' }, 'include_dirs': ['../../include/win'], 'defines': ['_WINDOWS', 'NDEBUG', 'WIN32'], }, 'Debug_NaCl': { 'msvs_configuration_platform': 'NaCl', 'msbuild_configuration_attributes': { 'ConfigurationType': 'Application' }, }, 'Release_NaCl': { 'msvs_configuration_platform': 'NaCl', 'msbuild_configuration_attributes': { 'ConfigurationType': 'Application' }, }, }, }, """ def WriteNaClTargets(output, target, tools): configs = "'configurations' : {\n" for tc in tools: if tc not in valid_tools: continue if tc in ['newlib', 'glibc']: configs += NEXE_CONFIG % {'toolchain': tc} configs += " }" target['CONFIGS'] = configs if target['TYPE'] == 'lib': output.write(NLIB_TARGET % target) else: output.write(NEXE_TARGET % target) def ConfigName(toolchain): if toolchain == getos.GetPlatform(): return 'host' else: return toolchain def ProcessDSC(filename, outfile=None): if not os.path.exists(filename): Error("file not found: %s" % filename) desc = open(filename).read() desc = eval(desc, {}, {}) if not desc.get('TARGETS'): Error("no TARGETS found in dsc") if not outfile: outfile = desc['NAME'] + '.gyp' outfile = os.path.join(os.path.dirname(filename), outfile) output = StringIO.StringIO() srcdir = os.path.dirname(SCRIPT_DIR) output.write(PREAMBLE % srcdir.replace("\\", '/')) 
win32 = sys.platform in ('win32', 'cygwin') if win32: output.write(WIN32_CONFIGS) else: for tc in desc['TOOLS']: if tc in valid_tools: default = '%s-debug' % ConfigName(tc) break output.write("""\ 'target_defaults': { 'default_configuration': '%s', 'configurations' : {\n""" % default) for tc in desc['TOOLS']: if tc not in valid_tools: continue output.write(TOOLCHAIN_CONFIG % {'toolchain': ConfigName(tc)}) output.write(" }\n },\n") output.write("\n 'targets': [\n") # make a list of all the so target names so that the nmf rules # can depend on them all sofiles = [] soremap = [] for target in desc['TARGETS']: if target['TYPE'] == 'so': name = target['NAME'] sofiles.append('<(PRODUCT_DIR)/%s_x86_64.so' % name) sofiles.append('<(PRODUCT_DIR)/%s_x86_32.so' % name) soremap += ['-n', '%s_x86_64.so,%s.so' % (name, name)] soremap += ['-n', '%s_x86_32.so,%s.so' % (name, name)] # iterate through dsc targets generating gyp targets for target in desc['TARGETS']: target.setdefault('INCLUDES', []) target['INCLUDES'] = [x.replace("$(NACL_SDK_ROOT)", "../..") for x in target['INCLUDES']] libs = target.get('LIBS', []) if win32: libs = [l for l in libs if l not in ('ppapi', 'ppapi_cpp')] target['LIBS'] = ['-l' + l + '.lib' for l in libs] else: target['LIBS'] = ['-l' + l for l in libs] if target['TYPE'] == 'so': if win32: target['EXT'] = '' else: target['EXT'] = '.so' target['GYP_TYPE'] = 'shared_library' elif target['TYPE'] == 'lib': if win32: target['EXT'] = '' else: target['EXT'] = '.a' target['GYP_TYPE'] = 'static_library' elif target['TYPE'] == 'main': target['EXT'] = '.nexe' target['GYP_TYPE'] = 'executable' else: Error("unknown type: %s" % target['TYPE']) target['CFLAGS'] = target.get('CXXFLAGS', []) if not win32 and ('newlib' in desc['TOOLS'] or 'glibc' in desc['TOOLS']): WriteNaClTargets(output, target, desc['TOOLS']) if target['TYPE'] == 'main': target['SODEPS'] = sofiles target['NMFACTION'] = ['-o', '<@(_outputs)', '-L<(NMF_PATH1)', '-L<(NMF_PATH2)', '-D', '<(OBJDUMP)', 
'<@(_inputs)'] target['NMFACTION'] += soremap if 'newlib' in desc['TOOLS']: target['TOOLCHAIN'] = 'newlib' output.write(NMF_TARGET % target) if 'glibc' in desc['TOOLS']: target['TOOLCHAIN'] = 'glibc' output.write(NMF_TARGET % target) if win32 or getos.GetPlatform() in desc['TOOLS']: target['ARCH'] = 'x86_32' target['INCLUDES'].append('../../include') if win32: target['HOST'] = 'win' target['CONFIGS'] = '' target['CFLAGS'] = [] else: target['CONFIGS'] = '' target['HOST'] = 'linux' target['CFLAGS'].append('-fPIC') if target['TYPE'] == 'main': target['GYP_TYPE'] = 'shared_library' if win32: target['EXT'] = '' else: target['EXT'] = '.so' output.write(HOST_EXE_TARGET % target) else: output.write(HOST_LIB_TARGET % target) output.write(' ],\n}\n') print('Writing: ' + outfile) open(outfile, 'w').write(output.getvalue()) def main(args): parser = optparse.OptionParser() parser.add_option('-o', help='Set output filename.', dest='output') options, args = parser.parse_args(args) if not args: Error('No .dsc file specified.') if options.output: outdir = os.path.dirname(options.output) if not os.path.exists(outdir): os.makedirs(outdir) assert len(args) == 1 ProcessDSC(args[0], options.output) if __name__ == '__main__': main(sys.argv[1:])
bsd-3-clause
orbitfp7/nova
nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py
52
1913
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Index, MetaData, Table from nova.i18n import _LI from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) def _get_deleted_expire_index(table): members = sorted(['deleted', 'expire']) for idx in table.indexes: if sorted(idx.columns.keys()) == members: return idx def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine reservations = Table('reservations', meta, autoload=True) if _get_deleted_expire_index(reservations): LOG.info(_LI('Skipped adding reservations_deleted_expire_idx ' 'because an equivalent index already exists.')) return # Based on expire_reservations query # from: nova/db/sqlalchemy/api.py index = Index('reservations_deleted_expire_idx', reservations.c.deleted, reservations.c.expire) index.create(migrate_engine) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine reservations = Table('reservations', meta, autoload=True) index = _get_deleted_expire_index(reservations) if index: index.drop(migrate_engine) else: LOG.info(_LI('Skipped removing reservations_deleted_expire_idx ' 'because index does not exist.'))
apache-2.0
hgl888/blink-crosswalk-efl
Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
628
31933
# Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """This file provides classes and helper functions for parsing/building frames of the WebSocket protocol (RFC 6455). 
Specification: http://tools.ietf.org/html/rfc6455 """ from collections import deque import logging import os import struct import time from mod_pywebsocket import common from mod_pywebsocket import util from mod_pywebsocket._stream_base import BadOperationException from mod_pywebsocket._stream_base import ConnectionTerminatedException from mod_pywebsocket._stream_base import InvalidFrameException from mod_pywebsocket._stream_base import InvalidUTF8Exception from mod_pywebsocket._stream_base import StreamBase from mod_pywebsocket._stream_base import UnsupportedFrameException _NOOP_MASKER = util.NoopMasker() class Frame(object): def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0, opcode=None, payload=''): self.fin = fin self.rsv1 = rsv1 self.rsv2 = rsv2 self.rsv3 = rsv3 self.opcode = opcode self.payload = payload # Helper functions made public to be used for writing unittests for WebSocket # clients. def create_length_header(length, mask): """Creates a length header. Args: length: Frame length. Must be less than 2^63. mask: Mask bit. Must be boolean. Raises: ValueError: when bad data is given. """ if mask: mask_bit = 1 << 7 else: mask_bit = 0 if length < 0: raise ValueError('length must be non negative integer') elif length <= 125: return chr(mask_bit | length) elif length < (1 << 16): return chr(mask_bit | 126) + struct.pack('!H', length) elif length < (1 << 63): return chr(mask_bit | 127) + struct.pack('!Q', length) else: raise ValueError('Payload is too big for one frame') def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask): """Creates a frame header. Raises: Exception: when bad data is given. 
""" if opcode < 0 or 0xf < opcode: raise ValueError('Opcode out of range') if payload_length < 0 or (1 << 63) <= payload_length: raise ValueError('payload_length out of range') if (fin | rsv1 | rsv2 | rsv3) & ~1: raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1') header = '' first_byte = ((fin << 7) | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4) | opcode) header += chr(first_byte) header += create_length_header(payload_length, mask) return header def _build_frame(header, body, mask): if not mask: return header + body masking_nonce = os.urandom(4) masker = util.RepeatedXorMasker(masking_nonce) return header + masking_nonce + masker.mask(body) def _filter_and_format_frame_object(frame, mask, frame_filters): for frame_filter in frame_filters: frame_filter.filter(frame) header = create_header( frame.opcode, len(frame.payload), frame.fin, frame.rsv1, frame.rsv2, frame.rsv3, mask) return _build_frame(header, frame.payload, mask) def create_binary_frame( message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]): """Creates a simple binary frame with no extension, reserved bit.""" frame = Frame(fin=fin, opcode=opcode, payload=message) return _filter_and_format_frame_object(frame, mask, frame_filters) def create_text_frame( message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]): """Creates a simple text frame with no extension, reserved bit.""" encoded_message = message.encode('utf-8') return create_binary_frame(encoded_message, opcode, fin, mask, frame_filters) def parse_frame(receive_bytes, logger=None, ws_version=common.VERSION_HYBI_LATEST, unmask_receive=True): """Parses a frame. Returns a tuple containing each header field and payload. Args: receive_bytes: a function that reads frame data from a stream or something similar. The function takes length of the bytes to be read. The function must raise ConnectionTerminatedException if there is not enough data to be read. logger: a logging object. 
ws_version: the version of WebSocket protocol. unmask_receive: unmask received frames. When received unmasked frame, raises InvalidFrameException. Raises: ConnectionTerminatedException: when receive_bytes raises it. InvalidFrameException: when the frame contains invalid data. """ if not logger: logger = logging.getLogger() logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame') received = receive_bytes(2) first_byte = ord(received[0]) fin = (first_byte >> 7) & 1 rsv1 = (first_byte >> 6) & 1 rsv2 = (first_byte >> 5) & 1 rsv3 = (first_byte >> 4) & 1 opcode = first_byte & 0xf second_byte = ord(received[1]) mask = (second_byte >> 7) & 1 payload_length = second_byte & 0x7f logger.log(common.LOGLEVEL_FINE, 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, ' 'Mask=%s, Payload_length=%s', fin, rsv1, rsv2, rsv3, opcode, mask, payload_length) if (mask == 1) != unmask_receive: raise InvalidFrameException( 'Mask bit on the received frame did\'nt match masking ' 'configuration for received frames') # The HyBi and later specs disallow putting a value in 0x0-0xFFFF # into the 8-octet extended payload length field (or 0x0-0xFD in # 2-octet field). 
valid_length_encoding = True length_encoding_bytes = 1 if payload_length == 127: logger.log(common.LOGLEVEL_FINE, 'Receive 8-octet extended payload length') extended_payload_length = receive_bytes(8) payload_length = struct.unpack( '!Q', extended_payload_length)[0] if payload_length > 0x7FFFFFFFFFFFFFFF: raise InvalidFrameException( 'Extended payload length >= 2^63') if ws_version >= 13 and payload_length < 0x10000: valid_length_encoding = False length_encoding_bytes = 8 logger.log(common.LOGLEVEL_FINE, 'Decoded_payload_length=%s', payload_length) elif payload_length == 126: logger.log(common.LOGLEVEL_FINE, 'Receive 2-octet extended payload length') extended_payload_length = receive_bytes(2) payload_length = struct.unpack( '!H', extended_payload_length)[0] if ws_version >= 13 and payload_length < 126: valid_length_encoding = False length_encoding_bytes = 2 logger.log(common.LOGLEVEL_FINE, 'Decoded_payload_length=%s', payload_length) if not valid_length_encoding: logger.warning( 'Payload length is not encoded using the minimal number of ' 'bytes (%d is encoded using %d bytes)', payload_length, length_encoding_bytes) if mask == 1: logger.log(common.LOGLEVEL_FINE, 'Receive mask') masking_nonce = receive_bytes(4) masker = util.RepeatedXorMasker(masking_nonce) logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce) else: masker = _NOOP_MASKER logger.log(common.LOGLEVEL_FINE, 'Receive payload data') if logger.isEnabledFor(common.LOGLEVEL_FINE): receive_start = time.time() raw_payload_bytes = receive_bytes(payload_length) if logger.isEnabledFor(common.LOGLEVEL_FINE): logger.log( common.LOGLEVEL_FINE, 'Done receiving payload data at %s MB/s', payload_length / (time.time() - receive_start) / 1000 / 1000) logger.log(common.LOGLEVEL_FINE, 'Unmask payload data') if logger.isEnabledFor(common.LOGLEVEL_FINE): unmask_start = time.time() unmasked_bytes = masker.mask(raw_payload_bytes) if logger.isEnabledFor(common.LOGLEVEL_FINE): logger.log( common.LOGLEVEL_FINE, 'Done 
unmasking payload data at %s MB/s', payload_length / (time.time() - unmask_start) / 1000 / 1000) return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 class FragmentedFrameBuilder(object): """A stateful class to send a message as fragments.""" def __init__(self, mask, frame_filters=[], encode_utf8=True): """Constructs an instance.""" self._mask = mask self._frame_filters = frame_filters # This is for skipping UTF-8 encoding when building text type frames # from compressed data. self._encode_utf8 = encode_utf8 self._started = False # Hold opcode of the first frame in messages to verify types of other # frames in the message are all the same. self._opcode = common.OPCODE_TEXT def build(self, payload_data, end, binary): if binary: frame_type = common.OPCODE_BINARY else: frame_type = common.OPCODE_TEXT if self._started: if self._opcode != frame_type: raise ValueError('Message types are different in frames for ' 'the same message') opcode = common.OPCODE_CONTINUATION else: opcode = frame_type self._opcode = frame_type if end: self._started = False fin = 1 else: self._started = True fin = 0 if binary or not self._encode_utf8: return create_binary_frame( payload_data, opcode, fin, self._mask, self._frame_filters) else: return create_text_frame( payload_data, opcode, fin, self._mask, self._frame_filters) def _create_control_frame(opcode, body, mask, frame_filters): frame = Frame(opcode=opcode, payload=body) for frame_filter in frame_filters: frame_filter.filter(frame) if len(frame.payload) > 125: raise BadOperationException( 'Payload data size of control frames must be 125 bytes or less') header = create_header( frame.opcode, len(frame.payload), frame.fin, frame.rsv1, frame.rsv2, frame.rsv3, mask) return _build_frame(header, frame.payload, mask) def create_ping_frame(body, mask=False, frame_filters=[]): return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters) def create_pong_frame(body, mask=False, frame_filters=[]): return 
_create_control_frame(common.OPCODE_PONG, body, mask, frame_filters) def create_close_frame(body, mask=False, frame_filters=[]): return _create_control_frame( common.OPCODE_CLOSE, body, mask, frame_filters) def create_closing_handshake_body(code, reason): body = '' if code is not None: if (code > common.STATUS_USER_PRIVATE_MAX or code < common.STATUS_NORMAL_CLOSURE): raise BadOperationException('Status code is out of range') if (code == common.STATUS_NO_STATUS_RECEIVED or code == common.STATUS_ABNORMAL_CLOSURE or code == common.STATUS_TLS_HANDSHAKE): raise BadOperationException('Status code is reserved pseudo ' 'code') encoded_reason = reason.encode('utf-8') body = struct.pack('!H', code) + encoded_reason return body class StreamOptions(object): """Holds option values to configure Stream objects.""" def __init__(self): """Constructs StreamOptions.""" # Filters applied to frames. self.outgoing_frame_filters = [] self.incoming_frame_filters = [] # Filters applied to messages. Control frames are not affected by them. self.outgoing_message_filters = [] self.incoming_message_filters = [] self.encode_text_message_to_utf8 = True self.mask_send = False self.unmask_receive = True class Stream(StreamBase): """A class for parsing/building frames of the WebSocket protocol (RFC 6455). """ def __init__(self, request, options): """Constructs an instance. Args: request: mod_python request. """ StreamBase.__init__(self, request) self._logger = util.get_class_logger(self) self._options = options self._request.client_terminated = False self._request.server_terminated = False # Holds body of received fragments. self._received_fragments = [] # Holds the opcode of the first fragment. 
self._original_opcode = None self._writer = FragmentedFrameBuilder( self._options.mask_send, self._options.outgoing_frame_filters, self._options.encode_text_message_to_utf8) self._ping_queue = deque() def _receive_frame(self): """Receives a frame and return data in the frame as a tuple containing each header field and payload separately. Raises: ConnectionTerminatedException: when read returns empty string. InvalidFrameException: when the frame contains invalid data. """ def _receive_bytes(length): return self.receive_bytes(length) return parse_frame(receive_bytes=_receive_bytes, logger=self._logger, ws_version=self._request.ws_version, unmask_receive=self._options.unmask_receive) def _receive_frame_as_frame_object(self): opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame() return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3, opcode=opcode, payload=unmasked_bytes) def receive_filtered_frame(self): """Receives a frame and applies frame filters and message filters. The frame to be received must satisfy following conditions: - The frame is not fragmented. - The opcode of the frame is TEXT or BINARY. DO NOT USE this method except for testing purpose. """ frame = self._receive_frame_as_frame_object() if not frame.fin: raise InvalidFrameException( 'Segmented frames must not be received via ' 'receive_filtered_frame()') if (frame.opcode != common.OPCODE_TEXT and frame.opcode != common.OPCODE_BINARY): raise InvalidFrameException( 'Control frames must not be received via ' 'receive_filtered_frame()') for frame_filter in self._options.incoming_frame_filters: frame_filter.filter(frame) for message_filter in self._options.incoming_message_filters: frame.payload = message_filter.filter(frame.payload) return frame def send_message(self, message, end=True, binary=False): """Send message. Args: message: text in unicode or binary in str to send. binary: send message as binary frame. 
Raises: BadOperationException: when called on a server-terminated connection or called with inconsistent message type or binary parameter. """ if self._request.server_terminated: raise BadOperationException( 'Requested send_message after sending out a closing handshake') if binary and isinstance(message, unicode): raise BadOperationException( 'Message for binary frame must be instance of str') for message_filter in self._options.outgoing_message_filters: message = message_filter.filter(message, end, binary) try: # Set this to any positive integer to limit maximum size of data in # payload data of each frame. MAX_PAYLOAD_DATA_SIZE = -1 if MAX_PAYLOAD_DATA_SIZE <= 0: self._write(self._writer.build(message, end, binary)) return bytes_written = 0 while True: end_for_this_frame = end bytes_to_write = len(message) - bytes_written if (MAX_PAYLOAD_DATA_SIZE > 0 and bytes_to_write > MAX_PAYLOAD_DATA_SIZE): end_for_this_frame = False bytes_to_write = MAX_PAYLOAD_DATA_SIZE frame = self._writer.build( message[bytes_written:bytes_written + bytes_to_write], end_for_this_frame, binary) self._write(frame) bytes_written += bytes_to_write # This if must be placed here (the end of while block) so that # at least one frame is sent. if len(message) <= bytes_written: break except ValueError, e: raise BadOperationException(e) def _get_message_from_frame(self, frame): """Gets a message from frame. If the message is composed of fragmented frames and the frame is not the last fragmented frame, this method returns None. The whole message will be returned when the last fragmented frame is passed to this method. Raises: InvalidFrameException: when the frame doesn't match defragmentation context, or the frame contains invalid data. 
""" if frame.opcode == common.OPCODE_CONTINUATION: if not self._received_fragments: if frame.fin: raise InvalidFrameException( 'Received a termination frame but fragmentation ' 'not started') else: raise InvalidFrameException( 'Received an intermediate frame but ' 'fragmentation not started') if frame.fin: # End of fragmentation frame self._received_fragments.append(frame.payload) message = ''.join(self._received_fragments) self._received_fragments = [] return message else: # Intermediate frame self._received_fragments.append(frame.payload) return None else: if self._received_fragments: if frame.fin: raise InvalidFrameException( 'Received an unfragmented frame without ' 'terminating existing fragmentation') else: raise InvalidFrameException( 'New fragmentation started without terminating ' 'existing fragmentation') if frame.fin: # Unfragmented frame self._original_opcode = frame.opcode return frame.payload else: # Start of fragmentation frame if common.is_control_opcode(frame.opcode): raise InvalidFrameException( 'Control frames must not be fragmented') self._original_opcode = frame.opcode self._received_fragments.append(frame.payload) return None def _process_close_message(self, message): """Processes close message. Args: message: close message. Raises: InvalidFrameException: when the message is invalid. """ self._request.client_terminated = True # Status code is optional. We can have status reason only if we # have status code. Status reason can be empty string. 
So, # allowed cases are # - no application data: no code no reason # - 2 octet of application data: has code but no reason # - 3 or more octet of application data: both code and reason if len(message) == 0: self._logger.debug('Received close frame (empty body)') self._request.ws_close_code = ( common.STATUS_NO_STATUS_RECEIVED) elif len(message) == 1: raise InvalidFrameException( 'If a close frame has status code, the length of ' 'status code must be 2 octet') elif len(message) >= 2: self._request.ws_close_code = struct.unpack( '!H', message[0:2])[0] self._request.ws_close_reason = message[2:].decode( 'utf-8', 'replace') self._logger.debug( 'Received close frame (code=%d, reason=%r)', self._request.ws_close_code, self._request.ws_close_reason) # As we've received a close frame, no more data is coming over the # socket. We can now safely close the socket without worrying about # RST sending. if self._request.server_terminated: self._logger.debug( 'Received ack for server-initiated closing handshake') return self._logger.debug( 'Received client-initiated closing handshake') code = common.STATUS_NORMAL_CLOSURE reason = '' if hasattr(self._request, '_dispatcher'): dispatcher = self._request._dispatcher code, reason = dispatcher.passive_closing_handshake( self._request) if code is None and reason is not None and len(reason) > 0: self._logger.warning( 'Handler specified reason despite code being None') reason = '' if reason is None: reason = '' self._send_closing_handshake(code, reason) self._logger.debug( 'Acknowledged closing handshake initiated by the peer ' '(code=%r, reason=%r)', code, reason) def _process_ping_message(self, message): """Processes ping message. Args: message: ping message. """ try: handler = self._request.on_ping_handler if handler: handler(self._request, message) return except AttributeError, e: pass self._send_pong(message) def _process_pong_message(self, message): """Processes pong message. Args: message: pong message. 
""" # TODO(tyoshino): Add ping timeout handling. inflight_pings = deque() while True: try: expected_body = self._ping_queue.popleft() if expected_body == message: # inflight_pings contains pings ignored by the # other peer. Just forget them. self._logger.debug( 'Ping %r is acked (%d pings were ignored)', expected_body, len(inflight_pings)) break else: inflight_pings.append(expected_body) except IndexError, e: # The received pong was unsolicited pong. Keep the # ping queue as is. self._ping_queue = inflight_pings self._logger.debug('Received a unsolicited pong') break try: handler = self._request.on_pong_handler if handler: handler(self._request, message) except AttributeError, e: pass def receive_message(self): """Receive a WebSocket frame and return its payload as a text in unicode or a binary in str. Returns: payload data of the frame - as unicode instance if received text frame - as str instance if received binary frame or None iff received closing handshake. Raises: BadOperationException: when called on a client-terminated connection. ConnectionTerminatedException: when read returns empty string. InvalidFrameException: when the frame contains invalid data. UnsupportedFrameException: when the received frame has flags, opcode we cannot handle. You can ignore this exception and continue receiving the next frame. """ if self._request.client_terminated: raise BadOperationException( 'Requested receive_message after receiving a closing ' 'handshake') while True: # mp_conn.read will block if no bytes are available. # Timeout is controlled by TimeOut directive of Apache. frame = self._receive_frame_as_frame_object() # Check the constraint on the payload size for control frames # before extension processes the frame. 
# See also http://tools.ietf.org/html/rfc6455#section-5.5 if (common.is_control_opcode(frame.opcode) and len(frame.payload) > 125): raise InvalidFrameException( 'Payload data size of control frames must be 125 bytes or ' 'less') for frame_filter in self._options.incoming_frame_filters: frame_filter.filter(frame) if frame.rsv1 or frame.rsv2 or frame.rsv3: raise UnsupportedFrameException( 'Unsupported flag is set (rsv = %d%d%d)' % (frame.rsv1, frame.rsv2, frame.rsv3)) message = self._get_message_from_frame(frame) if message is None: continue for message_filter in self._options.incoming_message_filters: message = message_filter.filter(message) if self._original_opcode == common.OPCODE_TEXT: # The WebSocket protocol section 4.4 specifies that invalid # characters must be replaced with U+fffd REPLACEMENT # CHARACTER. try: return message.decode('utf-8') except UnicodeDecodeError, e: raise InvalidUTF8Exception(e) elif self._original_opcode == common.OPCODE_BINARY: return message elif self._original_opcode == common.OPCODE_CLOSE: self._process_close_message(message) return None elif self._original_opcode == common.OPCODE_PING: self._process_ping_message(message) elif self._original_opcode == common.OPCODE_PONG: self._process_pong_message(message) else: raise UnsupportedFrameException( 'Opcode %d is not supported' % self._original_opcode) def _send_closing_handshake(self, code, reason): body = create_closing_handshake_body(code, reason) frame = create_close_frame( body, mask=self._options.mask_send, frame_filters=self._options.outgoing_frame_filters) self._request.server_terminated = True self._write(frame) def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='', wait_response=True): """Closes a WebSocket connection. Args: code: Status code for close frame. If code is None, a close frame with empty body will be sent. reason: string representing close reason. wait_response: True when caller want to wait the response. 
Raises: BadOperationException: when reason is specified with code None or reason is not an instance of both str and unicode. """ if self._request.server_terminated: self._logger.debug( 'Requested close_connection but server is already terminated') return if code is None: if reason is not None and len(reason) > 0: raise BadOperationException( 'close reason must not be specified if code is None') reason = '' else: if not isinstance(reason, str) and not isinstance(reason, unicode): raise BadOperationException( 'close reason must be an instance of str or unicode') self._send_closing_handshake(code, reason) self._logger.debug( 'Initiated closing handshake (code=%r, reason=%r)', code, reason) if (code == common.STATUS_GOING_AWAY or code == common.STATUS_PROTOCOL_ERROR) or not wait_response: # It doesn't make sense to wait for a close frame if the reason is # protocol error or that the server is going away. For some of # other reasons, it might not make sense to wait for a close frame, # but it's not clear, yet. return # TODO(ukai): 2. wait until the /client terminated/ flag has been set, # or until a server-defined timeout expires. # # For now, we expect receiving closing handshake right after sending # out closing handshake. message = self.receive_message() if message is not None: raise ConnectionTerminatedException( 'Didn\'t receive valid ack for closing handshake') # TODO: 3. close the WebSocket connection. # note: mod_python Connection (mp_conn) doesn't have close method. def send_ping(self, body=''): frame = create_ping_frame( body, self._options.mask_send, self._options.outgoing_frame_filters) self._write(frame) self._ping_queue.append(body) def _send_pong(self, body): frame = create_pong_frame( body, self._options.mask_send, self._options.outgoing_frame_filters) self._write(frame) def get_last_received_opcode(self): """Returns the opcode of the WebSocket message which the last received frame belongs to. 
The return value is valid iff immediately after receive_message call. """ return self._original_opcode # vi:sts=4 sw=4 et
bsd-3-clause
ethantang95/DIGITS
digits/dataset/images/views.py
5
2135
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import import os.path # Find the best implementation available try: from cStringIO import StringIO except ImportError: from StringIO import StringIO import flask import PIL.Image import digits from digits import utils blueprint = flask.Blueprint(__name__, __name__) @blueprint.route('/resize-example', methods=['POST']) def resize_example(): """ Resizes the example image, and returns it as a string of png data """ try: example_image_path = os.path.join(os.path.dirname(digits.__file__), 'static', 'images', 'mona_lisa.jpg') image = utils.image.load_image(example_image_path) width = int(flask.request.form['width']) height = int(flask.request.form['height']) channels = int(flask.request.form['channels']) resize_mode = flask.request.form['resize_mode'] backend = flask.request.form['backend'] encoding = flask.request.form['encoding'] image = utils.image.resize_image(image, height, width, channels=channels, resize_mode=resize_mode, ) if backend != 'lmdb' or encoding == 'none': length = len(image.tostring()) else: s = StringIO() if encoding == 'png': PIL.Image.fromarray(image).save(s, format='PNG') elif encoding == 'jpg': PIL.Image.fromarray(image).save(s, format='JPEG', quality=90) else: raise ValueError('unrecognized encoding "%s"' % encoding) s.seek(0) image = PIL.Image.open(s) length = len(s.getvalue()) data = utils.image.embed_image_html(image) return '<img src=\"' + data + '\" style=\"width:%spx;height=%spx\" />\n<br>\n<i>Image size: %s</i>' % ( width, height, utils.sizeof_fmt(length) ) except Exception as e: return '%s: %s' % (type(e).__name__, e)
bsd-3-clause
garrettr/onionshare
onionshare_gui/webapp.py
1
2966
from flask import Flask, render_template import threading, json, os, time, platform, sys onionshare = None onionshare_port = None filename = None onion_host = None qtapp = None clipboard = None stay_open = None url = None app = Flask(__name__, template_folder='./templates') def debug_mode(): import logging global app if platform.system() == 'Windows': temp_dir = os.environ['Temp'].replace('\\', '/') else: temp_dir = '/tmp/' log_handler = logging.FileHandler('{0}/onionshare.web.log'.format(temp_dir)) log_handler.setLevel(logging.WARNING) app.logger.addHandler(log_handler) @app.route("/") def index(): return render_template('index.html') @app.route("/init_info") def init_info(): global onionshare, filename, stay_open basename = os.path.basename(filename) return json.dumps({ 'strings': onionshare.strings, 'basename': basename, 'stay_open': stay_open }) @app.route("/start_onionshare") def start_onionshare(): global onionshare, onionshare_port, filename, onion_host, url url = 'http://{0}/{1}'.format(onion_host, onionshare.slug) filehash, filesize = onionshare.file_crunching(filename) onionshare.set_file_info(filename, filehash, filesize) # start onionshare service in new thread t = threading.Thread(target=onionshare.app.run, kwargs={'port': onionshare_port}) t.daemon = True t.start() return json.dumps({ 'filehash': filehash, 'filesize': filesize, 'url': url }) @app.route("/copy_url") def copy_url(): if platform.system() == 'Windows': # Qt's QClipboard isn't working in Windows # https://github.com/micahflee/onionshare/issues/46 import ctypes GMEM_DDESHARE = 0x2000 ctypes.windll.user32.OpenClipboard(None) ctypes.windll.user32.EmptyClipboard() hcd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(url))+1) pch_data = ctypes.windll.kernel32.GlobalLock(hcd) ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pch_data), bytes(url)) ctypes.windll.kernel32.GlobalUnlock(hcd) ctypes.windll.user32.SetClipboardData(1, hcd) ctypes.windll.user32.CloseClipboard() else: global 
clipboard clipboard.setText(url) return '' @app.route("/stay_open_true") def stay_open_true(): global onionshare onionshare.set_stay_open(True) @app.route("/stay_open_false") def stay_open_false(): global onionshare onionshare.set_stay_open(False) @app.route("/heartbeat") def check_for_requests(): global onionshare events = [] done = False while not done: try: r = onionshare.q.get(False) events.append(r) except onionshare.Queue.Empty: done = True return json.dumps(events) @app.route("/close") def close(): global qtapp time.sleep(1) qtapp.closeAllWindows() return ''
gpl-3.0
moelius/async-task-processor
examples/periodic_processor.py
1
2018
import time from async_task_processor import ATP from async_task_processor.processors import PeriodicProcessor from examples import logger # first test function def test_func_one(sleep_time, word): """ :type sleep_time: int :type word: str :return: """ logger.info('start working') time.sleep(sleep_time) logger.info('Job is done. Word is: %s' % word) # second test function def test_func_second(sleep_time, word): """ :type sleep_time: int :type word: str :return: """ logger.info('start working') time.sleep(sleep_time) logger.info('Job is done. Word is: %s' % word) # third function with exception def test_func_bad(self, sleep_time, word): """ :type self: async_task_processor.Task :type sleep_time: int :type word: str :return: """ logger.info('start working') try: a = 1 / 0 except ZeroDivisionError: # optionally you can overload max_retries and retry_countdown here self.retry() time.sleep(sleep_time) logger.info('Job is done. Word is: %s' % word) atp = ATP(asyncio_debug=True) task_processor = PeriodicProcessor(atp=atp) # Add function to task processor task_processor.add_task(test_func_one, args=[5, 'first hello world'], max_workers=5, timeout=1, max_retries=5, retry_countdown=1) # Add one more function to task processor task_processor.add_task(test_func_second, args=[3, 'second hello world'], max_workers=5, timeout=1, max_retries=5, retry_countdown=1) # Add one more bad function with exception. This function will raise exception and will retry it, # then when retries exceeded, workers of this func will stop one by one with exception MaxRetriesExceeded # bind option make Task as self argument task_processor.add_task(test_func_bad, args=[3, 'second hello world'], bind=True, max_workers=2, timeout=1, max_retries=3, retry_countdown=3) # Start async-task-processor atp.start()
mit
2014cdag4/2014cdag4_1
wsgi/static/Brython2.1.0-20140419-113919/Lib/linecache.py
785
3864
"""Cache lines from files. This is intended to read lines from modules imported -- hence if a filename is not found, it will look down the module search path for a file by that name. """ import sys import os import tokenize __all__ = ["getline", "clearcache", "checkcache"] def getline(filename, lineno, module_globals=None): lines = getlines(filename, module_globals) if 1 <= lineno <= len(lines): return lines[lineno-1] else: return '' # The cache cache = {} # The cache def clearcache(): """Clear the cache entirely.""" global cache cache = {} def getlines(filename, module_globals=None): """Get the lines for a file from the cache. Update the cache if it doesn't contain an entry for this file already.""" if filename in cache: return cache[filename][2] else: return updatecache(filename, module_globals) def checkcache(filename=None): """Discard cache entries that are out of date. (This is not checked upon each call!)""" if filename is None: filenames = list(cache.keys()) else: if filename in cache: filenames = [filename] else: return for filename in filenames: size, mtime, lines, fullname = cache[filename] if mtime is None: continue # no-op for files loaded via a __loader__ try: stat = os.stat(fullname) except os.error: del cache[filename] continue if size != stat.st_size or mtime != stat.st_mtime: del cache[filename] def updatecache(filename, module_globals=None): """Update a cache entry and return its list of lines. 
If something's wrong, print a message, discard the cache entry, and return an empty list.""" if filename in cache: del cache[filename] if not filename or (filename.startswith('<') and filename.endswith('>')): return [] fullname = filename try: stat = os.stat(fullname) except OSError: basename = filename # Try for a __loader__, if available if module_globals and '__loader__' in module_globals: name = module_globals.get('__name__') loader = module_globals['__loader__'] get_source = getattr(loader, 'get_source', None) if name and get_source: try: data = get_source(name) except (ImportError, IOError): pass else: if data is None: # No luck, the PEP302 loader cannot find the source # for this module. return [] cache[filename] = ( len(data), None, [line+'\n' for line in data.splitlines()], fullname ) return cache[filename][2] # Try looking through the module search path, which is only useful # when handling a relative filename. if os.path.isabs(filename): return [] for dirname in sys.path: try: fullname = os.path.join(dirname, basename) except (TypeError, AttributeError): # Not sufficiently string-like to do anything useful with. continue try: stat = os.stat(fullname) break except os.error: pass else: return [] try: with tokenize.open(fullname) as fp: lines = fp.readlines() except IOError: return [] if lines and not lines[-1].endswith('\n'): lines[-1] += '\n' size, mtime = stat.st_size, stat.st_mtime cache[filename] = size, mtime, lines, fullname return lines
gpl-2.0
alexbruy/QGIS
python/ext-libs/pygments/lexer.py
265
26921
# -*- coding: utf-8 -*- """ pygments.lexer ~~~~~~~~~~~~~~ Base lexer classes. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re, itertools from pygments.filter import apply_filters, Filter from pygments.filters import get_filter_by_name from pygments.token import Error, Text, Other, _TokenType from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ make_analysator __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this'] _encoding_map = [('\xef\xbb\xbf', 'utf-8'), ('\xff\xfe\0\0', 'utf-32'), ('\0\0\xfe\xff', 'utf-32be'), ('\xff\xfe', 'utf-16'), ('\xfe\xff', 'utf-16be')] _default_analyse = staticmethod(lambda x: 0.0) class LexerMeta(type): """ This metaclass automagically converts ``analyse_text`` methods into static methods which always return float values. """ def __new__(cls, name, bases, d): if 'analyse_text' in d: d['analyse_text'] = make_analysator(d['analyse_text']) return type.__new__(cls, name, bases, d) class Lexer(object): """ Lexer for a specific language. Basic options recognized: ``stripnl`` Strip leading and trailing newlines from the input (default: True). ``stripall`` Strip all leading and trailing whitespace from the input (default: False). ``ensurenl`` Make sure that the input ends with a newline (default: True). This is required for some lexers that consume input linewise. *New in Pygments 1.3.* ``tabsize`` If given and greater than 0, expand tabs in the input (default: 0). ``encoding`` If given, must be an encoding name. This encoding will be used to convert the input string to Unicode, if it is not already a Unicode string (default: ``'latin1'``). Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or ``'chardet'`` to use the chardet library, if it is installed. 
""" #: Name of the lexer name = None #: Shortcuts for the lexer aliases = [] #: File name globs filenames = [] #: Secondary file name globs alias_filenames = [] #: MIME types mimetypes = [] #: Priority, should multiple lexers match and no content is provided priority = 0 __metaclass__ = LexerMeta def __init__(self, **options): self.options = options self.stripnl = get_bool_opt(options, 'stripnl', True) self.stripall = get_bool_opt(options, 'stripall', False) self.ensurenl = get_bool_opt(options, 'ensurenl', True) self.tabsize = get_int_opt(options, 'tabsize', 0) self.encoding = options.get('encoding', 'latin1') # self.encoding = options.get('inencoding', None) or self.encoding self.filters = [] for filter_ in get_list_opt(options, 'filters', ()): self.add_filter(filter_) def __repr__(self): if self.options: return '<pygments.lexers.%s with %r>' % (self.__class__.__name__, self.options) else: return '<pygments.lexers.%s>' % self.__class__.__name__ def add_filter(self, filter_, **options): """ Add a new stream filter to this lexer. """ if not isinstance(filter_, Filter): filter_ = get_filter_by_name(filter_, **options) self.filters.append(filter_) def analyse_text(text): """ Has to return a float between ``0`` and ``1`` that indicates if a lexer wants to highlight this text. Used by ``guess_lexer``. If this method returns ``0`` it won't highlight it in any case, if it returns ``1`` highlighting with this lexer is guaranteed. The `LexerMeta` metaclass automatically wraps this function so that it works like a static method (no ``self`` or ``cls`` parameter) and the return value is automatically converted to `float`. If the return value is an object that is boolean `False` it's the same as if the return values was ``0.0``. """ def get_tokens(self, text, unfiltered=False): """ Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. 
Also preprocess the text, i.e. expand tabs and strip it if wanted and applies registered filters. """ if not isinstance(text, unicode): if self.encoding == 'guess': try: text = text.decode('utf-8') if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] except UnicodeDecodeError: text = text.decode('latin1') elif self.encoding == 'chardet': try: import chardet except ImportError: raise ImportError('To enable chardet encoding guessing, ' 'please install the chardet library ' 'from http://chardet.feedparser.org/') # check for BOM first decoded = None for bom, encoding in _encoding_map: if text.startswith(bom): decoded = unicode(text[len(bom):], encoding, errors='replace') break # no BOM found, so use chardet if decoded is None: enc = chardet.detect(text[:1024]) # Guess using first 1KB decoded = unicode(text, enc.get('encoding') or 'utf-8', errors='replace') text = decoded else: text = text.decode(self.encoding) else: if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] # text now *is* a unicode string text = text.replace('\r\n', '\n') text = text.replace('\r', '\n') if self.stripall: text = text.strip() elif self.stripnl: text = text.strip('\n') if self.tabsize > 0: text = text.expandtabs(self.tabsize) if self.ensurenl and not text.endswith('\n'): text += '\n' def streamer(): for i, t, v in self.get_tokens_unprocessed(text): yield t, v stream = streamer() if not unfiltered: stream = apply_filters(stream, self.filters, self) return stream def get_tokens_unprocessed(self, text): """ Return an iterable of (tokentype, value) pairs. In subclasses, implement this method as a generator to maximize effectiveness. """ raise NotImplementedError class DelegatingLexer(Lexer): """ This lexer takes two lexer as arguments. A root lexer and a language lexer. First everything is scanned using the language lexer, afterwards all ``Other`` tokens are lexed using the root lexer. The lexers from the ``template`` lexer package use this base lexer. 
""" def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): self.root_lexer = _root_lexer(**options) self.language_lexer = _language_lexer(**options) self.needle = _needle Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): buffered = '' insertions = [] lng_buffer = [] for i, t, v in self.language_lexer.get_tokens_unprocessed(text): if t is self.needle: if lng_buffer: insertions.append((len(buffered), lng_buffer)) lng_buffer = [] buffered += v else: lng_buffer.append((i, t, v)) if lng_buffer: insertions.append((len(buffered), lng_buffer)) return do_insertions(insertions, self.root_lexer.get_tokens_unprocessed(buffered)) #------------------------------------------------------------------------------- # RegexLexer and ExtendedRegexLexer # class include(str): """ Indicates that a state should include rules from another state. """ pass class _inherit(object): """ Indicates the a state should inherit from its superclass. """ def __repr__(self): return 'inherit' inherit = _inherit() class combined(tuple): """ Indicates a state combined from multiple states. """ def __new__(cls, *args): return tuple.__new__(cls, args) def __init__(self, *args): # tuple.__init__ doesn't do anything pass class _PseudoMatch(object): """ A pseudo match object constructed from a string. """ def __init__(self, start, text): self._text = text self._start = start def start(self, arg=None): return self._start def end(self, arg=None): return self._start + len(self._text) def group(self, arg=None): if arg: raise IndexError('No such group') return self._text def groups(self): return (self._text,) def groupdict(self): return {} def bygroups(*args): """ Callback that yields multiple actions for each group in the match. 
""" def callback(lexer, match, ctx=None): for i, action in enumerate(args): if action is None: continue elif type(action) is _TokenType: data = match.group(i + 1) if data: yield match.start(i + 1), action, data else: data = match.group(i + 1) if data is not None: if ctx: ctx.pos = match.start(i + 1) for item in action(lexer, _PseudoMatch(match.start(i + 1), data), ctx): if item: yield item if ctx: ctx.pos = match.end() return callback class _This(object): """ Special singleton used for indicating the caller class. Used by ``using``. """ this = _This() def using(_other, **kwargs): """ Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. """ gt_kwargs = {} if 'state' in kwargs: s = kwargs.pop('state') if isinstance(s, (list, tuple)): gt_kwargs['stack'] = s else: gt_kwargs['stack'] = ('root', s) if _other is this: def callback(lexer, match, ctx=None): # if keyword arguments are given the callback # function has to create a new lexer instance if kwargs: # XXX: cache that somehow kwargs.update(lexer.options) lx = lexer.__class__(**kwargs) else: lx = lexer s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() else: def callback(lexer, match, ctx=None): # XXX: cache that somehow kwargs.update(lexer.options) lx = _other(**kwargs) s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() return callback class RegexLexerMeta(LexerMeta): """ Metaclass for RegexLexer, creates the self._tokens attribute from self.tokens on the first instantiation. 
""" def _process_regex(cls, regex, rflags): """Preprocess the regular expression component of a token definition.""" return re.compile(regex, rflags).match def _process_token(cls, token): """Preprocess the token component of a token definition.""" assert type(token) is _TokenType or callable(token), \ 'token type must be simple type or callable, not %r' % (token,) return token def _process_new_state(cls, new_state, unprocessed, processed): """Preprocess the state transition action of a token definition.""" if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state def _process_state(cls, unprocessed, processed, state): """Preprocess a single state definition.""" assert type(state) is str, "wrong state name %r" % state assert state[0] != '#', "invalid state name %r" % state if state in processed: return processed[state] tokens = processed[state] = [] rflags = cls.flags for tdef in unprocessed[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state tokens.extend(cls._process_state(unprocessed, processed, str(tdef))) continue if isinstance(tdef, _inherit): # processed already 
continue assert type(tdef) is tuple, "wrong rule def %r" % tdef try: rex = cls._process_regex(tdef[0], rflags) except Exception, err: raise ValueError("uncompilable regex %r in state %r of %r: %s" % (tdef[0], state, cls, err)) token = cls._process_token(tdef[1]) if len(tdef) == 2: new_state = None else: new_state = cls._process_new_state(tdef[2], unprocessed, processed) tokens.append((rex, token, new_state)) return tokens def process_tokendef(cls, name, tokendefs=None): """Preprocess a dictionary of token definitions.""" processed = cls._all_tokens[name] = {} tokendefs = tokendefs or cls.tokens[name] for state in tokendefs.keys(): cls._process_state(tokendefs, processed, state) return processed def get_tokendefs(cls): """ Merge tokens from superclasses in MRO order, returning a single tokendef dictionary. Any state that is not defined by a subclass will be inherited automatically. States that *are* defined by subclasses will, by default, override that state in the superclass. If a subclass wishes to inherit definitions from a superclass, it can use the special value "inherit", which will cause the superclass' state definition to be included at that point in the state. 
""" tokens = {} inheritable = {} for c in itertools.chain((cls,), cls.__mro__): toks = c.__dict__.get('tokens', {}) for state, items in toks.iteritems(): curitems = tokens.get(state) if curitems is None: tokens[state] = items try: inherit_ndx = items.index(inherit) except ValueError: continue inheritable[state] = inherit_ndx continue inherit_ndx = inheritable.pop(state, None) if inherit_ndx is None: continue # Replace the "inherit" value with the items curitems[inherit_ndx:inherit_ndx+1] = items try: new_inh_ndx = items.index(inherit) except ValueError: pass else: inheritable[state] = inherit_ndx + new_inh_ndx return tokens def __call__(cls, *args, **kwds): """Instantiate cls after preprocessing its token definitions.""" if '_tokens' not in cls.__dict__: cls._all_tokens = {} cls._tmpname = 0 if hasattr(cls, 'token_variants') and cls.token_variants: # don't process yet pass else: cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) return type.__call__(cls, *args, **kwds) class RegexLexer(Lexer): """ Base for simple stateful regular expression-based lexers. Simplifies the lexing process so that you need only provide a list of states and regular expressions. """ __metaclass__ = RegexLexerMeta #: Flags for compiling the regular expressions. #: Defaults to MULTILINE. flags = re.MULTILINE #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` #: #: The initial state is 'root'. #: ``new_state`` can be omitted to signify no state transition. #: If it is a string, the state is pushed on the stack and changed. #: If it is a tuple of strings, all states are pushed on the stack and #: the current state will be the topmost. #: It can also be ``combined('state1', 'state2', ...)`` #: to signify a new, anonymous state combined from the rules of two #: or more existing ones. #: Furthermore, it can be '#pop' to signify going back one step in #: the state stack, or '#push' to push the current state on the stack #: again. 
#: #: The tuple can also be replaced with ``include('state')``, in which #: case the rules from the state named by the string are included in the #: current one. tokens = {} def get_tokens_unprocessed(self, text, stack=('root',)): """ Split ``text`` into (tokentype, text) pairs. ``stack`` is the inital stack (default: ``['root']``) """ pos = 0 tokendefs = self._tokens statestack = list(stack) statetokens = tokendefs[statestack[-1]] while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, pos) if m: if type(action) is _TokenType: yield pos, action, m.group() else: for item in action(self, m): yield item pos = m.end() if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': statestack.pop() elif state == '#push': statestack.append(statestack[-1]) else: statestack.append(state) elif isinstance(new_state, int): # pop del statestack[new_state:] elif new_state == '#push': statestack.append(statestack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[statestack[-1]] break else: try: if text[pos] == '\n': # at EOL, reset state to "root" statestack = ['root'] statetokens = tokendefs['root'] yield pos, Text, u'\n' pos += 1 continue yield pos, Error, text[pos] pos += 1 except IndexError: break class LexerContext(object): """ A helper object that holds lexer position data. """ def __init__(self, text, pos, stack=None, end=None): self.text = text self.pos = pos self.end = end or len(text) # end=0 not supported ;-) self.stack = stack or ['root'] def __repr__(self): return 'LexerContext(%r, %r, %r)' % ( self.text, self.pos, self.stack) class ExtendedRegexLexer(RegexLexer): """ A RegexLexer that uses a context object to store its state. """ def get_tokens_unprocessed(self, text=None, context=None): """ Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. 
""" tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: for item in action(self, m, ctx): yield item if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': ctx.stack.pop() elif state == '#push': ctx.stack.append(statestack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # pop del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to "root" ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, u'\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break def do_insertions(insertions, tokens): """ Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream. TODO: clean up the code here. """ insertions = iter(insertions) try: index, itokens = insertions.next() except StopIteration: # no insertions for item in tokens: yield item return realpos = None insleft = True # iterate over the token stream where we want to insert # the tokens from the insertion list. for i, t, v in tokens: # first iteration. 
store the postition of first item if realpos is None: realpos = i oldi = 0 while insleft and i + len(v) >= index: tmpval = v[oldi:index - i] yield realpos, t, tmpval realpos += len(tmpval) for it_index, it_token, it_value in itokens: yield realpos, it_token, it_value realpos += len(it_value) oldi = index - i try: index, itokens = insertions.next() except StopIteration: insleft = False break # not strictly necessary yield realpos, t, v[oldi:] realpos += len(v) - oldi # leftover tokens while insleft: # no normal tokens, set realpos to zero realpos = realpos or 0 for p, t, v in itokens: yield realpos, t, v realpos += len(v) try: index, itokens = insertions.next() except StopIteration: insleft = False break # not strictly necessary
gpl-2.0
4rado/RepositoryForProject
Lib/_threading_local.py
241
7456
"""Thread-local objects. (Note that this module provides a Python version of the threading.local class. Depending on the version of Python you're using, there may be a faster one available. You should always import the `local` class from `threading`.) Thread-local objects support the management of thread-local data. If you have data that you want to be local to a thread, simply create a thread-local object and use its attributes: >>> mydata = local() >>> mydata.number = 42 >>> mydata.number 42 You can also access the local-object's dictionary: >>> mydata.__dict__ {'number': 42} >>> mydata.__dict__.setdefault('widgets', []) [] >>> mydata.widgets [] What's important about thread-local objects is that their data are local to a thread. If we access the data in a different thread: >>> log = [] >>> def f(): ... items = mydata.__dict__.items() ... items.sort() ... log.append(items) ... mydata.number = 11 ... log.append(mydata.number) >>> import threading >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[], 11] we get different data. Furthermore, changes made in the other thread don't affect data seen in this thread: >>> mydata.number 42 Of course, values you get from a local object, including a __dict__ attribute, are for whatever thread was current at the time the attribute was read. For that reason, you generally don't want to save these values across threads, as they apply only to the thread they came from. You can create custom local objects by subclassing the local class: >>> class MyLocal(local): ... number = 2 ... initialized = False ... def __init__(self, **kw): ... if self.initialized: ... raise SystemError('__init__ called too many times') ... self.initialized = True ... self.__dict__.update(kw) ... def squared(self): ... return self.number ** 2 This can be useful to support default values, methods and initialization. 
Note that if you define an __init__ method, it will be called each time the local object is used in a separate thread. This is necessary to initialize each thread's dictionary. Now if we create a local object: >>> mydata = MyLocal(color='red') Now we have a default number: >>> mydata.number 2 an initial color: >>> mydata.color 'red' >>> del mydata.color And a method that operates on the data: >>> mydata.squared() 4 As before, we can access the data in a separate thread: >>> log = [] >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[('color', 'red'), ('initialized', True)], 11] without affecting this thread's data: >>> mydata.number 2 >>> mydata.color Traceback (most recent call last): ... AttributeError: 'MyLocal' object has no attribute 'color' Note that subclasses can define slots, but they are not thread local. They are shared across threads: >>> class MyLocal(local): ... __slots__ = 'number' >>> mydata = MyLocal() >>> mydata.number = 42 >>> mydata.color = 'red' So, the separate thread: >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() affects what we see: >>> mydata.number 11 >>> del mydata """ __all__ = ["local"] # We need to use objects from the threading module, but the threading # module may also want to use our `local` class, if support for locals # isn't compiled in to the `thread` module. This creates potential problems # with circular imports. For that reason, we don't import `threading` # until the bottom of this file (a hack sufficient to worm around the # potential problems). Note that almost all platforms do have support for # locals in the `thread` module, and there is no circular import problem # then, so problems introduced by fiddling the order of imports here won't # manifest on most boxes. class _localbase(object): __slots__ = '_local__key', '_local__args', '_local__lock' def __new__(cls, *args, **kw): self = object.__new__(cls) key = '_local__key', 'thread.local.' 
+ str(id(self)) object.__setattr__(self, '_local__key', key) object.__setattr__(self, '_local__args', (args, kw)) object.__setattr__(self, '_local__lock', RLock()) if (args or kw) and (cls.__init__ is object.__init__): raise TypeError("Initialization arguments are not supported") # We need to create the thread dict in anticipation of # __init__ being called, to make sure we don't call it # again ourselves. dict = object.__getattribute__(self, '__dict__') current_thread().__dict__[key] = dict return self def _patch(self): key = object.__getattribute__(self, '_local__key') d = current_thread().__dict__.get(key) if d is None: d = {} current_thread().__dict__[key] = d object.__setattr__(self, '__dict__', d) # we have a new instance dict, so call out __init__ if we have # one cls = type(self) if cls.__init__ is not object.__init__: args, kw = object.__getattribute__(self, '_local__args') cls.__init__(self, *args, **kw) else: object.__setattr__(self, '__dict__', d) class local(_localbase): def __getattribute__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__getattribute__(self, name) finally: lock.release() def __setattr__(self, name, value): if name == '__dict__': raise AttributeError( "%r object attribute '__dict__' is read-only" % self.__class__.__name__) lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__setattr__(self, name, value) finally: lock.release() def __delattr__(self, name): if name == '__dict__': raise AttributeError( "%r object attribute '__dict__' is read-only" % self.__class__.__name__) lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__delattr__(self, name) finally: lock.release() def __del__(self): import threading key = object.__getattribute__(self, '_local__key') try: # We use the non-locking API since we might already hold the lock # (__del__ can be called at any point by the cyclic 
GC). threads = threading._enumerate() except: # If enumerating the current threads fails, as it seems to do # during shutdown, we'll skip cleanup under the assumption # that there is nothing to clean up. return for thread in threads: try: __dict__ = thread.__dict__ except AttributeError: # Thread is dying, rest in peace. continue if key in __dict__: try: del __dict__[key] except KeyError: pass # didn't have anything in this thread from threading import current_thread, RLock
gpl-3.0
fedora-infra/anitya
anitya/tests/lib/test_plugins.py
1
4047
# -*- coding: utf-8 -*- # # Copyright © 2014 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2, or (at your option) any later # version. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY expressed or implied, including the # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. You # should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # Any Red Hat trademarks that are incorporated in the source # code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission # of Red Hat, Inc. # """ anitya tests of the plugins. 
""" import unittest from anitya.lib import plugins from anitya.lib.versions import Version from anitya.tests.base import DatabaseTestCase EXPECTED_BACKENDS = [ "BitBucket", "CPAN (perl)", "CRAN (R)", "crates.io", "Debian project", "Drupal6", "Drupal7", "Freshmeat", "GNOME", "GNU project", "GitHub", "GitLab", "Hackage", "Launchpad", "Maven Central", "PEAR", "PECL", "Packagist", "PyPI", "Rubygems", "Sourceforge", "Stackage", "custom", "folder", "npmjs", "pagure", ] EXPECTED_ECOSYSTEMS = { "rubygems": "Rubygems", "pypi": "PyPI", "npm": "npmjs", "maven": "Maven Central", "crates.io": "crates.io", } EXPECTED_VERSIONS = ["RPM", "Calendar", "Semantic"] class VersionPluginsTests(unittest.TestCase): """Tests for the version scheme plugins.""" def test_version_plugin_names(self): plugin_names = plugins.VERSION_PLUGINS.get_plugin_names() self.assertEqual(sorted(EXPECTED_VERSIONS), sorted(plugin_names)) def test_version_plugin_classes(self): version_plugins = plugins.VERSION_PLUGINS.get_plugins() for plugin in version_plugins: self.assertTrue(issubclass(plugin, Version)) class Pluginstests(DatabaseTestCase): """Plugins tests.""" def test_load_all_plugins(self): """Test the plugins.load_all_plugins function.""" all_plugins = plugins.load_all_plugins(self.session) backend_plugins = all_plugins["backends"] self.assertEqual(len(backend_plugins), len(EXPECTED_BACKENDS)) backend_names = sorted(plugin.name for plugin in backend_plugins) self.assertEqual(sorted(backend_names), sorted(EXPECTED_BACKENDS)) ecosystem_plugins = all_plugins["ecosystems"] ecosystems = dict( (plugin.name, plugin.default_backend) for plugin in ecosystem_plugins ) self.assertEqual(ecosystems, EXPECTED_ECOSYSTEMS) def test_load_plugins(self): """Test the plugins.load_plugins function.""" backend_plugins = plugins.load_plugins(self.session) self.assertEqual(len(backend_plugins), len(EXPECTED_BACKENDS)) backend_names = sorted(plugin.name for plugin in backend_plugins) self.assertEqual(sorted(backend_names), 
sorted(EXPECTED_BACKENDS)) def test_plugins_get_plugin_names(self): """Test the plugins.get_plugin_names function.""" plugin_names = plugins.get_plugin_names() self.assertEqual(len(plugin_names), len(EXPECTED_BACKENDS)) self.assertEqual(sorted(plugin_names), sorted(EXPECTED_BACKENDS)) def test_plugins_get_plugin(self): """Test the plugins.get_plugin function.""" plugin = plugins.get_plugin("PyPI") self.assertEqual(str(plugin), "<class 'anitya.lib.backends.pypi.PypiBackend'>") if __name__ == "__main__": SUITE = unittest.TestLoader().loadTestsFromTestCase(Pluginstests) unittest.TextTestRunner(verbosity=2).run(SUITE)
gpl-2.0
gganis/root
interpreter/llvm/src/utils/wciia.py
99
2944
#!/usr/bin/env python """ wciia - Whose Code Is It Anyway Determines code owner of the file/folder relative to the llvm source root. Code owner is determined from the content of the CODE_OWNERS.TXT by parsing the D: field usage: utils/wciia.py path limitations: - must be run from llvm source root - very simplistic algorithm - only handles * as a wildcard - not very user friendly - does not handle the proposed F: field """ import os code_owners = {} def process_files_and_folders(owner): filesfolders = owner['filesfolders'] # paths must be in ( ... ) so strip them lpar = filesfolders.find('(') rpar = filesfolders.rfind(')') if rpar <= lpar: # give up return paths = filesfolders[lpar+1:rpar] # split paths owner['paths'] = [] for path in paths.split(): owner['paths'].append(path) def process_code_owner(owner): if 'filesfolders' in owner: filesfolders = owner['filesfolders'] else: # print "F: field missing, using D: field" owner['filesfolders'] = owner['description'] process_files_and_folders(owner) code_owners[owner['name']] = owner # process CODE_OWNERS.TXT first code_owners_file = open("CODE_OWNERS.TXT", "r").readlines() code_owner = {} for line in code_owners_file: for word in line.split(): if word == "N:": name = line[2:].strip() if code_owner: process_code_owner(code_owner) code_owner = {} # reset the values code_owner['name'] = name if word == "E:": email = line[2:].strip() code_owner['email'] = email if word == "D:": description = line[2:].strip() code_owner['description'] = description if word == "F:": filesfolders = line[2:].strip() code_owner['filesfolders'].append(filesfolders) def find_owners(fpath): onames = [] lmatch = -1 # very simplistic way of findning the best match for name in code_owners: owner = code_owners[name] if 'paths' in owner: for path in owner['paths']: # print "searching (" + path + ")" # try exact match if fpath == path: return name # see if path ends with a * rstar = path.rfind('*') if rstar>0: # try the longest match, rpos = -1 if 
len(fpath) < len(path): rpos = path.find(fpath) if rpos == 0: onames.append(name) onames.append('Chris Lattner') return onames # now lest try to find the owner of the file or folder import sys if len(sys.argv) < 2: print "usage " + sys.argv[0] + " file_or_folder" exit(-1) # the path we are checking path = str(sys.argv[1]) # check if this is real path if not os.path.exists(path): print "path (" + path + ") does not exist" exit(-1) owners_name = find_owners(path) # be grammatically correct print "The owner(s) of the (" + path + ") is(are) : " + str(owners_name) exit(0) # bottom up walk of the current . # not yet used root = "." for dir,subdirList,fileList in os.walk( root , topdown=False ) : print "dir :" , dir for fname in fileList : print "-" , fname print
lgpl-2.1
haad/ansible
lib/ansible/modules/commands/telnet.py
24
2332
# this is a virtual module that is entirely implemented server side # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: telnet short_description: Executes a low-down and dirty telnet command version_added: 2.4 description: - Executes a low-down and dirty telnet command, not going through the module subsystem. - This is mostly to be used for enabling ssh on devices that only have telnet enabled by default. options: command: description: - List of commands to be executed in the telnet session. required: True aliases: ['commands'] host: description: - The host/target on which to execute the command required: False default: remote_addr user: description: - The user for login required: False default: remote_user password: description: - The password for login port: description: - Remote port to use default: 23 timeout: description: - timeout for remote operations default: 120 prompts: description: - List of prompts expected before sending next command required: False default: ['$'] pause: description: - Seconds to pause between each command issued required: False default: 1 notes: - The C(environment) keyword does not work with this task author: - Ansible Core Team ''' EXAMPLES = ''' - name: send configuration commands to IOS telnet: user: cisco password: cisco login_prompt: "Username: " prompts: - "[>|#]" command: - terminal length 0 - configure terminal - hostname ios01 - name: run show commands telnet: user: cisco password: cisco login_prompt: "Username: " prompts: - "[>|#]" command: - terminal length 0 - show version ''' RETURN = ''' output: description: output of each command is an element in this list type: list returned: always sample: [ 'success', 'success', '', 'warning .. 
something' ] '''
gpl-3.0
knittledan/imageResizer
PIL/windows/PIL/XVThumbImagePlugin.py
52
1845
# # The Python Imaging Library. # $Id$ # # XV Thumbnail file handler by Charles E. "Gene" Cash # (gcash@magicnet.net) # # see xvcolor.c and xvbrowse.c in the sources to John Bradley's XV, # available from ftp://ftp.cis.upenn.edu/pub/xv/ # # history: # 98-08-15 cec created (b/w only) # 98-12-09 cec added color palette # 98-12-28 fl added to PIL (with only a few very minor modifications) # # To do: # FIXME: make save work (this requires quantization support) # __version__ = "0.1" from PIL import Image, ImageFile, ImagePalette, _binary o8 = _binary.o8 # standard color palette for thumbnails (RGB332) PALETTE = b"" for r in range(8): for g in range(8): for b in range(4): PALETTE = PALETTE + (o8((r*255)//7)+o8((g*255)//7)+o8((b*255)//3)) ## # Image plugin for XV thumbnail images. class XVThumbImageFile(ImageFile.ImageFile): format = "XVThumb" format_description = "XV thumbnail image" def _open(self): # check magic s = self.fp.read(6) if s != b"P7 332": raise SyntaxError("not an XV thumbnail file") # Skip to beginning of next line self.fp.readline() # skip info comments while True: s = self.fp.readline() if not s: raise SyntaxError("Unexpected EOF reading XV thumbnail file") if s[0] != b'#': break # parse header line (already read) s = s.strip().split() self.mode = "P" self.size = int(s[0:1]), int(s[1:2]) self.palette = ImagePalette.raw("RGB", PALETTE) self.tile = [ ("raw", (0, 0)+self.size, self.fp.tell(), (self.mode, 0, 1) )] # -------------------------------------------------------------------- Image.register_open("XVThumb", XVThumbImageFile)
mit
shsingh/ansible
lib/ansible/modules/windows/win_owner.py
38
1169
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2015, Hans-Joachim Kliemeck <git@kliemeck.de> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: win_owner version_added: "2.1" short_description: Set owner description: - Set owner of files or directories. options: path: description: - Path to be used for changing owner. type: path required: yes user: description: - Name to be used for changing owner. type: str required: yes recurse: description: - Indicates if the owner should be changed recursively. type: bool default: no seealso: - module: win_acl - module: win_file - module: win_stat author: - Hans-Joachim Kliemeck (@h0nIg) ''' EXAMPLES = r''' - name: Change owner of path win_owner: path: C:\apache user: apache recurse: yes - name: Set the owner of root directory win_owner: path: C:\apache user: SYSTEM recurse: no ''' RETURN = r''' '''
gpl-3.0
Joslyn-Maddie/BunnyCore
dep/libmpq/bindings/python/mpq.py
107
10430
"""wrapper for libmpq""" # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. import ctypes import ctypes.util import os libmpq = ctypes.CDLL(ctypes.util.find_library("mpq")) class Error(Exception): pass errors = { -1: (IOError, "open"), -2: (IOError, "close"), -3: (IOError, "seek"), -4: (IOError, "read"), -5: (IOError, "write"), -6: (MemoryError,), -7: (Error, "file is not an mpq or is corrupted"), -8: (AssertionError, "not initialized"), -9: (AssertionError, "buffer size too small"), -10: (IndexError, "file not in archive"), -11: (AssertionError, "decrypt"), -12: (AssertionError, "unpack"), } def check_error(result, func, arguments, errors=errors): try: error = errors[result] except KeyError: return result else: raise error[0](*error[1:]) libmpq.libmpq__version.restype = ctypes.c_char_p libmpq.libmpq__archive_open.errcheck = check_error libmpq.libmpq__archive_close.errcheck = check_error libmpq.libmpq__archive_size_packed.errcheck = check_error libmpq.libmpq__archive_size_unpacked.errcheck = check_error libmpq.libmpq__archive_offset.errcheck = check_error libmpq.libmpq__archive_version.errcheck = check_error libmpq.libmpq__archive_files.errcheck = check_error libmpq.libmpq__file_size_packed.errcheck = check_error libmpq.libmpq__file_size_unpacked.errcheck = check_error libmpq.libmpq__file_offset.errcheck = check_error 
libmpq.libmpq__file_blocks.errcheck = check_error libmpq.libmpq__file_encrypted.errcheck = check_error libmpq.libmpq__file_compressed.errcheck = check_error libmpq.libmpq__file_imploded.errcheck = check_error libmpq.libmpq__file_number.errcheck = check_error libmpq.libmpq__file_read.errcheck = check_error libmpq.libmpq__block_open_offset.errcheck = check_error libmpq.libmpq__block_close_offset.errcheck = check_error libmpq.libmpq__block_size_unpacked.errcheck = check_error libmpq.libmpq__block_read.errcheck = check_error __version__ = libmpq.libmpq__version() class Reader(object): def __init__(self, file, libmpq=libmpq): self._file = file self._pos = 0 self._buf = [] self._cur_block = 0 libmpq.libmpq__block_open_offset(self._file._archive._mpq, self._file.number) def __iter__(self): return self def __repr__(self): return "iter(%r)" % self._file def seek(self, offset, whence=os.SEEK_SET, os=os): if whence == os.SEEK_SET: pass elif whence == os.SEEK_CUR: offset += self._pos elif whence == os.SEEK_END: offset += self._file.unpacked_size else: raise ValueError, "invalid whence" if offset >= self._pos: self.read(offset - self._pos) else: self._pos = 0 self._buf = [] self._cur_block = 0 self.read(offset) def tell(self): return self._pos def _read_block(self, ctypes=ctypes, libmpq=libmpq): block_size = ctypes.c_uint64() libmpq.libmpq__block_size_unpacked(self._file._archive._mpq, self._file.number, self._cur_block, ctypes.byref(block_size)) block_data = ctypes.create_string_buffer(block_size.value) libmpq.libmpq__block_read(self._file._archive._mpq, self._file.number, self._cur_block, block_data, ctypes.c_uint64(len(block_data)), None) self._buf.append(block_data.raw) self._cur_block += 1 def read(self, size=-1): while size < 0 or sum(map(len, self._buf)) < size: if self._cur_block == self._file.blocks: break self._read_block() buf = "".join(self._buf) if size < 0: ret = buf self._buf = [] else: ret = buf[:size] self._buf = [buf[size:]] self._pos += len(ret) return ret 
def readline(self, os=os): line = [] while True: char = self.read(1) if char == "": break if char not in '\r\n' and line and line[-1] in '\r\n': self.seek(-1, os.SEEK_CUR) break line.append(char) return ''.join(line) def next(self): line = self.readline() if not line: raise StopIteration return line def readlines(self, sizehint=-1): res = [] while sizehint < 0 or sum(map(len, res)) < sizehint: line = self.readline() if not line: break res.append(line) return res xreadlines = __iter__ def __del__(self, libmpq=libmpq): libmpq.libmpq__block_close_offset(self._file._archive._mpq, self._file.number) class File(object): def __init__(self, archive, number, ctypes=ctypes, libmpq=libmpq): self._archive = archive self.number = number for name, atype in [ ("packed_size", ctypes.c_uint64), ("unpacked_size", ctypes.c_uint64), ("offset", ctypes.c_uint64), ("blocks", ctypes.c_uint32), ("encrypted", ctypes.c_uint32), ("compressed", ctypes.c_uint32), ("imploded", ctypes.c_uint32), ]: data = atype() func = getattr(libmpq, "libmpq__file_"+name) func(self._archive._mpq, self.number, ctypes.byref(data)) setattr(self, name, data.value) def __str__(self, ctypes=ctypes, libmpq=libmpq): data = ctypes.create_string_buffer(self.unpacked_size) libmpq.libmpq__file_read(self._archive._mpq, self.number, data, ctypes.c_uint64(len(data)), None) return data.raw def __repr__(self): return "%r[%i]" % (self._archive, self.number) def __iter__(self, Reader=Reader): return Reader(self) class Archive(object): def __init__(self, source, ctypes=ctypes, File=File, libmpq=libmpq): self._source = source if isinstance(source, File): assert not source.encrypted assert not source.compressed assert not source.imploded self.filename = source._archive.filename offset = source._archive.offset + source.offset else: self.filename = source offset = -1 self._mpq = ctypes.c_void_p() libmpq.libmpq__archive_open(ctypes.byref(self._mpq), self.filename, ctypes.c_uint64(offset)) self._opened = True for field_name, field_type 
in [ ("packed_size", ctypes.c_uint64), ("unpacked_size", ctypes.c_uint64), ("offset", ctypes.c_uint64), ("version", ctypes.c_uint32), ("files", ctypes.c_uint32), ]: func = getattr(libmpq, "libmpq__archive_" + field_name) data = field_type() func(self._mpq, ctypes.byref(data)) setattr(self, field_name, data.value) def __del__(self, libmpq=libmpq): if getattr(self, "_opened", False): libmpq.libmpq__archive_close(self._mpq) def __len__(self): return self.files def __contains__(self, item, ctypes=ctypes, libmpq=libmpq): if isinstance(item, str): data = ctypes.c_uint32() try: libmpq.libmpq__file_number(self._mpq, ctypes.c_char_p(item), ctypes.byref(data)) except IndexError: return False return True return 0 <= item < self.files def __getitem__(self, item, ctypes=ctypes, File=File, libmpq=libmpq): if isinstance(item, str): data = ctypes.c_int() libmpq.libmpq__file_number(self._mpq, ctypes.c_char_p(item), ctypes.byref(data)) item = data.value else: if not 0 <= item < self.files: raise IndexError, "file not in archive" return File(self, item) def __repr__(self): return "mpq.Archive(%r)" % self._source # Remove clutter - everything except Error and Archive. 
del os, check_error, ctypes, errors, File, libmpq, Reader if __name__ == "__main__": import sys, random archive = Archive(sys.argv[1]) print repr(archive) for k, v in archive.__dict__.iteritems(): #if k[0] == '_': continue print " " * (4 - 1), k, v assert '(listfile)' in archive assert 0 in archive assert len(archive) == archive.files files = [x.strip() for x in archive['(listfile)']] files.extend(xrange(archive.files)) for key in files: #sys.argv[2:] if sys.argv[2:] else xrange(archive.files): file = archive[key] print print " " * (4 - 1), repr(file) for k, v in file.__dict__.iteritems(): #if k[0] == '_': continue print " " * (8 - 1), k, v a = str(file) b = iter(file).read() reader = iter(file) c = [] while True: l = random.randrange(1, 10) d = reader.read(l) if not d: break assert len(d) <= l c.append(d) c = "".join(c) d = [] reader.seek(0) for line in reader: d.append(line) d = "".join(d) assert a == b == c == d, map(hash, [a,b,c,d]) assert len(a) == file.unpacked_size repr(iter(file)) reader.seek(0) a = reader.readlines() reader.seek(0) b = list(reader) assert a == b
gpl-2.0
sourcelair/mongoengine
tests/document/dynamic.py
15
12110
import unittest import sys sys.path[0:0] = [""] from mongoengine import * from mongoengine.connection import get_db __all__ = ("DynamicTest", ) class DynamicTest(unittest.TestCase): def setUp(self): connect(db='mongoenginetest') self.db = get_db() class Person(DynamicDocument): name = StringField() meta = {'allow_inheritance': True} Person.drop_collection() self.Person = Person def test_simple_dynamic_document(self): """Ensures simple dynamic documents are saved correctly""" p = self.Person() p.name = "James" p.age = 34 self.assertEqual(p.to_mongo(), {"_cls": "Person", "name": "James", "age": 34}) self.assertEqual(p.to_mongo().keys(), ["_cls", "name", "age"]) p.save() self.assertEqual(p.to_mongo().keys(), ["_id", "_cls", "name", "age"]) self.assertEqual(self.Person.objects.first().age, 34) # Confirm no changes to self.Person self.assertFalse(hasattr(self.Person, 'age')) def test_change_scope_of_variable(self): """Test changing the scope of a dynamic field has no adverse effects""" p = self.Person() p.name = "Dean" p.misc = 22 p.save() p = self.Person.objects.get() p.misc = {'hello': 'world'} p.save() p = self.Person.objects.get() self.assertEqual(p.misc, {'hello': 'world'}) def test_delete_dynamic_field(self): """Test deleting a dynamic field works""" self.Person.drop_collection() p = self.Person() p.name = "Dean" p.misc = 22 p.save() p = self.Person.objects.get() p.misc = {'hello': 'world'} p.save() p = self.Person.objects.get() self.assertEqual(p.misc, {'hello': 'world'}) collection = self.db[self.Person._get_collection_name()] obj = collection.find_one() self.assertEqual(sorted(obj.keys()), ['_cls', '_id', 'misc', 'name']) del p.misc p.save() p = self.Person.objects.get() self.assertFalse(hasattr(p, 'misc')) obj = collection.find_one() self.assertEqual(sorted(obj.keys()), ['_cls', '_id', 'name']) def test_reload_after_unsetting(self): p = self.Person() p.misc = 22 p.save() p.update(unset__misc=1) p.reload() def test_reload_dynamic_field(self): 
self.Person.objects.delete() p = self.Person.objects.create() p.update(age=1) self.assertEqual(len(p._data), 3) self.assertEqual(sorted(p._data.keys()), ['_cls', 'id', 'name']) p.reload() self.assertEqual(len(p._data), 4) self.assertEqual(sorted(p._data.keys()), ['_cls', 'age', 'id', 'name']) def test_dynamic_document_queries(self): """Ensure we can query dynamic fields""" p = self.Person() p.name = "Dean" p.age = 22 p.save() self.assertEqual(1, self.Person.objects(age=22).count()) p = self.Person.objects(age=22) p = p.get() self.assertEqual(22, p.age) def test_complex_dynamic_document_queries(self): class Person(DynamicDocument): name = StringField() Person.drop_collection() p = Person(name="test") p.age = "ten" p.save() p1 = Person(name="test1") p1.age = "less then ten and a half" p1.save() p2 = Person(name="test2") p2.age = 10 p2.save() self.assertEqual(Person.objects(age__icontains='ten').count(), 2) self.assertEqual(Person.objects(age__gte=10).count(), 1) def test_complex_data_lookups(self): """Ensure you can query dynamic document dynamic fields""" p = self.Person() p.misc = {'hello': 'world'} p.save() self.assertEqual(1, self.Person.objects(misc__hello='world').count()) def test_three_level_complex_data_lookups(self): """Ensure you can query three level document dynamic fields""" p = self.Person() p.misc = {'hello': {'hello2': 'world'}} p.save() # from pprint import pprint as pp; import pdb; pdb.set_trace(); print self.Person.objects(misc__hello__hello2='world') self.assertEqual(1, self.Person.objects(misc__hello__hello2='world').count()) def test_complex_embedded_document_validation(self): """Ensure embedded dynamic documents may be validated""" class Embedded(DynamicEmbeddedDocument): content = URLField() class Doc(DynamicDocument): pass Doc.drop_collection() doc = Doc() embedded_doc_1 = Embedded(content='http://mongoengine.org') embedded_doc_1.validate() embedded_doc_2 = Embedded(content='this is not a url') self.assertRaises(ValidationError, 
embedded_doc_2.validate) doc.embedded_field_1 = embedded_doc_1 doc.embedded_field_2 = embedded_doc_2 self.assertRaises(ValidationError, doc.validate) def test_inheritance(self): """Ensure that dynamic document plays nice with inheritance""" class Employee(self.Person): salary = IntField() Employee.drop_collection() self.assertTrue('name' in Employee._fields) self.assertTrue('salary' in Employee._fields) self.assertEqual(Employee._get_collection_name(), self.Person._get_collection_name()) joe_bloggs = Employee() joe_bloggs.name = "Joe Bloggs" joe_bloggs.salary = 10 joe_bloggs.age = 20 joe_bloggs.save() self.assertEqual(1, self.Person.objects(age=20).count()) self.assertEqual(1, Employee.objects(age=20).count()) joe_bloggs = self.Person.objects.first() self.assertTrue(isinstance(joe_bloggs, Employee)) def test_embedded_dynamic_document(self): """Test dynamic embedded documents""" class Embedded(DynamicEmbeddedDocument): pass class Doc(DynamicDocument): pass Doc.drop_collection() doc = Doc() embedded_1 = Embedded() embedded_1.string_field = 'hello' embedded_1.int_field = 1 embedded_1.dict_field = {'hello': 'world'} embedded_1.list_field = ['1', 2, {'hello': 'world'}] doc.embedded_field = embedded_1 self.assertEqual(doc.to_mongo(), { "embedded_field": { "_cls": "Embedded", "string_field": "hello", "int_field": 1, "dict_field": {"hello": "world"}, "list_field": ['1', 2, {'hello': 'world'}] } }) doc.save() doc = Doc.objects.first() self.assertEqual(doc.embedded_field.__class__, Embedded) self.assertEqual(doc.embedded_field.string_field, "hello") self.assertEqual(doc.embedded_field.int_field, 1) self.assertEqual(doc.embedded_field.dict_field, {'hello': 'world'}) self.assertEqual(doc.embedded_field.list_field, ['1', 2, {'hello': 'world'}]) def test_complex_embedded_documents(self): """Test complex dynamic embedded documents setups""" class Embedded(DynamicEmbeddedDocument): pass class Doc(DynamicDocument): pass Doc.drop_collection() doc = Doc() embedded_1 = Embedded() 
embedded_1.string_field = 'hello' embedded_1.int_field = 1 embedded_1.dict_field = {'hello': 'world'} embedded_2 = Embedded() embedded_2.string_field = 'hello' embedded_2.int_field = 1 embedded_2.dict_field = {'hello': 'world'} embedded_2.list_field = ['1', 2, {'hello': 'world'}] embedded_1.list_field = ['1', 2, embedded_2] doc.embedded_field = embedded_1 self.assertEqual(doc.to_mongo(), { "embedded_field": { "_cls": "Embedded", "string_field": "hello", "int_field": 1, "dict_field": {"hello": "world"}, "list_field": ['1', 2, {"_cls": "Embedded", "string_field": "hello", "int_field": 1, "dict_field": {"hello": "world"}, "list_field": ['1', 2, {'hello': 'world'}]} ] } }) doc.save() doc = Doc.objects.first() self.assertEqual(doc.embedded_field.__class__, Embedded) self.assertEqual(doc.embedded_field.string_field, "hello") self.assertEqual(doc.embedded_field.int_field, 1) self.assertEqual(doc.embedded_field.dict_field, {'hello': 'world'}) self.assertEqual(doc.embedded_field.list_field[0], '1') self.assertEqual(doc.embedded_field.list_field[1], 2) embedded_field = doc.embedded_field.list_field[2] self.assertEqual(embedded_field.__class__, Embedded) self.assertEqual(embedded_field.string_field, "hello") self.assertEqual(embedded_field.int_field, 1) self.assertEqual(embedded_field.dict_field, {'hello': 'world'}) self.assertEqual(embedded_field.list_field, ['1', 2, {'hello': 'world'}]) def test_dynamic_and_embedded(self): """Ensure embedded documents play nicely""" class Address(EmbeddedDocument): city = StringField() class Person(DynamicDocument): name = StringField() Person.drop_collection() Person(name="Ross", address=Address(city="London")).save() person = Person.objects.first() person.address.city = "Lundenne" person.save() self.assertEqual(Person.objects.first().address.city, "Lundenne") person = Person.objects.first() person.address = Address(city="Londinium") person.save() self.assertEqual(Person.objects.first().address.city, "Londinium") person = 
Person.objects.first() person.age = 35 person.save() self.assertEqual(Person.objects.first().age, 35) def test_dynamic_embedded_works_with_only(self): """Ensure custom fieldnames on a dynamic embedded document are found by qs.only()""" class Address(DynamicEmbeddedDocument): city = StringField() class Person(DynamicDocument): address = EmbeddedDocumentField(Address) Person.drop_collection() Person(name="Eric", address=Address(city="San Francisco", street_number="1337")).save() self.assertEqual(Person.objects.first().address.street_number, '1337') self.assertEqual(Person.objects.only('address__street_number').first().address.street_number, '1337') def test_dynamic_and_embedded_dict_access(self): """Ensure embedded dynamic documents work with dict[] style access""" class Address(EmbeddedDocument): city = StringField() class Person(DynamicDocument): name = StringField() Person.drop_collection() Person(name="Ross", address=Address(city="London")).save() person = Person.objects.first() person.attrval = "This works" person["phone"] = "555-1212" # but this should too # Same thing two levels deep person["address"]["city"] = "Lundenne" person.save() self.assertEqual(Person.objects.first().address.city, "Lundenne") self.assertEqual(Person.objects.first().phone, "555-1212") person = Person.objects.first() person.address = Address(city="Londinium") person.save() self.assertEqual(Person.objects.first().address.city, "Londinium") person = Person.objects.first() person["age"] = 35 person.save() self.assertEqual(Person.objects.first().age, 35) if __name__ == '__main__': unittest.main()
mit
arnaud-morvan/QGIS
tests/src/python/test_python_repr.py
2
5735
# -*- coding: utf-8 -*- """QGIS Unit tests for core additions .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Denis Rouzaud' __date__ = '05.06.2018' __copyright__ = 'Copyright 2015, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis # NOQA from PyQt5.QtCore import QVariant from qgis.testing import unittest, start_app from qgis.core import QgsGeometry, QgsPoint, QgsPointXY, QgsCircle, QgsCircularString, QgsCompoundCurve,\ QgsCurvePolygon, QgsEllipse, QgsLineString, QgsMultiCurve, QgsRectangle, QgsExpression, QgsField, QgsError,\ QgsMimeDataUtils start_app() class TestPython__repr__(unittest.TestCase): def testQgsGeometryRepr(self): p = QgsPointXY(123.456, 987.654) g = QgsGeometry.fromPointXY(p) self.assertTrue(g.__repr__().startswith('<QgsGeometry: Point (123.456')) def testQgsPointRepr(self): p = QgsPoint(123.456, 987.654, 100) self.assertTrue(p.__repr__().startswith('<QgsPoint: PointZ (123.456')) def testQgsPointXYRepr(self): p = QgsPointXY(123.456, 987.654) self.assertTrue(p.__repr__().startswith('<QgsPointXY: POINT(123.456')) def testQgsCircleRepr(self): c = QgsCircle(QgsPoint(1, 1), 2.0) self.assertEqual(c.__repr__(), '<QgsCircle: Circle (Center: Point (1 1), Radius: 2, Azimuth: 0)>') def testQgsCircularstringRepr(self): cs = QgsCircularString(QgsPoint(1, 2), QgsPoint(2, 3), QgsPoint(3, 4)) self.assertEqual(cs.__repr__(), '<QgsCircularString: CircularString (1 2, 2 3, 3 4)>') def testQgsCompoundcurveRepr(self): cs = QgsCircularString(QgsPoint(1, 2), QgsPoint(2, 3), QgsPoint(3, 4)) cc = QgsCompoundCurve() cc.addCurve(cs) self.assertEqual(cc.__repr__(), '<QgsCompoundCurve: CompoundCurve (CircularString (1 2, 2 3, 3 4))>') def testQgsCurvepolygonRepr(self): cp = 
QgsCurvePolygon() cs = QgsCircularString(QgsPoint(1, 10), QgsPoint(2, 11), QgsPoint(1, 10)) cp.setExteriorRing(cs) self.assertEqual(cp.__repr__(), '<QgsCurvePolygon: CurvePolygon (CircularString (1 10, 2 11, 1 10))>') def testQgsEllipseRepr(self): e = QgsEllipse(QgsPoint(1, 2), 2.0, 3.0) self.assertEqual(e.__repr__(), '<QgsEllipse: Ellipse (Center: Point (1 2), Semi-Major Axis: 3, Semi-Minor Axis: 2, Azimuth: 180)>') def testQgsLineStringRepr(self): ls = QgsLineString([QgsPoint(10, 2), QgsPoint(10, 1), QgsPoint(5, 1)]) self.assertEqual(ls.__repr__(), '<QgsLineString: LineString (10 2, 10 1, 5 1)>') def testQgsMulticurveRepr(self): mc = QgsMultiCurve() cs = QgsCircularString(QgsPoint(1, 10), QgsPoint(2, 11), QgsPoint(3, 12)) mc.addGeometry(cs) cs2 = QgsCircularString(QgsPoint(4, 20), QgsPoint(5, 22), QgsPoint(6, 24)) mc.addGeometry(cs2) self.assertEqual(mc.__repr__(), '<QgsMultiCurve: MultiCurve (CircularString (1 10, 2 11, 3 12),CircularString (4 20, 5 22, 6 24))>') def testQgsMultilineStringRepr(self): ml = QgsGeometry.fromMultiPolylineXY( [ [QgsPointXY(0, 0), QgsPointXY(1, 0), QgsPointXY(1, 1), QgsPointXY(2, 1), QgsPointXY(2, 0), ], [QgsPointXY(3, 0), QgsPointXY(3, 1), QgsPointXY(5, 1), QgsPointXY(5, 0), QgsPointXY(6, 0), ] ] ) self.assertEqual(ml.constGet().__repr__(), '<QgsMultiLineString: MultiLineString ((0 0, 1 0, 1 1, 2 1, 2 0),(3 0, 3 1, 5 1, 5 0, 6 0))>') def testQgsMultiPointRepr(self): wkt = "MultiPoint ((10 30),(40 20),(30 10),(20 10))" mp = QgsGeometry.fromWkt(wkt) self.assertEqual(mp.constGet().__repr__(), '<QgsMultiPoint: MultiPoint ((10 30),(40 20),(30 10),(20 10))>') def testQgsMultipolygonRepr(self): mp = QgsGeometry.fromMultiPolygonXY([ [[QgsPointXY(1, 1), QgsPointXY(2, 2), QgsPointXY(1, 2), QgsPointXY(1, 1)]], [[QgsPointXY(2, 2), QgsPointXY(3, 3), QgsPointXY(3, 1), QgsPointXY(2, 2)]] ]) self.assertEqual(mp.constGet().__repr__(), '<QgsMultiPolygon: MultiPolygon (((1 1, 2 2, 1 2, 1 1)),((2 2, 3 3, 3 1, 2 2)))>') def testQgsPolygonRepr(self): p = 
QgsGeometry.fromPolygonXY( [[QgsPointXY(0, 0), QgsPointXY(2, 0), QgsPointXY(2, 2), QgsPointXY(0, 2), QgsPointXY(0, 0)]]) self.assertEqual(p.constGet().__repr__(), '<QgsPolygon: Polygon ((0 0, 2 0, 2 2, 0 2, 0 0))>') def testQgsRectangleRepr(self): r = QgsRectangle(1, 2, 3, 4) self.assertEqual(r.__repr__(), '<QgsRectangle: 1 2, 3 4>') def testQgsExpressionRepr(self): e = QgsExpression('my expression') self.assertEqual(e.__repr__(), "<QgsExpression: 'my expression'>") def testQgsFieldRepr(self): f = QgsField('field_name', QVariant.Double, 'double') self.assertEqual(f.__repr__(), "<QgsField: field_name (double)>") def testQgsErrorRepr(self): e = QgsError('you done wrong son', 'dad') self.assertEqual(e.__repr__(), "<QgsError: dad you done wrong son>") def testQgsMimeDataUri(self): d = QgsMimeDataUtils.Uri() d.uri = 'my_uri' d.providerKey = 'my_provider' self.assertEqual(d.__repr__(), "<QgsMimeDataUtils::Uri (my_provider): my_uri>") if __name__ == "__main__": unittest.main()
gpl-2.0
Dalanar/DotA2DraftBot
tools/collect_bot_info.py
1
3877
import vdf implemented_bots = set([ 'npc_dota_hero_axe', 'npc_dota_hero_bane', 'npc_dota_hero_bounty_hunter', 'npc_dota_hero_bloodseeker', 'npc_dota_hero_bristleback', 'npc_dota_hero_chaos_knight', 'npc_dota_hero_crystal_maiden', 'npc_dota_hero_dazzle', 'npc_dota_hero_death_prophet', 'npc_dota_hero_dragon_knight', 'npc_dota_hero_drow_ranger', 'npc_dota_hero_earthshaker', 'npc_dota_hero_jakiro', 'npc_dota_hero_juggernaut', 'npc_dota_hero_kunkka', 'npc_dota_hero_lich', 'npc_dota_hero_lina', 'npc_dota_hero_lion', 'npc_dota_hero_luna', 'npc_dota_hero_necrolyte', 'npc_dota_hero_omniknight', 'npc_dota_hero_oracle', 'npc_dota_hero_phantom_assassin', 'npc_dota_hero_pudge', 'npc_dota_hero_razor', 'npc_dota_hero_sand_king', 'npc_dota_hero_nevermore', 'npc_dota_hero_skywrath_mage', 'npc_dota_hero_sniper', 'npc_dota_hero_sven', 'npc_dota_hero_tidehunter', 'npc_dota_hero_tiny', 'npc_dota_hero_vengefulspirit', 'npc_dota_hero_viper', 'npc_dota_hero_warlock', 'npc_dota_hero_windrunner', 'npc_dota_hero_witch_doctor', 'npc_dota_hero_skeleton_king', 'npc_dota_hero_zuus', ]) heroes = vdf.load(open(r'D:\games\steamapps\common\dota 2 beta\game\dota\scripts\npc\npc_heroes.txt')) with open('hero_bot_data.lua', 'w') as output: # Write module exporting stuff #1 output.write('_G._savedEnv = getfenv()\n') output.write('module("hero_bot_data", package.seeall)\n') output.write('\n') # Collect all hero types hero_types = set() hero_type_ids = {} for name, data in heroes['DOTAHeroes'].iteritems(): if isinstance(data, dict) and 'Bot' in data: this_hero_type = data['Bot']['HeroType'].split('|') for hero_type in this_hero_type: hero_types.add(hero_type.strip()) idx = 1 for hero_type in hero_types: hero_type_ids[hero_type] = idx output.write('%s = %d\n' % (hero_type, idx)) idx *= 2 output.write('\n') # Fill LaningInfo and HeroType output.write('heroes = {\n') supported_list = [] not_supported_list = [] for name, data in heroes['DOTAHeroes'].iteritems(): if isinstance(data, dict) and 
data.get('CMEnabled', '0') == '1': human_name = data['url'].replace('_', ' ') if 'Bot' not in data: not_supported_list.append(human_name) continue laning_info = [] try: for key, value in data['Bot']['LaningInfo'].iteritems(): laning_info.append('[\'%s\'] = %s' % (key, value)) this_hero_type = 0 this_hero_type_raw = data['Bot']['HeroType'].split('|') for hero_type in this_hero_type_raw: this_hero_type |= hero_type_ids[hero_type.strip()] if ('Loadout' not in data['Bot']) or (name not in implemented_bots): not_supported_list.append(human_name) else: output.write(' [\'%s\'] = {[\'HeroType\'] = %s, [\'LaningInfo\'] = {%s}},\n' % (name, this_hero_type, ', '.join(laning_info))) supported_list.append(human_name) except KeyError as ex: not_supported_list.append(human_name) output.write('}\n\n') # Write module exporting stuff #2 output.write('for k,v in pairs(hero_bot_data) do _G._savedEnv[k] = v end\n') supported_list.sort() print 'Fully operational:' for hero in supported_list: print ' - %s' % hero not_supported_list.sort() print '\nNot supported:' for hero in not_supported_list: print ' - %s' % hero
mit
odubno/microblog
flask/lib/python2.7/site-packages/pytz/tzinfo.py
380
19368
'''Base classes and helpers for building zone specific tzinfo classes''' from datetime import datetime, timedelta, tzinfo from bisect import bisect_right try: set except NameError: from sets import Set as set import pytz from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError __all__ = [] _timedelta_cache = {} def memorized_timedelta(seconds): '''Create only one instance of each distinct timedelta''' try: return _timedelta_cache[seconds] except KeyError: delta = timedelta(seconds=seconds) _timedelta_cache[seconds] = delta return delta _epoch = datetime.utcfromtimestamp(0) _datetime_cache = {0: _epoch} def memorized_datetime(seconds): '''Create only one instance of each distinct datetime''' try: return _datetime_cache[seconds] except KeyError: # NB. We can't just do datetime.utcfromtimestamp(seconds) as this # fails with negative values under Windows (Bug #90096) dt = _epoch + timedelta(seconds=seconds) _datetime_cache[seconds] = dt return dt _ttinfo_cache = {} def memorized_ttinfo(*args): '''Create only one instance of each distinct tuple''' try: return _ttinfo_cache[args] except KeyError: ttinfo = ( memorized_timedelta(args[0]), memorized_timedelta(args[1]), args[2] ) _ttinfo_cache[args] = ttinfo return ttinfo _notime = memorized_timedelta(0) def _to_seconds(td): '''Convert a timedelta to seconds''' return td.seconds + td.days * 24 * 60 * 60 class BaseTzInfo(tzinfo): # Overridden in subclass _utcoffset = None _tzname = None zone = None def __str__(self): return self.zone class StaticTzInfo(BaseTzInfo): '''A timezone that has a constant offset from UTC These timezones are rare, as most locations have changed their offset at some point in their history ''' def fromutc(self, dt): '''See datetime.tzinfo.fromutc''' if dt.tzinfo is not None and dt.tzinfo is not self: raise ValueError('fromutc: dt.tzinfo is not self') return (dt + self._utcoffset).replace(tzinfo=self) def utcoffset(self, dt, is_dst=None): '''See datetime.tzinfo.utcoffset is_dst is ignored 
for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return self._utcoffset def dst(self, dt, is_dst=None): '''See datetime.tzinfo.dst is_dst is ignored for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return _notime def tzname(self, dt, is_dst=None): '''See datetime.tzinfo.tzname is_dst is ignored for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return self._tzname def localize(self, dt, is_dst=False): '''Convert naive time to local time''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) def normalize(self, dt, is_dst=False): '''Correct the timezone information on the given datetime. This is normally a no-op, as StaticTzInfo timezones never have ambiguous cases to correct: >>> from pytz import timezone >>> gmt = timezone('GMT') >>> isinstance(gmt, StaticTzInfo) True >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt) >>> gmt.normalize(dt) is dt True The supported method of converting between timezones is to use datetime.astimezone(). Currently normalize() also works: >>> la = timezone('America/Los_Angeles') >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3)) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> gmt.normalize(dt).strftime(fmt) '2011-05-07 08:02:03 GMT (+0000)' ''' if dt.tzinfo is self: return dt if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') return dt.astimezone(self) def __repr__(self): return '<StaticTzInfo %r>' % (self.zone,) def __reduce__(self): # Special pickle to zone remains a singleton and to cope with # database changes. return pytz._p, (self.zone,) class DstTzInfo(BaseTzInfo): '''A timezone that has a variable offset from UTC The offset might change if daylight saving time comes into effect, or at a point in history when the region decides to change their timezone definition. 
''' # Overridden in subclass _utc_transition_times = None # Sorted list of DST transition times in UTC _transition_info = None # [(utcoffset, dstoffset, tzname)] corresponding # to _utc_transition_times entries zone = None # Set in __init__ _tzinfos = None _dst = None # DST offset def __init__(self, _inf=None, _tzinfos=None): if _inf: self._tzinfos = _tzinfos self._utcoffset, self._dst, self._tzname = _inf else: _tzinfos = {} self._tzinfos = _tzinfos self._utcoffset, self._dst, self._tzname = self._transition_info[0] _tzinfos[self._transition_info[0]] = self for inf in self._transition_info[1:]: if inf not in _tzinfos: _tzinfos[inf] = self.__class__(inf, _tzinfos) def fromutc(self, dt): '''See datetime.tzinfo.fromutc''' if (dt.tzinfo is not None and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos): raise ValueError('fromutc: dt.tzinfo is not self') dt = dt.replace(tzinfo=None) idx = max(0, bisect_right(self._utc_transition_times, dt) - 1) inf = self._transition_info[idx] return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf]) def normalize(self, dt): '''Correct the timezone information on the given datetime If date arithmetic crosses DST boundaries, the tzinfo is not magically adjusted. This method normalizes the tzinfo to the correct one. To test, first we need to do some setup >>> from pytz import timezone >>> utc = timezone('UTC') >>> eastern = timezone('US/Eastern') >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' We next create a datetime right on an end-of-DST transition point, the instant when the wallclocks are wound back one hour. >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) >>> loc_dt = utc_dt.astimezone(eastern) >>> loc_dt.strftime(fmt) '2002-10-27 01:00:00 EST (-0500)' Now, if we subtract a few minutes from it, note that the timezone information has not changed. 
>>> before = loc_dt - timedelta(minutes=10) >>> before.strftime(fmt) '2002-10-27 00:50:00 EST (-0500)' But we can fix that by calling the normalize method >>> before = eastern.normalize(before) >>> before.strftime(fmt) '2002-10-27 01:50:00 EDT (-0400)' The supported method of converting between timezones is to use datetime.astimezone(). Currently, normalize() also works: >>> th = timezone('Asia/Bangkok') >>> am = timezone('Europe/Amsterdam') >>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3)) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> am.normalize(dt).strftime(fmt) '2011-05-06 20:02:03 CEST (+0200)' ''' if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') # Convert dt in localtime to UTC offset = dt.tzinfo._utcoffset dt = dt.replace(tzinfo=None) dt = dt - offset # convert it back, and return it return self.fromutc(dt) def localize(self, dt, is_dst=False): '''Convert naive time to local time. This method should be used to construct localtimes, rather than passing a tzinfo argument to a datetime constructor. is_dst is used to determine the correct timezone in the ambigous period at the end of daylight saving time. >>> from pytz import timezone >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> amdam = timezone('Europe/Amsterdam') >>> dt = datetime(2004, 10, 31, 2, 0, 0) >>> loc_dt1 = amdam.localize(dt, is_dst=True) >>> loc_dt2 = amdam.localize(dt, is_dst=False) >>> loc_dt1.strftime(fmt) '2004-10-31 02:00:00 CEST (+0200)' >>> loc_dt2.strftime(fmt) '2004-10-31 02:00:00 CET (+0100)' >>> str(loc_dt2 - loc_dt1) '1:00:00' Use is_dst=None to raise an AmbiguousTimeError for ambiguous times at the end of daylight saving time >>> try: ... loc_dt1 = amdam.localize(dt, is_dst=None) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous is_dst defaults to False >>> amdam.localize(dt) == amdam.localize(dt, False) True is_dst is also used to determine the correct timezone in the wallclock times jumped over at the start of daylight saving time. 
>>> pacific = timezone('US/Pacific') >>> dt = datetime(2008, 3, 9, 2, 0, 0) >>> ploc_dt1 = pacific.localize(dt, is_dst=True) >>> ploc_dt2 = pacific.localize(dt, is_dst=False) >>> ploc_dt1.strftime(fmt) '2008-03-09 02:00:00 PDT (-0700)' >>> ploc_dt2.strftime(fmt) '2008-03-09 02:00:00 PST (-0800)' >>> str(ploc_dt2 - ploc_dt1) '1:00:00' Use is_dst=None to raise a NonExistentTimeError for these skipped times. >>> try: ... loc_dt1 = pacific.localize(dt, is_dst=None) ... except NonExistentTimeError: ... print('Non-existent') Non-existent ''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') # Find the two best possibilities. possible_loc_dt = set() for delta in [timedelta(days=-1), timedelta(days=1)]: loc_dt = dt + delta idx = max(0, bisect_right( self._utc_transition_times, loc_dt) - 1) inf = self._transition_info[idx] tzinfo = self._tzinfos[inf] loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo)) if loc_dt.replace(tzinfo=None) == dt: possible_loc_dt.add(loc_dt) if len(possible_loc_dt) == 1: return possible_loc_dt.pop() # If there are no possibly correct timezones, we are attempting # to convert a time that never happened - the time period jumped # during the start-of-DST transition period. if len(possible_loc_dt) == 0: # If we refuse to guess, raise an exception. if is_dst is None: raise NonExistentTimeError(dt) # If we are forcing the pre-DST side of the DST transition, we # obtain the correct timezone by winding the clock forward a few # hours. elif is_dst: return self.localize( dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6) # If we are forcing the post-DST side of the DST transition, we # obtain the correct timezone by winding the clock back. else: return self.localize( dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6) # If we get this far, we have multiple possible timezones - this # is an ambiguous case occuring during the end-of-DST transition. 
# If told to be strict, raise an exception since we have an # ambiguous case if is_dst is None: raise AmbiguousTimeError(dt) # Filter out the possiblilities that don't match the requested # is_dst filtered_possible_loc_dt = [ p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst ] # Hopefully we only have one possibility left. Return it. if len(filtered_possible_loc_dt) == 1: return filtered_possible_loc_dt[0] if len(filtered_possible_loc_dt) == 0: filtered_possible_loc_dt = list(possible_loc_dt) # If we get this far, we have in a wierd timezone transition # where the clocks have been wound back but is_dst is the same # in both (eg. Europe/Warsaw 1915 when they switched to CET). # At this point, we just have to guess unless we allow more # hints to be passed in (such as the UTC offset or abbreviation), # but that is just getting silly. # # Choose the earliest (by UTC) applicable timezone if is_dst=True # Choose the latest (by UTC) applicable timezone if is_dst=False # i.e., behave like end-of-DST transition dates = {} # utc -> local for local_dt in filtered_possible_loc_dt: utc_time = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset assert utc_time not in dates dates[utc_time] = local_dt return dates[[min, max][not is_dst](dates)] def utcoffset(self, dt, is_dst=None): '''See datetime.tzinfo.utcoffset The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> tz.utcoffset(ambiguous, is_dst=False) datetime.timedelta(-1, 73800) >>> tz.utcoffset(ambiguous, is_dst=True) datetime.timedelta(-1, 77400) >>> try: ... tz.utcoffset(ambiguous) ... except AmbiguousTimeError: ... 
print('Ambiguous') Ambiguous ''' if dt is None: return None elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._utcoffset else: return self._utcoffset def dst(self, dt, is_dst=None): '''See datetime.tzinfo.dst The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> normal = datetime(2009, 9, 1) >>> tz.dst(normal) datetime.timedelta(0, 3600) >>> tz.dst(normal, is_dst=False) datetime.timedelta(0, 3600) >>> tz.dst(normal, is_dst=True) datetime.timedelta(0, 3600) >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> tz.dst(ambiguous, is_dst=False) datetime.timedelta(0) >>> tz.dst(ambiguous, is_dst=True) datetime.timedelta(0, 3600) >>> try: ... tz.dst(ambiguous) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous ''' if dt is None: return None elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._dst else: return self._dst def tzname(self, dt, is_dst=None): '''See datetime.tzinfo.tzname The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> normal = datetime(2009, 9, 1) >>> tz.tzname(normal) 'NDT' >>> tz.tzname(normal, is_dst=False) 'NDT' >>> tz.tzname(normal, is_dst=True) 'NDT' >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> tz.tzname(ambiguous, is_dst=False) 'NST' >>> tz.tzname(ambiguous, is_dst=True) 'NDT' >>> try: ... tz.tzname(ambiguous) ... except AmbiguousTimeError: ... 
print('Ambiguous') Ambiguous ''' if dt is None: return self.zone elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._tzname else: return self._tzname def __repr__(self): if self._dst: dst = 'DST' else: dst = 'STD' if self._utcoffset > _notime: return '<DstTzInfo %r %s+%s %s>' % ( self.zone, self._tzname, self._utcoffset, dst ) else: return '<DstTzInfo %r %s%s %s>' % ( self.zone, self._tzname, self._utcoffset, dst ) def __reduce__(self): # Special pickle to zone remains a singleton and to cope with # database changes. return pytz._p, ( self.zone, _to_seconds(self._utcoffset), _to_seconds(self._dst), self._tzname ) def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None): """Factory function for unpickling pytz tzinfo instances. This is shared for both StaticTzInfo and DstTzInfo instances, because database changes could cause a zones implementation to switch between these two base classes and we can't break pickles on a pytz version upgrade. """ # Raises a KeyError if zone no longer exists, which should never happen # and would be a bug. tz = pytz.timezone(zone) # A StaticTzInfo - just return it if utcoffset is None: return tz # This pickle was created from a DstTzInfo. We need to # determine which of the list of tzinfo instances for this zone # to use in order to restore the state of any datetime instances using # it correctly. utcoffset = memorized_timedelta(utcoffset) dstoffset = memorized_timedelta(dstoffset) try: return tz._tzinfos[(utcoffset, dstoffset, tzname)] except KeyError: # The particular state requested in this timezone no longer exists. # This indicates a corrupt pickle, or the timezone database has been # corrected violently enough to make this particular # (utcoffset,dstoffset) no longer exist in the zone, or the # abbreviation has been changed. pass # See if we can find an entry differing only by tzname. 
Abbreviations # get changed from the initial guess by the database maintainers to # match reality when this information is discovered. for localized_tz in tz._tzinfos.values(): if (localized_tz._utcoffset == utcoffset and localized_tz._dst == dstoffset): return localized_tz # This (utcoffset, dstoffset) information has been removed from the # zone. Add it back. This might occur when the database maintainers have # corrected incorrect information. datetime instances using this # incorrect information will continue to do so, exactly as they were # before being pickled. This is purely an overly paranoid safety net - I # doubt this will ever been needed in real life. inf = (utcoffset, dstoffset, tzname) tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos) return tz._tzinfos[inf]
bsd-3-clause
cysnake4713/odoo
addons/crm_partner_assign/report/crm_lead_report.py
16
5264
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields,osv
from openerp import tools
from openerp.addons.crm import crm


class crm_lead_report_assign(osv.osv):
    """CRM Lead Report for partner assignment.

    Read-only reporting model backed by a SQL view (``_auto = False``);
    all statistics are computed in the ``init()`` view definition below,
    never written to directly.
    """
    _name = "crm.lead.report.assign"
    _auto = False
    _description = "CRM Lead Report"
    _columns = {
        'partner_assigned_id':fields.many2one('res.partner', 'Partner', readonly=True),
        'grade_id':fields.many2one('res.partner.grade', 'Grade', readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'country_id':fields.many2one('res.country', 'Country', readonly=True),
        'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True),
        # NOTE: 'company_id' was previously declared twice in this dict; the
        # second (identical) entry silently overwrote this one and has been
        # removed.
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'date_assign': fields.date('Assign Date', readonly=True),
        'create_date': fields.datetime('Create Date', readonly=True),
        # Delay fields are expressed in days and aggregated as averages.
        'delay_open': fields.float('Delay to Assign',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
        'delay_close': fields.float('Delay to Close',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
        'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"),
        'probability': fields.float('Avg Probability',digits=(16,2),readonly=True, group_operator="avg"),
        'probability_max': fields.float('Max Probability',digits=(16,2),readonly=True, group_operator="max"),
        'planned_revenue': fields.float('Planned Revenue',digits=(16,2),readonly=True),
        'probable_revenue': fields.float('Probable Revenue', digits=(16,2),readonly=True),
        'stage_id': fields.many2one ('crm.case.stage', 'Stage', domain="[('section_ids', '=', section_id)]"),
        'partner_id': fields.many2one('res.partner', 'Customer' , readonly=True),
        'opening_date': fields.date('Opening Date', readonly=True),
        'creation_date': fields.date('Creation Date', readonly=True),
        'date_closed': fields.date('Close Date', readonly=True),
        'nbr': fields.integer('# of Cases', readonly=True),
        'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
        'type':fields.selection([
            ('lead','Lead'),
            ('opportunity','Opportunity')
        ],'Type', help="Type is used to separate Leads and Opportunities"),
    }

    def init(self, cr):
        """(Re)create the SQL view backing this reporting model.

        @param cr: the current row, from the database cursor

        The view flattens ``crm_lead`` joined with the assigned partner and
        derives per-lead metrics (probable revenue, open/close/deadline
        delays in days).
        """
        tools.drop_view_if_exists(cr, 'crm_lead_report_assign')
        cr.execute("""
            CREATE OR REPLACE VIEW crm_lead_report_assign AS (
                SELECT
                    c.id,
                    to_char(c.create_date, 'YYYY-MM-DD') as creation_date,
                    to_char(c.date_open, 'YYYY-MM-DD') as opening_date,
                    to_char(c.date_closed, 'YYYY-mm-dd') as date_closed,
                    c.date_assign,
                    c.user_id,
                    c.probability,
                    c.probability as probability_max,
                    c.stage_id,
                    c.type,
                    c.company_id,
                    c.priority,
                    c.section_id,
                    c.partner_id,
                    c.country_id,
                    c.planned_revenue,
                    c.partner_assigned_id,
                    p.grade_id,
                    p.date as partner_date,
                    c.planned_revenue*(c.probability/100) as probable_revenue,
                    1 as nbr,
                    date_trunc('day',c.create_date) as create_date,
                    extract('epoch' from (c.write_date-c.create_date))/(3600*24) as  delay_close,
                    extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24) as  delay_expected,
                    extract('epoch' from (c.date_open-c.create_date))/(3600*24) as  delay_open
                FROM
                    crm_lead c
                    left join res_partner p on (c.partner_assigned_id=p.id)
            )""")

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
namgivu/flask-aosh4
app/controller/user_controller.py
1
4136
from app import *
import random
from flask import session

# NOTE(review): these handlers pass ``**locals()`` straight into
# render_template, so every local variable name below is part of the
# template contract — do not rename locals without checking the templates.
# ``userRole_to_dashboardView`` and ``app`` presumably come from the
# star-import of the ``app`` package — TODO confirm.


@app.route('/user/switch-role')
def user_switch_role():
    # Simple static page; no context variables needed.
    return render_template('user/switch-role.html')


@app.route('/user/wallet')
def user_wallet():
    # ``template`` and ``my_point`` are consumed by the template via
    # ``**locals()`` even though they look unused here.
    template = userRole_to_dashboardView()
    fake_data = []
    my_point = 2000
    # Placeholder wallet entries (ids 1..10) until real data is wired in.
    for i in range(10):
        data = {
            'id': i + 1
        }
        fake_data.append(data)
    return render_template('user/wallet.html', **locals())


@app.route('/user/dashboard-player')
def user_dashboard_player():
    # Switch the session into the "player" role before rendering.
    session['userRole'] = app.config['USER_ROLE']['player']
    return render_template('user/dashboard-player.html')


@app.route('/user/player/my-board')
def user_player_my_board():
    session['userRole'] = app.config['USER_ROLE']['player']
    template = userRole_to_dashboardView()
    days = range(1, 30)
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    names = ['Anonymous']
    chars = "abcdefghjiklmnopqrstw"
    # Append five pseudo-random 21-char names (truncated to 9 chars below).
    for i in range(5):
        names.append(''.join(random.sample(chars,len(chars))))
    points = range(100, 1000)
    fake_data = []
    # Build five fake task cards; each is randomly tagged as either the
    # player's own task or a task they received.
    for i in range(5):
        data = {
            'id': i + 1,
            'day': random.choice(days),
            'month': random.choice(months),
            'point': random.choice(points),
            'name': random.choice(names)[:9],
            'completed': True if random.randint(1, 2) == 2 else False,
        }
        if random.randint(1, 2) == 2:
            data['is_my_task'] = 1
        else:
            data['is_received_task'] = 1
        fake_data.append(data)
    return render_template('user/base_task_board.html', **locals())


@app.route('/user/player/mission-board')
def user_player_mission_board():
    session['userRole'] = app.config['USER_ROLE']['player']
    template = userRole_to_dashboardView()
    days = range(1, 30)
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    names = ['Anonymous']
    skills = ['AI', 'Design', 'Video Content', 'Develop', 'Other']
    chars = "abcdefghjiklmnopqrstw"
    for i in range(5):
        names.append(''.join(random.sample(chars,len(chars))))
    points = range(100, 1000)
    fake_data = []
    # Mission-board cards always belong to other players (``is_other_task``).
    for i in range(5):
        data = {
            'id': i + 1,
            'day': random.choice(days),
            'month': random.choice(months),
            'point': random.choice(points),
            'name': random.choice(names)[:9],
            'skill': random.choice(skills),
            'is_other_task': 1
        }
        fake_data.append(data)
    return render_template('user/base_task_board.html', **locals())


@app.route('/user/leader-board')
def user_leader_board():
    template = userRole_to_dashboardView()
    chars = "abcdefghjiklmnopqrstw"
    names = []
    for i in range(5):
        names.append(''.join(random.sample(chars,len(chars))))
    points = range(1, 1000)
    fake_data = []
    # Twenty fake leaderboard rows, ranked 1..20.
    for i in range(20):
        data = {
            'rank': i + 1,
            'name': random.choice(names)[:9],
            'point': random.choice(points)
        }
        fake_data.append(data)
    return render_template('/user/leader_board.html', **locals())


@app.route('/user/dashboard-giver')
def user_dashboard_giver():
    # Switch the session into the "giver" role before rendering.
    session['userRole'] = app.config['USER_ROLE']['giver']
    return render_template('user/dashboard-giver.html')


@app.route("/user/giver/my-board")
def user_giver_my_board():
    session['userRole'] = app.config['USER_ROLE']['giver']
    template = userRole_to_dashboardView()
    # ``role`` and ``not_show_avatar`` are template flags (via **locals()).
    role = 'giver'
    not_show_avatar = True
    days = range(1, 30)
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    names = ['Anonymous']
    skills = ['AI', 'Design', 'Video Content', 'Develop', 'Other']
    chars = "abcdefghjiklmnopqrstw"
    for i in range(5):
        names.append(''.join(random.sample(chars,len(chars))))
    points = range(100, 1000)
    fake_data = []
    # Giver cards expose a settings action (``is_setting``) and a random
    # completed flag.
    for i in range(5):
        data = {
            'id': i + 1,
            'day': random.choice(days),
            'month': random.choice(months),
            'point': random.choice(points),
            'name': random.choice(names)[:9],
            'skill': random.choice(skills),
            'is_setting': 1,
            'completed': True if random.randint(1, 2) == 2 else False,
        }
        fake_data.append(data)
    return render_template('user/base_task_board.html', **locals())
gpl-3.0
colinbrislawn/bioconda-recipes
recipes/mz_to_sqlite/1.2.0/mz_to_sqlite.py
60
3265
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script
# (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK

# Name of the bundled jar, located next to this wrapper script.
jar_file = 'mzToSQLite-1.2.0.jar'

# JVM memory defaults, used only when the caller supplies no -Xm* option
# and _JAVA_OPTIONS is unset (see jvm_opts below).
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']

# !!! End of parameter section. No user-serviceable code below this line !!!


def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    return os.path.dirname(os.path.realpath(path))


def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers $JAVA_HOME/bin/java when it exists and is executable;
    otherwise falls back to whatever ``java`` resolves to on PATH.
    """
    java_home = getenv('JAVA_HOME')
    java_bin = os.path.join('bin', 'java')
    if java_home and access(os.path.join(java_home, java_bin), X_OK):
        return os.path.join(java_home, java_bin)
    else:
        return 'java'


def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of lists/values of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)

    (Bug fix: the docstring previously claimed a 3-tuple although the
    function has always returned four values.)
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith(('-D', '-XX')):
            # JVM system properties and advanced options.
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            # JVM memory options (-Xms / -Xmx).
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            # Optional private copy of the distribution; created on first use.
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
        else:
            # Everything else is passed through to the application.
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explictly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)


def main():
    """Invoke the bundled jar with JVM options derived from sys.argv."""
    java = java_executable()
    """
    mz_to_sqlite updates files relative to the path of the jar file.
    In a multiuser setting, the option --exec_dir="exec_dir" can be used as the
    location for the peptide-shaker distribution. If the exec_dir dies not exist,
    we copy the jar file, lib, and resources to the exec_dir directory.
    """
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])

    # A first pass-through argument that looks like a class name ('eu...')
    # means the caller wants to run a specific class from the classpath.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'

    jar_path = os.path.join(jar_dir, jar_file)

    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args

    sys.exit(subprocess.call(java_args))


if __name__ == '__main__':
    main()
mit
gregcaporaso/qiime
tests/test_clean_raxml_parsimony_tree.py
15
3447
#!/usr/bin/env python
# File created on 16 Nov 2011
#
# Unit tests for qiime.clean_raxml_parsimony_tree. Python 2 code (uses the
# Python-2-only StringIO module and PyCogent tree objects).
from __future__ import division

__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Jesse Stombaugh"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "jesse.stombaugh@colorado.edu"

from unittest import TestCase, main
from StringIO import StringIO
from cogent.parse.tree import DndParser
from cogent.core.tree import PhyloNode, TreeNode
from qiime.clean_raxml_parsimony_tree import decorate_numtips, decorate_depth,\
    get_insert_dict, drop_duplicate_nodes


class TopLevelTests(TestCase):
    """Tests of top-level functions"""

    def setUp(self):
        """define some top-level data"""
        # Parse the fixture newick string into a PhyloNode tree; the tree
        # deliberately contains Species006/Species007 inserted three times.
        self.tree = DndParser(StringIO(TEST_TREE), constructor=PhyloNode)

    def test_decorate_numtips(self):
        """decorate_numtips: decorate the number of tips below each node."""
        obs = decorate_numtips(self.tree)

        # make sure each tip only has 1 tip
        tips = obs.tips()
        for obs_tips in tips:
            exp = 1
            self.assertEqual(obs_tips.Score, exp)

        # check that the node scores are correct
        # (expected values are specific to the TEST_TREE fixture below)
        node_numtips = [1, 2, 8]
        for nodes in obs:
            self.assertTrue(nodes.Score in node_numtips)

    def test_decorate_depth(self):
        """decorate_depth: decorate the depth from the root each node is."""
        # decorate the depth of each node on the tree
        obs = decorate_depth(self.tree)

        # make sure each tip depth is between 1 and 5
        tips = obs.tips()
        tip_depth = [1, 2, 3, 4, 5]
        for obs_tips in tips:
            self.assertTrue(obs_tips.Score in tip_depth)

        # check that the node depth is 1
        for nodes in obs:
            exp = 1
            self.assertEqual(nodes.Score, exp)

    def test_get_insert_dict(self):
        """get_insert_dict: get the location of each inserted tip."""
        # pull out the tips for Species006
        obs = get_insert_dict(self.tree, 'Species006')

        # verify the dict is properly referenced
        self.assertTrue('Species006' in obs)

        # if Species006 in dict, verify there are 3 tips
        if 'Species006' in obs:
            exp_len = 3
            obs_len = len(obs['Species006'])
            self.assertEqual(obs_len, exp_len)

    def test_drop_duplicate_nodes(self):
        """drop_duplicate_nodes: remove duplicate tips from tree based on
           either the number of tips or depth."""
        # pull out the tips for Species006
        inserted_nodes = get_insert_dict(self.tree, 'Species006')

        # decorate the depth of each node on the tree
        decorated_tree = decorate_depth(self.tree)

        # drop duplicate nodes based on depth (deterministic when equal depths)
        obs = drop_duplicate_nodes(decorated_tree, inserted_nodes)

        # verify the resulting tree is correct
        self.assertEqual(obs.getNewick(with_distances=True), EXPECTED_TREE)


# Fixture: Species006/Species007 appear three times each, simulating
# multiple parsimony insertions of the same taxa.
TEST_TREE = """(Species003:0.1,(Species001:0.1,Species002:0.1):0.1,((Species006,Species007):0.0,(((Species006,Species007):0.0,Species004:0.1):0.1,((Species006,Species007):0.0,Species005:0.1):0.1):0.1):0.1);"""

# Expected tree after duplicate removal (depth-based, deterministic ties).
EXPECTED_TREE = """(Species003:0.1,(Species001:0.1,Species002:0.1):0.1,((((Species006,Species007):0.0,Species004:0.1):0.1,(Species005:0.1,Species007:0.0):0.1):0.1,Species007:0.0):0.1);"""

if __name__ == "__main__":
    main()
gpl-2.0
PeterWangIntel/chromium-crosswalk
media/tools/layout_tests/test_expectations_history_unittest.py
156
2572
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from datetime import datetime
import calendar
import unittest

from test_expectations_history import TestExpectationsHistory


class TestTestExpectationsHistory(unittest.TestCase):
  """Unit tests for the TestExpectationsHistory class."""

  def AssertTestName(self, result_list, testname):
    """Assert test name in the result_list.

    Args:
      result_list: a result list of tuples returned by
          |GetDiffBetweenTimesOnly1Diff()|. Each tuple consists of
          (old_rev, new_rev, author, date, message, lines) where
          |lines| are the entries in the test expectation file.
      testname: a testname string.

    Returns:
      True if the result contains the testname, False otherwise.
    """
    for (_, _, _, _, _, lines) in result_list:
      if any(testname in line for line in lines):
        return True
    # Bug fix: the original fell off the end here and returned None,
    # contradicting the documented "False otherwise" contract.
    return False

  # These tests use the following commit.
  # commit 235788e3a4fc71342a5c9fefe67ce9537706ce35
  # Author: rniwa@webkit.org
  # Date:   Sat Aug 20 06:19:11 2011 +0000

  def testGetDiffBetweenTimes(self):
    # One-day window around the commit; times are UTC epoch seconds.
    ptime = calendar.timegm((2011, 8, 20, 0, 0, 0, 0, 0, 0))
    ctime = calendar.timegm((2011, 8, 21, 0, 0, 0, 0, 0, 0))
    testname = 'fast/css/getComputedStyle/computed-style-without-renderer.html'
    testname_list = [testname]
    result_list = TestExpectationsHistory.GetDiffBetweenTimes(
        ptime, ctime, testname_list)
    self.assertTrue(self.AssertTestName(result_list, testname))

  def testGetDiffBetweenTimesOnly1Diff(self):
    # One-hour window that should contain exactly one diff.
    ptime = calendar.timegm((2011, 8, 20, 6, 0, 0, 0, 0, 0))
    ctime = calendar.timegm((2011, 8, 20, 7, 0, 0, 0, 0, 0))
    testname = 'fast/css/getComputedStyle/computed-style-without-renderer.html'
    testname_list = [testname]
    result_list = TestExpectationsHistory.GetDiffBetweenTimes(
        ptime, ctime, testname_list)
    self.assertTrue(self.AssertTestName(result_list, testname))

  def testGetDiffBetweenTimesOnly1DiffWithGobackSeveralDays(self):
    # Window whose match requires the implementation to look back
    # several days for the previous revision.
    ptime = calendar.timegm((2011, 9, 12, 1, 0, 0, 0, 0, 0))
    ctime = calendar.timegm((2011, 9, 12, 2, 0, 0, 0, 0, 0))
    testname = 'media/video-zoom-controls.html'
    testname_list = [testname]
    result_list = TestExpectationsHistory.GetDiffBetweenTimes(
        ptime, ctime, testname_list)
    self.assertTrue(self.AssertTestName(result_list, testname))


if __name__ == '__main__':
  unittest.main()
bsd-3-clause
nmercier/linux-cross-gcc
linux/lib/python2.7/dist-packages/numpy/core/function_base.py
23
6891
from __future__ import division, absolute_import, print_function

__all__ = ['logspace', 'linspace']

from . import numeric as _nx
# NOTE(review): shares_memory, MAY_SHARE_BOUNDS and TooHardError are not
# referenced in this chunk — possibly re-exported or used elsewhere in the
# file; confirm before removing.
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError


def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
    """
    Return evenly spaced numbers over a specified interval.

    Returns `num` evenly spaced samples, calculated over the
    interval [`start`, `stop`].

    The endpoint of the interval can optionally be excluded.

    Parameters
    ----------
    start : scalar
        The starting value of the sequence.
    stop : scalar
        The end value of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced samples, so that `stop` is excluded.  Note that the step
        size changes when `endpoint` is False.
    num : int, optional
        Number of samples to generate. Default is 50. Must be non-negative.
    endpoint : bool, optional
        If True, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return (`samples`, `step`), where `step` is the spacing
        between samples.
    dtype : dtype, optional
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.

        .. versionadded:: 1.9.0

    Returns
    -------
    samples : ndarray
        There are `num` equally spaced samples in the closed interval
        ``[start, stop]`` or the half-open interval ``[start, stop)``
        (depending on whether `endpoint` is True or False).
    step : float
        Only returned if `retstep` is True

        Size of spacing between samples.

    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
             number of samples).
    logspace : Samples uniformly distributed in log space.

    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([ 2. ,  2.2,  2.4,  2.6,  2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ]), 0.25)

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 8
    >>> y = np.zeros(N)
    >>> x1 = np.linspace(0, 10, N, endpoint=True)
    >>> x2 = np.linspace(0, 10, N, endpoint=False)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()

    """
    num = int(num)
    if num < 0:
        raise ValueError("Number of samples, %s, must be non-negative." % num)
    # Divisor for the step: with endpoint=True the last of `num` samples
    # lands exactly on `stop`, hence num - 1 intervals.
    div = (num - 1) if endpoint else num

    # Convert float/complex array scalars to float, gh-3504
    start = start * 1.
    stop = stop * 1.

    dt = result_type(start, stop, float(num))
    if dtype is None:
        dtype = dt

    y = _nx.arange(0, num, dtype=dt)

    delta = stop - start
    if num > 1:
        step = delta / div
        if step == 0:
            # Special handling for denormal numbers, gh-5437
            y /= div
            y = y * delta
        else:
            # One might be tempted to use faster, in-place multiplication here,
            # but this prevents step from overriding what class is produced,
            # and thus prevents, e.g., use of Quantities; see gh-7142.
            y = y * step
    else:
        # 0 and 1 item long sequences have an undefined step
        step = NaN
        # Multiply with delta to allow possible override of output class.
        y = y * delta

    y += start

    # Force the endpoint to be exactly `stop` (float arithmetic above may
    # leave the last element slightly off).
    if endpoint and num > 1:
        y[-1] = stop

    if retstep:
        return y.astype(dtype, copy=False), step
    else:
        return y.astype(dtype, copy=False)


def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
    """
    Return numbers spaced evenly on a log scale.

    In linear space, the sequence starts at ``base ** start``
    (`base` to the power of `start`) and ends with ``base ** stop``
    (see `endpoint` below).

    Parameters
    ----------
    start : float
        ``base ** start`` is the starting value of the sequence.
    stop : float
        ``base ** stop`` is the final value of the sequence, unless `endpoint`
        is False.  In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length ``num``) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    base : float, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of the
             number of samples. Note that, when used with a float endpoint, the
             endpoint may or may not be included.
    linspace : Similar to logspace, but with the samples uniformly distributed
               in linear space, instead of log space.

    Notes
    -----
    Logspace is equivalent to the code

    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
    ... # doctest: +SKIP
    >>> power(base, y).astype(dtype)
    ... # doctest: +SKIP

    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([  100.        ,   215.443469  ,   464.15888336,  1000.        ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([ 100.        ,  177.827941  ,  316.22776602,  562.34132519])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([ 4.        ,  5.0396842 ,  6.34960421,  8.        ])

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
    >>> y = np.zeros(N)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()

    """
    # Sample uniformly in log space, then exponentiate.
    y = linspace(start, stop, num=num, endpoint=endpoint)
    if dtype is None:
        return _nx.power(base, y)
    return _nx.power(base, y).astype(dtype)
bsd-3-clause
LLNL/spack
var/spack/repos/builtin/packages/py-atropos/package.py
5
1111
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyAtropos(PythonPackage): """Atropos is tool for specific, sensitive, and speedy trimming of NGS reads. It is a fork of the venerable Cutadapt read trimmer.""" homepage = "https://atropos.readthedocs.io" url = "https://pypi.io/packages/source/a/atropos/atropos-1.1.22.tar.gz" git = "https://github.com/jdidion/atropos.git" version('1.1.22', sha256='05e40cb9337421479c692e1154b962fbf811d7939b72c197a024929b7ae88b78') depends_on('python@3.3:', type=('build', 'run')) depends_on('py-setuptools', type=('build', 'run')) depends_on('py-cython@0.25.2:', type='build') depends_on('py-tqdm', type=('build', 'run'), when='+tqdm') depends_on('py-pysam', type=('build', 'run'), when='+pysam') variant('tqdm', default=False, description='Enable progress bar') variant('pysam', default=False, description='Enable bam file parsing')
lgpl-2.1
stefanw/froide
froide/foirequest/validators.py
1
1237
from django.utils.encoding import force_str
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _

import magic

from .models.attachment import POSTAL_CONTENT_TYPES


def get_content_type(scan):
    """Sniff the MIME type of an uploaded file-like object.

    Reads the first 1024 bytes with libmagic and rewinds the stream both
    before and after, so the caller sees the file at position 0.
    """
    scan.seek(0)
    content_type = magic.from_buffer(scan.read(1024), mime=True)
    content_type = force_str(content_type)
    scan.seek(0)
    return content_type


def validate_upload_document(scan):
    """Validate an uploaded scan against the allowed postal content types."""
    content_type = get_content_type(scan)
    # FIXME: move this out of the validator
    if content_type:
        scan.content_type = content_type
    validate_postal_content_type(content_type)


def validate_postal_content_type(content_type):
    """Raise ValidationError unless content_type is an allowed postal type."""
    if content_type not in POSTAL_CONTENT_TYPES:
        raise ValidationError(
            _('The scanned letter must be either PDF, JPG or PNG,'
              ' but was detected as %(content_type)s!'),
            params={
                'content_type': content_type
            }
        )


def clean_reference(value):
    """Normalize a ``kind:value`` reference string.

    Returns '' for empty input or input without a ':' separator;
    otherwise returns the reference re-joined as ``kind:value``.
    """
    if not value:
        return ''
    try:
        kind, value = value.split(':', 1)
    except ValueError:
        return ''
    # The trailing try/except ValueError that previously wrapped this
    # return was dead code: %-formatting two strings cannot raise
    # ValueError. Removed with identical behavior.
    return '%s:%s' % (kind, value)
mit
Rocamadour7/ml_tutorial
02. Regression/main.py
1
1623
from statistics import mean

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import random

style.use('fivethirtyeight')


def create_dataset(how_much, variance, step=2, correlation='pos'):
    """Generate a noisy linear dataset of `how_much` points.

    variance controls the random noise added to each y value; step is the
    per-point trend increment; correlation selects an increasing ('pos'),
    decreasing ('neg') or flat (anything else) trend.

    Returns (xs, ys) as float64 numpy arrays.
    """
    val = 1
    ys = []
    for i in range(how_much):
        # NOTE: randrange(-variance, variance) requires variance >= 1.
        y = val + random.randrange(-variance, variance)
        ys.append(y)
        if correlation == 'pos':
            val += step
        elif correlation == 'neg':
            val -= step
    xs = [i for i in range(len(ys))]
    return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)


def best_fit_slope_and_intercept(xs, ys):
    """Return (m, b) of the least-squares best-fit line y = m*x + b."""
    m = ((mean(xs) * mean(ys)) - mean(xs * ys)) / (mean(xs) ** 2 - mean(xs ** 2))
    b = mean(ys) - m * mean(xs)
    return m, b


def squared_error(ys_original, ys_line):
    """Return the sum of squared differences between the line and the data."""
    return sum((ys_line - ys_original) ** 2)


def coefficient_of_determination(ys_original, ys_line):
    """Return r^2 of ys_line as a predictor of ys_original."""
    y_mean_line = mean(ys_original)
    squared_error_regr = squared_error(ys_original, ys_line)
    squared_error_y_mean = squared_error(ys_original, y_mean_line)
    return 1 - (squared_error_regr / squared_error_y_mean)


def main():
    """Demo driver: fit a line to a random dataset, print r^2, plot it."""
    xs, ys = create_dataset(40, 10, 2, correlation='pos')
    m, b = best_fit_slope_and_intercept(xs, ys)
    regression_line = [m * x + b for x in xs]

    predict_x = 8
    predict_y = m * predict_x + b

    r_squared = coefficient_of_determination(ys, regression_line)
    print(r_squared)

    plt.scatter(xs, ys)
    plt.scatter(predict_x, predict_y)
    plt.plot(xs, regression_line)
    plt.show()


# Guarding the driver keeps the fit/plot from running on import.
if __name__ == '__main__':
    main()
mit
joeyjojo/django_offline
src/django/contrib/formtools/tests/wizard/wizardtests/forms.py
313
2203
"""Forms, formsets, and wizard classes used by the formtools wizard tests."""
import os
import tempfile

from django import forms
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import WizardView
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.http import HttpResponse
from django.template import Template, Context

# NOTE: the original module imported User twice; the duplicate import has
# been removed.

# File uploads made during the wizard tests land in a throwaway directory.
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)


class Page1(forms.Form):
    name = forms.CharField(max_length=100)
    user = forms.ModelChoiceField(queryset=User.objects.all())
    thirsty = forms.NullBooleanField()


class Page2(forms.Form):
    address1 = forms.CharField(max_length=100)
    address2 = forms.CharField(max_length=100)
    file1 = forms.FileField()


class Page3(forms.Form):
    random_crap = forms.CharField(max_length=100)


# The last wizard step is a formset of two Page3 forms.
Page4 = formset_factory(Page3, extra=2)


class ContactWizard(WizardView):
    """Wizard under test; renders all collected cleaned data at the end."""
    file_storage = temp_storage

    def done(self, form_list, **kwargs):
        c = Context({
            'form_list': [x.cleaned_data for x in form_list],
            'all_cleaned_data': self.get_all_cleaned_data(),
        })

        for form in self.form_list.keys():
            c[form] = self.get_cleaned_data_for_step(form)

        # Deliberately asks for a nonexistent step; the tests rely on the
        # resulting value being None rather than an error.
        c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
        return HttpResponse(Template('').render(c))

    def get_context_data(self, form, **kwargs):
        context = super(ContactWizard, self).get_context_data(form, **kwargs)
        if self.storage.current_step == 'form2':
            context.update({'another_var': True})
        return context


class UserForm(forms.ModelForm):
    class Meta:
        model = User
        fields = ('username', 'email')


UserFormSet = modelformset_factory(User, form=UserForm)


class SessionContactWizard(ContactWizard):
    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'


class CookieContactWizard(ContactWizard):
    storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
mit
ApplauseAQI/androguard
androguard/decompiler/dad/basic_blocks.py
23
11191
# This file is part of Androguard. # # Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from collections import defaultdict from androguard.decompiler.dad.opcode_ins import INSTRUCTION_SET from androguard.decompiler.dad.instruction import MoveExceptionExpression from androguard.decompiler.dad.node import Node from androguard.decompiler.dad.util import get_type logger = logging.getLogger('dad.basic_blocks') class BasicBlock(Node): def __init__(self, name, block_ins): super(BasicBlock, self).__init__(name) self.ins = block_ins self.ins_range = None self.loc_ins = None self.var_to_declare = set() self.catch_type = None def get_ins(self): return self.ins def get_loc_with_ins(self): if self.loc_ins is None: self.loc_ins = zip(range(*self.ins_range), self.ins) return self.loc_ins def remove_ins(self, loc, ins): self.ins.remove(ins) self.loc_ins.remove((loc, ins)) def add_ins(self, new_ins_list): for new_ins in new_ins_list: self.ins.append(new_ins) def add_variable_declaration(self, variable): self.var_to_declare.add(variable) def number_ins(self, num): last_ins_num = num + len(self.ins) self.ins_range = [num, last_ins_num] self.loc_ins = None return last_ins_num def set_catch_type(self, _type): self.catch_type = _type class StatementBlock(BasicBlock): def __init__(self, name, block_ins): super(StatementBlock, self).__init__(name, block_ins) self.type.is_stmt = True def visit(self, 
visitor): return visitor.visit_statement_node(self) def __str__(self): return '%d-Statement(%s)' % (self.num, self.name) class ReturnBlock(BasicBlock): def __init__(self, name, block_ins): super(ReturnBlock, self).__init__(name, block_ins) self.type.is_return = True def visit(self, visitor): return visitor.visit_return_node(self) def __str__(self): return '%d-Return(%s)' % (self.num, self.name) class ThrowBlock(BasicBlock): def __init__(self, name, block_ins): super(ThrowBlock, self).__init__(name, block_ins) self.type.is_throw = True def visit(self, visitor): return visitor.visit_throw_node(self) def __str__(self): return '%d-Throw(%s)' % (self.num, self.name) class SwitchBlock(BasicBlock): def __init__(self, name, switch, block_ins): super(SwitchBlock, self).__init__(name, block_ins) self.switch = switch self.cases = [] self.default = None self.node_to_case = defaultdict(list) self.type.is_switch = True def add_case(self, case): self.cases.append(case) def visit(self, visitor): return visitor.visit_switch_node(self) def copy_from(self, node): super(SwitchBlock, self).copy_from(node) self.cases = node.cases[:] self.switch = node.switch[:] def update_attribute_with(self, n_map): super(SwitchBlock, self).update_attribute_with(n_map) self.cases = [n_map.get(n, n) for n in self.cases] for node1, node2 in n_map.iteritems(): if node1 in self.node_to_case: self.node_to_case[node2] = self.node_to_case.pop(node1) def order_cases(self): values = self.switch.get_values() if len(values) < len(self.cases): self.default = self.cases.pop(0) for case, node in zip(values, self.cases): self.node_to_case[node].append(case) def __str__(self): return '%d-Switch(%s)' % (self.num, self.name) class CondBlock(BasicBlock): def __init__(self, name, block_ins): super(CondBlock, self).__init__(name, block_ins) self.true = None self.false = None self.type.is_cond = True def update_attribute_with(self, n_map): super(CondBlock, self).update_attribute_with(n_map) self.true = n_map.get(self.true, 
self.true) self.false = n_map.get(self.false, self.false) def neg(self): if len(self.ins) != 1: raise RuntimeWarning('Condition should have only 1 instruction !') self.ins[-1].neg() def visit(self, visitor): return visitor.visit_cond_node(self) def visit_cond(self, visitor): if len(self.ins) != 1: raise RuntimeWarning('Condition should have only 1 instruction !') return visitor.visit_ins(self.ins[-1]) def __str__(self): return '%d-If(%s)' % (self.num, self.name) class Condition(object): def __init__(self, cond1, cond2, isand, isnot): self.cond1 = cond1 self.cond2 = cond2 self.isand = isand self.isnot = isnot def neg(self): self.isand = not self.isand self.cond1.neg() self.cond2.neg() def get_ins(self): lins = [] lins.extend(self.cond1.get_ins()) lins.extend(self.cond2.get_ins()) return lins def get_loc_with_ins(self): loc_ins = [] loc_ins.extend(self.cond1.get_loc_with_ins()) loc_ins.extend(self.cond2.get_loc_with_ins()) return loc_ins def visit(self, visitor): return visitor.visit_short_circuit_condition(self.isnot, self.isand, self.cond1, self.cond2) def __str__(self): if self.isnot: ret = '!%s %s %s' else: ret = '%s %s %s' return ret % (self.cond1, ['||', '&&'][self.isand], self.cond2) class ShortCircuitBlock(CondBlock): def __init__(self, name, cond): super(ShortCircuitBlock, self).__init__(name, None) self.cond = cond def get_ins(self): return self.cond.get_ins() def get_loc_with_ins(self): return self.cond.get_loc_with_ins() def neg(self): self.cond.neg() def visit_cond(self, visitor): return self.cond.visit(visitor) def __str__(self): return '%d-SC(%s)' % (self.num, self.cond) class LoopBlock(CondBlock): def __init__(self, name, cond): super(LoopBlock, self).__init__(name, None) self.cond = cond def get_ins(self): return self.cond.get_ins() def neg(self): self.cond.neg() def get_loc_with_ins(self): return self.cond.get_loc_with_ins() def visit(self, visitor): return visitor.visit_loop_node(self) def visit_cond(self, visitor): return 
self.cond.visit_cond(visitor) def update_attribute_with(self, n_map): super(LoopBlock, self).update_attribute_with(n_map) self.cond.update_attribute_with(n_map) def __str__(self): if self.looptype.is_pretest: if self.false in self.loop_nodes: return '%d-While(!%s)[%s]' % (self.num, self.name, self.cond) return '%d-While(%s)[%s]' % (self.num, self.name, self.cond) elif self.looptype.is_posttest: return '%d-DoWhile(%s)[%s]' % (self.num, self.name, self.cond) elif self.looptype.is_endless: return '%d-WhileTrue(%s)[%s]' % (self.num, self.name, self.cond) return '%d-WhileNoType(%s)' % (self.num, self.name) class TryBlock(BasicBlock): def __init__(self, node): super(TryBlock, self).__init__('Try-%s' % node.name, None) self.try_start = node self.catch = [] # FIXME: @property def num(self): return self.try_start.num @num.setter def num(self, value): pass def add_catch_node(self, node): self.catch.append(node) def visit(self, visitor): visitor.visit_try_node(self) def __str__(self): return 'Try(%s)[%s]' % (self.name, self.catch) class CatchBlock(BasicBlock): def __init__(self, node): first_ins = node.ins[0] self.exception_ins = None if isinstance(first_ins, MoveExceptionExpression): self.exception_ins = first_ins node.ins.pop(0) super(CatchBlock, self).__init__('Catch-%s' % node.name, node.ins) self.catch_start = node self.catch_type = node.catch_type def visit(self, visitor): visitor.visit_catch_node(self) def visit_exception(self, visitor): if self.exception_ins: visitor.visit_ins(self.exception_ins) else: visitor.write(get_type(self.catch_type)) def __str__(self): return 'Catch(%s)' % self.name def build_node_from_block(block, vmap, gen_ret, exception_type=None): ins, lins = None, [] idx = block.get_start() for ins in block.get_instructions(): opcode = ins.get_op_value() if opcode == -1: # FIXME? 
or opcode in (0x0300, 0x0200, 0x0100): idx += ins.get_length() continue try: _ins = INSTRUCTION_SET[opcode] except IndexError: logger.error('Unknown instruction : %s.', ins.get_name().lower()) _ins = INSTRUCTION_SET[0] # fill-array-data if opcode == 0x26: fillarray = block.get_special_ins(idx) lins.append(_ins(ins, vmap, fillarray)) # invoke-kind[/range] elif (0x6e <= opcode <= 0x72 or 0x74 <= opcode <= 0x78): lins.append(_ins(ins, vmap, gen_ret)) # filled-new-array[/range] elif 0x24 <= opcode <= 0x25: lins.append(_ins(ins, vmap, gen_ret.new())) # move-result* elif 0xa <= opcode <= 0xc: lins.append(_ins(ins, vmap, gen_ret.last())) # move-exception elif opcode == 0xd: lins.append(_ins(ins, vmap, exception_type)) # monitor-{enter,exit} elif 0x1d <= opcode <= 0x1e: idx += ins.get_length() continue else: lins.append(_ins(ins, vmap)) idx += ins.get_length() name = block.get_name() # return* if 0xe <= opcode <= 0x11: node = ReturnBlock(name, lins) # {packed,sparse}-switch elif 0x2b <= opcode <= 0x2c: idx -= ins.get_length() values = block.get_special_ins(idx) node = SwitchBlock(name, values, lins) # if-test[z] elif 0x32 <= opcode <= 0x3d: node = CondBlock(name, lins) node.off_last_ins = ins.get_ref_off() # throw elif opcode == 0x27: node = ThrowBlock(name, lins) else: # goto* if 0x28 <= opcode <= 0x2a: lins.pop() node = StatementBlock(name, lins) return node
apache-2.0
frictionlessdata/tabulator-py
tests/schemes/test_remote.py
3
1872
# -*- coding: utf-8 -*- from __future__ import division from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals import pytest from tabulator import Stream from tabulator.loaders.remote import RemoteLoader from tabulator.exceptions import HTTPError from time import time BASE_URL = 'https://raw.githubusercontent.com/frictionlessdata/tabulator-py/master/%s' # Read @pytest.mark.remote def test_stream_https(): with Stream(BASE_URL % 'data/table.csv') as stream: assert stream.read() == [['id', 'name'], ['1', 'english'], ['2', '中国人']] @pytest.mark.remote def test_stream_https_latin1(): # Github returns wrong encoding `utf-8` with Stream(BASE_URL % 'data/special/latin1.csv') as stream: assert stream.read() # Internal @pytest.mark.remote def test_loader_remote_t(): loader = RemoteLoader() chars = loader.load(BASE_URL % 'data/table.csv', encoding='utf-8') assert chars.read() == 'id,name\n1,english\n2,中国人\n' @pytest.mark.remote def test_loader_remote_b(): spec = '中国人'.encode('utf-8') loader = RemoteLoader() chars = loader.load(BASE_URL % 'data/table.csv', mode='b', encoding='utf-8') assert chars.read() == b'id,name\n1,english\n2,' + spec + b'\n' @pytest.mark.skip @pytest.mark.remote def test_loader_no_timeout(): loader = RemoteLoader() t = time() chars = loader.load('https://httpstat.us/200?sleep=5000', mode='b', encoding='utf-8') assert time() - t > 5 assert chars.read() == b'200 OK' t = time() @pytest.mark.remote def test_loader_has_timeout(): loader = RemoteLoader(http_timeout=1) t = time() with pytest.raises(HTTPError): chars = loader.load('https://httpstat.us/200?sleep=5000', mode='b', encoding='utf-8') assert time() - t < 5 assert time() - t > 1
mit
varunagrawal/azure-services
varunagrawal/VarunWeb/env/Lib/site-packages/django/db/backends/mysql/base.py
28
22109
""" MySQL database backend for Django. Requires MySQLdb: http://sourceforge.net/projects/mysql-python """ from __future__ import unicode_literals import datetime import re import sys import warnings try: import MySQLdb as Database except ImportError as e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e) from django.utils.functional import cached_property # We want version (1, 2, 1, 'final', 2) or later. We can't just use # lexicographic ordering in this check because then (1, 2, 1, 'gamma') # inadvertently passes the version test. version = Database.version_info if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and (len(version) < 5 or version[3] != 'final' or version[4] < 2))): from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__) from MySQLdb.converters import conversions, Thing2Literal from MySQLdb.constants import FIELD_TYPE, CLIENT try: import pytz except ImportError: pytz = None from django.conf import settings from django.db import utils from django.db.backends import * from django.db.backends.mysql.client import DatabaseClient from django.db.backends.mysql.creation import DatabaseCreation from django.db.backends.mysql.introspection import DatabaseIntrospection from django.db.backends.mysql.validation import DatabaseValidation from django.utils.encoding import force_str, force_text from django.utils.safestring import SafeBytes, SafeText from django.utils import six from django.utils import timezone # Raise exceptions for database warnings if DEBUG is on if settings.DEBUG: warnings.filterwarnings("error", category=Database.Warning) DatabaseError = Database.DatabaseError IntegrityError = Database.IntegrityError # It's impossible to import datetime_or_None directly from MySQLdb.times parse_datetime = conversions[FIELD_TYPE.DATETIME] def parse_datetime_with_timezone_support(value): 
dt = parse_datetime(value) # Confirm that dt is naive before overwriting its tzinfo. if dt is not None and settings.USE_TZ and timezone.is_naive(dt): dt = dt.replace(tzinfo=timezone.utc) return dt def adapt_datetime_with_timezone_support(value, conv): # Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL. if settings.USE_TZ: if timezone.is_naive(value): warnings.warn("MySQL received a naive datetime (%s)" " while time zone support is active." % value, RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) value = value.astimezone(timezone.utc).replace(tzinfo=None) return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv) # MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like # timedelta in terms of actual behavior as they are signed and include days -- # and Django expects time, so we still need to override that. We also need to # add special handling for SafeText and SafeBytes as MySQLdb's type # checking is too tight to catch those (see Django ticket #6052). # Finally, MySQLdb always returns naive datetime objects. However, when # timezone support is active, Django expects timezone-aware datetime objects. django_conversions = conversions.copy() django_conversions.update({ FIELD_TYPE.TIME: util.typecast_time, FIELD_TYPE.DECIMAL: util.typecast_decimal, FIELD_TYPE.NEWDECIMAL: util.typecast_decimal, FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support, datetime.datetime: adapt_datetime_with_timezone_support, }) # This should match the numerical portion of the version numbers (we can treat # versions like 5.0.24 and 5.0.24a as the same). Based on the list of version # at http://dev.mysql.com/doc/refman/4.1/en/news.html and # http://dev.mysql.com/doc/refman/5.0/en/news.html . 
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})') # MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on # MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the # point is to raise Warnings as exceptions, this can be done with the Python # warning module, and this is setup when the connection is created, and the # standard util.CursorDebugWrapper can be used. Also, using sql_mode # TRADITIONAL will automatically cause most warnings to be treated as errors. class CursorWrapper(object): """ A thin wrapper around MySQLdb's normal cursor class so that we can catch particular exception instances and reraise them with the right types. Implemented as a wrapper, rather than a subclass, so that we aren't stuck to the particular underlying representation returned by Connection.cursor(). """ codes_for_integrityerror = (1048,) def __init__(self, cursor): self.cursor = cursor def execute(self, query, args=None): try: # args is None means no string interpolation return self.cursor.execute(query, args) except Database.OperationalError as e: # Map some error codes to IntegrityError, since they seem to be # misclassified and Django would prefer the more logical place. if e.args[0] in self.codes_for_integrityerror: six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2]) raise def executemany(self, query, args): try: return self.cursor.executemany(query, args) except Database.OperationalError as e: # Map some error codes to IntegrityError, since they seem to be # misclassified and Django would prefer the more logical place. 
if e.args[0] in self.codes_for_integrityerror: six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2]) raise def __getattr__(self, attr): if attr in self.__dict__: return self.__dict__[attr] else: return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor) class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () update_can_self_select = False allows_group_by_pk = True related_fields_match_type = True allow_sliced_subqueries = False has_bulk_insert = True has_select_for_update = True has_select_for_update_nowait = False supports_forward_references = False supports_long_model_names = False supports_microsecond_precision = False supports_regex_backreferencing = False supports_date_lookup_using_string = False supports_timezones = False requires_explicit_null_ordering_when_grouping = True allows_primary_key_0 = False uses_savepoints = True atomic_transactions = False def __init__(self, connection): super(DatabaseFeatures, self).__init__(connection) @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" cursor = self.connection.cursor() cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)') # This command is MySQL specific; the second column # will tell you the default table type of the created # table. Since all Django's test tables will have the same # table type, that's enough to evaluate the feature. cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'") result = cursor.fetchone() cursor.execute('DROP TABLE INTROSPECT_TEST') return result[1] @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != 'MyISAM' @cached_property def has_zoneinfo_database(self): # MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects # abbreviations (eg. EAT). 
When pytz isn't installed and the current # time zone is LocalTimezone (the only sensible value in this # context), the current time zone name will be an abbreviation. As a # consequence, MySQL cannot perform time zone conversions reliably. if pytz is None: return False # Test if the time zone definitions are installed. cursor = self.connection.cursor() cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1") return cursor.fetchone() is not None class DatabaseOperations(BaseDatabaseOperations): compiler_module = "django.db.backends.mysql.compiler" def date_extract_sql(self, lookup_type, field_name): # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html if lookup_type == 'week_day': # DAYOFWEEK() returns an integer, 1-7, Sunday=1. # Note: WEEKDAY() returns 0-6, Monday=0. return "DAYOFWEEK(%s)" % field_name else: return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): fields = ['year', 'month', 'day', 'hour', 'minute', 'second'] format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape. format_def = ('0000-', '01', '-01', ' 00:', '00', ':00') try: i = fields.index(lookup_type) + 1 except ValueError: sql = field_name else: format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]]) sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str) return sql def datetime_extract_sql(self, lookup_type, field_name, tzname): if settings.USE_TZ: field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name params = [tzname] else: params = [] # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html if lookup_type == 'week_day': # DAYOFWEEK() returns an integer, 1-7, Sunday=1. # Note: WEEKDAY() returns 0-6, Monday=0. 
sql = "DAYOFWEEK(%s)" % field_name else: sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) return sql, params def datetime_trunc_sql(self, lookup_type, field_name, tzname): if settings.USE_TZ: field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name params = [tzname] else: params = [] fields = ['year', 'month', 'day', 'hour', 'minute', 'second'] format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape. format_def = ('0000-', '01', '-01', ' 00:', '00', ':00') try: i = fields.index(lookup_type) + 1 except ValueError: sql = field_name else: format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]]) sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str) return sql, params def date_interval_sql(self, sql, connector, timedelta): return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector, timedelta.days, timedelta.seconds, timedelta.microseconds) def drop_foreignkey_sql(self): return "DROP FOREIGN KEY" def force_no_ordering(self): """ "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped columns. If no ordering would otherwise be applied, we don't want any implicit sorting going on. """ return ["NULL"] def fulltext_search_sql(self, field_name): return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name def last_executed_query(self, cursor, sql, params): # With MySQLdb, cursor objects have an (undocumented) "_last_executed" # attribute where the exact query sent to the database is saved. # See MySQLdb/cursors.py in the source distribution. return force_text(getattr(cursor, '_last_executed', None), errors='replace') def no_limit_value(self): # 2**64 - 1, as recommended by the MySQL documentation return 18446744073709551615 def quote_name(self, name): if name.startswith("`") and name.endswith("`"): return name # Quoting once is enough. 
return "`%s`" % name def random_function_sql(self): return 'RAND()' def sql_flush(self, style, tables, sequences, allow_cascade=False): # NB: The generated SQL below is specific to MySQL # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements # to clear all tables of all data if tables: sql = ['SET FOREIGN_KEY_CHECKS = 0;'] for table in tables: sql.append('%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table)), )) sql.append('SET FOREIGN_KEY_CHECKS = 1;') sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): # Truncate already resets the AUTO_INCREMENT field from # MySQL version 5.0.13 onwards. Refs #16961. if self.connection.mysql_version < (5, 0, 13): return ["%s %s %s %s %s;" % \ (style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_TABLE(self.quote_name(sequence['table'])), style.SQL_KEYWORD('AUTO_INCREMENT'), style.SQL_FIELD('= 1'), ) for sequence in sequences] else: return [] def validate_autopk_value(self, value): # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653. 
if value == 0: raise ValueError('The database backend does not accept 0 as a ' 'value for AutoField.') return value def value_to_db_datetime(self, value): if value is None: return None # MySQL doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = value.astimezone(timezone.utc).replace(tzinfo=None) else: raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.") # MySQL doesn't support microseconds return six.text_type(value.replace(microsecond=0)) def value_to_db_time(self, value): if value is None: return None # MySQL doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("MySQL backend does not support timezone-aware times.") # MySQL doesn't support microseconds return six.text_type(value.replace(microsecond=0)) def year_lookup_bounds_for_datetime_field(self, value): # Again, no microseconds first, second = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value) return [first.replace(microsecond=0), second.replace(microsecond=0)] def max_name_length(self): return 64 def bulk_insert_sql(self, fields, num_values): items_sql = "(%s)" % ", ".join(["%s"] * len(fields)) return "VALUES " + ", ".join([items_sql] * num_values) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'mysql' operators = { 'exact': '= %s', 'iexact': 'LIKE %s', 'contains': 'LIKE BINARY %s', 'icontains': 'LIKE %s', 'regex': 'REGEXP BINARY %s', 'iregex': 'REGEXP %s', 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'LIKE BINARY %s', 'endswith': 'LIKE BINARY %s', 'istartswith': 'LIKE %s', 'iendswith': 'LIKE %s', } Database = Database def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) self.features = DatabaseFeatures(self) self.ops = DatabaseOperations(self) self.client = DatabaseClient(self) self.creation = DatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = 
DatabaseValidation(self) def get_connection_params(self): kwargs = { 'conv': django_conversions, 'charset': 'utf8', } if six.PY2: kwargs['use_unicode'] = True settings_dict = self.settings_dict if settings_dict['USER']: kwargs['user'] = settings_dict['USER'] if settings_dict['NAME']: kwargs['db'] = settings_dict['NAME'] if settings_dict['PASSWORD']: kwargs['passwd'] = force_str(settings_dict['PASSWORD']) if settings_dict['HOST'].startswith('/'): kwargs['unix_socket'] = settings_dict['HOST'] elif settings_dict['HOST']: kwargs['host'] = settings_dict['HOST'] if settings_dict['PORT']: kwargs['port'] = int(settings_dict['PORT']) # We need the number of potentially affected rows after an # "UPDATE", not the number of changed rows. kwargs['client_flag'] = CLIENT.FOUND_ROWS kwargs.update(settings_dict['OPTIONS']) return kwargs def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) conn.encoders[SafeText] = conn.encoders[six.text_type] conn.encoders[SafeBytes] = conn.encoders[bytes] return conn def init_connection_state(self): cursor = self.connection.cursor() # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column # on a recently-inserted row will return when the field is tested for # NULL. Disabling this value brings this aspect of MySQL in line with # SQL standards. cursor.execute('SET SQL_AUTO_IS_NULL = 0') cursor.close() def create_cursor(self): cursor = self.connection.cursor() return CursorWrapper(cursor) def _rollback(self): try: BaseDatabaseWrapper._rollback(self) except Database.NotSupportedError: pass def _set_autocommit(self, autocommit): self.connection.autocommit(autocommit) def disable_constraint_checking(self): """ Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True, to indicate constraint checks need to be re-enabled. 
""" self.cursor().execute('SET foreign_key_checks=0') return True def enable_constraint_checking(self): """ Re-enable foreign key checks after they have been disabled. """ # Override needs_rollback in case constraint_checks_disabled is # nested inside transaction.atomic. self.needs_rollback, needs_rollback = False, self.needs_rollback try: self.cursor().execute('SET foreign_key_checks=1') finally: self.needs_rollback = needs_rollback def check_constraints(self, table_names=None): """ Checks each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides detailed information about the invalid reference in the error message. Backends can override this method if they can more directly apply constraint checking (e.g. 
via "SET CONSTRAINTS ALL IMMEDIATE") """ cursor = self.cursor() if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute(""" SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL""" % (primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name)) for bad_row in cursor.fetchall(): raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid " "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s." % (table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name)) def is_usable(self): try: self.connection.ping() except DatabaseError: return False else: return True @cached_property def mysql_version(self): with self.temporary_connection(): server_info = self.connection.get_server_info() match = server_version_re.match(server_info) if not match: raise Exception('Unable to determine MySQL version from version string %r' % server_info) return tuple([int(x) for x in match.groups()])
gpl-2.0
hocinebendou/bika.gsoc
bika/lims/exportimport/instruments/agilent/masshunter/quantitative.py
5
23863
""" Agilent's 'Masshunter Quant' """ from DateTime import DateTime from Products.Archetypes.event import ObjectInitializedEvent from Products.CMFCore.utils import getToolByName from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile from bika.lims import bikaMessageFactory as _ from bika.lims.utils import t from bika.lims import logger from bika.lims.browser import BrowserView from bika.lims.idserver import renameAfterCreation from bika.lims.utils import changeWorkflowState from bika.lims.utils import tmpID from cStringIO import StringIO from datetime import datetime from operator import itemgetter from plone.i18n.normalizer.interfaces import IIDNormalizer from zope.component import getUtility import csv import json import plone import zope import zope.event from bika.lims.exportimport.instruments.resultsimport import InstrumentCSVResultsFileParser,\ AnalysisResultsImporter import traceback title = "Agilent - Masshunter Quantitative" def Import(context, request): """ Read Agilent's Masshunter Quant analysis results """ infile = request.form['amhq_file'] fileformat = request.form['amhq_format'] artoapply = request.form['amhq_artoapply'] override = request.form['amhq_override'] sample = request.form.get('amhq_sample', 'requestid') instrument = request.form.get('amhq_instrument', None) errors = [] logs = [] # Load the most suitable parser according to file extension/options/etc... 
parser = None if not hasattr(infile, 'filename'): errors.append(_("No file selected")) elif fileformat == 'csv': parser = MasshunterQuantCSVParser(infile) else: errors.append(t(_("Unrecognized file format ${fileformat}", mapping={"fileformat": fileformat}))) if parser: # Load the importer status = ['sample_received', 'attachment_due', 'to_be_verified'] if artoapply == 'received': status = ['sample_received'] elif artoapply == 'received_tobeverified': status = ['sample_received', 'attachment_due', 'to_be_verified'] over = [False, False] if override == 'nooverride': over = [False, False] elif override == 'override': over = [True, False] elif override == 'overrideempty': over = [True, True] sam = ['getRequestID', 'getSampleID', 'getClientSampleID'] if sample =='requestid': sam = ['getRequestID'] if sample == 'sampleid': sam = ['getSampleID'] elif sample == 'clientsid': sam = ['getClientSampleID'] elif sample == 'sample_clientsid': sam = ['getSampleID', 'getClientSampleID'] importer = MasshunterQuantImporter(parser=parser, context=context, idsearchcriteria=sam, allowed_ar_states=status, allowed_analysis_states=None, override=over, instrument_uid=instrument) tbex = '' try: importer.process() except: tbex = traceback.format_exc() errors = importer.errors logs = importer.logs warns = importer.warns if tbex: errors.append(tbex) results = {'errors': errors, 'log': logs, 'warns': warns} return json.dumps(results) class MasshunterQuantCSVParser(InstrumentCSVResultsFileParser): HEADERKEY_BATCHINFO = 'Batch Info' HEADERKEY_BATCHDATAPATH = 'Batch Data Path' HEADERKEY_ANALYSISTIME = 'Analysis Time' HEADERKEY_ANALYSTNAME = 'Analyst Name' HEADERKEY_REPORTTIME = 'Report Time' HEADERKEY_REPORTERNAME = 'Reporter Name' HEADERKEY_LASTCALIBRATION = 'Last Calib Update' HEADERKEY_BATCHSTATE = 'Batch State' SEQUENCETABLE_KEY = 'Sequence Table' SEQUENCETABLE_HEADER_DATAFILE = 'Data File' SEQUENCETABLE_HEADER_SAMPLENAME = 'Sample Name' SEQUENCETABLE_PRERUN = 'prerunrespchk.d' 
SEQUENCETABLE_MIDRUN = 'mid_respchk.d' SEQUENCETABLE_POSTRUN = 'post_respchk.d' SEQUENCETABLE_NUMERICHEADERS = ('Inj Vol',) QUANTITATIONRESULTS_KEY = 'Quantification Results' QUANTITATIONRESULTS_TARGETCOMPOUND = 'Target Compound' QUANTITATIONRESULTS_HEADER_DATAFILE = 'Data File' QUANTITATIONRESULTS_PRERUN = 'prerunrespchk.d' QUANTITATIONRESULTS_MIDRUN = 'mid_respchk.d' QUANTITATIONRESULTS_POSTRUN = 'post_respchk.d' QUANTITATIONRESULTS_NUMERICHEADERS = ('Resp', 'ISTD Resp', 'Resp Ratio', 'Final Conc', 'Exp Conc', 'Accuracy') QUANTITATIONRESULTS_COMPOUNDCOLUMN = 'Compound' COMMAS = ',' def __init__(self, csv): InstrumentCSVResultsFileParser.__init__(self, csv) self._end_header = False self._end_sequencetable = False self._sequences = [] self._sequencesheader = [] self._quantitationresultsheader = [] self._numline = 0 def getAttachmentFileType(self): return "Agilent's Masshunter Quant CSV" def _parseline(self, line): if self._end_header == False: return self.parse_headerline(line) elif self._end_sequencetable == False: return self.parse_sequencetableline(line) else: return self.parse_quantitationesultsline(line) def parse_headerline(self, line): """ Parses header lines Header example: Batch Info,2013-03-20T07:11:09.9053262-07:00,2013-03-20T07:12:55.5280967-07:00,2013-03-20T07:11:07.1047817-07:00,,,,,,,,,,,,,, Batch Data Path,D:\MassHunter\Data\130129\QuantResults\130129LS.batch.bin,,,,,,,,,,,,,,,, Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,, Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,, Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,,, """ if self._end_header == True: # Header already processed return 0 if line.startswith(self.SEQUENCETABLE_KEY): self._end_header = True if len(self._header) == 0: self.err("No header found", numline=self._numline) return -1 return 0 splitted = [token.strip() for token in line.split(',')] # Batch 
Info,2013-03-20T07:11:09.9053262-07:00,2013-03-20T07:12:55.5280967-07:00,2013-03-20T07:11:07.1047817-07:00,,,,,,,,,,,,,, if splitted[0] == self.HEADERKEY_BATCHINFO: if self.HEADERKEY_BATCHINFO in self._header: self.warn("Header Batch Info already found. Discarding", numline=self._numline, line=line) return 0 self._header[self.HEADERKEY_BATCHINFO] = [] for i in range(len(splitted) - 1): if splitted[i + 1]: self._header[self.HEADERKEY_BATCHINFO].append(splitted[i + 1]) # Batch Data Path,D:\MassHunter\Data\130129\QuantResults\130129LS.batch.bin,,,,,,,,,,,,,,,, elif splitted[0] == self.HEADERKEY_BATCHDATAPATH: if self.HEADERKEY_BATCHDATAPATH in self._header: self.warn("Header Batch Data Path already found. Discarding", numline=self._numline, line=line) return 0; if splitted[1]: self._header[self.HEADERKEY_BATCHDATAPATH] = splitted[1] else: self.warn("Batch Data Path not found or empty", numline=self._numline, line=line) # Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,, elif splitted[0] == self.HEADERKEY_ANALYSISTIME: if splitted[1]: try: d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p") self._header[self.HEADERKEY_ANALYSISTIME] = d except ValueError: self.err("Invalid Analysis Time format", numline=self._numline, line=line) else: self.warn("Analysis Time not found or empty", numline=self._numline, line=line) if splitted[2] and splitted[2] == self.HEADERKEY_ANALYSTNAME: if splitted[3]: self._header[self.HEADERKEY_ANALYSTNAME] = splitted[3] else: self.warn("Analyst Name not found or empty", numline=self._numline, line=line) else: self.err("Analyst Name not found", numline=self._numline, line=line) # Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,, elif splitted[0] == self.HEADERKEY_REPORTTIME: if splitted[1]: try: d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p") self._header[self.HEADERKEY_REPORTTIME] = d except ValueError: self.err("Invalid Report Time format", numline=self._numline, line=line) else: 
self.warn("Report time not found or empty", numline=self._numline, line=line) if splitted[2] and splitted[2] == self.HEADERKEY_REPORTERNAME: if splitted[3]: self._header[self.HEADERKEY_REPORTERNAME] = splitted[3] else: self.warn("Reporter Name not found or empty", numline=self._numline, line=line) else: self.err("Reporter Name not found", numline=self._numline, line=line) # Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,, elif splitted[0] == self.HEADERKEY_LASTCALIBRATION: if splitted[1]: try: d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p") self._header[self.HEADERKEY_LASTCALIBRATION] = d except ValueError: self.err("Invalid Last Calibration time format", numline=self._numline, line=line) else: self.warn("Last Calibration time not found or empty", numline=self._numline, line=line) if splitted[2] and splitted[2] == self.HEADERKEY_BATCHSTATE: if splitted[3]: self._header[self.HEADERKEY_BATCHSTATE] = splitted[3] else: self.warn("Batch state not found or empty", numline=self._numline, line=line) else: self.err("Batch state not found", numline=self._numline, line=line) return 0 def parse_sequencetableline(self, line): """ Parses sequence table lines Sequence Table example: Sequence Table,,,,,,,,,,,,,,,,, Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,, prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, DSS_Nist_L2.d,DSS_Nist_L2,P1-B2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, DSS_Nist_L3.d,DSS_Nist_L3,P1-C2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, UTAK_DS_L1.d,UTAK_DS_L1,P1-D2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, UTAK_DS_L2.d,UTAK_DS_L2,P1-E2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, mid_respchk.d,mid_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, 
UTAK_DS_low.d,UTAK_DS_Low,P1-F2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, FDBS_31.d,FDBS_31,P1-G2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, FDBS_32.d,FDBS_32,P1-H2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, LS_60-r001.d,LS_60,P1-G12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, LS_60-r002.d,LS_60,P1-G12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, LS_61-r001.d,LS_61,P1-H12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, LS_61-r002.d,LS_61,P1-H12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, ,,,,,,,,,,,,,,,,, """ # Sequence Table,,,,,,,,,,,,,,,,, # prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, # mid_respchk.d,mid_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, # ,,,,,,,,,,,,,,,,, if line.startswith(self.SEQUENCETABLE_KEY) \ or line.startswith(self.SEQUENCETABLE_PRERUN) \ or line.startswith(self.SEQUENCETABLE_MIDRUN) \ or self._end_sequencetable == True: # Nothing to do, continue return 0 # Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,, if line.startswith(self.SEQUENCETABLE_HEADER_DATAFILE): self._sequencesheader = [token.strip() for token in line.split(',') if token.strip()] return 0 # post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, # Quantitation Results,,,,,,,,,,,,,,,,, if line.startswith(self.SEQUENCETABLE_POSTRUN) \ or line.startswith(self.QUANTITATIONRESULTS_KEY) \ or line.startswith(self.COMMAS): self._end_sequencetable = True if len(self._sequences) == 0: self.err("No Sequence Table found", linenum=self._numline) return -1 # Jumps 2 lines: # Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,, # prerunrespchk.d,prerunrespchk,Vial 
3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, return 2 # DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,, splitted = [token.strip() for token in line.split(',')] sequence = {} for colname in self._sequencesheader: sequence[colname] = '' for i in range(len(splitted)): token = splitted[i] if i < len(self._sequencesheader): colname = self._sequencesheader[i] if token and colname in self.SEQUENCETABLE_NUMERICHEADERS: try: sequence[colname] = float(token) except ValueError: self.warn( "No valid number ${token} in column ${index} (${column_name})", mapping={"token": token, "index": str(i + 1), "column_name": colname}, numline=self._numline, line=line) sequence[colname] = token else: sequence[colname] = token elif token: self.err("Orphan value in column ${index} (${token})", mapping={"index": str(i+1), "token": token}, numline=self._numline, line=line) self._sequences.append(sequence) def parse_quantitationesultsline(self, line): """ Parses quantitation result lines Quantitation results example: Quantitation Results,,,,,,,,,,,,,,,,, Target Compound,25-OH D3+PTAD+MA,,,,,,,,,,,,,,,, Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,, prerunrespchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,5816,274638,0.0212,0.9145,,,,,,,,,,, DSS_Nist_L1.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,6103,139562,0.0437,1.6912,,,,,,,,,,, DSS_Nist_L2.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,11339,135726,0.0835,3.0510,,,,,,,,,,, DSS_Nist_L3.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,15871,141710,0.1120,4.0144,,,,,,,,,,, mid_respchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,4699,242798,0.0194,0.8514,,,,,,,,,,, DSS_Nist_L3-r002.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,15659,129490,0.1209,4.3157,,,,,,,,,,, UTAK_DS_L1-r001.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,29846,132264,0.2257,7.7965,,,,,,,,,,, UTAK_DS_L1-r002.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,28696,141614,0.2026,7.0387,,,,,,,,,,, post_respchk.d,25-OH D3+PTAD+MA,25-OH 
D3d3+PTAD+MA,5022,231748,0.0217,0.9315,,,,,,,,,,, ,,,,,,,,,,,,,,,,, Target Compound,25-OH D2+PTAD+MA,,,,,,,,,,,,,,,, Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,, prerunrespchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6222,274638,0.0227,0.8835,,,,,,,,,,, DSS_Nist_L1.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,1252,139562,0.0090,0.7909,,,,,,,,,,, DSS_Nist_L2.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,3937,135726,0.0290,0.9265,,,,,,,,,,, DSS_Nist_L3.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,826,141710,0.0058,0.7697,,,,,,,,,,, mid_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,7864,242798,0.0324,0.9493,,,,,,,,,,, DSS_Nist_L3-r002.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,853,129490,0.0066,0.7748,,,,,,,,,,, UTAK_DS_L1-r001.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,127496,132264,0.9639,7.1558,,,,,,,,,,, UTAK_DS_L1-r002.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,135738,141614,0.9585,7.1201,,,,,,,,,,, post_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6567,231748,0.0283,0.9219,,,,,,,,,,, ,,,,,,,,,,,,,,,,, """ # Quantitation Results,,,,,,,,,,,,,,,,, # prerunrespchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,5816,274638,0.0212,0.9145,,,,,,,,,,, # mid_respchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,4699,242798,0.0194,0.8514,,,,,,,,,,, # post_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6567,231748,0.0283,0.9219,,,,,,,,,,, # ,,,,,,,,,,,,,,,,, if line.startswith(self.QUANTITATIONRESULTS_KEY) \ or line.startswith(self.QUANTITATIONRESULTS_PRERUN) \ or line.startswith(self.QUANTITATIONRESULTS_MIDRUN) \ or line.startswith(self.QUANTITATIONRESULTS_POSTRUN) \ or line.startswith(self.COMMAS): # Nothing to do, continue return 0 # Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,, if line.startswith(self.QUANTITATIONRESULTS_HEADER_DATAFILE): self._quantitationresultsheader = [token.strip() for token in line.split(',') if token.strip()] return 0 # Target Compound,25-OH D3+PTAD+MA,,,,,,,,,,,,,,,, if 
line.startswith(self.QUANTITATIONRESULTS_TARGETCOMPOUND): # New set of Quantitation Results splitted = [token.strip() for token in line.split(',')] if not splitted[1]: self.warn("No Target Compound found", numline=self._numline, line=line) return 0 # DSS_Nist_L1.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,1252,139562,0.0090,0.7909,,,,,,,,,,, splitted = [token.strip() for token in line.split(',')] quantitation = {} for colname in self._quantitationresultsheader: quantitation[colname] = '' for i in range(len(splitted)): token = splitted[i] if i < len(self._quantitationresultsheader): colname = self._quantitationresultsheader[i] if token and colname in self.QUANTITATIONRESULTS_NUMERICHEADERS: try: quantitation[colname] = float(token) except ValueError: self.warn( "No valid number ${token} in column ${index} (${column_name})", mapping={"token": token, "index": str(i + 1), "column_name": colname}, numline=self._numline, line=line) quantitation[colname] = token else: quantitation[colname] = token elif token: self.err("Orphan value in column ${index} (${token})", mapping={"index": str(i+1), "token": token}, numline=self._numline, line=line) if self.QUANTITATIONRESULTS_COMPOUNDCOLUMN in quantitation: compound = quantitation[self.QUANTITATIONRESULTS_COMPOUNDCOLUMN] # Look for sequence matches and populate rawdata datafile = quantitation.get(self.QUANTITATIONRESULTS_HEADER_DATAFILE, '') if not datafile: self.err("No Data File found for quantitation result", numline=self._numline, line=line) else: seqs = [sequence for sequence in self._sequences \ if sequence.get('Data File', '') == datafile] if len(seqs) == 0: self.err("No sample found for quantitative result ${data_file}", mapping={"data_file": datafile}, numline=self._numline, line=line) elif len(seqs) > 1: self.err("More than one sequence found for quantitative result: ${data_file}", mapping={"data_file": datafile}, numline=self._numline, line=line) else: objid = seqs[0].get(self.SEQUENCETABLE_HEADER_SAMPLENAME, '') if objid: 
quantitation['DefaultResult'] = 'Final Conc' quantitation['Remarks'] = _("Autoimport") rows = self.getRawResults().get(objid, []) raw = rows[0] if len(rows) > 0 else {} raw[compound] = quantitation self._addRawResult(objid, raw, True) else: self.err("No valid sequence for ${data_file}", mapping={"data_file": datafile}, numline=self._numline, line=line) else: self.err("Value for column '${column}' not found", mapping={"column": self.QUANTITATIONRESULTS_COMPOUNDCOLUMN}, numline=self._numline, line=line) class MasshunterQuantImporter(AnalysisResultsImporter): def __init__(self, parser, context, idsearchcriteria, override, allowed_ar_states=None, allowed_analysis_states=None, instrument_uid=''): AnalysisResultsImporter.__init__(self, parser, context, idsearchcriteria, override, allowed_ar_states, allowed_analysis_states, instrument_uid)
mit
georgtroska/root
interpreter/llvm/src/utils/lit/lit/formats/base.py
81
3910
from __future__ import absolute_import import os import sys import lit.Test import lit.util class TestFormat(object): pass ### class FileBasedTest(TestFormat): def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig): source_path = testSuite.getSourcePath(path_in_suite) for filename in os.listdir(source_path): # Ignore dot files and excluded tests. if (filename.startswith('.') or filename in localConfig.excludes): continue filepath = os.path.join(source_path, filename) if not os.path.isdir(filepath): base,ext = os.path.splitext(filename) if ext in localConfig.suffixes: yield lit.Test.Test(testSuite, path_in_suite + (filename,), localConfig) ### import re import tempfile class OneCommandPerFileTest(TestFormat): # FIXME: Refactor into generic test for running some command on a directory # of inputs. def __init__(self, command, dir, recursive=False, pattern=".*", useTempInput=False): if isinstance(command, str): self.command = [command] else: self.command = list(command) if dir is not None: dir = str(dir) self.dir = dir self.recursive = bool(recursive) self.pattern = re.compile(pattern) self.useTempInput = useTempInput def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig): dir = self.dir if dir is None: dir = testSuite.getSourcePath(path_in_suite) for dirname,subdirs,filenames in os.walk(dir): if not self.recursive: subdirs[:] = [] subdirs[:] = [d for d in subdirs if (d != '.svn' and d not in localConfig.excludes)] for filename in filenames: if (filename.startswith('.') or not self.pattern.match(filename) or filename in localConfig.excludes): continue path = os.path.join(dirname,filename) suffix = path[len(dir):] if suffix.startswith(os.sep): suffix = suffix[1:] test = lit.Test.Test( testSuite, path_in_suite + tuple(suffix.split(os.sep)), localConfig) # FIXME: Hack? 
test.source_path = path yield test def createTempInput(self, tmp, test): abstract def execute(self, test, litConfig): if test.config.unsupported: return (lit.Test.UNSUPPORTED, 'Test is unsupported') cmd = list(self.command) # If using temp input, create a temporary file and hand it to the # subclass. if self.useTempInput: tmp = tempfile.NamedTemporaryFile(suffix='.cpp') self.createTempInput(tmp, test) tmp.flush() cmd.append(tmp.name) elif hasattr(test, 'source_path'): cmd.append(test.source_path) else: cmd.append(test.getSourcePath()) out, err, exitCode = lit.util.executeCommand(cmd) diags = out + err if not exitCode and not diags.strip(): return lit.Test.PASS,'' # Try to include some useful information. report = """Command: %s\n""" % ' '.join(["'%s'" % a for a in cmd]) if self.useTempInput: report += """Temporary File: %s\n""" % tmp.name report += "--\n%s--\n""" % open(tmp.name).read() report += """Output:\n--\n%s--""" % diags return lit.Test.FAIL, report
lgpl-2.1
FuegoFro/mongo-web-shell
tests/__init__.py
7
1316
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import defaultTestLoader, TestCase

from mongows import create_app

# One shared Flask application for the whole test package, configured for
# testing and with the collection quota disabled.
app = create_app()
app.testing = True
app.config['QUOTA_NUM_COLLECTIONS'] = None


class MongoWSTestCase(TestCase):
    """Base test case giving each test a Flask test client for mongows."""

    def setUp(self):
        # Expose both the shared application and a fresh test client.
        self.app = app.test_client()
        self.real_app = app

    def tearDown(self):
        # No per-test teardown is needed; kept for subclasses to extend.
        pass


def load_tests(loader, tests, pattern):
    """Discover and return every test module in the mongows test package.

    Implements the unittest load_tests protocol: unittest.main() runs
    whatever this function returns.
    """
    return defaultTestLoader.discover(__name__)
apache-2.0
Dee-UK/D33_KK_RK3066
tools/perf/scripts/python/futex-contention.py
11261
1486
# futex contention # (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com> # Licensed under the terms of the GNU GPL License version 2 # # Translation of: # # http://sourceware.org/systemtap/wiki/WSFutexContention # # to perf python scripting. # # Measures futex contention import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Util import * process_names = {} thread_thislock = {} thread_blocktime = {} lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time process_names = {} # long-lived pid-to-execname mapping def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, nr, uaddr, op, val, utime, uaddr2, val3): cmd = op & FUTEX_CMD_MASK if cmd != FUTEX_WAIT: return # we don't care about originators of WAKE events process_names[tid] = comm thread_thislock[tid] = uaddr thread_blocktime[tid] = nsecs(s, ns) def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, nr, ret): if thread_blocktime.has_key(tid): elapsed = nsecs(s, ns) - thread_blocktime[tid] add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed) del thread_blocktime[tid] del thread_thislock[tid] def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): for (tid, lock) in lock_waits: min, max, avg, count = lock_waits[tid, lock] print "%s[%d] lock %x contended %d times, %d avg ns" % \ (process_names[tid], tid, lock, count, avg)
gpl-2.0
Datera/cinder
cinder/tests/unit/backup/test_backup.py
1
98639
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Backup code.""" import copy import ddt import os import uuid from eventlet import tpool import mock from os_brick.initiator.connectors import fake as fake_connectors from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import importutils from oslo_utils import timeutils import cinder from cinder.backup import api from cinder.backup import manager from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import quota from cinder import test from cinder.tests import fake_driver from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import utils from cinder.volume import rpcapi as volume_rpcapi CONF = cfg.CONF class FakeBackupException(Exception): pass class BaseBackupTest(test.TestCase): def setUp(self): super(BaseBackupTest, self).setUp() self.backup_mgr = importutils.import_object(CONF.backup_manager) self.backup_mgr.host = 'testhost' self.backup_mgr.is_initialized = True self.ctxt = context.get_admin_context() paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot', 'cinder.volume.rpcapi.VolumeAPI.delete_volume', 'cinder.volume.rpcapi.VolumeAPI.detach_volume', 'cinder.volume.rpcapi.VolumeAPI.' 
'secure_file_operations_enabled'] self.volume_patches = {} self.volume_mocks = {} for path in paths: name = path.split('.')[-1] self.volume_patches[name] = mock.patch(path) self.volume_mocks[name] = self.volume_patches[name].start() self.addCleanup(self.volume_patches[name].stop) def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()), restore_volume_id=None, display_name='test_backup', display_description='this is a test backup', container='volumebackups', status=fields.BackupStatus.CREATING, size=1, object_count=0, project_id=str(uuid.uuid4()), service=None, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, metadata=None, parent_id=None, encryption_key_id=None): """Create a backup entry in the DB. Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['restore_volume_id'] = restore_volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['host'] = 'testhost' kwargs['availability_zone'] = '1' kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['container'] = container kwargs['status'] = status kwargs['fail_reason'] = '' kwargs['service'] = service or CONF.backup_driver kwargs['snapshot_id'] = snapshot_id kwargs['parent_id'] = parent_id kwargs['size'] = size kwargs['object_count'] = object_count kwargs['temp_volume_id'] = temp_volume_id kwargs['temp_snapshot_id'] = temp_snapshot_id kwargs['metadata'] = metadata or {} kwargs['encryption_key_id'] = encryption_key_id backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def _create_volume_db_entry(self, display_name='test_volume', display_description='this is a test volume', status='backing-up', previous_status='available', size=1, host='testhost', encryption_key_id=None): """Create a volume entry in the DB. 
Return the entry ID """ vol = {} vol['size'] = size vol['host'] = host vol['user_id'] = str(uuid.uuid4()) vol['project_id'] = str(uuid.uuid4()) vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = fields.VolumeAttachStatus.DETACHED vol['availability_zone'] = '1' vol['previous_status'] = previous_status vol['encryption_key_id'] = encryption_key_id volume = objects.Volume(context=self.ctxt, **vol) volume.create() return volume.id def _create_snapshot_db_entry(self, display_name='test_snapshot', display_description='test snapshot', status=fields.SnapshotStatus.AVAILABLE, size=1, volume_id=str(uuid.uuid4()), provider_location=None): """Create a snapshot entry in the DB. Return the entry ID. """ kwargs = {} kwargs['size'] = size kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = str(uuid.uuid4()) kwargs['status'] = status kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['volume_id'] = volume_id kwargs['cgsnapshot_id'] = None kwargs['volume_size'] = size kwargs['metadata'] = {} kwargs['provider_location'] = provider_location snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs) snapshot_obj.create() return snapshot_obj def _create_volume_attach(self, volume_id): values = {'volume_id': volume_id, 'attach_status': fields.VolumeAttachStatus.ATTACHED, } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], None, 'testhost', '/dev/vd0') def _create_exported_record_entry(self, vol_size=1, exported_id=None): """Create backup metadata export entry.""" vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) if exported_id is not None: backup.id = exported_id export = self.backup_mgr.export_record(self.ctxt, backup) return export def _create_export_record_db_entry(self, 
volume_id=str(uuid.uuid4()), status=fields.BackupStatus.CREATING, project_id=str(uuid.uuid4()), backup_id=None): """Create a backup entry in the DB. Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['status'] = status if backup_id: kwargs['id'] = backup_id backup = objects.BackupImport(context=self.ctxt, **kwargs) backup.create() return backup @ddt.ddt class BackupTestCase(BaseBackupTest): """Test Case for backups.""" @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'set_initialized') @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'do_setup') @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'check_for_setup_error') @mock.patch('cinder.context.get_admin_context') def test_init_host(self, mock_get_admin_context, mock_check, mock_setup, mock_set_initialized): """Test stuck volumes and backups. Make sure stuck volumes and backups are reset to correct states when backup_manager.init_host() is called """ def get_admin_context(): return self.ctxt self.override_config('backup_service_inithost_offload', False) self.override_config('periodic_interval', 0) vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) vol2_id = self._create_volume_db_entry() self._create_volume_attach(vol2_id) db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'}) vol3_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol3_id, {'status': 'available'}) vol4_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'}) temp_vol_id = self._create_volume_db_entry() db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'}) vol5_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'}) temp_snap = self._create_snapshot_db_entry() temp_snap.status = 
fields.SnapshotStatus.AVAILABLE
        # NOTE(review): the two lines above/below are the tail of a test whose
        # `def` lies before this chunk (its statement is split mid-line); kept verbatim.
        temp_snap.save()

        # Seed backups in every in-flight state init_host() must reconcile:
        # CREATING -> ERROR, RESTORING -> AVAILABLE, DELETING -> removed,
        # plus CREATING backups holding a temp volume / temp snapshot.
        backup1 = self._create_backup_db_entry(
            status=fields.BackupStatus.CREATING, volume_id=vol1_id)
        backup2 = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING,
            restore_volume_id=vol2_id)
        backup3 = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING, volume_id=vol3_id)
        self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
                                     volume_id=vol4_id,
                                     temp_volume_id=temp_vol_id)
        self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
                                     volume_id=vol5_id,
                                     temp_snapshot_id=temp_snap.id)

        mock_get_admin_context.side_effect = get_admin_context
        self.volume = importutils.import_object(CONF.volume_manager)
        self.backup_mgr.init_host()

        # Volumes caught mid-operation are reset to a stable status.
        vol1 = db.volume_get(self.ctxt, vol1_id)
        self.assertEqual('available', vol1['status'])
        vol2 = db.volume_get(self.ctxt, vol2_id)
        self.assertEqual('error_restoring', vol2['status'])
        vol3 = db.volume_get(self.ctxt, vol3_id)
        self.assertEqual('available', vol3['status'])
        vol4 = db.volume_get(self.ctxt, vol4_id)
        self.assertEqual('available', vol4['status'])
        vol5 = db.volume_get(self.ctxt, vol5_id)
        self.assertEqual('available', vol5['status'])
        backup1 = db.backup_get(self.ctxt, backup1.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup1['status'])
        backup2 = db.backup_get(self.ctxt, backup2.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup2['status'])
        # A backup found DELETING is deleted outright during init_host().
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup3.id)

        # Leftover temp resources from interrupted backups are cleaned up.
        temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id)
        self.volume_mocks['delete_volume'].assert_called_once_with(
            self.ctxt, temp_vol)
        self.assertTrue(self.volume_mocks['detach_volume'].called)

    @mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
    @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool')
    def test_init_host_with_service_inithost_offload(self,
                                                     mock_add_threadpool,
                                                     mock_get_all_by_host):
        """Test that init_host() offloads DELETING backups to the pool."""
        vol1_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
        backup1 = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING, volume_id=vol1_id)

        vol2_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
        backup2 = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING, volume_id=vol2_id)
        mock_get_all_by_host.return_value = [backup1, backup2]
        self.backup_mgr.init_host()
        calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
                 mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
        mock_add_threadpool.assert_has_calls(calls, any_order=True)
        # 3 calls because 1 is always made to handle encryption key migration.
        self.assertEqual(3, mock_add_threadpool.call_count)

    @mock.patch('cinder.keymgr.migration.migrate_fixed_key')
    @mock.patch('cinder.objects.BackupList.get_all_by_host')
    @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool')
    def test_init_host_key_migration(self,
                                     mock_add_threadpool,
                                     mock_get_all_by_host,
                                     mock_migrate_fixed_key):
        """Test that init_host() schedules encryption key migration."""
        self.backup_mgr.init_host()
        mock_add_threadpool.assert_called_once_with(
            mock_migrate_fixed_key,
            backups=mock_get_all_by_host())

    @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
    @mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
    @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3',
                                                 'cinder-volume': '1.7'})
    @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-backup': '1.2',
                                                 'cinder-volume': '1.4'})
    def test_reset(self, get_min_obj, get_min_rpc):
        """Test that reset() re-pins RPC/object version caps on both APIs."""
        get_min_obj.return_value = 'liberty'
        backup_mgr = manager.BackupManager()

        # Before reset: caps come from the patched LAST_*_VERSIONS history.
        backup_rpcapi = backup_mgr.backup_rpcapi
        volume_rpcapi = backup_mgr.volume_rpcapi
        self.assertEqual('1.3', backup_rpcapi.client.version_cap)
        self.assertEqual('1.2',
                         backup_rpcapi.client.serializer._base.version_cap)
        self.assertEqual('1.7', volume_rpcapi.client.version_cap)
        self.assertEqual('1.4',
                         volume_rpcapi.client.serializer._base.version_cap)
        get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current()
        backup_mgr.reset()

        # After reset: caps are re-derived from the service minimums.
        backup_rpcapi = backup_mgr.backup_rpcapi
        volume_rpcapi = backup_mgr.volume_rpcapi
        self.assertEqual(get_min_rpc.return_value,
                         backup_rpcapi.client.version_cap)
        self.assertEqual(get_min_obj.return_value,
                         backup_rpcapi.client.serializer._base.version_cap)
        self.assertIsNone(backup_rpcapi.client.serializer._base.manifest)
        self.assertEqual(get_min_rpc.return_value,
                         volume_rpcapi.client.version_cap)
        self.assertEqual(get_min_obj.return_value,
                         volume_rpcapi.client.serializer._base.version_cap)
        self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)

    @ddt.data(True, False)
    def test_is_working(self, initialized):
        """Test that is_working() mirrors the is_initialized flag."""
        self.backup_mgr.is_initialized = initialized
        self.assertEqual(initialized, self.backup_mgr.is_working())

    def test_cleanup_incomplete_backup_operations_with_exceptions(self):
        """Test cleanup resilience in the face of exceptions."""

        fake_backup_list = [{'id': str(uuid.uuid4())},
                            {'id': str(uuid.uuid4())},
                            {'id': str(uuid.uuid4())}]
        mock_backup_get_by_host = self.mock_object(
            objects.BackupList, 'get_all_by_host')
        mock_backup_get_by_host.return_value = fake_backup_list

        mock_backup_cleanup = self.mock_object(
            self.backup_mgr, '_cleanup_one_backup')
        mock_backup_cleanup.side_effect = [Exception]

        mock_temp_cleanup = self.mock_object(
            self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup')
        mock_temp_cleanup.side_effect = [Exception]

        # Cleanup must swallow per-backup failures and keep going.
        self.assertIsNone(
            self.backup_mgr._cleanup_incomplete_backup_operations(
                self.ctxt))

        # Every backup was attempted despite the raised exceptions.
        self.assertEqual(len(fake_backup_list),
                         mock_backup_cleanup.call_count)
        self.assertEqual(len(fake_backup_list),
                         mock_temp_cleanup.call_count)

    def test_cleanup_one_backing_up_volume(self):
        """Test cleanup_one_volume for volume status 'backing-up'."""

        volume_id = self._create_volume_db_entry(status='backing-up',
                                                 previous_status='available')
        volume = db.volume_get(self.ctxt, volume_id)
        self.backup_mgr._cleanup_one_volume(self.ctxt, volume)

        # The volume is restored to its previous status.
        volume = db.volume_get(self.ctxt, volume_id)
        self.assertEqual('available', volume['status'])

    def test_cleanup_one_restoring_backup_volume(self):
        """Test cleanup_one_volume for volume status 'restoring-backup'."""

        volume_id = self._create_volume_db_entry(status='restoring-backup')
        volume = db.volume_get(self.ctxt, volume_id)
        self.backup_mgr._cleanup_one_volume(self.ctxt, volume)

        # An interrupted restore leaves the volume in 'error_restoring'.
        volume = db.volume_get(self.ctxt, volume_id)
        self.assertEqual('error_restoring', volume['status'])

    def test_cleanup_one_creating_backup(self):
        """Test cleanup_one_backup for volume status 'creating'."""

        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', })

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.CREATING,
            volume_id=vol1_id)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        # Interrupted creation -> backup errored, volume back to available.
        self.assertEqual(fields.BackupStatus.ERROR, backup.status)
        volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
        self.assertEqual('available', volume.status)

    def test_cleanup_one_restoring_backup(self):
        """Test cleanup_one_backup for volume status 'restoring'."""

        vol1_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', })

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING,
            restore_volume_id=vol1_id)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        # The backup itself stays usable; only the restore target errors.
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
        volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
        self.assertEqual('error_restoring', volume.status)

    def test_cleanup_one_deleting_backup(self):
        """Test cleanup_one_backup for backup status 'deleting'."""

        # Run the delete inline rather than via the thread pool.
        self.override_config('backup_service_inithost_offload', False)

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup.id)

    def test_cleanup_one_deleting_encrypted_backup(self):
        """Test cleanup of backup status 'deleting' (encrypted)."""
        # Run the delete inline rather than via the thread pool.
        self.override_config('backup_service_inithost_offload', False)

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        # Encrypted backups are kept (in ERROR_DELETING), not removed.
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertIsNotNone(backup)
        self.assertEqual(fields.BackupStatus.ERROR_DELETING,
                         backup.status)

    def test_detach_all_attachments_handles_exceptions(self):
        """Test detach_all_attachments with exceptions."""

        mock_log = self.mock_object(manager, 'LOG')
        self.volume_mocks['detach_volume'].side_effect = [Exception]

        fake_attachments = [
            {
                'id': str(uuid.uuid4()),
                'attached_host': 'testhost',
                'instance_uuid': None,
            },
            {
                'id': str(uuid.uuid4()),
                'attached_host': 'testhost',
                'instance_uuid': None,
            }
        ]
        fake_volume = {
            'id': str(uuid.uuid4()),
            'volume_attachment': fake_attachments
        }

        self.backup_mgr._detach_all_attachments(self.ctxt,
                                                fake_volume)

        # One logged exception per failed attachment; no exception escapes.
        self.assertEqual(len(fake_attachments), mock_log.exception.call_count)

    @ddt.data(KeyError, exception.VolumeNotFound)
    def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found(
            self, err):
        """Ensure we handle missing volume for a backup."""

        mock_volume_get = self.mock_object(db, 'volume_get')
        mock_volume_get.side_effect = [err]

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.CREATING)

        self.assertIsNone(
            self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
                self.ctxt,
                backup))

    def test_cleanup_temp_snapshot_for_one_backup_not_found(self):
        """Ensure we handle missing temp snapshot for a backup."""
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.ERROR,
            volume_id=vol1_id,
            temp_snapshot_id=str(uuid.uuid4()))

        self.assertIsNone(
            self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
                self.ctxt,
                backup))

        # Nothing to delete, and the dangling reference is cleared.
        self.assertFalse(self.volume_mocks['delete_snapshot'].called)
        self.assertIsNone(backup.temp_snapshot_id)

        backup.destroy()
        db.volume_destroy(self.ctxt, vol1_id)

    def test_cleanup_temp_volume_for_one_backup_not_found(self):
        """Ensure we handle missing temp volume for a backup."""
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
        backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
                                              volume_id=vol1_id,
                                              temp_volume_id=str(uuid.uuid4()))

        self.assertIsNone(
            self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
                self.ctxt,
                backup))

        # Nothing to delete, and the dangling reference is cleared.
        self.assertFalse(self.volume_mocks['delete_volume'].called)
        self.assertIsNone(backup.temp_volume_id)

        backup.destroy()
        db.volume_destroy(self.ctxt, vol1_id)

    def test_create_backup_with_bad_volume_status(self):
        """Test creating a backup from a volume with a bad status."""
        vol_id = self._create_volume_db_entry(status='restoring', size=1)
        backup = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup)

    def test_create_backup_with_bad_backup_status(self):
        """Test creating a backup with a backup with a bad status."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.AVAILABLE,
            volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup)

    def test_create_backup_with_error(self):
        """Test error handling when error occurs during backup creation."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        mock_run_backup = self.mock_object(self.backup_mgr, '_run_backup')
        mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4()))
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup)
        # On failure the volume returns to service but records the error.
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual('available', vol['status'])
        self.assertEqual('error_backing-up', vol['previous_status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
        self.assertTrue(mock_run_backup.called)

    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
    def test_create_backup_aborted(self, run_backup_mock):
        """Test error handling when abort occurs during backup creation."""
        def my_run_backup(*args, **kwargs):
            # Simulate the backup being deleted out from under the manager
            # while the transfer is in flight.
            backup.destroy()
            with backup.as_read_deleted():
                original_refresh()

        run_backup_mock.side_effect = my_run_backup
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(volume_id=vol_id)
        original_refresh = backup.refresh

        self.backup_mgr.create_backup(self.ctxt, backup)
        self.assertTrue(run_backup_mock.called)
        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        self.assertEqual('available', vol.status)
        self.assertEqual('backing-up', vol['previous_status'])

        # Make sure we didn't set the backup to available after it was deleted
        with backup.as_read_deleted():
            backup.refresh()
            self.assertEqual(fields.BackupStatus.DELETED, backup.status)

    @mock.patch('cinder.backup.manager.BackupManager._run_backup',
                side_effect=FakeBackupException(str(uuid.uuid4())))
    def test_create_backup_with_snapshot_error(self, mock_run_backup):
        """Test error handling when error occurs during backup creation."""
        vol_id = self._create_volume_db_entry(size=1)
        snapshot = self._create_snapshot_db_entry(status='backing-up',
                                                  volume_id=vol_id)
        backup = self._create_backup_db_entry(volume_id=vol_id,
                                              snapshot_id=snapshot.id)
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup)

        # The source snapshot is released back to 'available'.
        snapshot.refresh()
        self.assertEqual('available', snapshot.status)

        backup.refresh()
        self.assertEqual(fields.BackupStatus.ERROR, backup.status)
        self.assertTrue(mock_run_backup.called)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=False)
    def test_create_backup(self, mock_isdir, mock_open, mock_temporary_chown,
                           mock_get_backup_device, mock_get_conn):
        """Test normal backup creation."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        backup_device_dict = {'backup_device': vol, 'secure_enabled': False,
                              'is_snapshot': False, }
        mock_get_backup_device.return_value = (
            objects.BackupDeviceInfo.from_primitive(backup_device_dict,
                                                    self.ctxt,
                                                    ['admin_metadata',
                                                     'metadata']))
        attach_info = {'device': {'path': '/dev/null'}}
        mock_detach_device = self.mock_object(self.backup_mgr,
                                              '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = attach_info
        properties = {}
        mock_get_conn.return_value = properties
        mock_open.return_value = open('/dev/null', 'rb')

        self.backup_mgr.create_backup(self.ctxt, backup)

        # Verify the attach/backup/detach sequence against the mocks.
        mock_temporary_chown.assert_called_once_with('/dev/null')
        mock_attach_device.assert_called_once_with(self.ctxt, vol,
                                                   properties, False)
        mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
        mock_get_conn.assert_called_once_with()
        mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
                                                   vol, properties, False,
                                                   force=True,
                                                   ignore_errors=True)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        self.assertEqual('available', vol['status'])
        self.assertEqual('backing-up', vol['previous_status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
        self.assertEqual(vol_size, backup['size'])
        self.assertIsNone(backup.encryption_key_id)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=True)
    def test_create_backup_set_parent_id_to_none(self, mock_isdir, mock_open,
                                                 mock_chown,
                                                 mock_backup_device,
                                                 mock_brick):
        """Test that a driver-returned parent_id of None clears the field."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id,
                                              parent_id='mock')

        with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
                mock_get_backup_driver:
            mock_get_backup_driver.return_value.backup.return_value = (
                {'parent_id': None})
            with mock.patch.object(self.backup_mgr, '_detach_device'):
                device_path = '/fake/disk/path/'
                attach_info = {'device': {'path': device_path}}
                mock_attach_device = self.mock_object(self.backup_mgr,
                                                      '_attach_device')
                mock_attach_device.return_value = attach_info
                properties = {}
                mock_brick.return_value = properties
                mock_open.return_value = open('/dev/null', 'rb')
                mock_brick.return_value = properties
                self.backup_mgr.create_backup(self.ctxt, backup)

        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
        self.assertEqual(vol_size, backup.size)
        self.assertIsNone(backup.parent_id)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=True)
    def test_create_backup_set_parent_id(self, mock_isdir, mock_open,
                                         mock_chown, mock_backup_device,
                                         mock_brick):
        """Test that a driver-returned parent_id is stored on the backup."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id)
        parent_backup = self._create_backup_db_entry(size=vol_size)

        with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
                mock_get_backup_driver:
            mock_get_backup_driver.return_value.backup.return_value = (
                {'parent_id': parent_backup.id})
            with mock.patch.object(self.backup_mgr, '_detach_device'):
                device_path = '/fake/disk/path/'
                attach_info = {'device': {'path': device_path}}
                mock_attach_device = self.mock_object(self.backup_mgr,
                                                      '_attach_device')
                mock_attach_device.return_value = attach_info
                properties = {}
                mock_brick.return_value = properties
                mock_open.return_value = open('/dev/null', 'rb')
                mock_brick.return_value = properties
                self.backup_mgr.create_backup(self.ctxt, backup)

        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
        self.assertEqual(vol_size, backup.size)
        self.assertEqual(parent_backup.id, backup.parent_id)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=True)
    def test_create_backup_fail_with_excep(self, mock_isdir, mock_open,
                                           mock_chown, mock_backup_device,
                                           mock_brick):
        """Test status rollback when the backup driver itself raises."""
        vol_id = self._create_volume_db_entry()
        backup = self._create_backup_db_entry(volume_id=vol_id)
        with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
                mock_get_backup_driver:
            mock_get_backup_driver.return_value.backup.side_effect = (
                FakeBackupException('fake'))
            with mock.patch.object(self.backup_mgr, '_detach_device'):
                device_path = '/fake/disk/path/'
                attach_info = {'device': {'path': device_path}}
                mock_attach_device = self.mock_object(self.backup_mgr,
                                                      '_attach_device')
                mock_attach_device.return_value = attach_info
                properties = {}
                mock_brick.return_value = properties
                mock_open.return_value = open('/dev/null', 'rb')
                mock_brick.return_value = properties
                self.assertRaises(FakeBackupException,
                                  self.backup_mgr.create_backup,
                                  self.ctxt, backup)

        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual('available', vol.status)
        self.assertEqual('error_backing-up', vol.previous_status)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup.status)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=True)
    def test_run_backup_with_dir_device_path(self, mock_isdir,
                                             mock_open,
                                             mock_chown,
                                             mock_backup_device,
                                             mock_brick):
        """Test _run_backup passes a directory device path straight through."""
        backup_service = lambda: None
        backup_service.backup = mock.Mock(
            return_value=mock.sentinel.backup_update)
        self.backup_mgr.get_backup_driver = lambda x: backup_service

        vol_id = self._create_volume_db_entry()
        backup = self._create_backup_db_entry(volume_id=vol_id)
        volume = objects.Volume.get_by_id(self.ctxt, vol_id)

        # device_path is represented by a directory
        device_path = '/fake/disk/path/'
        attach_info = {'device': {'path': device_path}}
        self.backup_mgr._attach_device = mock.Mock(
            return_value=attach_info)
        self.backup_mgr._detach_device = mock.Mock()
        output = self.backup_mgr._run_backup(self.ctxt, backup, volume)

        # No chown/open for a directory device; the driver gets the path.
        mock_chown.assert_not_called()
        mock_open.assert_not_called()
        backup_service.backup.assert_called_once_with(
            backup, device_path)
        self.assertEqual(mock.sentinel.backup_update, output)

    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
    @ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'),
              (fields.SnapshotStatus.BACKING_UP, 'in-use'),
              (fields.SnapshotStatus.AVAILABLE, 'available'),
              (fields.SnapshotStatus.AVAILABLE, 'in-use'))
    @ddt.unpack
    def test_create_backup_with_snapshot(self, snapshot_status, volume_status,
                                         mock_run_backup):
        """Test backing up from a snapshot across snapshot/volume statuses."""
        vol_id = self._create_volume_db_entry(status=volume_status)
        snapshot = self._create_snapshot_db_entry(volume_id=vol_id,
                                                  status=snapshot_status)
        backup = self._create_backup_db_entry(volume_id=vol_id,
                                              snapshot_id=snapshot.id)
        # Only a snapshot already in BACKING_UP may be backed up.
        if snapshot_status == fields.SnapshotStatus.BACKING_UP:
            self.backup_mgr.create_backup(self.ctxt, backup)

            vol = objects.Volume.get_by_id(self.ctxt, vol_id)
            snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)

            self.assertEqual(volume_status, vol.status)
            self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
        else:
            self.assertRaises(exception.InvalidSnapshot,
                              self.backup_mgr.create_backup, self.ctxt,
                              backup)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=False)
    def test_create_backup_with_temp_snapshot(self, mock_isdir, mock_open,
                                              mock_temporary_chown,
                                              mock_get_backup_device,
                                              mock_get_conn):
        """Test backup in-use volume using temp snapshot."""
        self.override_config('backup_use_same_host', True)
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size,
                                              previous_status='in-use')
        backup = self._create_backup_db_entry(volume_id=vol_id)
        snap = self._create_snapshot_db_entry(volume_id=vol_id)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        mock_get_backup_device.return_value = (
            objects.BackupDeviceInfo.from_primitive({
                'backup_device': snap, 'secure_enabled': False,
                'is_snapshot': True, },
                self.ctxt, expected_attrs=['metadata']))

        attach_info = {
            'device': {'path': '/dev/null'},
            'conn': {'data': {}},
            'connector': fake_connectors.FakeConnector(None)}
        mock_terminate_connection_snapshot = self.mock_object(
            volume_rpcapi.VolumeAPI,
            'terminate_connection_snapshot')
        mock_initialize_connection_snapshot = self.mock_object(
            volume_rpcapi.VolumeAPI,
            'initialize_connection_snapshot')
        mock_connect_device = self.mock_object(
            manager.BackupManager,
            '_connect_device')
        mock_connect_device.return_value = attach_info
        properties = {}
        mock_get_conn.return_value = properties
        mock_open.return_value = open('/dev/null', 'rb')

        self.backup_mgr.create_backup(self.ctxt, backup)

        # The snapshot connection lifecycle replaces volume attach/detach.
        mock_temporary_chown.assert_called_once_with('/dev/null')
        mock_initialize_connection_snapshot.assert_called_once_with(
            self.ctxt, snap, properties)
        mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
        mock_get_conn.assert_called_once_with()
        mock_terminate_connection_snapshot.assert_called_once_with(
            self.ctxt, snap, properties, force=True)
        vol =
objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('in-use', vol['status']) self.assertEqual('backing-up', vol['previous_status']) backup = objects.Backup.get_by_id(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertEqual(vol_size, backup.size) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_snapshot') def test_create_temp_snapshot(self, mock_create_snapshot): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = objects.Volume.get_by_id(self.ctxt, vol_id) mock_create_snapshot.return_value = {'provider_id': 'fake_provider_id'} temp_snap = volume_manager.driver._create_temp_snapshot( self.ctxt, vol) self.assertEqual('available', temp_snap['status']) self.assertEqual('fake_provider_id', temp_snap['provider_id']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_cloned_volume') def test_create_temp_cloned_volume(self, mock_create_cloned_volume): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = objects.Volume.get_by_id(self.ctxt, vol_id) mock_create_cloned_volume.return_value = {'provider_id': 'fake_provider_id'} temp_vol = volume_manager.driver._create_temp_cloned_volume( self.ctxt, vol) self.assertEqual('available', temp_vol['status']) self.assertEqual('fake_provider_id', temp_vol['provider_id']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_volume_from_snapshot') def test_create_temp_volume_from_snapshot(self, mock_create_vol_from_snap): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = 
objects.Volume.get_by_id(self.ctxt, vol_id) snap = self._create_snapshot_db_entry(volume_id=vol_id) mock_create_vol_from_snap.return_value = {'provider_id': 'fake_provider_id'} temp_vol = volume_manager.driver._create_temp_volume_from_snapshot( self.ctxt, vol, snap) self.assertEqual('available', temp_vol['status']) self.assertEqual('fake_provider_id', temp_vol['provider_id']) @mock.patch('cinder.volume.utils.notify_about_backup_usage') def test_create_backup_with_notify(self, notify): """Test normal backup creation with notifications.""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) self.mock_object(self.backup_mgr, '_run_backup') self.backup_mgr.create_backup(self.ctxt, backup) self.assertEqual(2, notify.call_count) @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.utils.brick_get_connector_properties') def test_create_backup_encrypted_volume(self, mock_connector_properties, mock_clone_encryption_key, mock_get_backup_device): """Test backup of encrypted volume. Test whether the volume's encryption key ID is cloned and saved in the backup. 
""" vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry(volume_id=vol_id) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID2 self.backup_mgr.create_backup(self.ctxt, backup) mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID2, backup.encryption_key_id) @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.utils.brick_get_connector_properties') def test_create_backup_encrypted_volume_again(self, mock_connector_properties, mock_clone_encryption_key, mock_get_backup_device): """Test backup of encrypted volume. Test when the backup already has a clone of the volume's encryption key ID. """ vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry(volume_id=vol_id, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} self.backup_mgr.create_backup(self.ctxt, backup) mock_clone_encryption_key.assert_not_called() def test_restore_backup_with_bad_volume_status(self): """Test error handling. Test error handling when restoring a backup to a volume with a bad status. 
""" vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertRaises(exception.InvalidVolume, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) backup = db.backup_get(self.ctxt, backup.id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error_restoring', vol['status']) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) def test_restore_backup_with_bad_backup_status(self): """Test error handling. Test error handling when restoring a backup with a backup with a bad status. """ vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_restore_backup_with_driver_error(self): """Test error handling when an error occurs during backup restore.""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) mock_run_restore = self.mock_object( self.backup_mgr, '_run_restore') mock_run_restore.side_effect = FakeBackupException('fake') self.assertRaises(FakeBackupException, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error_restoring', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertTrue(mock_run_restore.called) def test_restore_backup_with_bad_service(self): """Test error handling. Test error handling when attempting a restore of a backup with a different service to that used to create the backup. 
""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os.path, 'isdir', return_value=False) def test_restore_backup(self, mock_isdir, mock_open, mock_temporary_chown, mock_get_conn): """Test normal backup restoration.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='restoring-backup', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) properties = {} mock_get_conn.return_value = properties mock_open.return_value = open('/dev/null', 'wb') mock_secure_enabled = ( self.volume_mocks['secure_file_operations_enabled']) mock_secure_enabled.return_value = False vol = objects.Volume.get_by_id(self.ctxt, vol_id) attach_info = {'device': {'path': '/dev/null'}} mock_detach_device = self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) mock_temporary_chown.assert_called_once_with('/dev/null') mock_get_conn.assert_called_once_with() mock_secure_enabled.assert_called_once_with(self.ctxt, vol) mock_attach_device.assert_called_once_with(self.ctxt, vol, properties) mock_detach_device.assert_called_once_with(self.ctxt, attach_info, vol, properties, force=True) vol = objects.Volume.get_by_id(self.ctxt, vol_id) 
        self.assertEqual('available', vol['status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_restore_backup_with_notify(self, notify):
        """Test normal backup restoration with notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING, volume_id=vol_id)
        # Short-circuit the actual data path; only the notification
        # behaviour of restore_backup is under test here.
        self.backup_mgr._run_restore = mock.Mock()

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
        # Two usage notifications are expected per restore (the test only
        # pins the count; presumably one at start and one at end — confirm
        # against restore_backup).
        self.assertEqual(2, notify.call_count)

    @mock.patch('cinder.volume.utils.clone_encryption_key')
    @mock.patch('cinder.volume.utils.delete_encryption_key')
    @mock.patch(
        'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
    @mock.patch('cinder.utils.brick_get_connector_properties')
    def test_restore_backup_encrypted_volume(self, mock_connector_properties,
                                             mock_backup_driver_restore,
                                             mock_delete_encryption_key,
                                             mock_clone_encryption_key):
        # NOTE: mock.patch decorators apply bottom-up, hence the mock
        # arguments arrive in reverse order of the decorators above.
        """Test restore of encrypted volume.

        Test restoring a volume from its own backup. In this situation,
        the volume's encryption key ID shouldn't change.
""" vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID1, volume.encryption_key_id) mock_clone_encryption_key.assert_not_called() mock_delete_encryption_key.assert_not_called() @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.volume.utils.delete_encryption_key') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.utils.brick_get_connector_properties') def test_restore_backup_new_encrypted_volume(self, mock_connector_properties, mock_backup_driver_restore, mock_delete_encryption_key, mock_clone_encryption_key): """Test restore of encrypted volume. Test handling of encryption key IDs when retoring to another encrypted volume, i.e. a volume whose key ID is different from the volume originally backed up. - The volume's prior encryption key ID is deleted. - The volume is assigned a fresh clone of the backup's encryption key ID. """ vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID3 # Mimic the driver's side effect where it updates the volume's # metadata. 
For backups of encrypted volumes, this will essentially # overwrite the volume's encryption key ID prior to the restore. def restore_side_effect(backup, volume_id, volume_file): db.volume_update(self.ctxt, volume_id, {'encryption_key_id': fake.UUID4}) mock_backup_driver_restore.side_effect = restore_side_effect self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) # Volume's original encryption key ID should be deleted mock_delete_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) # Backup's encryption key ID should have been cloned mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID2) # Volume should have the cloned backup key ID volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID3, volume.encryption_key_id) # Backup's key ID should not have changed backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID2, backup.encryption_key_id) @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.volume.utils.delete_encryption_key') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.utils.brick_get_connector_properties') def test_restore_backup_glean_key_id(self, mock_connector_properties, mock_backup_driver_restore, mock_delete_encryption_key, mock_clone_encryption_key): """Test restore of encrypted volume. Test restoring a backup that was created prior to when the encryption key ID is saved in the backup DB. The backup encryption key ID is gleaned from the restored volume. 
""" vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID3 # Mimic the driver's side effect where it updates the volume's # metadata. For backups of encrypted volumes, this will essentially # overwrite the volume's encryption key ID prior to the restore. def restore_side_effect(backup, volume_id, volume_file): db.volume_update(self.ctxt, volume_id, {'encryption_key_id': fake.UUID4}) mock_backup_driver_restore.side_effect = restore_side_effect self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) # Volume's original encryption key ID should be deleted mock_delete_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) # Backup's encryption key ID should have been cloned from # the value restored from the metadata. mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID4) # Volume should have the cloned backup key ID volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID3, volume.encryption_key_id) # Backup's key ID should have been gleaned from value restored # from the backup's metadata backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID4, backup.encryption_key_id) def test_delete_backup_with_bad_backup_status(self): """Test error handling. Test error handling when deleting a backup with a backup with a bad status. 
""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_error(self): """Test error handling when an error occurs during backup deletion.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, display_name='fail_on_delete', volume_id=vol_id) self.assertRaises(IOError, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_bad_service(self): """Test error handling. Test error handling when attempting a delete of a backup with a different service to that used to create the backup. """ vol_id = self._create_volume_db_entry(size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_no_service(self): """Test error handling. 
Test error handling when attempting a delete of a backup with no service defined for that backup, relates to bug #1162908 """ vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id) backup.service = None backup.save() self.backup_mgr.delete_backup(self.ctxt, backup) @ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService', 'cinder.tests.unit.backup.fake_service') def test_delete_backup(self, service): """Test normal backup deletion.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id, service=service) self.backup_mgr.delete_backup(self.ctxt, backup) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup.id) ctxt_read_deleted = context.get_admin_context('yes') backup = db.backup_get(ctxt_read_deleted, backup.id) self.assertTrue(backup.deleted) self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at) self.assertEqual(fields.BackupStatus.DELETED, backup.status) @mock.patch('cinder.volume.utils.delete_encryption_key') def test_delete_backup_of_encrypted_volume(self, mock_delete_encryption_key): """Test deletion of backup of encrypted volume""" vol_id = self._create_volume_db_entry( encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.DELETING, encryption_key_id=fake.UUID2) self.backup_mgr.delete_backup(self.ctxt, backup) mock_delete_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID2) ctxt_read_deleted = context.get_admin_context('yes') backup = db.backup_get(ctxt_read_deleted, backup.id) self.assertTrue(backup.deleted) self.assertIsNone(backup.encryption_key_id) @mock.patch('cinder.volume.utils.notify_about_backup_usage') def test_delete_backup_with_notify(self, notify): """Test normal backup deletion with notifications.""" vol_id = self._create_volume_db_entry(size=1) backup = 
self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id) self.backup_mgr.delete_backup(self.ctxt, backup) self.assertEqual(2, notify.call_count) def test_list_backup(self): project_id = str(uuid.uuid4()) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(0, len(backups)) self._create_backup_db_entry() b2 = self._create_backup_db_entry(project_id=project_id) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(1, len(backups)) self.assertEqual(b2.id, backups[0].id) def test_backup_get_all_by_project_with_deleted(self): """Test deleted backups. Test deleted backups don't show up in backup_get_all_by_project. Unless context.read_deleted is 'yes'. """ project_id = str(uuid.uuid4()) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(0, len(backups)) backup_keep = self._create_backup_db_entry(project_id=project_id) backup = self._create_backup_db_entry(project_id=project_id) db.backup_destroy(self.ctxt, backup.id) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(1, len(backups)) self.assertEqual(backup_keep.id, backups[0].id) ctxt_read_deleted = context.get_admin_context('yes') backups = db.backup_get_all_by_project(ctxt_read_deleted, project_id) self.assertEqual(2, len(backups)) def test_backup_get_all_by_host_with_deleted(self): """Test deleted backups. Test deleted backups don't show up in backup_get_all_by_project. 
Unless context.read_deleted is 'yes' """ backups = db.backup_get_all_by_host(self.ctxt, 'testhost') self.assertEqual(0, len(backups)) backup_keep = self._create_backup_db_entry() backup = self._create_backup_db_entry() db.backup_destroy(self.ctxt, backup.id) backups = db.backup_get_all_by_host(self.ctxt, 'testhost') self.assertEqual(1, len(backups)) self.assertEqual(backup_keep.id, backups[0].id) ctxt_read_deleted = context.get_admin_context('yes') backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost') self.assertEqual(2, len(backups)) def test_backup_manager_driver_name(self): """Test mapping between backup services and backup drivers.""" self.override_config('backup_driver', "cinder.backup.services.swift") backup_mgr = \ importutils.import_object(CONF.backup_manager) self.assertEqual('cinder.backup.drivers.swift', backup_mgr.driver_name) def test_export_record_with_bad_service(self): """Test error handling. Test error handling when attempting an export of a backup record with a different service to that used to create the backup. """ vol_id = self._create_volume_db_entry(size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.export_record, self.ctxt, backup) def test_export_record_with_bad_backup_status(self): """Test error handling. Test error handling when exporting a backup record with a backup with a bad status. 
""" vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.export_record, self.ctxt, backup) @ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService', 'cinder.tests.unit.backup.fake_service') def test_export_record(self, service): """Test normal backup record export.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id, service=service) export = self.backup_mgr.export_record(self.ctxt, backup) self.assertEqual(service, export['backup_service']) self.assertIn('backup_url', export) def test_import_record_with_verify_not_implemented(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver does not support verify. """ vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry(vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_wrong_id(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver does not support verify. 
""" vol_size = 1 export = self._create_exported_record_entry(vol_size=vol_size) imported_record = self._create_export_record_db_entry() backup_hosts = [] self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) def test_import_record_with_bad_service(self): """Test error handling. Test error handling when attempting an import of a backup record with a different service to that used to create the backup. """ export = self._create_exported_record_entry() export['backup_service'] = 'cinder.tests.unit.backup.bad_service' imported_record = self._create_export_record_db_entry() # Test the case where the additional hosts list is empty backup_hosts = [] self.assertRaises(exception.ServiceNotFound, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) # Test that the import backup keeps calling other hosts to find a # suitable host for the backup service backup_hosts = ['fake1', 'fake2'] backup_hosts_expect = list(backup_hosts) BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record' with mock.patch(BackupAPI_import) as _mock_backup_import: self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) next_host = backup_hosts_expect.pop() _mock_backup_import.assert_called_once_with( self.ctxt, next_host, imported_record, export['backup_service'], export['backup_url'], backup_hosts_expect) def test_import_record_with_invalid_backup(self): """Test error handling. Test error handling when attempting an import of a backup record where the backup driver returns an exception. 
""" export = self._create_exported_record_entry() backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_record_import_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'import_record')) imported_record = self._create_export_record_db_entry() backup_hosts = [] with mock.patch(_mock_record_import_class) as _mock_record_import: _mock_record_import.side_effect = FakeBackupException('fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_import.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_not_supported_driver_to_force_delete(self): """Test force delete check method for not supported drivers.""" self.override_config('backup_driver', 'cinder.backup.drivers.ceph') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertFalse(result) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_init_backup_repo_path', return_value=None) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' 'check_for_setup_error', return_value=None) def test_check_support_to_force_delete(self, mock_check_configuration, mock_init_backup_repo_path): """Test force delete check method for supported drivers.""" self.override_config('backup_driver', 'cinder.backup.drivers.nfs') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertTrue(result) def test_backup_has_dependent_backups(self): """Test backup has dependent backups. Test the query of has_dependent_backups in backup object is correct. 
""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertFalse(backup.has_dependent_backups) def test_default_tpool_size(self): """Test we can set custom tpool size.""" tpool._nthreads = 20 self.assertListEqual([], tpool._threads) self.backup_mgr = importutils.import_object(CONF.backup_manager) self.assertEqual(60, tpool._nthreads) self.assertListEqual([], tpool._threads) def test_tpool_size(self): """Test we can set custom tpool size.""" self.assertNotEqual(100, tpool._nthreads) self.assertListEqual([], tpool._threads) self.override_config('backup_native_threads_pool_size', 100) self.backup_mgr = importutils.import_object(CONF.backup_manager) self.assertEqual(100, tpool._nthreads) self.assertListEqual([], tpool._threads) def test_driver_name_startswith_backup_service_name(self): service_name = 'cinder.tests.unit.backup.fake_service' driver_name = 'cinder.tests.unit.backup.fake_service.FakeBackupService' self.override_config('backup_driver', driver_name) vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id, service=service_name) result = self.backup_mgr._is_our_backup(backup) self.assertTrue(result) def test_backup_service_name_startswith_driver_name(self): driver_name = 'cinder.tests.unit.backup.fake_service' service_name = ('cinder.tests.unit.backup.fake_service.' 
'FakeBackupService') self.override_config('backup_driver', driver_name) vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id, service=service_name) result = self.backup_mgr._is_our_backup(backup) self.assertTrue(result) class BackupTestCaseWithVerify(BaseBackupTest): """Test Case for backups.""" def setUp(self): self.override_config( "backup_driver", "cinder.tests.unit.backup.fake_service_with_verify") super(BackupTestCaseWithVerify, self).setUp() def test_import_record_with_verify(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver implements verify. """ vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry( vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) def mock_verify(backup_id): backup = db.backup_get(self.ctxt, backup_id) self.assertEqual(fields.BackupStatus.CREATING, backup['status']) with mock.patch(_mock_backup_verify_class) as mock_backup_verify: mock_backup_verify.side_effect = mock_verify self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_verify_invalid_backup(self): """Test error handling. Test error handling when attempting an import of a backup record where the backup driver returns an exception. 
""" vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry( vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class) as _mock_record_verify: _mock_record_verify.side_effect = \ exception.InvalidBackup(reason='fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_verify.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_from_nonrestoring_to_available( self, mock_clean_temp): vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id) with mock.patch.object(manager.BackupManager, '_map_service_to_driver') as \ mock_map_service_to_driver: # It should works when the service name is a string backup_driver = 'cinder.tests.unit.backup.fake_service_with_verify' mock_map_service_to_driver.return_value = backup_driver self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) new_backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, new_backup['status']) mock_map_service_to_driver.return_value = backup_driver self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.ERROR) mock_clean_temp.reset_mock() self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) 
mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) def test_backup_reset_status_to_available_invalid_backup(self): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=volume['id']) backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class) as \ _mock_record_verify: _mock_record_verify.side_effect = \ exception.BackupVerifyUnsupportedDriver(reason='fake') self.assertRaises(exception.BackupVerifyUnsupportedDriver, self.backup_mgr.reset_status, self.ctxt, backup, fields.BackupStatus.AVAILABLE) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_from_restoring_to_available( self, mock_clean_temp): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=volume['id']) self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_to_error(self, mock_clean_temp): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry( 
status=fields.BackupStatus.CREATING, volume_id=volume['id']) self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.ERROR) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup['id']) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @ddt.ddt class BackupAPITestCase(BaseBackupTest): def setUp(self): super(BackupAPITestCase, self).setUp() self.api = api.API() def test_get_all_wrong_all_tenants_value(self): self.assertRaises(exception.InvalidParameterValue, self.api.get_all, self.ctxt, {'all_tenants': 'bad'}) @mock.patch.object(objects, 'BackupList') def test_get_all_no_all_tenants_value(self, mock_backuplist): result = self.api.get_all(self.ctxt, {'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(False, 'false', '0', 0, 'no') def test_get_all_false_value_all_tenants( self, false_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': false_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(True, 'true', '1', 1, 'yes') def test_get_all_true_value_all_tenants( self, true_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': true_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all_by_project.called) self.assertEqual(mock_backuplist.get_all.return_value, result) mock_backuplist.get_all.assert_called_once_with( self.ctxt, {'key': 'value'}, None, None, None, None, None) 
@mock.patch.object(objects, 'BackupList') def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist): ctxt = context.RequestContext(uuid.uuid4(), uuid.uuid4()) result = self.api.get_all(ctxt, {'all_tenants': '1', 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(db, 'backup_create', side_effect=db_exc.DBError()) def test_create_when_failed_to_create_backup_object( self, mock_create, mock_get_service): # Create volume in admin context volume_id = utils.create_volume(self.ctxt)['id'] # Will try to backup from a different context new_context = copy.copy(self.ctxt) new_context.user_id = uuid.uuid4() new_context.project_id = uuid.uuid4() # The opposite side of this test case is a "NotImplementedError: # Cannot load 'id' in the base class" being raised. # More detailed, in the try clause, if backup.create() failed # with DB exception, backup.id won't be assigned. However, # in the except clause, backup.destroy() is invoked to do cleanup, # which internally tries to access backup.id. self.assertRaises(db_exc.DBError, self.api.create, context=new_context, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(objects.Backup, '__init__', side_effect=exception.InvalidInput( reason='Failed to new')) def test_create_when_failed_to_new_backup_object(self, mock_new, mock_get_service): volume_id = utils.create_volume(self.ctxt)['id'] # The opposite side of this test case is that a "UnboundLocalError: # local variable 'backup' referenced before assignment" is raised. 
# More detailed, in the try clause, backup = objects.Backup(...) # raises exception, so 'backup' is not assigned. But in the except # clause, 'backup' is referenced to invoke cleanup methods. self.assertRaises(exception.InvalidInput, self.api.create, context=self.ctxt, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @mock.patch('cinder.backup.api.API._is_backup_service_enabled') def test_create_backup_in_same_host(self, mock_is_enable, mock_create): self.override_config('backup_use_same_host', True) mock_is_enable.return_value = True self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='available', host='testhost#lvm', size=1) backup = self.api.create(self.ctxt, None, None, volume_id, None) self.assertEqual('testhost', backup.host) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') def test_create_backup_from_snapshot_with_volume_in_use( self, mock_create, mock_get_service): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='in-use') snapshot = self._create_snapshot_db_entry(volume_id=volume_id) backup = self.api.create(self.ctxt, None, None, volume_id, None, snapshot_id=snapshot.id) self.assertEqual(fields.BackupStatus.CREATING, backup.status) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual(fields.SnapshotStatus.BACKING_UP, snapshot.status) self.assertEqual('in-use', volume.status) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @ddt.data(True, False) def test_create_backup_resource_status(self, is_snapshot, mock_create, 
mock_get_service): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='available') snapshot = self._create_snapshot_db_entry(volume_id=volume_id) if is_snapshot: self.api.create(self.ctxt, None, None, volume_id, None, snapshot_id=snapshot.id) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual('backing-up', snapshot.status) self.assertEqual('available', volume.status) else: self.api.create(self.ctxt, None, None, volume_id, None) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual('available', snapshot.status) self.assertEqual('backing-up', volume.status) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') def test_restore_volume(self, mock_rpcapi_restore, mock_get_backup_host): volume_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(size=1, status='available') mock_get_backup_host.return_value = 'testhost' self.api.restore(self.ctxt, backup.id, volume_id) backup = objects.Backup.get_by_id(self.ctxt, backup.id) self.assertEqual(volume_id, backup.restore_volume_id) @mock.patch.object(objects.Backup, 'decode_record') @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') def test__get_import_backup_invalid_backup( self, mock_reserve, mock_rollback, mock_commit, mock_decode): backup = self._create_backup_db_entry(size=1, status='available') mock_decode.return_value = {'id': backup.id, 'project_id': backup.project_id, 'user_id': backup.user_id, 'volume_id': backup.volume_id, 'size': 1} mock_reserve.return_value = 'fake_reservation' self.assertRaises(exception.InvalidBackup, self.api._get_import_backup, self.ctxt, 'fake_backup_url') 
mock_reserve.assert_called_with( self.ctxt, backups=1, backup_gigabytes=1) mock_rollback.assert_called_with(self.ctxt, "fake_reservation")
apache-2.0
mgit-at/ansible
lib/ansible/modules/packaging/os/zypper.py
27
17960
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Patrick Callahan <pmc@patrickcallahan.com> # based on # openbsd_pkg # (c) 2013 # Patrik Lundin <patrik.lundin.swe@gmail.com> # # yum # (c) 2012, Red Hat, Inc # Written by Seth Vidal <skvidal at fedoraproject.org> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: zypper author: - "Patrick Callahan (@dirtyharrycallahan)" - "Alexander Gubin (@alxgu)" - "Thomas O'Donnell (@andytom)" - "Robin Roth (@robinro)" - "Andrii Radyk (@AnderEnder)" version_added: "1.2" short_description: Manage packages on SUSE and openSUSE description: - Manage packages on SUSE and openSUSE using the zypper and rpm tools. options: name: description: - Package name C(name) or package specifier or a list of either. - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to update the package within the version range given. - You can also pass a url or a local path to a rpm file. - When using state=latest, this can be '*', which updates all installed packages. required: true aliases: [ 'pkg' ] state: description: - C(present) will make sure the package is installed. C(latest) will make sure the latest version of the package is installed. C(absent) will make sure the specified package is not installed. C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed. - When using C(dist-upgrade), I(name) should be C('*'). required: false choices: [ present, latest, absent, dist-upgrade ] default: "present" type: description: - The type of package to be operated on. 
required: false choices: [ package, patch, pattern, product, srcpackage, application ] default: "package" version_added: "2.0" extra_args_precommand: version_added: "2.6" required: false description: - Add additional global target options to C(zypper). - Options should be supplied in a single line as if given in the command line. disable_gpg_check: description: - Whether to disable to GPG signature checking of the package signature being installed. Has an effect only if state is I(present) or I(latest). required: false default: "no" type: bool disable_recommends: version_added: "1.8" description: - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages. required: false default: "yes" type: bool force: version_added: "2.2" description: - Adds C(--force) option to I(zypper). Allows to downgrade packages and change vendor or architecture. required: false default: "no" type: bool update_cache: version_added: "2.2" description: - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode. required: false default: "no" type: bool aliases: [ "refresh" ] oldpackage: version_added: "2.2" description: - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a version is specified as part of the package name. required: false default: "no" type: bool extra_args: version_added: "2.4" required: false description: - Add additional options to C(zypper) command. - Options should be supplied in a single line as if given in the command line. notes: - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option. 
# informational: requirements for nodes requirements: - "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0" - python-xml - rpm ''' EXAMPLES = ''' # Install "nmap" - zypper: name: nmap state: present # Install apache2 with recommended packages - zypper: name: apache2 state: present disable_recommends: no # Apply a given patch - zypper: name: openSUSE-2016-128 state: present type: patch # Remove the "nmap" package - zypper: name: nmap state: absent # Install the nginx rpm from a remote repo - zypper: name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm' state: present # Install local rpm file - zypper: name: /tmp/fancy-software.rpm state: present # Update all packages - zypper: name: '*' state: latest # Apply all available patches - zypper: name: '*' state: latest type: patch # Perform a dist-upgrade with additional arguments - zypper: name: '*' state: dist-upgrade extra_args: '--no-allow-vendor-change --allow-arch-change' # Refresh repositories and update package "openssl" - zypper: name: openssl state: present update_cache: yes # Install specific version (possible comparisons: <, >, <=, >=, =) - zypper: name: 'docker>=1.10' state: present # Wait 20 seconds to acquire the lock before failing - zypper: name: mosh state: present environment: ZYPP_LOCK_TIMEOUT: 20 ''' import xml import re from xml.dom.minidom import parseString as parseXML from ansible.module_utils.six import iteritems from ansible.module_utils._text import to_native # import module snippets from ansible.module_utils.basic import AnsibleModule class Package: def __init__(self, name, prefix, version): self.name = name self.prefix = prefix self.version = version self.shouldinstall = (prefix == '+') def __str__(self): return self.prefix + self.name + self.version def split_name_version(name): """splits of the package name and desired version example formats: - docker>=1.10 - apache=2.4 Allowed version specifiers: <, >, <=, >=, 
= Allowed version format: [0-9.-]* Also allows a prefix indicating remove "-", "~" or install "+" """ prefix = '' if name[0] in ['-', '~', '+']: prefix = name[0] name = name[1:] if prefix == '~': prefix = '-' version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') try: reres = version_check.match(name) name, version = reres.groups() if version is None: version = '' return prefix, name, version except: return prefix, name, '' def get_want_state(names, remove=False): packages = [] urls = [] for name in names: if '://' in name or name.endswith('.rpm'): urls.append(name) else: prefix, pname, version = split_name_version(name) if prefix not in ['-', '+']: if remove: prefix = '-' else: prefix = '+' packages.append(Package(pname, prefix, version)) return packages, urls def get_installed_state(m, packages): "get installed state of packages" cmd = get_cmd(m, 'search') cmd.extend(['--match-exact', '--details', '--installed-only']) cmd.extend([p.name for p in packages]) return parse_zypper_xml(m, cmd, fail_not_found=False)[0] def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): rc, stdout, stderr = m.run_command(cmd, check_rc=False) try: dom = parseXML(stdout) except xml.parsers.expat.ExpatError as exc: m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc), rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) if rc == 104: # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found) if fail_not_found: errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) else: return {}, rc, stdout, stderr elif rc in [0, 106, 103]: # zypper exit codes # 0: success # 106: signature verification failed # 103: zypper was upgraded, run same command again if packages is None: firstrun = True packages = {} solvable_list = dom.getElementsByTagName('solvable') for solvable in solvable_list: name = solvable.getAttribute('name') packages[name] = {} packages[name]['version'] = 
solvable.getAttribute('edition') packages[name]['oldversion'] = solvable.getAttribute('edition-old') status = solvable.getAttribute('status') packages[name]['installed'] = status == "installed" packages[name]['group'] = solvable.parentNode.nodeName if rc == 103 and firstrun: # if this was the first run and it failed with 103 # run zypper again with the same command to complete update return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) return packages, rc, stdout, stderr m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) def get_cmd(m, subcommand): "puts together the basic zypper command arguments with those passed to the module" is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] is_refresh = subcommand == 'refresh' cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout'] if m.params['extra_args_precommand']: args_list = m.params['extra_args_precommand'].split() cmd.extend(args_list) # add global options before zypper command if (is_install or is_refresh) and m.params['disable_gpg_check']: cmd.append('--no-gpg-checks') if subcommand == 'search': cmd.append('--disable-repositories') cmd.append(subcommand) if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh: cmd.extend(['--type', m.params['type']]) if m.check_mode and subcommand != 'search': cmd.append('--dry-run') if is_install: cmd.append('--auto-agree-with-licenses') if m.params['disable_recommends']: cmd.append('--no-recommends') if m.params['force']: cmd.append('--force') if m.params['oldpackage']: cmd.append('--oldpackage') if m.params['extra_args']: args_list = m.params['extra_args'].split(' ') cmd.extend(args_list) return cmd def set_diff(m, retvals, result): # TODO: if there is only one package, set before/after to version numbers packages = {'installed': [], 'removed': [], 'upgraded': []} if result: for p in result: group = result[p]['group'] if group == 
'to-upgrade': versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')' packages['upgraded'].append(p + versions) elif group == 'to-install': packages['installed'].append(p) elif group == 'to-remove': packages['removed'].append(p) output = '' for state in packages: if packages[state]: output += state + ': ' + ', '.join(packages[state]) + '\n' if 'diff' not in retvals: retvals['diff'] = {} if 'prepared' not in retvals['diff']: retvals['diff']['prepared'] = output else: retvals['diff']['prepared'] += '\n' + output def package_present(m, name, want_latest): "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" retvals = {'rc': 0, 'stdout': '', 'stderr': ''} packages, urls = get_want_state(name) # add oldpackage flag when a version is given to allow downgrades if any(p.version for p in packages): m.params['oldpackage'] = True if not want_latest: # for state=present: filter out already installed packages # if a version is given leave the package in to let zypper handle the version # resolution packageswithoutversion = [p for p in packages if not p.version] prerun_state = get_installed_state(m, packageswithoutversion) # generate lists of packages to install or remove packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)] if not packages and not urls: # nothing to install/remove and nothing to update return None, retvals # zypper install also updates packages cmd = get_cmd(m, 'install') cmd.append('--') cmd.extend(urls) # pass packages to zypper # allow for + or - prefixes in install/remove lists # also add version specifier if given # do this in one zypper run to allow for dependency-resolution # for example "-exim postfix" runs without removing packages depending on mailserver cmd.extend([str(p) for p in packages]) retvals['cmd'] = cmd result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) return result, retvals def 
package_update_all(m): "run update or patch on all available packages" retvals = {'rc': 0, 'stdout': '', 'stderr': ''} if m.params['type'] == 'patch': cmdname = 'patch' elif m.params['state'] == 'dist-upgrade': cmdname = 'dist-upgrade' else: cmdname = 'update' cmd = get_cmd(m, cmdname) retvals['cmd'] = cmd result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) return result, retvals def package_absent(m, name): "remove the packages in name" retvals = {'rc': 0, 'stdout': '', 'stderr': ''} # Get package state packages, urls = get_want_state(name, remove=True) if any(p.prefix == '+' for p in packages): m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") if urls: m.fail_json(msg="Can not remove via URL.") if m.params['type'] == 'patch': m.fail_json(msg="Can not remove patches.") prerun_state = get_installed_state(m, packages) packages = [p for p in packages if p.name in prerun_state] if not packages: return None, retvals cmd = get_cmd(m, 'remove') cmd.extend([p.name + p.version for p in packages]) retvals['cmd'] = cmd result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) return result, retvals def repo_refresh(m): "update the repositories" retvals = {'rc': 0, 'stdout': '', 'stderr': ''} cmd = get_cmd(m, 'refresh') retvals['cmd'] = cmd result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) return retvals # =========================================== # Main control flow def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['pkg'], type='list'), state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']), type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), extra_args_precommand=dict(required=False, default=None), disable_gpg_check=dict(required=False, default='no', type='bool'), 
disable_recommends=dict(required=False, default='yes', type='bool'), force=dict(required=False, default='no', type='bool'), update_cache=dict(required=False, aliases=['refresh'], default='no', type='bool'), oldpackage=dict(required=False, default='no', type='bool'), extra_args=dict(required=False, default=None), ), supports_check_mode=True ) name = module.params['name'] state = module.params['state'] update_cache = module.params['update_cache'] # remove empty strings from package list name = list(filter(None, name)) # Refresh repositories if update_cache and not module.check_mode: retvals = repo_refresh(module) if retvals['rc'] != 0: module.fail_json(msg="Zypper refresh run failed.", **retvals) # Perform requested action if name == ['*'] and state in ['latest', 'dist-upgrade']: packages_changed, retvals = package_update_all(module) elif name != ['*'] and state == 'dist-upgrade': module.fail_json(msg="Can not dist-upgrade specific packages.") else: if state in ['absent', 'removed']: packages_changed, retvals = package_absent(module, name) elif state in ['installed', 'present', 'latest']: packages_changed, retvals = package_present(module, name, state == 'latest') retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed) if module._diff: set_diff(module, retvals, packages_changed) if retvals['rc'] != 0: module.fail_json(msg="Zypper run failed.", **retvals) if not retvals['changed']: del retvals['stdout'] del retvals['stderr'] module.exit_json(name=name, state=state, update_cache=update_cache, **retvals) if __name__ == "__main__": main()
gpl-3.0
tsiktsiris/falcon
tools/perf/scripts/python/syscall-counts.py
11181
1522
# system call counts # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40s %10d\n" % (syscall_name(id), val),
gpl-2.0
mohamed--abdel-maksoud/chromium.src
tools/perf/measurements/repaint_unittest.py
12
2429
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from measurements import repaint
from telemetry import decorators
from telemetry.core import wpr_modes
from telemetry.page import page as page_module
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import page_test_test_case


class TestRepaintPage(page_module.Page):
  # Minimal page fixture for the repaint measurement: loads a local blank
  # page and repaints it continuously so the measurement has frames to record.

  def __init__(self, page_set, base_dir):
    super(TestRepaintPage, self).__init__('file://blank.html',
                                          page_set, base_dir)

  def RunPageInteractions(self, action_runner):
    # Drive repaints for a fixed 2-second window; the measurement collects
    # frame-timing data over this interval.
    action_runner.RepaintContinuously(seconds=2)


class RepaintUnitTest(page_test_test_case.PageTestTestCase):
  """Smoke test for repaint measurement

  Runs repaint measurement on a simple page and verifies that all metrics
  were added to the results. The test is purely functional, i.e. it only
  checks if the metrics are present and non-zero.
  """

  def setUp(self):
    self._options = options_for_unittests.GetCopy()
    # Disable web-page-replay so the file:// page is served directly.
    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF

  def testRepaint(self):
    ps = self.CreateEmptyPageSet()
    ps.AddUserStory(TestRepaintPage(ps, ps.base_dir))
    measurement = repaint.Repaint()
    results = self.RunMeasurement(measurement, ps, options=self._options)
    self.assertEquals(0, len(results.failures))

    # Each metric must be reported exactly once for the single page, with a
    # plausible (positive) representative value.
    frame_times = results.FindAllPageSpecificValuesNamed('frame_times')
    self.assertEquals(len(frame_times), 1)
    self.assertGreater(frame_times[0].GetRepresentativeNumber(), 0)

    mean_frame_time = results.FindAllPageSpecificValuesNamed('mean_frame_time')
    self.assertEquals(len(mean_frame_time), 1)
    self.assertGreater(mean_frame_time[0].GetRepresentativeNumber(), 0)

    frame_time_discrepancy = results.FindAllPageSpecificValuesNamed(
        'frame_time_discrepancy')
    self.assertEquals(len(frame_time_discrepancy), 1)
    self.assertGreater(frame_time_discrepancy[0].GetRepresentativeNumber(), 0)

    # percentage_smooth may legitimately be 0 on a blank page, hence >= 0
    # rather than > 0 here.
    percentage_smooth = results.FindAllPageSpecificValuesNamed(
        'percentage_smooth')
    self.assertEquals(len(percentage_smooth), 1)
    self.assertGreaterEqual(percentage_smooth[0].GetRepresentativeNumber(), 0)

  @decorators.Disabled('android')
  def testCleanUpTrace(self):
    # Verifies the measurement does not leave tracing enabled after a run;
    # disabled on android per the decorator above.
    self.TestTracingCleanedUp(repaint.Repaint, self._options)
bsd-3-clause
ColorFuzzy/tornado
tornado/test/util_test.py
3
6672
# coding: utf-8
from __future__ import absolute_import, division, print_function, with_statement
import sys
import datetime

import tornado.escape
from tornado.escape import utf8
from tornado.util import raise_exc_info, Configurable, exec_in, ArgReplacer, timedelta_to_seconds, import_object
from tornado.test.util import unittest

try:
    from cStringIO import StringIO  # py2
except ImportError:
    from io import StringIO  # py3


class RaiseExcInfoTest(unittest.TestCase):
    def test_two_arg_exception(self):
        # This test would fail on python 3 if raise_exc_info were simply
        # a three-argument raise statement, because TwoArgException
        # doesn't have a "copy constructor"
        class TwoArgException(Exception):
            def __init__(self, a, b):
                super(TwoArgException, self).__init__()
                self.a, self.b = a, b

        try:
            raise TwoArgException(1, 2)
        except TwoArgException:
            exc_info = sys.exc_info()
        try:
            raise_exc_info(exc_info)
            self.fail("didn't get expected exception")
        except TwoArgException as e:
            # raise_exc_info must re-raise the exact original instance,
            # not a reconstructed copy.
            self.assertIs(e, exc_info[1])


# Minimal Configurable hierarchy used by ConfigurableTest below:
# TestConfigurable is the configurable base, TestConfig1 its default
# implementation and TestConfig2 an alternative implementation.
class TestConfigurable(Configurable):
    @classmethod
    def configurable_base(cls):
        return TestConfigurable

    @classmethod
    def configurable_default(cls):
        return TestConfig1


class TestConfig1(TestConfigurable):
    def initialize(self, pos_arg=None, a=None):
        self.a = a
        self.pos_arg = pos_arg


class TestConfig2(TestConfigurable):
    def initialize(self, pos_arg=None, b=None):
        self.b = b
        self.pos_arg = pos_arg


class ConfigurableTest(unittest.TestCase):
    def setUp(self):
        # Configurable state is class-global; save it so each test starts
        # from a clean slate and restore it afterwards.
        self.saved = TestConfigurable._save_configuration()

    def tearDown(self):
        TestConfigurable._restore_configuration(self.saved)

    def checkSubclasses(self):
        # no matter how the class is configured, it should always be
        # possible to instantiate the subclasses directly
        self.assertIsInstance(TestConfig1(), TestConfig1)
        self.assertIsInstance(TestConfig2(), TestConfig2)

        obj = TestConfig1(a=1)
        self.assertEqual(obj.a, 1)
        obj = TestConfig2(b=2)
        self.assertEqual(obj.b, 2)

    def test_default(self):
        # With no explicit configuration, the configurable_default
        # implementation (TestConfig1) must be constructed.
        obj = TestConfigurable()
        self.assertIsInstance(obj, TestConfig1)
        self.assertIs(obj.a, None)

        obj = TestConfigurable(a=1)
        self.assertIsInstance(obj, TestConfig1)
        self.assertEqual(obj.a, 1)

        self.checkSubclasses()

    def test_config_class(self):
        # configure() with a class switches the implementation returned by
        # instantiating the base.
        TestConfigurable.configure(TestConfig2)
        obj = TestConfigurable()
        self.assertIsInstance(obj, TestConfig2)
        self.assertIs(obj.b, None)

        obj = TestConfigurable(b=2)
        self.assertIsInstance(obj, TestConfig2)
        self.assertEqual(obj.b, 2)

        self.checkSubclasses()

    def test_config_args(self):
        # configure(None, **kwargs) keeps the default implementation but
        # binds default keyword arguments; per-call kwargs override them.
        TestConfigurable.configure(None, a=3)
        obj = TestConfigurable()
        self.assertIsInstance(obj, TestConfig1)
        self.assertEqual(obj.a, 3)

        obj = TestConfigurable(42, a=4)
        self.assertIsInstance(obj, TestConfig1)
        self.assertEqual(obj.a, 4)
        self.assertEqual(obj.pos_arg, 42)

        self.checkSubclasses()
        # args bound in configure don't apply when using the subclass directly
        obj = TestConfig1()
        self.assertIs(obj.a, None)

    def test_config_class_args(self):
        # Both an implementation class and bound kwargs at once.
        TestConfigurable.configure(TestConfig2, b=5)
        obj = TestConfigurable()
        self.assertIsInstance(obj, TestConfig2)
        self.assertEqual(obj.b, 5)

        obj = TestConfigurable(42, b=6)
        self.assertIsInstance(obj, TestConfig2)
        self.assertEqual(obj.b, 6)
        self.assertEqual(obj.pos_arg, 42)

        self.checkSubclasses()
        # args bound in configure don't apply when using the subclass directly
        obj = TestConfig2()
        self.assertIs(obj.b, None)


class UnicodeLiteralTest(unittest.TestCase):
    def test_unicode_escapes(self):
        # utf8() encodes a unicode string to its UTF-8 byte representation.
        self.assertEqual(utf8(u'\u00e9'), b'\xc3\xa9')


class ExecInTest(unittest.TestCase):
    # This test is python 2 only because there are no new future imports
    # defined in python 3 yet.
    @unittest.skipIf(sys.version_info >= print_function.getMandatoryRelease(),
                     'no testable future imports')
    def test_no_inherit_future(self):
        # This file has from __future__ import print_function...
        f = StringIO()
        print('hello', file=f)
        # ...but the template doesn't
        exec_in('print >> f, "world"', dict(f=f))
        self.assertEqual(f.getvalue(), 'hello\nworld\n')


class ArgReplacerTest(unittest.TestCase):
    def setUp(self):
        # ArgReplacer locates the 'callback' parameter of this function
        # whether it is passed positionally or by keyword.
        def function(x, y, callback=None, z=None):
            pass
        self.replacer = ArgReplacer(function, 'callback')

    def test_omitted(self):
        # No callback supplied: old value is None and the replacement is
        # injected as a keyword argument.
        args = (1, 2)
        kwargs = dict()
        self.assertIs(self.replacer.get_old_value(args, kwargs), None)
        self.assertEqual(self.replacer.replace('new', args, kwargs),
                         (None, (1, 2), dict(callback='new')))

    def test_position(self):
        # Callback passed positionally: replaced in place within args.
        args = (1, 2, 'old', 3)
        kwargs = dict()
        self.assertEqual(self.replacer.get_old_value(args, kwargs), 'old')
        self.assertEqual(self.replacer.replace('new', args, kwargs),
                         ('old', [1, 2, 'new', 3], dict()))

    def test_keyword(self):
        # Callback passed by keyword: replaced in kwargs, args untouched.
        args = (1,)
        kwargs = dict(y=2, callback='old', z=3)
        self.assertEqual(self.replacer.get_old_value(args, kwargs), 'old')
        self.assertEqual(self.replacer.replace('new', args, kwargs),
                         ('old', (1,), dict(y=2, callback='new', z=3)))


class TimedeltaToSecondsTest(unittest.TestCase):
    def test_timedelta_to_seconds(self):
        time_delta = datetime.timedelta(hours=1)
        self.assertEqual(timedelta_to_seconds(time_delta), 3600.0)


class ImportObjectTest(unittest.TestCase):
    def test_import_member(self):
        self.assertIs(import_object('tornado.escape.utf8'), utf8)

    def test_import_member_unicode(self):
        self.assertIs(import_object(u'tornado.escape.utf8'), utf8)

    def test_import_module(self):
        self.assertIs(import_object('tornado.escape'), tornado.escape)

    def test_import_module_unicode(self):
        # The internal implementation of __import__ differs depending on
        # whether the thing being imported is a module or not.
        # This variant requires a byte string in python 2.
        self.assertIs(import_object(u'tornado.escape'), tornado.escape)
apache-2.0
mancoast/CPythonPyc_test
fail/314_threaded_import_hangers.py
203
1410
# This is a helper module for test_threaded_import.  The test imports this
# module, and this module tries to run various Python library functions in
# their own thread, as a side effect of being imported.  If the spawned
# thread doesn't complete in TIMEOUT seconds, an "appeared to hang" message
# is appended to the module-global `errors` list.  That list remains empty
# if (and only if) all functions tested complete.

TIMEOUT = 10

import threading
import tempfile
import os.path

errors = []


# This class merely runs a function in its own thread T.  The thread importing
# this module holds the import lock, so if the function called by T tries
# to do its own imports it will block waiting for this module's import
# to complete.
class Worker(threading.Thread):
    def __init__(self, function, args):
        super(Worker, self).__init__()
        self.function = function
        self.args = args

    def run(self):
        self.function(*self.args)


# (name, callable, args) triples exercised in a worker thread each.
_CASES = [
    # Bug 147376: TemporaryFile hung on Windows, starting in Python 2.4.
    ("tempfile.TemporaryFile", tempfile.TemporaryFile, ()),

    # The real cause for bug 147376:  ntpath.abspath() caused the hang.
    ("os.path.abspath", os.path.abspath, ('.',)),
]

for name, func, args in _CASES:
    worker = Worker(func, args)
    worker.start()
    worker.join(TIMEOUT)
    if worker.is_alive():
        errors.append("%s appeared to hang" % name)
gpl-3.0
dzc34/searx
searx/tests/engines/test_searchcode_doc.py
6
2477
from collections import defaultdict
import mock
from searx.engines import searchcode_doc
from searx.testing import SearxTestCase


class TestSearchcodeDocEngine(SearxTestCase):
    """Unit tests for the searchcode documentation search engine adapter."""

    def test_request(self):
        # request() must build a searchcode.com URL containing the query.
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 0
        params = searchcode_doc.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn(query, params['url'])
        self.assertIn('searchcode.com', params['url'])

    def test_response(self):
        # Malformed/absent responses must raise instead of being accepted.
        self.assertRaises(AttributeError, searchcode_doc.response, None)
        self.assertRaises(AttributeError, searchcode_doc.response, [])
        self.assertRaises(AttributeError, searchcode_doc.response, '')
        self.assertRaises(AttributeError, searchcode_doc.response, '[]')

        # Empty JSON payloads yield an empty result list.
        response = mock.Mock(text='{}')
        self.assertEqual(searchcode_doc.response(response), [])

        response = mock.Mock(text='{"data": []}')
        self.assertEqual(searchcode_doc.response(response), [])

        # A well-formed payload with a single documentation hit: title is
        # assembled as "[Type] Namespace name" and synopsis/type/description
        # all end up in the content field.
        json = """
        {
          "matchterm": "test",
          "previouspage": null,
          "searchterm": "test",
          "query": "test",
          "total": 60,
          "page": 0,
          "nextpage": 1,
          "results": [
            {
              "synopsis": "Synopsis",
              "displayname": null,
              "name": "test",
              "url": "http://url",
              "type": "Type",
              "icon": null,
              "namespace": "Namespace",
              "description": "Description"
            }
          ]
        }
        """
        response = mock.Mock(text=json)
        results = searchcode_doc.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['title'], '[Type] Namespace test')
        self.assertEqual(results[0]['url'], 'http://url')
        self.assertIn('Synopsis', results[0]['content'])
        self.assertIn('Type', results[0]['content'])
        self.assertIn('test', results[0]['content'])
        self.assertIn('Description', results[0]['content'])

        # JSON without a "results" key must be ignored gracefully.
        json = """
        {"toto":[
            {"id":200,"name":"Artist Name",
            "link":"http:\/\/www.searchcode_doc.com\/artist\/1217","type":"artist"}
        ]}
        """
        response = mock.Mock(text=json)
        results = searchcode_doc.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 0)
agpl-3.0
Tesora/tesora-tempest
tempest/lib/common/api_version_request.py
12
5783
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

from tempest.lib import exceptions


# Define the minimum and maximum version of the API across all of the
# REST API. The format of the version is:
# X.Y where:
#
# - X will only be changed if a significant backwards incompatible API
# change is made which affects the API as whole. That is, something
# that is only very very rarely incremented.
#
# - Y when you make any change to the API. Note that this includes
# semantic changes which may not affect the input or output formats or
# even originate in the API code layer. We are not distinguishing
# between backwards compatible and backwards incompatible changes in
# the versioning system. It must be made clear in the documentation as
# to what is a backwards compatible change and what is a backwards
# incompatible one.


class APIVersionRequest(object):
    """This class represents an API Version Request.

    This class provides convenience methods for manipulation
    and comparison of version numbers that we need to do to
    implement microversions.

    :param version_string: String representation of APIVersionRequest.
        Correct format is 'X.Y', where 'X' and 'Y' are int values.
        None value should be used to create Null APIVersionRequest,
        which is equal to 0.0
    """

    # NOTE: This 'latest' version is a magic number, we assume any
    # projects(Nova, etc.) never achieve this number.
    latest_ver_major = 99999
    latest_ver_minor = 99999

    def __init__(self, version_string=None):
        """Create an API version request object."""
        # NOTE(gmann): 'version_string' as String "None" will be considered as
        # invalid version string.
        self.ver_major = 0
        self.ver_minor = 0

        if version_string is not None:
            # Major must be a positive integer; minor may be zero.
            match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$",
                             version_string)
            if match:
                self.ver_major = int(match.group(1))
                self.ver_minor = int(match.group(2))
            elif version_string == 'latest':
                self.ver_major = self.latest_ver_major
                self.ver_minor = self.latest_ver_minor
            else:
                raise exceptions.InvalidAPIVersionString(
                    version=version_string)

    def __str__(self):
        """Debug/Logging representation of object."""
        return ("API Version Request: %s" % self.get_string())

    def is_null(self):
        """Checks whether version is null.

        Return True if version object is null otherwise False.

        :returns: boolean
        """
        return self.ver_major == 0 and self.ver_minor == 0

    def _format_type_error(self, other):
        return TypeError("'%(other)s' should be an instance of '%(cls)s'" %
                         {"other": other,
                          "cls": self.__class__})

    def __lt__(self, other):
        if not isinstance(other, APIVersionRequest):
            raise self._format_type_error(other)

        return ((self.ver_major, self.ver_minor) <
                (other.ver_major, other.ver_minor))

    def __eq__(self, other):
        if not isinstance(other, APIVersionRequest):
            raise self._format_type_error(other)

        return ((self.ver_major, self.ver_minor) ==
                (other.ver_major, other.ver_minor))

    def __gt__(self, other):
        if not isinstance(other, APIVersionRequest):
            raise self._format_type_error(other)

        return ((self.ver_major, self.ver_minor) >
                (other.ver_major, other.ver_minor))

    def __le__(self, other):
        return self < other or self == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __ge__(self, other):
        return self > other or self == other

    def __hash__(self):
        # Defining __eq__ without __hash__ makes instances unhashable on
        # Python 3; keep the hash consistent with equality so version
        # requests can be used in sets and as dict keys.
        return hash((self.ver_major, self.ver_minor))

    def matches(self, min_version, max_version):
        """Matches the version object.

        Returns whether the version object represents a version
        greater than or equal to the minimum version and less than
        or equal to the maximum version.

        :param min_version: Minimum acceptable version.
        :param max_version: Maximum acceptable version.
        :returns: boolean

        If min_version is null then there is no minimum limit.
        If max_version is null then there is no maximum limit.
        If self is null then raise ValueError
        """
        if self.is_null():
            raise ValueError("Null APIVersionRequest cannot be matched.")
        if max_version.is_null() and min_version.is_null():
            return True
        elif max_version.is_null():
            return min_version <= self
        elif min_version.is_null():
            return self <= max_version
        else:
            return min_version <= self <= max_version

    def get_string(self):
        """Version string representation.

        Converts object to string representation which if used to create
        an APIVersionRequest object results in the same version request.
        """
        if self.is_null():
            return None
        if (self.ver_major == self.latest_ver_major and
                self.ver_minor == self.latest_ver_minor):
            return 'latest'
        return "%s.%s" % (self.ver_major, self.ver_minor)
apache-2.0
stephenrjones/geoq
geoq/core/migrations/0004_auto__add_userprofile.py
6
9987
# -*- coding: utf-8 -*-
# South schema migration (auto-generated): adds the core.UserProfile model
# (a OneToOne extension of auth.User carrying an integer score).
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'UserProfile'
        db.create_table(u'core_userprofile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
            ('score', self.gf('django.db.models.fields.IntegerField')(default=1)),
        ))
        db.send_create_signal(u'core', ['UserProfile'])

    def backwards(self, orm):
        # Deleting model 'UserProfile'
        db.delete_table(u'core_userprofile')

    # Frozen ORM snapshot used by South to construct the fake `orm` object
    # passed to forwards()/backwards(). Auto-generated; do not hand-edit.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.aoi': {
            'Meta': {'object_name': 'AOI'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'analyst': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aois'", 'to': u"orm['core.Job']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'polygon': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
            'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '5', 'max_length': '1'}),
            'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aoi_reviewers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Unassigned'", 'max_length': '15'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.job': {
            'Meta': {'ordering': "('-created_at',)", 'object_name': 'Job'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'analysts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'analysts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'feature_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['maps.FeatureType']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.Map']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'progress': ('django.db.models.fields.SmallIntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project'", 'to': u"orm['core.Project']"}),
            'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reviewers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.project': {
            'Meta': {'ordering': "('-created_at',)", 'object_name': 'Project'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'project_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'supervisors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'supervisors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'maps.featuretype': {
            'Meta': {'object_name': 'FeatureType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'style': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        u'maps.map': {
            'Meta': {'object_name': 'Map'},
            'center_x': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'center_y': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'projection': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'zoom': ('django.db.models.fields.IntegerField', [], {})
        }
    }

    complete_apps = ['core']
mit
nealtodd/django
django/contrib/admin/templatetags/admin_modify.py
342
2505
from django import template

register = template.Library()


@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
    """
    Creates a list of prepopulated_fields that should render Javascript for
    the prepopulated fields for both the admin form and inlines.
    """
    fields = []
    if 'adminform' in context:
        fields += context['adminform'].prepopulated_fields
    if 'inline_admin_formsets' in context:
        for formset in context['inline_admin_formsets']:
            for inline_form in formset:
                # Only unsaved (new) inline objects get prepopulation JS.
                if inline_form.original is None:
                    fields += inline_form.prepopulated_fields
    context.update({'prepopulated_fields': fields})
    return context


@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
    """
    Displays the row of buttons for delete and save.
    """
    opts = context['opts']
    change = context['change']
    is_popup = context['is_popup']
    save_as = context['save_as']
    show_save = context.get('show_save', True)
    show_save_and_continue = context.get('show_save_and_continue', True)
    # Delete is only offered on a non-popup change form when permitted.
    can_delete = (not is_popup and context['has_delete_permission'] and
                  change and context.get('show_delete', True))
    can_add_another = (context['has_add_permission'] and not is_popup and
                       (not save_as or context['add']))
    ctx = {
        'opts': opts,
        'show_delete_link': can_delete,
        'show_save_as_new': not is_popup and change and save_as,
        'show_save_and_add_another': can_add_another,
        'show_save_and_continue': (not is_popup and
                                   context['has_change_permission'] and
                                   show_save_and_continue),
        'is_popup': is_popup,
        'show_save': show_save,
        'preserved_filters': context.get('preserved_filters'),
    }
    if context.get('original') is not None:
        ctx['original'] = context['original']
    return ctx


@register.filter
def cell_count(inline_admin_form):
    """Returns the number of cells used in a tabular inline"""
    count = 1  # Hidden cell with hidden 'id' field
    for fieldset in inline_admin_form:
        # Loop through all the fields (one per cell)
        for line in fieldset:
            count += sum(1 for _field in line)
    if inline_admin_form.formset.can_delete:
        # Delete checkbox
        count += 1
    return count
bsd-3-clause
SmartInfrastructures/fuel-web-dev
network_checker/network_checker/tests/test_multicast.py
7
3095
# Copyright 2014 Mirantis, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import json
import subprocess
import unittest

from network_checker.multicast import api as multicast_api


class TestMulticastVerification(unittest.TestCase):
    """In-process check that two nodes on one multicast group see each other."""

    def setUp(self):
        # Two checkers share the same group/port but carry different uids,
        # emulating two separate nodes on the same L2 segment.
        self.config_node_112 = {"uid": "112",
                                "group": "225.0.0.250", "port": 8890}
        self.config_node_113 = {"uid": "113",
                                "group": "225.0.0.250", "port": 8890}
        self.mchecker_node_112 = multicast_api.MulticastChecker(
            **self.config_node_112)
        self.mchecker_node_113 = multicast_api.MulticastChecker(
            **self.config_node_113)

    def test_multicast_verification(self):
        # Both nodes listen, both send; each must then report having heard
        # both uids.
        self.mchecker_node_112.listen()
        self.mchecker_node_113.listen()

        self.mchecker_node_112.send()
        self.mchecker_node_113.send()

        info_node_112 = self.mchecker_node_112.get_info()
        info_node_113 = self.mchecker_node_113.get_info()
        self.assertEqual(info_node_112, [u"113", u"112"])
        self.assertEqual(info_node_113, [u"113", u"112"])


class TestSystemMulticastVerification(unittest.TestCase):
    """End-to-end check driving the fuel-netcheck CLI via subprocess."""

    def shell_helper(self, args):
        # Runs the command and decodes its stdout as JSON.
        proc = subprocess.Popen(args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return json.loads(out)

    def test_multicast_verification_with_detach(self):
        # serve/listen must return at least the node uid...
        init_args = ['fuel-netcheck', 'multicast', 'serve', 'listen']
        listen_data = self.shell_helper(init_args)
        self.assertIn('uid', listen_data)
        # ...and after send, info reports exactly that uid.
        args = ['fuel-netcheck', 'multicast', 'send', 'info']
        info = self.shell_helper(args)
        self.assertEqual([listen_data['uid']], info)
        cleanup_args = ['fuel-netcheck', 'multicast', 'clean']
        clean = self.shell_helper(cleanup_args)
        self.assertTrue(clean)

    def test_mutlicast_with_config(self):
        # Same flow but with an explicit JSON config passed on the CLI.
        config = {"uid": "112", "group": "225.0.0.250",
                  "port": 8890, "iface": "eth0"}
        config_json = json.dumps(config)
        init_args = ["fuel-netcheck", "multicast",
                     "serve", "--config", config_json]
        self.shell_helper(init_args)
        args = ['fuel-netcheck', 'multicast', 'listen', 'send', 'info']
        info = self.shell_helper(args)
        self.assertEqual([config['uid']], info)
        cleanup_args = ['fuel-netcheck', 'multicast', 'clean']
        clean = self.shell_helper(cleanup_args)
        self.assertTrue(clean)
apache-2.0
Endika/website
website_backend_views/controllers/demo.py
36
1453
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import http, _


class Demo(http.Controller):
    """Serves the demo page for website_backend_views."""

    @http.route('/website_backend_views/demo/', auth='user', website=True)
    def index(self, debug=False):
        # The demo view only exists when the module is installed with demo
        # data; render it if present, otherwise explain what is missing.
        demo_view = http.request.env.ref(
            'website_backend_views.demo_index', raise_if_not_found=False)
        if demo_view:
            return http.request.render('website_backend_views.demo_index')
        return _('You need to install this module in demo mode for this '
                 'url to work!')
agpl-3.0
dllsf/odootest
addons/payment_buckaroo/controllers/main.py
325
1270
# -*- coding: utf-8 -*-
try:
    import simplejson as json
except ImportError:
    import json
import logging
import pprint

import werkzeug

from openerp import http, SUPERUSER_ID
from openerp.http import request

_logger = logging.getLogger(__name__)


class BuckarooController(http.Controller):
    _return_url = '/payment/buckaroo/return'
    _cancel_url = '/payment/buckaroo/cancel'
    _exception_url = '/payment/buckaroo/error'
    _reject_url = '/payment/buckaroo/reject'

    @http.route([
        '/payment/buckaroo/return',
        '/payment/buckaroo/cancel',
        '/payment/buckaroo/error',
        '/payment/buckaroo/reject',
    ], type='http', auth='none')
    def buckaroo_return(self, **post):
        """Handle every Buckaroo redirect (return/cancel/error/reject).

        Feeds the raw POST data to the payment transaction model, then
        redirects the customer to the original return_url.
        """
        _logger.info('Buckaroo: entering form_feedback with post data %s',
                     pprint.pformat(post))  # debug
        request.registry['payment.transaction'].form_feedback(
            request.cr, SUPERUSER_ID, post, 'buckaroo',
            context=request.context)
        return_url = post.pop('return_url', '')
        if not return_url:
            # Buckaroo echoes ADD_RETURNDATA back with single quotes;
            # normalize to valid JSON before parsing.
            raw = post.pop('ADD_RETURNDATA', '{}').replace("'", "\"")
            custom = json.loads(raw)
            return_url = custom.pop('return_url', '/')
        return werkzeug.utils.redirect(return_url)
agpl-3.0
pdelsante/thug
tests/Logging/test_ThugLogging.py
1
5373
import os import logging import thug from thug.ThugAPI.ThugOpts import ThugOpts from thug.DOM.HTTPSession import HTTPSession from thug.Logging.ThugLogging import ThugLogging from thug.Classifier.URLClassifier import URLClassifier from thug.Classifier.SampleClassifier import SampleClassifier configuration_path = thug.__configuration_path__ log = logging.getLogger("Thug") log.configuration_path = configuration_path log.personalities_path = os.path.join(configuration_path, "personalities") if configuration_path else None log.ThugOpts = ThugOpts() log.HTTPSession = HTTPSession() log.URLClassifier = URLClassifier() log.SampleClassifier = SampleClassifier() thug_logging = ThugLogging(thug.__version__) class TestThugLogging: js = "var i = 0;" cert = "sample-certificate" content = b"sample, content" cwd_path = os.path.dirname(os.path.realpath(__file__)) jar_path = os.path.join(cwd_path, os.pardir, os.pardir, "tests/test_files/sample.jar") sample = {'sha1': 'b13d13733c4c9406fd0e01485bc4a34170b7d326', 'ssdeep': u'24:9EGtDqSyDVHNkCq4LOmvmuS+MfTAPxokCOB:97tG5DjQ4LDs+sTAPxLT', 'sha256': '459bf0aeda19633c8e757c05ee06b8121a51217cea69ce60819bb34092a296a0', 'type': 'JAR', 'md5': 'd4be8fbeb3a219ec8c6c26ffe4033a16'} def test_set_url(self): thug_logging.set_url("https://www.example.com") assert thug_logging.url in ("https://www.example.com", ) def test_add_code_snippet(self): log.ThugOpts.code_logging = False tag_hex = thug_logging.add_code_snippet(self.js, 'Javascript', 'Contained_Inside') assert not tag_hex log.ThugOpts.code_logging = True assert not thug_logging.add_code_snippet("var", 'Javascript', 'Contained', check = True) tag_hex = thug_logging.add_code_snippet(self.js, 'Javascript', 'Contained_Inside') assert tag_hex def test_add_shellcode_snippet(self): tag_hex = thug_logging.add_shellcode_snippet("sample", "Assembly", "Shellcode", "Static Analysis") assert tag_hex def test_log_file(self): sample = thug_logging.log_file(data = "") assert not sample data = open(self.jar_path, 
'rb').read() sample = thug_logging.log_file(data = data, url = self.jar_path, sampletype = 'JAR') assert sample['sha1'] in ('b13d13733c4c9406fd0e01485bc4a34170b7d326', ) def test_log_event(self, caplog): caplog.clear() log.ThugOpts.file_logging = True thug_logging.log_event() assert 'Thug analysis logs saved' in caplog.text log.ThugOpts.file_logging = False def test_log_connection(self): thug_logging.log_connection("referer", "url", "href") def test_log_location(self): thug_logging.log_location("https://example.com", None) def test_log_exploit_event(self, caplog): caplog.clear() thug_logging.log_exploit_event("https://www.example.com", "module", "sample-description") assert "[module] sample-description" in caplog.text def test_log_classifier(self, caplog): caplog.clear() thug_logging.log_classifier("sample", self.jar_path, "N/A", None) assert "[SAMPLE Classifier]" in caplog.text assert "(Rule: N/A, Classification: None)" in caplog.text def test_log_warning(self, caplog): caplog.clear() thug_logging.log_warning("sample-text") assert "sample-text" in caplog.text def test_log_redirect(self, caplog): pass def test_log_href_direct(self, caplog): caplog.clear() thug_logging.log_href_redirect("referer", "url") assert "[HREF Redirection (document.location)]" in caplog.text assert "Content-Location: referer --> Location: url" in caplog.text def test_log_certificate(self, caplog): caplog.clear() log.ThugOpts.cert_logging = False thug_logging.log_certificate("url", self.cert) assert "[Certificate]" not in caplog.text log.ThugOpts.cert_logging = True thug_logging.log_certificate("url", self.cert) assert "[Certificate]\n %s" % (self.cert, ) in caplog.text def test_log_virustotal(self): log.ThugOpts.file_logging = True path = "%s.json" % (self.sample['md5'],) thug_logging.log_virustotal(os.getcwd(), self.sample, self.content) assert self.content in open(path, 'rb').read() os.remove(path) log.ThugOpts.file_logging = False def test_log_honeyagent(self): log.ThugOpts.file_logging = 
True path = "%s.json" % (self.sample['md5'], ) thug_logging.log_honeyagent(os.getcwd(), self.sample, self.content) assert self.content in open(path, 'rb').read() os.remove(path) log.ThugOpts.file_logging = False def test_store_content(self): log.ThugOpts.file_logging = True fname = thug_logging.store_content(os.getcwd(), "sample.csv", self.content) path = os.path.join(os.getcwd(), "sample.csv") assert fname == path assert self.content in open(path, 'rb').read() os.remove(path) log.ThugOpts.file_logging = False fname = thug_logging.store_content(os.getcwd(), "sample.csv", self.content) assert not fname
gpl-2.0
google/hypebot
hypebot/plugins/hypejack_lib.py
1
15524
# coding=utf-8 # Copyright 2018 The Hypebot Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Waste your hard earned hypecoins here.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from collections import defaultdict from functools import wraps import itertools import random import re import threading from absl import logging from hypebot.core import schedule_lib from hypebot.core import util_lib from hypebot.plugins import playing_cards_lib from hypebot.plugins import vegas_game_lib from hypebot.protos import bet_pb2 from hypebot.protos import user_pb2 from typing import Dict, List, Optional, Text # Double deck to allow people to count cards. _NUM_DECKS = 2 # Maps card values to all of their potential points. 
_CARD_POINTS = { playing_cards_lib.ACE: [1, 11], 2: [2], 3: [3], 4: [4], 5: [5], 6: [6], 7: [7], 8: [8], 9: [9], 10: [10], playing_cards_lib.JACK: [10], playing_cards_lib.QUEEN: [10], playing_cards_lib.KING: [10], } class Hand(object): """Collection of cards with blackjack game state.""" def __init__(self, bet, *cards): self.bet = bet # type: bet_pb2.Bet self.cards = list(cards) # type: List[playing_cards_lib.Card] self.stand = False def IsActive(self): return not (self.IsBusted() or self.IsHypeJack() or self.stand) def IsBusted(self): return self.Score() > 21 def IsHypeJack(self): return self.Score() == 21 and len(self.cards) == 2 def Score(self): """Computes the best possible score for the hand.""" points = _CARD_POINTS[self.cards[0].value] for card in self.cards[1:]: points = [ pts[0] + pts[1] for pts in itertools.product(points, _CARD_POINTS[card.value]) ] non_bust = [p for p in points if p <= 21] if non_bust: score = max(non_bust) if score == 21: self.stand = True return score return min(points) def __unicode__(self): status_str = '' if self.IsBusted(): status_str = '✕' elif self.IsHypeJack(): status_str = '✪' elif self.stand: status_str = '✋' return '[%s]%s' % (', '.join(map(unicode, self.cards)), status_str) def HandFromMatch(fn): """Wrapper that calls the function with the correct hand. Determines what hand was desired based on the following order: 1) Hand passed directly as hand kwarg. 2) Corresponding hand based on number specified in match kwarg. Args: fn: Function to wrap. Returns: Wrapped function. """ @wraps(fn) def Wrapper(self, user: user_pb2.User, *args, **kwargs): """Internal wrapper.""" # pylint: disable=protected-access with self._lock: if user.user_id not in self._peeps: self._msg_fn( None, '%s: You are not playing in this round.' % user.display_name) return if 'hand' in kwargs: return fn(self, user, *args, **kwargs) # Default to first hand if none specified. 
try: hand_id = int(kwargs['match'].groups()[0]) except Exception: # pylint: disable=broad-except hand_id = 0 try: hand = self._peeps[user.user_id][hand_id] except KeyError: self._msg_fn( None, '%s: Please specify a valid hand: 0 through %d' % (user.display_name, len(self._peeps[user.user_id]) - 1)) return if not hand.IsActive(): self._msg_fn( None, '%s: Hand %s is already complete.' % (user.display_name, hand_id)) return kwargs['hand'] = hand return fn(self, user, *args, **kwargs) # pylint: enable=protected-access return Wrapper class Game(vegas_game_lib.GameBase): """Blackjack style game.""" # Seconds after first bet until round starts ROUND_DELAY = 5 # Seconds that users have to complete their hands before they are auto-stood. # Prevents a user from betting and walking away. MAX_ROUND_LENGTH = 60 def __init__(self, channel, core, msg_fn): # Used for thread safe access to class data. self._lock = threading.RLock() # Condition variable used to force end the game after a certain amount of # time has passed. self._game_ender = threading.Condition(lock=self._lock) self.channel = channel self._core = core self._msg_fn = msg_fn self._pending_start = False self._active_round = False self._scheduler = schedule_lib.HypeScheduler() # Maps users to their hands for the active round. self._peeps = {} # type: Dict[Text, List[Hand]] self._dealer_hand = None # type: Hand self._shoe = [] # ============================================================================ # GameBase abstract signature. # ============================================================================ @property def name(self): return self.channel.name # Do not take any bets from random channels. We directly place bets ourselves. 
def TakeBet(self, bet): return False def FormatBet(self, bet): return u'%s %s %s in %s' % (util_lib.FormatHypecoins( bet.amount), bet_pb2.Bet.Direction.Name( bet.direction).lower(), bet.target, self.name) def SettleBets(self, pool, msg_fn, *args, **kwargs): with self._lock: winners = defaultdict(int) users_by_id = {} for user_id, user_bets in pool.items(): if user_id not in self._peeps: # This means the game wasn't finished. Either user timed out or prior # crash. Hypebot steals the bet either way. continue users_by_id[user_id] = user_bets[0].user for bet in user_bets: hand_id = int(bet.target.split('-')[-1]) hand = self._peeps[user_id][hand_id] result_str = 'lost' if hand.IsBusted(): result_str = 'busted' elif hand.IsHypeJack(): if self._dealer_hand.IsHypeJack(): result_str = 'pushed' winners[user_id] += bet.amount else: result_str = 'hypejack!' winners[user_id] += bet.amount * 5 // 2 elif (self._dealer_hand.IsBusted() or hand.Score() > self._dealer_hand.Score()): winners[user_id] += bet.amount * 2 result_str = 'won' elif hand.Score() == self._dealer_hand.Score(): winners[user_id] += bet.amount result_str = 'pushed' self._msg_fn( None, '%s: %s %s' % (bet.user.display_name, unicode(hand), result_str)) return ({ users_by_id[user_id]: amount for user_id, amount in winners.items() }, {}, []) # ============================================================================ # HypeJack logic. # ============================================================================ def HandleMessage(self, user: user_pb2.User, msg: Text): with self._lock: hand_regex = r' ?([0-9]*)' bet_match = re.match(r'^b(?:et)? 
([0-9]+)', msg) double_match = re.match(r'^d(?:ouble)?%s' % hand_regex, msg) hit_match = re.match(r'^h(?:it)?%s' % hand_regex, msg) stand_match = re.match(r'^st(?:and)?%s' % hand_regex, msg) split_match = re.match(r'^sp(?:lit)?%s' % hand_regex, msg) help_match = re.match(r'^h[ae]lp', msg) if bet_match: self.Bet(user, bet_match) elif help_match: # Help before hit since they will both match `help`. self.Help(user) elif double_match: self.Double(user, match=double_match) elif hit_match: self.Hit(user, match=hit_match) elif stand_match: self.Stand(user, match=stand_match) elif split_match: self.Split(user, match=split_match) self._PossiblyEndRound() # ============================================================================ # User commands. # ============================================================================ def Bet(self, user: user_pb2.User, match): with self._lock: if self._active_round: self._msg_fn(None, '%s: Round is currently active.' % user.display_name) return amount = self._core.bank.ParseAmount(user, match.groups()[0], self._msg_fn) bet = bet_pb2.Bet( user=user, amount=amount, resolver=self._core.name.lower(), direction=bet_pb2.Bet.FOR, target='hand-0') if not self._core.bets.PlaceBet(self, bet, self._msg_fn): return self._msg_fn(None, '%s joined the round.' % user.display_name) if not self._pending_start: self._pending_start = True self._msg_fn(None, 'Round starting soon, type "bet [amount]" to join.') self._scheduler.InSeconds(self.ROUND_DELAY, self.PlayRound) @HandFromMatch def Double(self, user: user_pb2.User, hand: Optional[Hand] = None, match=None): if not hand: return with self._lock: logging.info('Prior Bet: %s', hand.bet) hand.bet.amount *= 2 if not self._core.bets.PlaceBet(self, hand.bet, self._msg_fn): self._msg_fn(None, '%s: Not enough hypecoins to double.' 
% user.display_name) hand.bet.amount /= 2 return self.Hit(user, hand=hand) self.Stand(user, hand=hand) self._DisplayUser(user) def Help(self, user: user_pb2.User): lines = """HypeJack bears a strong resemblence to a popular casino game. Commands: * bet [amount]: signal intent to play in the round. * hit [hand_id]: request a card for hand_id. * stand [hand_id]: wait for dealer and compare hands. * split [hand_id]: split a hand of same value cards into two hands. * double [hand_id]: double your bet, take a single hit, and stand. """.split('\n') self._msg_fn(user, lines) @HandFromMatch def Hit(self, user: user_pb2.User, hand: Optional[Hand] = None, match=None): if not hand: return with self._lock: hand.cards.append(self._shoe.pop()) self._DisplayUser(user) @HandFromMatch def Stand(self, user: user_pb2.User, hand: Optional[Hand] = None, match=None): if not hand: return with self._lock: hand.stand = True self._DisplayUser(user) @HandFromMatch def Split(self, user: user_pb2.User, hand: Optional[Hand] = None, match=None): if not hand: return with self._lock: if (len(hand.cards) != 2 or _CARD_POINTS[hand.cards[0].value] != _CARD_POINTS[hand.cards[1].value]): self._msg_fn( None, '%s: Can only split 2 equal value cards.' % user.display_name) return new_bet = bet_pb2.Bet() new_bet.CopyFrom(hand.bet) new_bet.target = 'hand-%d' % len(self._peeps[user.user_id]) if not self._core.bets.PlaceBet(self, new_bet, self._msg_fn): self._msg_fn(None, '%s: Not enough hypecoins to split.' % user.display_name) return new_hand = Hand(new_bet, hand.cards.pop()) self._peeps[user.user_id].append(new_hand) self.Hit(user, hand=hand) self.Hit(user, hand=new_hand) self._DisplayUser(user) # ============================================================================ # Game logic. # ============================================================================ def PlayRound(self): """Plays one round of HypeJack with all active players. 
Should be called in a separate thread since it will sleep until the game timeout unless woken by all peeps completing their hands. """ with self._lock: if self._active_round: logging.error('HypeJack game already active.') return bets = self._core.bets.LookupBets( self.name, resolver=self._core.name.lower()) if not bets: logging.error('Attempted to start HypeJack with no players.') return self._pending_start = False # Shuffle the deck when it gets low. We assume a reasonable number of # cards needed per player, but with lots of splits / low cards we may # still run out of cards to play the hand. if len(self._shoe) < (len(self._peeps) + 1) * 7: self._ShuffleCards() # Deal cards to plebs. for user_id, user_bets in bets.items(): hand = Hand(user_bets[0], self._shoe.pop(), self._shoe.pop()) self._peeps[user_id] = [hand] self._DisplayUser(user_bets[0].user) # Deal cards to hypebot. self._dealer_hand = Hand(None, self._shoe.pop(), self._shoe.pop()) # self._dealer_hand = Hand(playing_cards_lib.Card('Hearts', 8), # playing_cards_lib.Card('Spades', 8)) self._msg_fn(None, 'Dealer: [%s, %s]' % (self._dealer_hand.cards[0], '🂠')) self._active_round = True # Short-circuit game play if the dealer has a hypejack or if all peeps # have hypejacks. if not self._dealer_hand.IsHypeJack() and any( [self._IsActive(user_id) for user_id in self._peeps.keys()]): # Force the round to end after some time if some peep ran away. Waiting # on a condition releases the lock while waiting, then reacquires it # automatically. Will shortcircuit if notified when all peeps have # finished their hands. self._game_ender.wait(timeout=self.MAX_ROUND_LENGTH) # Complete dealer hand. self._msg_fn(None, 'Dealer: %s' % self._dealer_hand) while self._dealer_hand.Score() < 17: self._dealer_hand.cards.append(self._shoe.pop()) self._msg_fn(None, 'Dealer: %s' % self._dealer_hand) self._core.bets.SettleBets(self, self._core.name.lower(), self._msg_fn) # Reset game state. 
self._peeps = {} self._active_round = False def _ShuffleCards(self): with self._lock: self._msg_fn(None, 'Shuffling cards.') self._shoe = [] for _ in range(_NUM_DECKS): self._shoe.extend(playing_cards_lib.BuildDeck()) random.shuffle(self._shoe) def _DisplayUser(self, user: user_pb2.User): with self._lock: if user in self._peeps and len(self._peeps[user.user_id]): hands = self._peeps[user.user_id] self._msg_fn( None, '%s: %s' % (user.display_name, ', '.join([ '%s:%s' % (i, unicode(hand)) for i, hand in enumerate(hands) ]))) def _IsActive(self, user_id: Text): """Check if user has any active hands.""" with self._lock: return (user_id in self._peeps and any([hand.IsActive() for hand in self._peeps[user_id]])) def _PossiblyEndRound(self): """End round if no users are active.""" with self._lock: if all([not self._IsActive(user_id) for user_id in self._peeps.keys()]): self._game_ender.notify()
apache-2.0
BiznetGIO/horizon
openstack_dashboard/test/api_tests/neutron_rest_tests.py
2
12748
# # (c) Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.http import request as django_request import mock from openstack_dashboard import api from openstack_dashboard.api import base from openstack_dashboard.api.rest import neutron from openstack_dashboard.test import helpers as test from openstack_dashboard.test.test_data import neutron_data from openstack_dashboard.test.test_data.utils import TestData TEST = TestData(neutron_data.data) class NeutronNetworksTestCase(test.TestCase): def setUp(self): super(NeutronNetworksTestCase, self).setUp() self._networks = [test.mock_factory(n) for n in TEST.api_networks.list()] @mock.patch.object(neutron.api, 'neutron') def test_get_list_for_tenant(self, client): request = self.mock_rest_request() networks = self._networks client.network_list_for_tenant.return_value = networks response = neutron.Networks().get(request) self.assertStatusCode(response, 200) self.assertItemsCollectionEqual(response, TEST.api_networks.list()) client.network_list_for_tenant.assert_called_once_with( request, request.user.tenant_id) @mock.patch.object(neutron.api, 'neutron') def test_create(self, client): self._test_create( '{"name": "mynetwork"}', {'name': 'mynetwork'} ) @mock.patch.object(neutron.api, 'neutron') def test_create_with_bogus_param(self, client): self._test_create( '{"name": "mynetwork","bilbo":"baggins"}', {'name': 'mynetwork'} ) @mock.patch.object(neutron.api, 'neutron') def 
_test_create(self, supplied_body, expected_call, client): request = self.mock_rest_request(body=supplied_body) client.network_create.return_value = self._networks[0] response = neutron.Networks().post(request) self.assertStatusCode(response, 201) self.assertEqual(response['location'], '/api/neutron/networks/' + str(TEST.api_networks.first().get("id"))) self.assertEqual(response.json, TEST.api_networks.first()) # # Services # @test.create_stubs({api.base: ('is_service_enabled',)}) @test.create_stubs({api.neutron: ('is_extension_supported',)}) @mock.patch.object(neutron.api, 'neutron') def test_services_get(self, client): params = django_request.QueryDict('network_id=the_network') request = self.mock_rest_request(GET=params) api.base.is_service_enabled(request, 'network').AndReturn(True) api.neutron.is_extension_supported(request, 'agent').AndReturn(True) client.agent_list.return_value = [ mock.Mock(**{'to_dict.return_value': {'id': '1'}}), mock.Mock(**{'to_dict.return_value': {'id': '2'}}) ] self.mox.ReplayAll() response = neutron.Services().get(request) self.assertStatusCode(response, 200) client.agent_list.assert_called_once_with( request, network_id='the_network') self.assertEqual(response.content.decode('utf-8'), '{"items": [{"id": "1"}, {"id": "2"}]}') @test.create_stubs({api.base: ('is_service_enabled',)}) def test_services_get_disabled(self): request = self.mock_rest_request( GET={"network_id": self._networks[0].id}) api.base.is_service_enabled(request, 'network').AndReturn(False) self.mox.ReplayAll() response = neutron.Services().get(request) self.assertStatusCode(response, 501) class NeutronSubnetsTestCase(test.TestCase): def setUp(self): super(NeutronSubnetsTestCase, self).setUp() self._networks = [test.mock_factory(n) for n in TEST.api_networks.list()] self._subnets = [test.mock_factory(n) for n in TEST.api_subnets.list()] @mock.patch.object(neutron.api, 'neutron') def test_get(self, client): params = django_request.QueryDict('network_id=%s' % 
self._networks[0].id) request = self.mock_rest_request(GET=params) client.subnet_list.return_value = [self._subnets[0]] response = neutron.Subnets().get(request) self.assertStatusCode(response, 200) client.subnet_list.assert_called_once_with( request, network_id=TEST.api_networks.first().get("id")) @mock.patch.object(neutron.api, 'neutron') def test_create(self, client): request = self.mock_rest_request( body='{"network_id": "%s",' ' "ip_version": "4",' ' "cidr": "192.168.199.0/24"}' % self._networks[0].id) client.subnet_create.return_value = self._subnets[0] response = neutron.Subnets().post(request) self.assertStatusCode(response, 201) self.assertEqual(response['location'], '/api/neutron/subnets/' + str(TEST.api_subnets.first().get("id"))) self.assertEqual(response.json, TEST.api_subnets.first()) class NeutronPortsTestCase(test.TestCase): def setUp(self): super(NeutronPortsTestCase, self).setUp() self._networks = [test.mock_factory(n) for n in TEST.api_networks.list()] self._ports = [test.mock_factory(n) for n in TEST.api_ports.list()] @mock.patch.object(neutron.api, 'neutron') def test_get(self, client): params = django_request.QueryDict('network_id=%s' % self._networks[0].id) request = self.mock_rest_request(GET=params) client.port_list_with_trunk_types.return_value = [self._ports[0]] response = neutron.Ports().get(request) self.assertStatusCode(response, 200) client.port_list_with_trunk_types.assert_called_once_with( request, network_id=TEST.api_networks.first().get("id")) class NeutronTrunkTestCase(test.TestCase): @mock.patch.object(neutron.api, 'neutron') def test_trunk_delete(self, client): request = self.mock_rest_request() neutron.Trunk().delete(request, 1) client.trunk_delete.assert_called_once_with(request, 1) @mock.patch.object(neutron.api, 'neutron') def test_trunk_get(self, client): trunk_id = TEST.api_trunks.first().get("id") request = self.mock_rest_request(GET={"trunk_id": trunk_id}) client.trunk_show.return_value = self.trunks.first() response = 
neutron.Trunk().get(request, trunk_id=trunk_id) self.assertStatusCode(response, 200) client.trunk_show.assert_called_once_with( request, trunk_id) class NeutronTrunksTestCase(test.TestCase): @mock.patch.object(neutron.api, 'neutron') def test_trunks_get(self, client): request = self.mock_rest_request(GET=django_request.QueryDict()) client.trunk_list.return_value = self.trunks.list() response = neutron.Trunks().get(request) self.assertStatusCode(response, 200) self.assertItemsCollectionEqual( response, [t.to_dict() for t in self.trunks.list()]) class NeutronExtensionsTestCase(test.TestCase): def setUp(self): super(NeutronExtensionsTestCase, self).setUp() self._extensions = [n for n in TEST.api_extensions.list()] @mock.patch.object(neutron.api, 'neutron') def test_list_extensions(self, nc): request = self.mock_rest_request(**{'GET': {}}) nc.list_extensions.return_value = self._extensions response = neutron.Extensions().get(request) self.assertStatusCode(response, 200) self.assertItemsCollectionEqual(response, TEST.api_extensions.list()) nc.list_extensions.assert_called_once_with(request) class NeutronDefaultQuotasTestCase(test.TestCase): @test.create_stubs({base: ('is_service_enabled',)}) @mock.patch.object(neutron.api, 'neutron') def test_quotas_sets_defaults_get_when_service_is_enabled(self, client): filters = {'user': {'tenant_id': 'tenant'}} request = self.mock_rest_request(**{'GET': dict(filters)}) base.is_service_enabled(request, 'network').AndReturn(True) client.tenant_quota_get.return_value = [ base.Quota("network", 100), base.Quota("q2", 101)] self.mox.ReplayAll() response = neutron.DefaultQuotaSets().get(request) self.assertStatusCode(response, 200) self.assertItemsCollectionEqual(response, [ {'limit': 100, 'display_name': 'Networks', 'name': 'network'}, {'limit': 101, 'display_name': 'Q2', 'name': 'q2'}]) client.tenant_quota_get.assert_called_once_with( request, request.user.tenant_id) @test.create_stubs({neutron.api.base: ('is_service_enabled',)}) 
@mock.patch.object(neutron.api, 'neutron') def test_quota_sets_defaults_get_when_service_is_disabled(self, client): filters = {'user': {'tenant_id': 'tenant'}} request = self.mock_rest_request(**{'GET': dict(filters)}) base.is_service_enabled(request, 'network').AndReturn(False) self.mox.ReplayAll() response = neutron.DefaultQuotaSets().get(request) self.assertStatusCode(response, 501) self.assertEqual(response.content.decode('utf-8'), '"Service Neutron is disabled."') client.tenant_quota_get.assert_not_called() class NeutronQuotaSetsTestCase(test.TestCase): def setUp(self): super(NeutronQuotaSetsTestCase, self).setUp() quota_set = self.neutron_quotas.list()[0] self._quota_data = {} for quota in quota_set: self._quota_data[quota.name] = quota.limit @mock.patch.object(neutron, 'quotas') @mock.patch.object(neutron.api, 'neutron') @mock.patch.object(neutron.api, 'base') def test_quotas_sets_patch(self, bc, nc, qc): request = self.mock_rest_request(body=''' {"network": "5", "subnet": "5", "port": "50", "router": "5", "floatingip": "50", "security_group": "5", "security_group_rule": "50", "volumes": "5", "cores": "50"} ''') qc.get_disabled_quotas.return_value = [] qc.NEUTRON_QUOTA_FIELDS = {n for n in self._quota_data} bc.is_service_enabled.return_value = True nc.is_extension_supported.return_value = True response = neutron.QuotasSets().patch(request, 'spam123') self.assertStatusCode(response, 204) self.assertEqual(response.content.decode('utf-8'), '') nc.tenant_quota_update.assert_called_once_with( request, 'spam123', network='5', subnet='5', port='50', router='5', floatingip='50', security_group='5', security_group_rule='50') @mock.patch.object(neutron, 'quotas') @mock.patch.object(neutron.api, 'neutron') @mock.patch.object(neutron.api, 'base') def test_quotas_sets_patch_when_service_is_disabled(self, bc, nc, qc): request = self.mock_rest_request(body=''' {"network": "5", "subnet": "5", "port": "50", "router": "5", "floatingip": "50", "security_group": "5", 
"security_group_rule": "50", "volumes": "5", "cores": "50"} ''') qc.get_disabled_quotas.return_value = [] qc.NEUTRON_QUOTA_FIELDS = {n for n in self._quota_data} bc.is_service_enabled.return_value = False response = neutron.QuotasSets().patch(request, 'spam123') message = \ '"Service Neutron is disabled or quotas extension not available."' self.assertStatusCode(response, 501) self.assertEqual(response.content.decode('utf-8'), message) nc.tenant_quota_update.assert_not_called() def mock_obj_to_dict(r): return mock.Mock(**{'to_dict.return_value': r}) def mock_factory(r): """mocks all the attributes as well as the to_dict """ mocked = mock_obj_to_dict(r) mocked.configure_mock(**r) return mocked
apache-2.0
ray-project/ray
rllib/agents/dreamer/dreamer_torch_policy.py
1
8742
import logging import ray from ray.rllib.agents.dreamer.utils import FreezeParameters from ray.rllib.models.catalog import ModelCatalog from ray.rllib.policy.policy_template import build_policy_class from ray.rllib.utils.framework import try_import_torch from ray.rllib.utils.torch_ops import apply_grad_clipping torch, nn = try_import_torch() if torch: from torch import distributions as td logger = logging.getLogger(__name__) # This is the computation graph for workers (inner adaptation steps) def compute_dreamer_loss(obs, action, reward, model, imagine_horizon, discount=0.99, lambda_=0.95, kl_coeff=1.0, free_nats=3.0, log=False): """Constructs loss for the Dreamer objective Args: obs (TensorType): Observations (o_t) action (TensorType): Actions (a_(t-1)) reward (TensorType): Rewards (r_(t-1)) model (TorchModelV2): DreamerModel, encompassing all other models imagine_horizon (int): Imagine horizon for actor and critic loss discount (float): Discount lambda_ (float): Lambda, like in GAE kl_coeff (float): KL Coefficient for Divergence loss in model loss free_nats (float): Threshold for minimum divergence in model loss log (bool): If log, generate gifs """ encoder_weights = list(model.encoder.parameters()) decoder_weights = list(model.decoder.parameters()) reward_weights = list(model.reward.parameters()) dynamics_weights = list(model.dynamics.parameters()) critic_weights = list(model.value.parameters()) model_weights = list(encoder_weights + decoder_weights + reward_weights + dynamics_weights) device = (torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")) # PlaNET Model Loss latent = model.encoder(obs) post, prior = model.dynamics.observe(latent, action) features = model.dynamics.get_feature(post) image_pred = model.decoder(features) reward_pred = model.reward(features) image_loss = -torch.mean(image_pred.log_prob(obs)) reward_loss = -torch.mean(reward_pred.log_prob(reward)) prior_dist = model.dynamics.get_dist(prior[0], prior[1]) post_dist = 
model.dynamics.get_dist(post[0], post[1]) div = torch.mean( torch.distributions.kl_divergence(post_dist, prior_dist).sum(dim=2)) div = torch.clamp(div, min=free_nats) model_loss = kl_coeff * div + reward_loss + image_loss # Actor Loss # [imagine_horizon, batch_length*batch_size, feature_size] with torch.no_grad(): actor_states = [v.detach() for v in post] with FreezeParameters(model_weights): imag_feat = model.imagine_ahead(actor_states, imagine_horizon) with FreezeParameters(model_weights + critic_weights): reward = model.reward(imag_feat).mean value = model.value(imag_feat).mean pcont = discount * torch.ones_like(reward) returns = lambda_return(reward[:-1], value[:-1], pcont[:-1], value[-1], lambda_) discount_shape = pcont[:1].size() discount = torch.cumprod( torch.cat([torch.ones(*discount_shape).to(device), pcont[:-2]], dim=0), dim=0) actor_loss = -torch.mean(discount * returns) # Critic Loss with torch.no_grad(): val_feat = imag_feat.detach()[:-1] target = returns.detach() val_discount = discount.detach() val_pred = model.value(val_feat) critic_loss = -torch.mean(val_discount * val_pred.log_prob(target)) # Logging purposes prior_ent = torch.mean(prior_dist.entropy()) post_ent = torch.mean(post_dist.entropy()) log_gif = None if log: log_gif = log_summary(obs, action, latent, image_pred, model) return_dict = { "model_loss": model_loss, "reward_loss": reward_loss, "image_loss": image_loss, "divergence": div, "actor_loss": actor_loss, "critic_loss": critic_loss, "prior_ent": prior_ent, "post_ent": post_ent, } if log_gif is not None: return_dict["log_gif"] = log_gif return return_dict # Similar to GAE-Lambda, calculate value targets def lambda_return(reward, value, pcont, bootstrap, lambda_): def agg_fn(x, y): return y[0] + y[1] * lambda_ * x next_values = torch.cat([value[1:], bootstrap[None]], dim=0) inputs = reward + pcont * next_values * (1 - lambda_) last = bootstrap returns = [] for i in reversed(range(len(inputs))): last = agg_fn(last, [inputs[i], pcont[i]]) 
returns.append(last) returns = list(reversed(returns)) returns = torch.stack(returns, dim=0) return returns # Creates gif def log_summary(obs, action, embed, image_pred, model): truth = obs[:6] + 0.5 recon = image_pred.mean[:6] init, _ = model.dynamics.observe(embed[:6, :5], action[:6, :5]) init = [itm[:, -1] for itm in init] prior = model.dynamics.imagine(action[:6, 5:], init) openl = model.decoder(model.dynamics.get_feature(prior)).mean mod = torch.cat([recon[:, :5] + 0.5, openl + 0.5], 1) error = (mod - truth + 1.0) / 2.0 return torch.cat([truth, mod, error], 3) def dreamer_loss(policy, model, dist_class, train_batch): log_gif = False if "log_gif" in train_batch: log_gif = True policy.stats_dict = compute_dreamer_loss( train_batch["obs"], train_batch["actions"], train_batch["rewards"], policy.model, policy.config["imagine_horizon"], policy.config["discount"], policy.config["lambda"], policy.config["kl_coeff"], policy.config["free_nats"], log_gif, ) loss_dict = policy.stats_dict return (loss_dict["model_loss"], loss_dict["actor_loss"], loss_dict["critic_loss"]) def build_dreamer_model(policy, obs_space, action_space, config): policy.model = ModelCatalog.get_model_v2( obs_space, action_space, 1, config["dreamer_model"], name="DreamerModel", framework="torch") policy.model_variables = policy.model.variables() return policy.model def action_sampler_fn(policy, model, input_dict, state, explore, timestep): """Action sampler function has two phases. During the prefill phase, actions are sampled uniformly [-1, 1]. During training phase, actions are evaluated through DreamerPolicy and an additive gaussian is added to incentivize exploration. 
""" obs = input_dict["obs"] # Custom Exploration if timestep <= policy.config["prefill_timesteps"]: logp = [0.0] # Random action in space [-1.0, 1.0] action = 2.0 * torch.rand(1, model.action_space.shape[0]) - 1.0 state = model.get_initial_state() else: # Weird RLLib Handling, this happens when env rests if len(state[0].size()) == 3: # Very hacky, but works on all envs state = model.get_initial_state() action, logp, state = model.policy(obs, state, explore) action = td.Normal(action, policy.config["explore_noise"]).sample() action = torch.clamp(action, min=-1.0, max=1.0) policy.global_timestep += policy.config["action_repeat"] return action, logp, state def dreamer_stats(policy, train_batch): return policy.stats_dict def dreamer_optimizer_fn(policy, config): model = policy.model encoder_weights = list(model.encoder.parameters()) decoder_weights = list(model.decoder.parameters()) reward_weights = list(model.reward.parameters()) dynamics_weights = list(model.dynamics.parameters()) actor_weights = list(model.actor.parameters()) critic_weights = list(model.value.parameters()) model_opt = torch.optim.Adam( encoder_weights + decoder_weights + reward_weights + dynamics_weights, lr=config["td_model_lr"]) actor_opt = torch.optim.Adam(actor_weights, lr=config["actor_lr"]) critic_opt = torch.optim.Adam(critic_weights, lr=config["critic_lr"]) return (model_opt, actor_opt, critic_opt) DreamerTorchPolicy = build_policy_class( name="DreamerTorchPolicy", framework="torch", get_default_config=lambda: ray.rllib.agents.dreamer.dreamer.DEFAULT_CONFIG, action_sampler_fn=action_sampler_fn, loss_fn=dreamer_loss, stats_fn=dreamer_stats, make_model=build_dreamer_model, optimizer_fn=dreamer_optimizer_fn, extra_grad_process_fn=apply_grad_clipping)
apache-2.0
n0m4dz/odoo
addons/mrp/wizard/mrp_workcenter_load.py
381
2222
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class mrp_workcenter_load(osv.osv_memory): _name = 'mrp.workcenter.load' _description = 'Work Center Load' _columns = { 'time_unit': fields.selection([('day', 'Day by day'),('week', 'Per week'),('month', 'Per month')],'Type of period', required=True), 'measure_unit': fields.selection([('hours', 'Amount in hours'),('cycles', 'Amount in cycles')],'Amount measuring unit', required=True), } def print_report(self, cr, uid, ids, context=None): """ To print the report of Work Center Load @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param context: A standard dictionary @return : Report """ if context is None: context = {} datas = {'ids' : context.get('active_ids',[])} res = self.read(cr, uid, ids, ['time_unit','measure_unit']) res = res and res[0] or {} datas['form'] = res return { 'type' : 'ir.actions.report.xml', 'report_name':'mrp.workcenter.load', 'datas' : datas, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Slezhuk/ansible
lib/ansible/modules/web_infrastructure/ejabberd_user.py
48
7599
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ejabberd_user version_added: "1.5" author: "Peter Sprygada (@privateip)" short_description: Manages users for ejabberd servers requirements: - ejabberd with mod_admin_extra description: - This module provides user management for ejabberd servers options: username: description: - the name of the user to manage required: true host: description: - the ejabberd host associated with this username required: true password: description: - the password to assign to the username required: false logging: description: - enables or disables the local syslog facility for this module required: false default: false choices: [ 'true', 'false', 'yes', 'no' ] state: description: - describe the desired state of the user to be managed required: false default: 'present' choices: [ 'present', 'absent' ] notes: - Password parameter is required for state == present only - Passwords must be stored in clear text for this release - The ejabberd configuration file must include mod_admin_extra as a module. ''' EXAMPLES = ''' # Example playbook entries using the ejabberd_user module to manage users state. 
- name: create a user if it does not exists ejabberd_user: username: test host: server password: password - name: delete a user if it exists ejabberd_user: username: test host: server state: absent ''' import syslog from ansible.module_utils.pycompat24 import get_exception from ansible.module_utils.basic import * class EjabberdUserException(Exception): """ Base exception for EjabberdUser class object """ pass class EjabberdUser(object): """ This object represents a user resource for an ejabberd server. The object manages user creation and deletion using ejabberdctl. The following commands are currently supported: * ejabberdctl register * ejabberdctl deregister """ def __init__(self, module): self.module = module self.logging = module.params.get('logging') self.state = module.params.get('state') self.host = module.params.get('host') self.user = module.params.get('username') self.pwd = module.params.get('password') @property def changed(self): """ This method will check the current user and see if the password has changed. It will return True if the user does not match the supplied credentials and False if it does not """ try: options = [self.user, self.host, self.pwd] (rc, out, err) = self.run_command('check_password', options) except EjabberdUserException: e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return rc @property def exists(self): """ This method will check to see if the supplied username exists for host specified. 
If the user exists True is returned, otherwise False is returned """ try: options = [self.user, self.host] (rc, out, err) = self.run_command('check_account', options) except EjabberdUserException: e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return not bool(int(rc)) def log(self, entry): """ This method will log information to the local syslog facility """ if self.logging: syslog.openlog('ansible-%s' % self.module._name) syslog.syslog(syslog.LOG_NOTICE, entry) def run_command(self, cmd, options): """ This method will run the any command specified and return the returns using the Ansible common module """ if not all(options): raise EjabberdUserException cmd = 'ejabberdctl %s ' % cmd cmd += " ".join(options) self.log('command: %s' % cmd) return self.module.run_command(cmd.split()) def update(self): """ The update method will update the credentials for the user provided """ try: options = [self.user, self.host, self.pwd] (rc, out, err) = self.run_command('change_password', options) except EjabberdUserException: e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return (rc, out, err) def create(self): """ The create method will create a new user on the host with the password provided """ try: options = [self.user, self.host, self.pwd] (rc, out, err) = self.run_command('register', options) except EjabberdUserException: e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return (rc, out, err) def delete(self): """ The delete method will delete the user from the host """ try: options = [self.user, self.host] (rc, out, err) = self.run_command('unregister', options) except EjabberdUserException: e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return (rc, out, err) def main(): module = AnsibleModule( argument_spec = dict( host=dict(default=None, type='str'), username=dict(default=None, type='str'), password=dict(default=None, type='str', no_log=True), 
state=dict(default='present', choices=['present', 'absent']), logging=dict(default=False, type='bool') ), supports_check_mode = True ) obj = EjabberdUser(module) rc = None result = dict() if obj.state == 'absent': if obj.exists: if module.check_mode: module.exit_json(changed=True) (rc, out, err) = obj.delete() if rc != 0: module.fail_json(msg=err, rc=rc) elif obj.state == 'present': if not obj.exists: if module.check_mode: module.exit_json(changed=True) (rc, out, err) = obj.create() elif obj.changed: if module.check_mode: module.exit_json(changed=True) (rc, out, err) = obj.update() if rc is not None and rc != 0: module.fail_json(msg=err, rc=rc) if rc is None: result['changed'] = False else: result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
BIT-SYS/gem5-spm-module
src/arch/x86/isa/insts/x87/control/initialize.py
91
2159
# Copyright (c) 2007 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' # FINIT # FNINIT '''
bsd-3-clause
MediffRobotics/DeepRobotics
DeepLearnMaterials/tutorials/tensorflowTUT/tf14_tensorboard/full_code.py
1
2025
# View more python learning tutorial on my Youtube and Youku channel!!! # Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg # Youku video tutorial: http://i.youku.com/pythontutorial """ Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly. """ from __future__ import print_function import tensorflow as tf def add_layer(inputs, in_size, out_size, activation_function=None): # add one more layer and return the output of this layer with tf.name_scope('layer'): with tf.name_scope('weights'): Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W') with tf.name_scope('biases'): biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b') with tf.name_scope('Wx_plus_b'): Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases) if activation_function is None: outputs = Wx_plus_b else: outputs = activation_function(Wx_plus_b, ) return outputs # define placeholder for inputs to network with tf.name_scope('inputs'): xs = tf.placeholder(tf.float32, [None, 1], name='x_input') ys = tf.placeholder(tf.float32, [None, 1], name='y_input') # add hidden layer l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu) # add output layer prediction = add_layer(l1, 10, 1, activation_function=None) # the error between prediciton and real data with tf.name_scope('loss'): loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1])) with tf.name_scope('train'): train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss) sess = tf.Session() writer = tf.train.SummaryWriter("logs/", sess.graph) # tf.initialize_all_variables() no long valid from # 2017-03-02 if using tensorflow >= 0.12 sess.run(tf.global_variables_initializer()) # direct to the local dir and run this in terminal: # $ tensorboard --logdir=logs
gpl-3.0
qnl/pyalazar
alazar/board_mock.py
1
7971
# Copyright (C) 2015 Chris Macklin # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Mock of a board for testing purposes.""" import multiprocessing as mp from itertools import izip import numpy as np from board import (def_acq_params, AlazarException, is_9870, is_9360, _make_channel_mask) import params from process import _process_buffers, _reshape_buffer from processor import BufferProcessor class MockAlazar(object): """Mock version of an Alazar board.""" def __init__(self, board_type): """Initialize a new MockAlazar digitizer. Args: board_type: the numeric code for which board type to imitate. """ self.board_type = board_type self.systemID = 1 self.boardID = 1 # Cython needs a getter to access this, imitate the same API def get_board_type(self): return self.board_type def get_board_model(self): return params.board_types[self.board_type] def setup_capture_clock(self, clock_source, sample_rate, decimation=0, edge="rising"): """Set the capture clock for this alazar board. This mock function is just a placeholder, it always returns success and doesn't do any validation of the inputs. """ pass def setup_input_channels(self, input_range, channel="all", coupling="dc", impedance="50ohm", bw="open"): """Set the input parameters for a digitizer channel. This mock function is just a placeholder, it always returns success and doesn't do any validation of the inputs. 
""" pass def setup_one_trigger(self, source_channel="ext", slope="rising", level=0.2, ext_coupling="dc", ext_range="5 V", delay = 0): """Configure the Alazar trigger engine. This mock function is just a placeholder, it always returns success and doesn't do any validation of the inputs. """ pass def acquire(self, samples_per_record, records_per_acquisition, records_per_buffer, channels_to_acquire="all", processors = [BufferProcessor()], buffer_count = 64, timeout = 5000): """Perform an acquisition using two-port NPT DMA mode. This mock function operates on the processors like a real board. Each mock record is a rising sawtooth where each successive sample rises by one digitizer unit, wrapping back to zero. It does not do any validation of the inputs, and never raises an exception. It actually does pickle and unpickle the processors to mimic the behavior of the real processing function, to avoid possible confusion of mutating the input buffers. """ buffers_per_acquisition = records_per_acquisition / records_per_buffer if is_9870(self.board_type): bits_per_sample = 8 interleave = False elif is_9360(self.board_type): bits_per_sample = 12 interleave = True else: raise MockAlazarException("MockAlazar only can mimic the " "9870 and 9360; got board type of {}" .format(self.board_type)) (_, channel_count) = _make_channel_mask(self.board_type, channels_to_acquire) bytes_per_sample = (bits_per_sample + 7) / 8 if bytes_per_sample <= 1: sample_type = np.uint8 else: sample_type = np.uint16 acq_params = def_acq_params(samples_per_record, records_per_acquisition, records_per_buffer, channel_count, sample_type, bits_per_sample) # get a queue to send buffers to the buffer processor buf_queue = mp.Queue() # get a queue to receive messages back from the processors comm = mp.Queue() # start a buffer processor to do the acquisition: buf_processor = mp.Process(target = _process_buffers, args = (buf_queue, comm, processors, acq_params,)) buf_processor.start() try: buf = 
make_mock_buffer(records_per_buffer, samples_per_record, bits_per_sample, sample_type, channel_count) # handle each buffer for _ in xrange(buffers_per_acquisition): # pickles the buffer and sends to the worker buf_queue.put( (buf, None) ) except Exception as err: buf_queue.put((None, err)) raise # get the processors and return them return comm.get() def make_mock_buffer(records_per_buffer, record_len, bit_depth, dtype, chan_count): """Return a buffer of sawtooth records. Each record has the form [0, 1, 2, 3, ... 2**bit_depth - 1, 0, 1, ...]; however, for sample depths greater than 8 bits, this value is bit-shifted into the most significant bits to fully mimic the behavior of the Alazar boards. The records for channel A are rising sawtooths, while the records for channel B are falling sawtooths. Args: records_per_buffer: the number of records in the buffer, also the first value of the shape of the output array record_len: the number of samples in each records, also the second value of the shape of the output array bit_depth: the bit depth of the board to emulate dtype: the dtype of the resulting numpy array chan_count: the number of acquisition channels """ buffer_length = records_per_buffer * record_len * chan_count buff = np.empty(buffer_length, dtype=dtype) for chan in xrange(chan_count): for rec in xrange(records_per_buffer): record_vals = mock_record(record_len, reverse=(chan==1), bit_depth=bit_depth) record = np.fromiter(record_vals, dtype=dtype, count=record_len) rec_start = 2*rec*record_len + chan buff[rec_start:rec_start+2*record_len:2] = record if bit_depth > 8: return buff << (16 - bit_depth) else: return buff def mock_record(record_len, reverse, bit_depth): """Generate the values in a single measurement record. Args: record_len: The number of samples in the record. reverse (bool): If True, generate a falling sawtooth pattern. Otherwise, rising. bit_depth: The bit depth of the ADC. 
""" limit = 2**bit_depth the_iter = xrange(record_len) if reverse: the_iter = reversed(the_iter) for val in the_iter: yield val % limit # --- Exception and error handling for MockAlazar boards class MockAlazarException(Exception): pass
gpl-2.0
eemirtekin/edx-platform
lms/djangoapps/open_ended_grading/staff_grading_service.py
17
16274
""" This module provides views that proxy to the staff grading backend service. """ import json import logging from django.conf import settings from django.http import HttpResponse, Http404 from django.utils.translation import ugettext as _ from opaque_keys.edx.locations import SlashSeparatedCourseKey from xmodule.open_ended_grading_classes.grading_service_module import GradingService, GradingServiceError from courseware.access import has_access from edxmako.shortcuts import render_to_string from student.models import unique_id_for_user from open_ended_grading.utils import does_location_exist import dogstats_wrapper as dog_stats_api log = logging.getLogger(__name__) STAFF_ERROR_MESSAGE = _( u'Could not contact the external grading server. Please contact the ' u'development team at {email}.' ).format( email=u'<a href="mailto:{tech_support_email}>{tech_support_email}</a>'.format( tech_support_email=settings.TECH_SUPPORT_EMAIL ) ) MAX_ALLOWED_FEEDBACK_LENGTH = 5000 class MockStaffGradingService(object): """ A simple mockup of a staff grading service, testing. 
""" def __init__(self): self.cnt = 0 def get_next(self, course_id, location, grader_id): self.cnt += 1 return {'success': True, 'submission_id': self.cnt, 'submission': 'Test submission {cnt}'.format(cnt=self.cnt), 'num_graded': 3, 'min_for_ml': 5, 'num_pending': 4, 'prompt': 'This is a fake prompt', 'ml_error_info': 'ML info', 'max_score': 2 + self.cnt % 3, 'rubric': 'A rubric'} def get_problem_list(self, course_id, grader_id): self.cnt += 1 return { 'success': True, 'problem_list': [ json.dumps({ 'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10, }), json.dumps({ 'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10, }), ], } def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged): return self.get_next(course_id, 'fake location', grader_id) class StaffGradingService(GradingService): """ Interface to staff grading backend. """ METRIC_NAME = 'edxapp.open_ended_grading.staff_grading_service' def __init__(self, config): config['render_template'] = render_to_string super(StaffGradingService, self).__init__(config) self.url = config['url'] + config['staff_grading'] self.login_url = self.url + '/login/' self.get_next_url = self.url + '/get_next_submission/' self.save_grade_url = self.url + '/save_grade/' self.get_problem_list_url = self.url + '/get_problem_list/' self.get_notifications_url = self.url + "/get_notifications/" def get_problem_list(self, course_id, grader_id): """ Get the list of problems for a given course. Args: course_id: course id that we want the problems of grader_id: who is grading this? The anonymous user_id of the grader. Returns: dict with the response from the service. 
(Deliberately not writing out the fields here--see the docs on the staff_grading view in the grading_controller repo) Raises: GradingServiceError: something went wrong with the connection. """ params = {'course_id': course_id.to_deprecated_string(), 'grader_id': grader_id} result = self.get(self.get_problem_list_url, params) tags = [u'course_id:{}'.format(course_id)] self._record_result('get_problem_list', result, tags) dog_stats_api.histogram( self._metric_name('get_problem_list.result.length'), len(result.get('problem_list', [])) ) return result def get_next(self, course_id, location, grader_id): """ Get the next thing to grade. Args: course_id: the course that this problem belongs to location: location of the problem that we are grading and would like the next submission for grader_id: who is grading this? The anonymous user_id of the grader. Returns: dict with the response from the service. (Deliberately not writing out the fields here--see the docs on the staff_grading view in the grading_controller repo) Raises: GradingServiceError: something went wrong with the connection. """ result = self._render_rubric( self.get( self.get_next_url, params={ 'location': location.to_deprecated_string(), 'grader_id': grader_id } ) ) tags = [u'course_id:{}'.format(course_id)] self._record_result('get_next', result, tags) return result def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores, submission_flagged): """ Save a score and feedback for a submission. Returns: dict with keys 'success': bool 'error': error msg, if something went wrong. Raises: GradingServiceError if there's a problem connecting. 
""" data = {'course_id': course_id.to_deprecated_string(), 'submission_id': submission_id, 'score': score, 'feedback': feedback, 'grader_id': grader_id, 'skipped': skipped, 'rubric_scores': rubric_scores, 'rubric_scores_complete': True, 'submission_flagged': submission_flagged} result = self._render_rubric(self.post(self.save_grade_url, data=data)) tags = [u'course_id:{}'.format(course_id)] self._record_result('save_grade', result, tags) return result def get_notifications(self, course_id): params = {'course_id': course_id.to_deprecated_string()} result = self.get(self.get_notifications_url, params) tags = [ u'course_id:{}'.format(course_id), u'staff_needs_to_grade:{}'.format(result.get('staff_needs_to_grade')) ] self._record_result('get_notifications', result, tags) return result # don't initialize until staff_grading_service() is called--means that just # importing this file doesn't create objects that may not have the right config _service = None def staff_grading_service(): """ Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True, returns a mock one, otherwise a real one. Caches the result, so changing the setting after the first call to this function will have no effect. """ global _service if _service is not None: return _service if settings.MOCK_STAFF_GRADING: _service = MockStaffGradingService() else: _service = StaffGradingService(settings.OPEN_ENDED_GRADING_INTERFACE) return _service def _err_response(msg): """ Return a HttpResponse with a json dump with success=False, and the given error message. """ return HttpResponse(json.dumps({'success': False, 'error': msg}), mimetype="application/json") def _check_access(user, course_id): """ Raise 404 if user doesn't have staff access to course_id """ if not has_access(user, 'staff', course_id): raise Http404 return def get_next(request, course_id): """ Get the next thing to grade for course_id and with the location specified in the request. 
Returns a json dict with the following keys: 'success': bool 'submission_id': a unique identifier for the submission, to be passed back with the grade. 'submission': the submission, rendered as read-only html for grading 'rubric': the rubric, also rendered as html. 'message': if there was no submission available, but nothing went wrong, there will be a message field. 'error': if success is False, will have an error message with more info. """ assert(isinstance(course_id, basestring)) course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) _check_access(request.user, course_key) required = set(['location']) if request.method != 'POST': raise Http404 actual = set(request.POST.keys()) missing = required - actual if len(missing) > 0: return _err_response('Missing required keys {0}'.format( ', '.join(missing))) grader_id = unique_id_for_user(request.user) p = request.POST location = course_key.make_usage_key_from_deprecated_string(p['location']) return HttpResponse(json.dumps(_get_next(course_key, grader_id, location)), mimetype="application/json") def get_problem_list(request, course_id): """ Get all the problems for the given course id Returns a json dict with the following keys: success: bool problem_list: a list containing json dicts with the following keys: each dict represents a different problem in the course location: the location of the problem problem_name: the name of the problem num_graded: the number of responses that have been graded num_pending: the number of responses that are sitting in the queue min_for_ml: the number of responses that need to be graded before the ml can be run 'error': if success is False, will have an error message with more info. 
""" assert(isinstance(course_id, basestring)) course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) _check_access(request.user, course_key) try: response = staff_grading_service().get_problem_list(course_key, unique_id_for_user(request.user)) # If 'problem_list' is in the response, then we got a list of problems from the ORA server. # If it is not, then ORA could not find any problems. if 'problem_list' in response: problem_list = response['problem_list'] else: problem_list = [] # Make an error messages to reflect that we could not find anything to grade. response['error'] = _( u'Cannot find any open response problems in this course. ' u'Have you submitted answers to any open response assessment questions? ' u'If not, please do so and return to this page.' ) valid_problem_list = [] for i in xrange(0, len(problem_list)): # Needed to ensure that the 'location' key can be accessed. try: problem_list[i] = json.loads(problem_list[i]) except Exception: pass if does_location_exist(course_key.make_usage_key_from_deprecated_string(problem_list[i]['location'])): valid_problem_list.append(problem_list[i]) response['problem_list'] = valid_problem_list response = json.dumps(response) return HttpResponse(response, mimetype="application/json") except GradingServiceError: #This is a dev_facing_error log.exception( "Error from staff grading service in open " "ended grading. server url: {0}".format(staff_grading_service().url) ) #This is a staff_facing_error return HttpResponse(json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE})) def _get_next(course_id, grader_id, location): """ Implementation of get_next (also called from save_grade) -- returns a json string """ try: return staff_grading_service().get_next(course_id, location, grader_id) except GradingServiceError: #This is a dev facing error log.exception( "Error from staff grading service in open " "ended grading. 
server url: {0}".format(staff_grading_service().url) ) #This is a staff_facing_error return json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE}) def save_grade(request, course_id): """ Save the grade and feedback for a submission, and, if all goes well, return the next thing to grade. Expects the following POST parameters: 'score': int 'feedback': string 'submission_id': int Returns the same thing as get_next, except that additional error messages are possible if something goes wrong with saving the grade. """ course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) _check_access(request.user, course_key) if request.method != 'POST': raise Http404 p = request.POST required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged']) skipped = 'skipped' in p #If the instructor has skipped grading the submission, then there will not be any rubric scores. #Only add in the rubric scores if the instructor has not skipped. if not skipped: required.add('rubric_scores[]') actual = set(p.keys()) missing = required - actual if len(missing) > 0: return _err_response('Missing required keys {0}'.format( ', '.join(missing))) success, message = check_feedback_length(p) if not success: return _err_response(message) grader_id = unique_id_for_user(request.user) location = course_key.make_usage_key_from_deprecated_string(p['location']) try: result = staff_grading_service().save_grade(course_key, grader_id, p['submission_id'], p['score'], p['feedback'], skipped, p.getlist('rubric_scores[]'), p['submission_flagged']) except GradingServiceError: #This is a dev_facing_error log.exception( "Error saving grade in the staff grading interface in open ended grading. 
Request: {0} Course ID: {1}".format( request, course_id)) #This is a staff_facing_error return _err_response(STAFF_ERROR_MESSAGE) except ValueError: #This is a dev_facing_error log.exception( "save_grade returned broken json in the staff grading interface in open ended grading: {0}".format( result_json)) #This is a staff_facing_error return _err_response(STAFF_ERROR_MESSAGE) if not result.get('success', False): #This is a dev_facing_error log.warning( 'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json)) return _err_response(STAFF_ERROR_MESSAGE) # Ok, save_grade seemed to work. Get the next submission to grade. return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)), mimetype="application/json") def check_feedback_length(data): feedback = data.get("feedback") if feedback and len(feedback) > MAX_ALLOWED_FEEDBACK_LENGTH: return False, "Feedback is too long, Max length is {0} characters.".format( MAX_ALLOWED_FEEDBACK_LENGTH ) else: return True, ""
agpl-3.0
buntyke/Flask
microblog/flask/lib/python2.7/site-packages/whoosh/codec/base.py
52
24009
# Copyright 2011 Matt Chaput. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Matt Chaput. """ This module contains base classes/interfaces for "codec" objects. 
""" from bisect import bisect_right from whoosh import columns from whoosh.automata import lev from whoosh.compat import abstractmethod, izip, unichr, xrange from whoosh.filedb.compound import CompoundStorage from whoosh.system import emptybytes from whoosh.util import random_name # Exceptions class OutOfOrderError(Exception): pass # Base classes class Codec(object): length_stats = True # Per document value writer @abstractmethod def per_document_writer(self, storage, segment): raise NotImplementedError # Inverted index writer @abstractmethod def field_writer(self, storage, segment): raise NotImplementedError # Postings @abstractmethod def postings_writer(self, dbfile, byteids=False): raise NotImplementedError @abstractmethod def postings_reader(self, dbfile, terminfo, format_, term=None, scorer=None): raise NotImplementedError # Index readers def automata(self, storage, segment): return Automata() @abstractmethod def terms_reader(self, storage, segment): raise NotImplementedError @abstractmethod def per_document_reader(self, storage, segment): raise NotImplementedError # Segments and generations @abstractmethod def new_segment(self, storage, indexname): raise NotImplementedError class WrappingCodec(Codec): def __init__(self, child): self._child = child def per_document_writer(self, storage, segment): return self._child.per_document_writer(storage, segment) def field_writer(self, storage, segment): return self._child.field_writer(storage, segment) def postings_writer(self, dbfile, byteids=False): return self._child.postings_writer(dbfile, byteids=byteids) def postings_reader(self, dbfile, terminfo, format_, term=None, scorer=None): return self._child.postings_reader(dbfile, terminfo, format_, term=term, scorer=scorer) def automata(self, storage, segment): return self._child.automata(storage, segment) def terms_reader(self, storage, segment): return self._child.terms_reader(storage, segment) def per_document_reader(self, storage, segment): return 
self._child.per_document_reader(storage, segment) def new_segment(self, storage, indexname): return self._child.new_segment(storage, indexname) # Writer classes class PerDocumentWriter(object): @abstractmethod def start_doc(self, docnum): raise NotImplementedError @abstractmethod def add_field(self, fieldname, fieldobj, value, length): raise NotImplementedError @abstractmethod def add_column_value(self, fieldname, columnobj, value): raise NotImplementedError("Codec does not implement writing columns") @abstractmethod def add_vector_items(self, fieldname, fieldobj, items): raise NotImplementedError def add_vector_matcher(self, fieldname, fieldobj, vmatcher): def readitems(): while vmatcher.is_active(): text = vmatcher.id() weight = vmatcher.weight() valuestring = vmatcher.value() yield (text, weight, valuestring) vmatcher.next() self.add_vector_items(fieldname, fieldobj, readitems()) def finish_doc(self): pass def close(self): pass class FieldWriter(object): def add_postings(self, schema, lengths, items): # This method translates a generator of (fieldname, btext, docnum, w, v) # postings into calls to start_field(), start_term(), add(), # finish_term(), finish_field(), etc. start_field = self.start_field start_term = self.start_term add = self.add finish_term = self.finish_term finish_field = self.finish_field if lengths: dfl = lengths.doc_field_length else: dfl = lambda docnum, fieldname: 0 # The fieldname of the previous posting lastfn = None # The bytes text of the previous posting lasttext = None # The (fieldname, btext) of the previous spelling posting lastspell = None # The field object for the current field fieldobj = None for fieldname, btext, docnum, weight, value in items: # Check for out-of-order postings. This is convoluted because Python # 3 removed the ability to compare a string to None if lastfn is not None and fieldname < lastfn: raise OutOfOrderError("Field %r .. 
%r" % (lastfn, fieldname)) if fieldname == lastfn and lasttext and btext < lasttext: raise OutOfOrderError("Term %s:%r .. %s:%r" % (lastfn, lasttext, fieldname, btext)) # If the fieldname of this posting is different from the last one, # tell the writer we're starting a new field if fieldname != lastfn: if lasttext is not None: finish_term() if lastfn is not None and fieldname != lastfn: finish_field() fieldobj = schema[fieldname] start_field(fieldname, fieldobj) lastfn = fieldname lasttext = None # HACK: items where docnum == -1 indicate words that should be added # to the spelling graph, not the postings if docnum == -1: # spellterm = (fieldname, btext) # # There can be duplicates of spelling terms, so only add a spell # # term if it's greater than the last one # if lastspell is None or spellterm > lastspell: # spellword = fieldobj.from_bytes(btext) # self.add_spell_word(fieldname, spellword) # lastspell = spellterm continue # If this term is different from the term in the previous posting, # tell the writer to start a new term if btext != lasttext: if lasttext is not None: finish_term() start_term(btext) lasttext = btext # Add this posting length = dfl(docnum, fieldname) if value is None: value = emptybytes add(docnum, weight, value, length) if lasttext is not None: finish_term() if lastfn is not None: finish_field() @abstractmethod def start_field(self, fieldname, fieldobj): raise NotImplementedError @abstractmethod def start_term(self, text): raise NotImplementedError @abstractmethod def add(self, docnum, weight, vbytes, length): raise NotImplementedError def add_spell_word(self, fieldname, text): raise NotImplementedError @abstractmethod def finish_term(self): raise NotImplementedError def finish_field(self): pass def close(self): pass # Postings class PostingsWriter(object): @abstractmethod def start_postings(self, format_, terminfo): raise NotImplementedError @abstractmethod def add_posting(self, id_, weight, vbytes, length=None): raise NotImplementedError 
def finish_postings(self): pass @abstractmethod def written(self): """Returns True if this object has already written to disk. """ raise NotImplementedError # Reader classes class FieldCursor(object): def first(self): raise NotImplementedError def find(self, string): raise NotImplementedError def next(self): raise NotImplementedError def term(self): raise NotImplementedError class TermsReader(object): @abstractmethod def __contains__(self, term): raise NotImplementedError @abstractmethod def cursor(self, fieldname, fieldobj): raise NotImplementedError @abstractmethod def terms(self): raise NotImplementedError @abstractmethod def terms_from(self, fieldname, prefix): raise NotImplementedError @abstractmethod def items(self): raise NotImplementedError @abstractmethod def items_from(self, fieldname, prefix): raise NotImplementedError @abstractmethod def term_info(self, fieldname, text): raise NotImplementedError @abstractmethod def frequency(self, fieldname, text): return self.term_info(fieldname, text).weight() @abstractmethod def doc_frequency(self, fieldname, text): return self.term_info(fieldname, text).doc_frequency() @abstractmethod def matcher(self, fieldname, text, format_, scorer=None): raise NotImplementedError @abstractmethod def indexed_field_names(self): raise NotImplementedError def close(self): pass class Automata(object): @staticmethod def levenshtein_dfa(uterm, maxdist, prefix=0): return lev.levenshtein_automaton(uterm, maxdist, prefix).to_dfa() @staticmethod def find_matches(dfa, cur): unull = unichr(0) term = cur.text() if term is None: return match = dfa.next_valid_string(term) while match: cur.find(match) term = cur.text() if term is None: return if match == term: yield match term += unull match = dfa.next_valid_string(term) def terms_within(self, fieldcur, uterm, maxdist, prefix=0): dfa = self.levenshtein_dfa(uterm, maxdist, prefix) return self.find_matches(dfa, fieldcur) # Per-doc value reader class PerDocumentReader(object): def close(self): 
pass @abstractmethod def doc_count(self): raise NotImplementedError @abstractmethod def doc_count_all(self): raise NotImplementedError # Deletions @abstractmethod def has_deletions(self): raise NotImplementedError @abstractmethod def is_deleted(self, docnum): raise NotImplementedError @abstractmethod def deleted_docs(self): raise NotImplementedError def all_doc_ids(self): """ Returns an iterator of all (undeleted) document IDs in the reader. """ is_deleted = self.is_deleted return (docnum for docnum in xrange(self.doc_count_all()) if not is_deleted(docnum)) def iter_docs(self): for docnum in self.all_doc_ids(): yield docnum, self.stored_fields(docnum) # Columns def supports_columns(self): return False def has_column(self, fieldname): return False def list_columns(self): raise NotImplementedError # Don't need to override this if supports_columns() returns False def column_reader(self, fieldname, column): raise NotImplementedError # Bitmaps def field_docs(self, fieldname): return None # Lengths @abstractmethod def doc_field_length(self, docnum, fieldname, default=0): raise NotImplementedError @abstractmethod def field_length(self, fieldname): raise NotImplementedError @abstractmethod def min_field_length(self, fieldname): raise NotImplementedError @abstractmethod def max_field_length(self, fieldname): raise NotImplementedError # Vectors def has_vector(self, docnum, fieldname): return False # Don't need to override this if has_vector() always returns False def vector(self, docnum, fieldname, format_): raise NotImplementedError # Stored @abstractmethod def stored_fields(self, docnum): raise NotImplementedError def all_stored_fields(self): for docnum in self.all_doc_ids(): yield self.stored_fields(docnum) # Segment base class class Segment(object): """Do not instantiate this object directly. It is used by the Index object to hold information about a segment. A list of objects of this class are pickled as part of the TOC file. 
The TOC file stores a minimal amount of information -- mostly a list of Segment objects. Segments are the real reverse indexes. Having multiple segments allows quick incremental indexing: just create a new segment for the new documents, and have the index overlay the new segment over previous ones for purposes of reading/search. "Optimizing" the index combines the contents of existing segments into one (removing any deleted documents along the way). """ # Extension for compound segment files COMPOUND_EXT = ".seg" # self.indexname # self.segid def __init__(self, indexname): self.indexname = indexname self.segid = self._random_id() self.compound = False @classmethod def _random_id(cls, size=16): return random_name(size=size) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.segment_id()) def codec(self): raise NotImplementedError def index_name(self): return self.indexname def segment_id(self): if hasattr(self, "name"): # Old segment class return self.name else: return "%s_%s" % (self.index_name(), self.segid) def is_compound(self): if not hasattr(self, "compound"): return False return self.compound # File convenience methods def make_filename(self, ext): return "%s%s" % (self.segment_id(), ext) def list_files(self, storage): prefix = "%s." % self.segment_id() return [name for name in storage.list() if name.startswith(prefix)] def create_file(self, storage, ext, **kwargs): """Convenience method to create a new file in the given storage named with this segment's ID and the given extension. Any keyword arguments are passed to the storage's create_file method. """ fname = self.make_filename(ext) return storage.create_file(fname, **kwargs) def open_file(self, storage, ext, **kwargs): """Convenience method to open a file in the given storage named with this segment's ID and the given extension. Any keyword arguments are passed to the storage's open_file method. 
""" fname = self.make_filename(ext) return storage.open_file(fname, **kwargs) def create_compound_file(self, storage): segfiles = self.list_files(storage) assert not any(name.endswith(self.COMPOUND_EXT) for name in segfiles) cfile = self.create_file(storage, self.COMPOUND_EXT) CompoundStorage.assemble(cfile, storage, segfiles) for name in segfiles: storage.delete_file(name) self.compound = True def open_compound_file(self, storage): name = self.make_filename(self.COMPOUND_EXT) dbfile = storage.open_file(name) return CompoundStorage(dbfile, use_mmap=storage.supports_mmap) # Abstract methods @abstractmethod def doc_count_all(self): """ Returns the total number of documents, DELETED OR UNDELETED, in this segment. """ raise NotImplementedError def doc_count(self): """ Returns the number of (undeleted) documents in this segment. """ return self.doc_count_all() - self.deleted_count() def set_doc_count(self, doccount): raise NotImplementedError def has_deletions(self): """ Returns True if any documents in this segment are deleted. """ return self.deleted_count() > 0 @abstractmethod def deleted_count(self): """ Returns the total number of deleted documents in this segment. """ raise NotImplementedError @abstractmethod def deleted_docs(self): raise NotImplementedError @abstractmethod def delete_document(self, docnum, delete=True): """Deletes the given document number. The document is not actually removed from the index until it is optimized. :param docnum: The document number to delete. :param delete: If False, this undeletes a deleted document. """ raise NotImplementedError @abstractmethod def is_deleted(self, docnum): """ Returns True if the given document number is deleted. 
""" raise NotImplementedError def should_assemble(self): return True # Wrapping Segment class WrappingSegment(Segment): def __init__(self, child): self._child = child def codec(self): return self._child.codec() def index_name(self): return self._child.index_name() def segment_id(self): return self._child.segment_id() def is_compound(self): return self._child.is_compound() def should_assemble(self): return self._child.should_assemble() def make_filename(self, ext): return self._child.make_filename(ext) def list_files(self, storage): return self._child.list_files(storage) def create_file(self, storage, ext, **kwargs): return self._child.create_file(storage, ext, **kwargs) def open_file(self, storage, ext, **kwargs): return self._child.open_file(storage, ext, **kwargs) def create_compound_file(self, storage): return self._child.create_compound_file(storage) def open_compound_file(self, storage): return self._child.open_compound_file(storage) def delete_document(self, docnum, delete=True): return self._child.delete_document(docnum, delete=delete) def has_deletions(self): return self._child.has_deletions() def deleted_count(self): return self._child.deleted_count() def deleted_docs(self): return self._child.deleted_docs() def is_deleted(self, docnum): return self._child.is_deleted(docnum) def set_doc_count(self, doccount): self._child.set_doc_count(doccount) def doc_count(self): return self._child.doc_count() def doc_count_all(self): return self._child.doc_count_all() # Multi per doc reader class MultiPerDocumentReader(PerDocumentReader): def __init__(self, readers, offset=0): self._readers = readers self._doc_offsets = [] self._doccount = 0 for pdr in readers: self._doc_offsets.append(self._doccount) self._doccount += pdr.doc_count_all() self.is_closed = False def close(self): for r in self._readers: r.close() self.is_closed = True def doc_count_all(self): return self._doccount def doc_count(self): total = 0 for r in self._readers: total += r.doc_count() return total 
def _document_reader(self, docnum): return max(0, bisect_right(self._doc_offsets, docnum) - 1) def _reader_and_docnum(self, docnum): rnum = self._document_reader(docnum) offset = self._doc_offsets[rnum] return rnum, docnum - offset # Deletions def has_deletions(self): return any(r.has_deletions() for r in self._readers) def is_deleted(self, docnum): x, y = self._reader_and_docnum(docnum) return self._readers[x].is_deleted(y) def deleted_docs(self): for r, offset in izip(self._readers, self._doc_offsets): for docnum in r.deleted_docs(): yield docnum + offset def all_doc_ids(self): for r, offset in izip(self._readers, self._doc_offsets): for docnum in r.all_doc_ids(): yield docnum + offset # Columns def has_column(self, fieldname): return any(r.has_column(fieldname) for r in self._readers) def column_reader(self, fieldname, column): if not self.has_column(fieldname): raise ValueError("No column %r" % (fieldname,)) default = column.default_value() colreaders = [] for r in self._readers: if r.has_column(fieldname): cr = r.column_reader(fieldname, column) else: cr = columns.EmptyColumnReader(default, r.doc_count_all()) colreaders.append(cr) if len(colreaders) == 1: return colreaders[0] else: return columns.MultiColumnReader(colreaders) # Lengths def doc_field_length(self, docnum, fieldname, default=0): x, y = self._reader_and_docnum(docnum) return self._readers[x].doc_field_length(y, fieldname, default) def field_length(self, fieldname): total = 0 for r in self._readers: total += r.field_length(fieldname) return total def min_field_length(self): return min(r.min_field_length() for r in self._readers) def max_field_length(self): return max(r.max_field_length() for r in self._readers) # Extended base classes class PerDocWriterWithColumns(PerDocumentWriter): def __init__(self): PerDocumentWriter.__init__(self) # Implementations need to set these attributes self._storage = None self._segment = None self._docnum = None @abstractmethod def _has_column(self, fieldname): raise 
NotImplementedError @abstractmethod def _create_column(self, fieldname, column): raise NotImplementedError @abstractmethod def _get_column(self, fieldname): raise NotImplementedError def add_column_value(self, fieldname, column, value): if not self._has_column(fieldname): self._create_column(fieldname, column) self._get_column(fieldname).add(self._docnum, value) # FieldCursor implementations class EmptyCursor(FieldCursor): def first(self): return None def find(self, term): return None def next(self): return None def text(self): return None def term_info(self): return None def is_valid(self): return False
mit
lucarebuffi/wofrysrw
wofrysrw/beamline/optical_elements/mirrors/srw_spherical_mirror.py
1
3978
from wofrysrw.beamline.optical_elements.mirrors.srw_mirror import SRWMirror, Orientation, SimulationMethod, TreatInputOutput

from syned.beamline.shape import Sphere

from oasys_srw.srwlib import SRWLOptMirSph

class SRWSphericalMirror(SRWMirror):
    """
    Spherical mirror optical element for SRW wavefront propagation.

    Thin wrapper around the native SRW ``SRWLOptMirSph`` element: it adds
    the single shape parameter that distinguishes it from the generic
    ``SRWMirror`` base class -- the sphere radius.
    """

    def __init__(self,
                 name                               = "Undefined",
                 optical_element_displacement       = None,
                 tangential_size                    = 1.2,
                 sagittal_size                      = 0.01,
                 grazing_angle                      = 0.003,
                 orientation_of_reflection_plane    = Orientation.UP,
                 invert_tangent_component           = False,
                 radius                             = 1,
                 height_profile_data_file           = "mirror.dat",
                 height_profile_data_file_dimension = 1,
                 height_amplification_coefficient   = 1.0):
        # All geometry/orientation parameters are handled by SRWMirror;
        # only `radius` is specific to the spherical shape.
        # NOTE(review): `radius` is presumably in the same length unit as
        # the mirror sizes (meters in SRW convention) -- confirm against
        # the SRW documentation before relying on it.
        super().__init__(name=name,
                         optical_element_displacement=optical_element_displacement,
                         tangential_size=tangential_size,
                         sagittal_size=sagittal_size,
                         grazing_angle=grazing_angle,
                         orientation_of_reflection_plane=orientation_of_reflection_plane,
                         invert_tangent_component=invert_tangent_component,
                         height_profile_data_file=height_profile_data_file,
                         height_profile_data_file_dimension=height_profile_data_file_dimension,
                         height_amplification_coefficient=height_amplification_coefficient)

        self.radius = radius

    def get_shape(self):
        # syned geometrical shape corresponding to this mirror surface.
        return Sphere()

    def get_SRWLOptMir(self, nvx, nvy, nvz, tvx, tvy, x, y, ap_shape):
        # Build the native SRW optical element with this mirror's geometry.
        # Normal (nv*) and tangential (tv*) vector components plus the (x, y)
        # center position are computed by the caller (SRWMirror base class).
        return SRWLOptMirSph(_size_tang=self.tangential_size,
                             _size_sag=self.sagittal_size,
                             _r=self.radius,
                             _ap_shape=ap_shape,
                             _sim_meth=SimulationMethod.THICK,
                             _treat_in_out=TreatInputOutput.WAVEFRONT_INPUT_CENTER_OUTPUT_CENTER,
                             _nvx=nvx,
                             _nvy=nvy,
                             _nvz=nvz,
                             _tvx=tvx,
                             _tvy=tvy,
                             _x=x,
                             _y=y)

    def fromSRWLOpt(self, srwlopt=SRWLOptMirSph()):
        """
        Populate this element from a native ``SRWLOptMirSph`` instance.

        :raises ValueError: if `srwlopt` is not a ``SRWLOptMirSph``.
        """
        # BUG FIX: the error message previously named SRWLOptMirEl (the
        # elliptical mirror -- a copy/paste leftover); it now reports the
        # class that is actually checked.
        if not isinstance(srwlopt, SRWLOptMirSph):
            raise ValueError("SRW object is not a SRWLOptMirSph object")

        super().fromSRWLOpt(srwlopt)

        self.radius = srwlopt.rad

    def to_python_code_aux(self, nvx, nvy, nvz, tvx, tvy, x, y, ap_shape):
        # Emit the SRWLOptMirSph constructor call as python source text,
        # mirroring get_SRWLOptMir() argument-for-argument.
        text_code  = "SRWLOptMirSph(_size_tang=" + str(self.tangential_size) + "," + "\n"
        text_code += "              _size_sag=" + str(self.sagittal_size) + "," + "\n"
        text_code += "              _r=" + str(self.radius) + "," + "\n"
        text_code += "              _ap_shape='" + str(ap_shape) + "'," + "\n"
        text_code += "              _sim_meth=" + str(SimulationMethod.THICK) + "," + "\n"
        text_code += "              _treat_in_out=" + str(TreatInputOutput.WAVEFRONT_INPUT_CENTER_OUTPUT_CENTER) + "," + "\n"
        text_code += "              _nvx=" + str(nvx) + "," + "\n"
        text_code += "              _nvy=" + str(nvy) + "," + "\n"
        text_code += "              _nvz=" + str(nvz) + "," + "\n"
        text_code += "              _tvx=" + str(tvx) + "," + "\n"
        text_code += "              _tvy=" + str(tvy) + "," + "\n"
        text_code += "              _x=" + str(x) + "," + "\n"
        text_code += "              _y=" + str(y) + ")" + "\n"

        return text_code
mit
dsullivan7/scikit-learn
sklearn/qda.py
21
7639
""" Quadratic Discriminant Analysis """ # Author: Matthieu Perrot <matthieu.perrot@gmail.com> # # License: BSD 3 clause import warnings import numpy as np from .base import BaseEstimator, ClassifierMixin from .externals.six.moves import xrange from .utils import check_array, check_X_y from .utils.validation import check_is_fitted from .utils.fixes import bincount __all__ = ['QDA'] class QDA(BaseEstimator, ClassifierMixin): """ Quadratic Discriminant Analysis (QDA) A classifier with a quadratic decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule. The model fits a Gaussian density to each class. Parameters ---------- priors : array, optional, shape = [n_classes] Priors on classes reg_param : float, optional Regularizes the covariance estimate as ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)`` Attributes ---------- covariances_ : list of array-like, shape = [n_features, n_features] Covariance matrices of each class. means_ : array-like, shape = [n_classes, n_features] Class means. priors_ : array-like, shape = [n_classes] Class priors (sum to 1). rotations_ : list of arrays For each class k an array of shape [n_features, n_k], with ``n_k = min(n_features, number of elements in class k)`` It is the rotation of the Gaussian distribution, i.e. its principal axis. scalings_ : list of arrays For each class k an array of shape [n_k]. It contains the scaling of the Gaussian distributions along its principal axes, i.e. the variance in the rotated coordinate system. 
Examples -------- >>> from sklearn.qda import QDA >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = QDA() >>> clf.fit(X, y) QDA(priors=None, reg_param=0.0) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- sklearn.lda.LDA: Linear discriminant analysis """ def __init__(self, priors=None, reg_param=0.): self.priors = np.asarray(priors) if priors is not None else None self.reg_param = reg_param def fit(self, X, y, store_covariances=False, tol=1.0e-4): """ Fit the QDA model according to the given training data and parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array, shape = [n_samples] Target values (integers) store_covariances : boolean If True the covariance matrices are computed and stored in the `self.covariances_` attribute. tol : float, optional, default 1.0e-4 Threshold used for rank estimation. """ X, y = check_X_y(X, y) self.classes_, y = np.unique(y, return_inverse=True) n_samples, n_features = X.shape n_classes = len(self.classes_) if n_classes < 2: raise ValueError('y has less than 2 classes') if self.priors is None: self.priors_ = bincount(y) / float(n_samples) else: self.priors_ = self.priors cov = None if store_covariances: cov = [] means = [] scalings = [] rotations = [] for ind in xrange(n_classes): Xg = X[y == ind, :] meang = Xg.mean(0) means.append(meang) if len(Xg) == 1: raise ValueError('y has only 1 sample in class %s, covariance ' 'is ill defined.' 
% str(self.classes_[ind])) Xgc = Xg - meang # Xgc = U * S * V.T U, S, Vt = np.linalg.svd(Xgc, full_matrices=False) rank = np.sum(S > tol) if rank < n_features: warnings.warn("Variables are collinear") S2 = (S ** 2) / (len(Xg) - 1) S2 = ((1 - self.reg_param) * S2) + self.reg_param if store_covariances: # cov = V * (S^2 / (n-1)) * V.T cov.append(np.dot(S2 * Vt.T, Vt)) scalings.append(S2) rotations.append(Vt.T) if store_covariances: self.covariances_ = cov self.means_ = np.asarray(means) self.scalings_ = scalings self.rotations_ = rotations return self def _decision_function(self, X): check_is_fitted(self, 'classes_') X = check_array(X) norm2 = [] for i in range(len(self.classes_)): R = self.rotations_[i] S = self.scalings_[i] Xm = X - self.means_[i] X2 = np.dot(Xm, R * (S ** (-0.5))) norm2.append(np.sum(X2 ** 2, 1)) norm2 = np.array(norm2).T # shape = [len(X), n_classes] u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) return (-0.5 * (norm2 + u) + np.log(self.priors_)) def decision_function(self, X): """Apply decision function to an array of samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples (test vectors). Returns ------- C : array, shape = [n_samples, n_classes] or [n_samples,] Decision function values related to each class, per sample. In the two-class case, the shape is [n_samples,], giving the log likelihood ratio of the positive class. """ dec_func = self._decision_function(X) # handle special case of two classes if len(self.classes_) == 2: return dec_func[:, 1] - dec_func[:, 0] return dec_func def predict(self, X): """Perform classification on an array of test vectors X. The predicted class C for each sample in X is returned. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] """ d = self._decision_function(X) y_pred = self.classes_.take(d.argmax(1)) return y_pred def predict_proba(self, X): """Return posterior probabilities of classification. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples/test vectors. Returns ------- C : array, shape = [n_samples, n_classes] Posterior probabilities of classification per class. """ values = self._decision_function(X) # compute the likelihood of the underlying gaussian models # up to a multiplicative constant. likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis]) # compute posterior probabilities return likelihood / likelihood.sum(axis=1)[:, np.newaxis] def predict_log_proba(self, X): """Return posterior probabilities of classification. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples/test vectors. Returns ------- C : array, shape = [n_samples, n_classes] Posterior log-probabilities of classification per class. """ # XXX : can do better to avoid precision overflows probas_ = self.predict_proba(X) return np.log(probas_)
bsd-3-clause
adamjmcgrath/glancydesign
django/contrib/auth/management/commands/changepassword.py
320
1527
from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import User import getpass class Command(BaseCommand): help = "Change a user's password for django.contrib.auth." requires_model_validation = False def _get_pass(self, prompt="Password: "): p = getpass.getpass(prompt=prompt) if not p: raise CommandError("aborted") return p def handle(self, *args, **options): if len(args) > 1: raise CommandError("need exactly one or zero arguments for username") if args: username, = args else: username = getpass.getuser() try: u = User.objects.get(username=username) except User.DoesNotExist: raise CommandError("user '%s' does not exist" % username) print "Changing password for user '%s'" % u.username MAX_TRIES = 3 count = 0 p1, p2 = 1, 2 # To make them initially mismatch. while p1 != p2 and count < MAX_TRIES: p1 = self._get_pass() p2 = self._get_pass("Password (again): ") if p1 != p2: print "Passwords do not match. Please try again." count = count + 1 if count == MAX_TRIES: raise CommandError("Aborting password change for user '%s' after %s attempts" % (username, count)) u.set_password(p1) u.save() return "Password changed successfully for user '%s'" % u.username
bsd-3-clause
anushbmx/kitsune
kitsune/kbadge/models.py
1
13303
# Pruned and modified version of django-badger/badger/models.py
# https://github.com/mozilla/django-badger/blob/master/badger/models.py

import re

from django.conf import settings
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse

from kitsune.kbadge.signals import badge_will_be_awarded, badge_was_awarded


IMG_MAX_SIZE = getattr(settings, "BADGER_IMG_MAX_SIZE", (256, 256))

MK_UPLOAD_TMPL = '%(base)s/%(h1)s/%(h2)s/%(hash)s_%(field_fn)s_%(now)s_%(rand)04d.%(ext)s'

DEFAULT_HTTP_PROTOCOL = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")


def _document_django_model(cls):
    """Adds meta fields to the docstring for better autodoccing"""
    fields = cls._meta.fields
    doc = cls.__doc__
    if not doc.endswith('\n\n'):
        doc = doc + '\n\n'
    for f in fields:
        doc = doc + '    :arg {0}:\n'.format(f.name)
    cls.__doc__ = doc
    return cls


# Taken from http://stackoverflow.com/a/4019144
def slugify(txt):
    """A custom version of slugify that retains non-ascii characters.

    The purpose of this function in the application is to make URLs more
    readable in a browser, so there are some added heuristics to retain as
    much of the title meaning as possible while excluding characters that are
    troublesome to read in URLs. For example, question marks will be seen in
    the browser URL as %3F and are thereful unreadable.

    Although non-ascii characters will also be hex-encoded in the raw URL,
    most browsers will display them as human-readable glyphs in the address
    bar -- those should be kept in the slug.
    """
    # Bug fix: these re.sub() calls previously passed re.UNICODE as the
    # fourth *positional* argument, which is ``count`` (max substitutions),
    # not ``flags``. They now pass it explicitly as flags=.
    # remove trailing whitespace
    txt = txt.strip()
    # remove spaces before and after dashes
    txt = re.sub(r'\s*-\s*', '-', txt, flags=re.UNICODE)
    # replace remaining spaces with dashes
    txt = re.sub(r'[\s/]', '-', txt, flags=re.UNICODE)
    # replace colons between numbers with dashes
    txt = re.sub(r'(\d):(\d)', r'\1-\2', txt, flags=re.UNICODE)
    # replace double quotes with single quotes
    txt = re.sub('"', "'", txt, flags=re.UNICODE)
    # remove some characters altogether
    txt = re.sub(r'[?,:!@#~`+=$%^&\\*()\[\]{}<>]', '', txt, flags=re.UNICODE)
    return txt


def get_permissions_for(self, user):
    """Mixin method to collect permissions for a model instance.

    Gathers every ``allows_*`` method on the instance and returns a dict
    mapping the suffix (e.g. ``'edit_by'``) to the result of calling the
    method with *user*.
    """
    pre = 'allows_'
    pre_len = len(pre)
    methods = (m for m in dir(self) if m.startswith(pre))
    perms = dict(
        (m[pre_len:], getattr(self, m)(user))
        for m in methods
    )
    return perms


class SearchManagerMixin(object):
    """Quick & dirty manager mixin for search"""

    # See: http://www.julienphalip.com/blog/2008/08/16/adding-search-django-site-snap/
    def _normalize_query(self, query_string,
                         findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
                         normspace=re.compile(r'\s{2,}').sub):
        """Splits the query string in invidual keywords, getting rid of
        unecessary spaces and grouping quoted words together.

        Example::

            foo._normalize_query('  some random  words "with   quotes  " and   spaces')
            ['some', 'random', 'words', 'with quotes', 'and', 'spaces']

        """
        return [normspace(' ', (t[0] or t[1]).strip())
                for t in findterms(query_string)]

    # See: http://www.julienphalip.com/blog/2008/08/16/adding-search-django-site-snap/
    def _get_query(self, query_string, search_fields):
        """Returns a query, that is a combination of Q objects. That
        combination aims to search keywords within a model by testing the
        given search fields.

        """
        query = None  # Query to search for every search term
        terms = self._normalize_query(query_string)
        for term in terms:
            or_query = None  # Query to search for a given term in each field
            for field_name in search_fields:
                q = Q(**{"%s__icontains" % field_name: term})
                if or_query is None:
                    or_query = q
                else:
                    or_query = or_query | q
            if query is None:
                query = or_query
            else:
                query = query & or_query
        return query

    def search(self, query_string, sort='title'):
        """Quick and dirty keyword search on submissions"""
        # TODO: Someday, replace this with something like Sphinx or another
        # real search engine
        strip_qs = query_string.strip()
        if not strip_qs:
            return self.all_sorted(sort).order_by('-modified')
        else:
            query = self._get_query(strip_qs, self.search_fields)
            return self.all_sorted(sort).filter(query).order_by('-modified')

    def all_sorted(self, sort=None):
        """Apply to .all() one of the sort orders supported for views"""
        queryset = self.all()
        if sort == 'title':
            return queryset.order_by('title')
        else:
            return queryset.order_by('-created')


class BadgerException(Exception):
    """General Badger model exception"""
    # NOTE: this base was previously *also* named ``BadgeException``, so the
    # subclass below shadowed it while inheriting from it. The exception
    # hierarchy callers catch (``BadgeException`` and its subclasses) is
    # unchanged by this rename.


class BadgeException(BadgerException):
    """Badge model exception"""


class BadgeAwardNotAllowedException(BadgeException):
    """Attempt to award a badge not allowed."""


class BadgeAlreadyAwardedException(BadgeException):
    """Attempt to award a unique badge twice."""


class BadgeDeferredAwardManagementNotAllowedException(BadgeException):
    """Attempt to manage deferred awards not allowed."""


class BadgeManager(models.Manager, SearchManagerMixin):
    """Manager for Badge model objects"""
    search_fields = ('title', 'slug', 'description', )

    def allows_add_by(self, user):
        """Can *user* create new badges?"""
        if user.is_anonymous():
            return False
        if getattr(settings, "BADGER_ALLOW_ADD_BY_ANYONE", False):
            return True
        if user.has_perm('badger.add_badge'):
            return True
        return False

    def allows_grant_by(self, user):
        """Can *user* grant deferred awards?"""
        if user.is_anonymous():
            return False
        if user.has_perm('badger.grant_deferredaward'):
            return True
        return False


@_document_django_model
class Badge(models.Model):
    """Representation of a badge"""
    objects = BadgeManager()

    title = models.CharField(max_length=255, blank=False, unique=True,
                             help_text=u'Short, descriptive title')
    slug = models.SlugField(blank=False, unique=True,
                            help_text=u'Very short name, for use in URLs and links')
    description = models.TextField(blank=True,
                                   help_text=u'Longer description of the badge and its criteria')
    image = models.ImageField(blank=True, null=True,
                              upload_to=settings.BADGE_IMAGE_PATH,
                              help_text=u'Must be square. Recommended 256x256.')
    # TODO: Rename? Eventually we'll want a globally-unique badge. That is,
    # one unique award for one person for the whole site.
    unique = models.BooleanField(default=True,
                                 help_text=('Should awards of this badge be limited to '
                                            'one-per-person?'))

    creator = models.ForeignKey(User, blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True, blank=False)
    modified = models.DateTimeField(auto_now=True, blank=False)

    class Meta:
        db_table = 'badger_badge'
        unique_together = ('title', 'slug')
        ordering = ['-modified', '-created']

    get_permissions_for = get_permissions_for

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('kbadge.badge_detail', args=(self.slug,))

    def get_upload_meta(self):
        return ("badge", self.slug)

    def save(self, **kwargs):
        """Save the badge, auto-generating the slug from the title if unset."""
        if not self.slug:
            self.slug = slugify(self.title)
        super(Badge, self).save(**kwargs)

    def delete(self, **kwargs):
        """Make sure deletes cascade to awards"""
        self.award_set.all().delete()
        super(Badge, self).delete(**kwargs)

    def allows_detail_by(self, user):
        # TODO: Need some logic here, someday.
        return True

    def allows_edit_by(self, user):
        if user.is_anonymous():
            return False
        if user.has_perm('badger.change_badge'):
            return True
        if user == self.creator:
            return True
        return False

    def allows_delete_by(self, user):
        if user.is_anonymous():
            return False
        if user.has_perm('badger.change_badge'):
            return True
        if user == self.creator:
            return True
        return False

    def allows_award_to(self, user):
        """Is award_to() allowed for this user?"""
        if user is None:
            return True
        if user.is_anonymous():
            return False
        if user.is_staff or user.is_superuser:
            return True
        if user == self.creator:
            return True

        # TODO: List of delegates for whom awarding is allowed

        return False

    def award_to(self, awardee=None, email=None, awarder=None,
                 description='', raise_already_awarded=False):
        """Award this badge to the awardee on the awarder's behalf.

        :arg awardee: User receiving the award (looked up by ``email`` if
            omitted).
        :arg email: email address used to find the awardee when not given.
        :arg awarder: User granting the award; defaults to the badge creator.
        :arg description: evidence/explanation stored on the Award.
        :arg raise_already_awarded: raise instead of returning the existing
            award when a unique badge was already awarded.
        """
        # If no awarder given, assume this is on the badge creator's behalf.
        if not awarder:
            awarder = self.creator

        if not self.allows_award_to(awarder):
            raise BadgeAwardNotAllowedException()

        # If we have an email, but no awardee, try looking up the user.
        if email and not awardee:
            qs = User.objects.filter(email=email)
            if qs:
                # Pick the most recently joined account for this email.
                awardee = qs.latest('date_joined')

        if self.unique and self.is_awarded_to(awardee):
            if raise_already_awarded:
                raise BadgeAlreadyAwardedException()
            else:
                return Award.objects.filter(user=awardee, badge=self)[0]

        return Award.objects.create(user=awardee, badge=self,
                                    creator=awarder,
                                    description=description)

    def is_awarded_to(self, user):
        """Has this badge been awarded to the user?"""
        return Award.objects.filter(user=user, badge=self).count() > 0


class AwardManager(models.Manager):
    # Default manager hides awards flagged as hidden; use ``admin_objects``
    # on Award for an unfiltered view.
    def get_query_set(self):
        return super(AwardManager, self).get_query_set().exclude(hidden=True)


@_document_django_model
class Award(models.Model):
    """Representation of a badge awarded to a user"""

    admin_objects = models.Manager()
    objects = AwardManager()

    description = models.TextField(blank=True,
                                   help_text='Explanation and evidence for the badge award')
    badge = models.ForeignKey(Badge)
    image = models.ImageField(blank=True, null=True,
                              upload_to=settings.BADGE_IMAGE_PATH)
    user = models.ForeignKey(User, related_name="award_user")
    creator = models.ForeignKey(User, related_name="award_creator",
                                blank=True, null=True)
    hidden = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True, blank=False)
    modified = models.DateTimeField(auto_now=True, blank=False)

    get_permissions_for = get_permissions_for

    class Meta:
        db_table = 'badger_award'
        ordering = ['-modified', '-created']

    def __unicode__(self):
        by = self.creator and (u' by %s' % self.creator) or u''
        return u'Award of %s to %s%s' % (self.badge, self.user, by)

    @models.permalink
    def get_absolute_url(self):
        return ('kbadge.award_detail', (self.badge.slug, self.pk))

    def get_upload_meta(self):
        u = self.user.username
        return ("award/%s/%s/%s" % (u[0], u[1], u), self.badge.slug)

    def allows_detail_by(self, user):
        # TODO: Need some logic here, someday.
        return True

    def allows_delete_by(self, user):
        if user.is_anonymous():
            return False
        if user == self.user:
            return True
        if user == self.creator:
            return True
        if user.has_perm('badger.change_award'):
            return True
        return False

    def save(self, *args, **kwargs):
        """Save the award, firing award signals on first save only."""
        # Signals and some bits of logic only happen on a new award.
        is_new = not self.pk

        if is_new:
            # Bail if this is an attempt to double-award a unique badge
            if self.badge.unique and self.badge.is_awarded_to(self.user):
                raise BadgeAlreadyAwardedException()

            # Only fire will-be-awarded signal on a new award.
            badge_will_be_awarded.send(sender=self.__class__, award=self)

        super(Award, self).save(*args, **kwargs)

        if is_new:
            # Only fire was-awarded signal on a new award.
            # TODO: we might not need this as there are no more notifications
            badge_was_awarded.send(sender=self.__class__, award=self)

    def delete(self):
        super(Award, self).delete()
bsd-3-clause
nitin-cherian/LifeLongLearning
Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/tornado/stack_context.py
26
13160
#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""`StackContext` allows applications to maintain threadlocal-like state
that follows execution as it moves to other execution contexts.

The motivating examples are to eliminate the need for explicit
``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
allow some additional context to be kept for logging.

This is slightly magic, but it's an extension of the idea that an
exception handler is a kind of stack-local state and when that stack
is suspended and resumed in a new context that state needs to be
preserved.  `StackContext` shifts the burden of restoring that state
from each call site (e.g.  wrapping each `.AsyncHTTPClient` callback
in ``async_callback``) to the mechanisms that transfer control from
one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
thread pools, etc).

Example usage::

    @contextlib.contextmanager
    def die_on_error():
        try:
            yield
        except Exception:
            logging.error("exception in asynchronous operation",exc_info=True)
            sys.exit(1)

    with StackContext(die_on_error):
        # Any exception thrown here *or in callback and its descendants*
        # will cause the process to exit instead of spinning endlessly
        # in the ioloop.
        http_client.fetch(url, callback)
    ioloop.start()

Most applications shouldn't have to work with `StackContext` directly.
Here are a few rules of thumb for when it's necessary:

* If you're writing an asynchronous library that doesn't rely on a
  stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
  (for example, if you're writing a thread pool), use
  `.stack_context.wrap()` before any asynchronous operations to capture the
  stack context from where the operation was started.

* If you're writing an asynchronous library that has some shared
  resources (such as a connection pool), create those shared resources
  within a ``with stack_context.NullContext():`` block.  This will prevent
  ``StackContexts`` from leaking from one request to another.

* If you want to write something like an exception handler that will
  persist across asynchronous calls, create a new `StackContext` (or
  `ExceptionStackContext`), and make your asynchronous calls in a ``with``
  block that references your `StackContext`.
"""

from __future__ import absolute_import, division, print_function

import sys
import threading

from tornado.util import raise_exc_info


class StackContextInconsistentError(Exception):
    # Raised when a ``with StackContext`` block is entered/exited out of
    # order, typically because of a ``yield`` inside the block.
    pass


class _State(threading.local):
    # Per-thread stack-context state. ``contexts`` is a 2-tuple of
    # (tuple of active StackContext-like objects, head of the context chain).
    def __init__(self):
        self.contexts = (tuple(), None)


# Module-level singleton holding the current thread's context chain.
_state = _State()


class StackContext(object):
    """Establishes the given context as a StackContext that will be transferred.

    Note that the parameter is a callable that returns a context
    manager, not the context itself.  That is, where for a
    non-transferable context manager you would say::

        with my_context():

    StackContext takes the function itself rather than its result::

        with StackContext(my_context):

    The result of ``with StackContext() as cb:`` is a deactivation
    callback.  Run this callback when the StackContext is no longer
    needed to ensure that it is not propagated any further (note that
    deactivating a context does not affect any instances of that
    context that are currently pending).  This is an advanced feature
    and not necessary in most applications.
    """
    def __init__(self, context_factory):
        # context_factory: zero-argument callable returning a context manager.
        self.context_factory = context_factory
        self.contexts = []
        self.active = True

    def _deactivate(self):
        # Marks this context so that future wrap()ped callbacks skip it.
        self.active = False

    # StackContext protocol: enter()/exit() are called by wrap() when the
    # captured chain is re-established around a callback.
    def enter(self):
        context = self.context_factory()
        self.contexts.append(context)
        context.__enter__()

    def exit(self, type, value, traceback):
        context = self.contexts.pop()
        context.__exit__(type, value, traceback)

    # Note that some of this code is duplicated in ExceptionStackContext
    # below.  ExceptionStackContext is more common and doesn't need
    # the full generality of this class.
    def __enter__(self):
        self.old_contexts = _state.contexts
        # Append self to both the flat tuple and the linked chain head.
        self.new_contexts = (self.old_contexts[0] + (self,), self)
        _state.contexts = self.new_contexts

        try:
            self.enter()
        except:
            _state.contexts = self.old_contexts
            raise

        return self._deactivate

    def __exit__(self, type, value, traceback):
        try:
            self.exit(type, value, traceback)
        finally:
            final_contexts = _state.contexts
            _state.contexts = self.old_contexts

            # Generator coroutines and with-statements with non-local
            # effects interact badly.  Check here for signs of
            # the stack getting out of sync.
            # Note that this check comes after restoring _state.context
            # so that if it fails things are left in a (relatively)
            # consistent state.
            if final_contexts is not self.new_contexts:
                raise StackContextInconsistentError(
                    'stack_context inconsistency (may be caused by yield '
                    'within a "with StackContext" block)')

            # Break up a reference to itself to allow for faster GC on CPython.
            self.new_contexts = None


class ExceptionStackContext(object):
    """Specialization of StackContext for exception handling.

    The supplied ``exception_handler`` function will be called in the
    event of an uncaught exception in this context.  The semantics are
    similar to a try/finally clause, and intended use cases are to log
    an error, close a socket, or similar cleanup actions.  The
    ``exc_info`` triple ``(type, value, traceback)`` will be passed to the
    exception_handler function.

    If the exception handler returns true, the exception will be
    consumed and will not be propagated to other exception handlers.
    """
    def __init__(self, exception_handler):
        # exception_handler: callable(type, value, traceback) -> bool-ish.
        self.exception_handler = exception_handler
        self.active = True

    def _deactivate(self):
        # Marks this context so that future wrap()ped callbacks skip it.
        self.active = False

    def exit(self, type, value, traceback):
        # Only invoked with a pending exception; the return value decides
        # whether the exception is considered handled.
        if type is not None:
            return self.exception_handler(type, value, traceback)

    def __enter__(self):
        self.old_contexts = _state.contexts
        # Unlike StackContext, only the chain head changes; the flat tuple
        # of enter/exit-style contexts is unchanged.
        self.new_contexts = (self.old_contexts[0], self)
        _state.contexts = self.new_contexts

        return self._deactivate

    def __exit__(self, type, value, traceback):
        try:
            if type is not None:
                return self.exception_handler(type, value, traceback)
        finally:
            final_contexts = _state.contexts
            _state.contexts = self.old_contexts

            if final_contexts is not self.new_contexts:
                raise StackContextInconsistentError(
                    'stack_context inconsistency (may be caused by yield '
                    'within a "with StackContext" block)')

            # Break up a reference to itself to allow for faster GC on CPython.
            self.new_contexts = None


class NullContext(object):
    """Resets the `StackContext`.

    Useful when creating a shared resource on demand (e.g. an
    `.AsyncHTTPClient`) where the stack that caused the creating is
    not relevant to future operations.
    """
    def __enter__(self):
        self.old_contexts = _state.contexts
        _state.contexts = (tuple(), None)

    def __exit__(self, type, value, traceback):
        _state.contexts = self.old_contexts


def _remove_deactivated(contexts):
    """Remove deactivated handlers from the chain"""
    # Clean ctx handlers
    stack_contexts = tuple([h for h in contexts[0] if h.active])

    # Find new head: skip deactivated entries at the front of the chain.
    head = contexts[1]
    while head is not None and not head.active:
        head = head.old_contexts[1]

    # Process chain: splice deactivated entries out of the linked list by
    # re-pointing each node's old_contexts at its nearest active ancestor.
    ctx = head
    while ctx is not None:
        parent = ctx.old_contexts[1]

        while parent is not None:
            if parent.active:
                break
            ctx.old_contexts = parent.old_contexts
            parent = parent.old_contexts[1]

        ctx = parent

    return (stack_contexts, head)


def wrap(fn):
    """Returns a callable object that will restore the current `StackContext`
    when executed.

    Use this whenever saving a callback to be executed later in a
    different execution context (either in a different thread or
    asynchronously in the same thread).
    """
    # Check if function is already wrapped
    if fn is None or hasattr(fn, '_wrapped'):
        return fn

    # Capture current stack head
    # TODO: Any other better way to store contexts and update them in wrapped function?
    cap_contexts = [_state.contexts]

    if not cap_contexts[0][0] and not cap_contexts[0][1]:
        # Fast path when there are no active contexts.
        def null_wrapper(*args, **kwargs):
            try:
                current_state = _state.contexts
                _state.contexts = cap_contexts[0]
                return fn(*args, **kwargs)
            finally:
                _state.contexts = current_state
        null_wrapper._wrapped = True
        return null_wrapper

    def wrapped(*args, **kwargs):
        ret = None
        try:
            # Capture old state
            current_state = _state.contexts

            # Remove deactivated items
            cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])

            # Force new state
            _state.contexts = contexts

            # Current exception
            exc = (None, None, None)
            top = None

            # Apply stack contexts
            last_ctx = 0
            stack = contexts[0]

            # Apply state
            for n in stack:
                try:
                    n.enter()
                    last_ctx += 1
                except:
                    # Exception happened. Record exception info and store top-most handler
                    exc = sys.exc_info()
                    top = n.old_contexts[1]

            # Execute callback if no exception happened while restoring state
            if top is None:
                try:
                    ret = fn(*args, **kwargs)
                except:
                    exc = sys.exc_info()
                    top = contexts[1]

            # If there was exception, try to handle it by going through the exception chain
            if top is not None:
                exc = _handle_exception(top, exc)
            else:
                # Otherwise take shorter path and run stack contexts in reverse order
                while last_ctx > 0:
                    last_ctx -= 1
                    c = stack[last_ctx]

                    try:
                        c.exit(*exc)
                    except:
                        exc = sys.exc_info()
                        top = c.old_contexts[1]
                        break
                else:
                    top = None

                # If if exception happened while unrolling, take longer exception handler path
                if top is not None:
                    exc = _handle_exception(top, exc)

            # If exception was not handled, raise it
            if exc != (None, None, None):
                raise_exc_info(exc)
        finally:
            _state.contexts = current_state
        return ret

    wrapped._wrapped = True
    return wrapped


def _handle_exception(tail, exc):
    # Walk the chain from ``tail`` toward the root, offering ``exc`` to each
    # handler; a handler returning true clears the exception, a handler that
    # itself raises replaces it.
    while tail is not None:
        try:
            if tail.exit(*exc):
                exc = (None, None, None)
        except:
            exc = sys.exc_info()

        tail = tail.old_contexts[1]

    return exc


def run_with_stack_context(context, func):
    """Run a coroutine ``func`` in the given `StackContext`.

    It is not safe to have a ``yield`` statement within a ``with
    StackContext`` block, so it is difficult to use stack context with
    `.gen.coroutine`.  This helper function runs the function in the
    correct context while keeping the ``yield`` and ``with`` statements
    syntactically separate.

    Example::

        @gen.coroutine
        def incorrect():
            with StackContext(ctx):
                # ERROR: this will raise StackContextInconsistentError
                yield other_coroutine()

        @gen.coroutine
        def correct():
            yield run_with_stack_context(StackContext(ctx), other_coroutine)

    .. versionadded:: 3.1
    """
    with context:
        return func()
mit
bors-ltd/c2json
c2json/views.py
1
3172
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals from os import path import zipfile from django.contrib import messages from django.http.response import HttpResponse from django.shortcuts import render from django.utils import timezone from django.utils.translation import ugettext_lazy as _ import openpyxl from openpyxl.writer.excel import save_virtual_workbook from . import forms, lib def converter(request): json_form = forms.JsonForm(prefix="json") xlsx_form = forms.XlsxForm(prefix="xlsx") now = timezone.now() if request.method == 'POST': if request.POST.get('format') == 'to_xlsx': json_form = forms.JsonForm(data=request.POST, files=request.FILES, prefix="json") if json_form.is_valid(): file = json_form.cleaned_data['file'] try: wb = lib.c2json_to_xlsx(file.read().decode('utf-8')) except ValueError: messages.error(request, _("Invalid JSON file.")) except AssertionError: messages.error(request, _(u"This JSON file is not in the expected format.")) else: output = save_virtual_workbook(wb) response = HttpResponse( content=output, content_type="application/json; charset=utf-8" ) # Filename basename, _extension = path.splitext(file.name) filename = basename + now.strftime("_%Y%m%d-%H%M%S.xlsx") # Force download response['Content-Disposition'] = "attachment; filename=%s" % filename return response elif request.POST.get('format') == "to_json": xlsx_form = forms.XlsxForm(data=request.POST, files=request.FILES or None, prefix="xlsx") if xlsx_form.is_valid(): file = xlsx_form.cleaned_data['file'] try: wb = openpyxl.load_workbook(file) output = lib.xlsx_to_c2json(wb) except zipfile.BadZipfile: messages.error(request, _(u"Invalid or truncated XLSX file.")) except AssertionError: messages.error(request, _(u"This XLSX file is not in the expected format.")) else: response = HttpResponse( content=output.encode('utf-8'), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ) # Filename basename, 
_extension = path.splitext(file.name) filename = basename + now.strftime("_%Y%m%d-%H%M%S.json") # Force download response['Content-Disposition'] = "attachment; filename=%s" % filename return response return render( request, "c2json.html", { 'json_form': json_form, 'xlsx_form': xlsx_form, } )
agpl-3.0
wanderknight/trading-with-python
cookbook/downloadVixFutures.py
77
1313
#------------------------------------------------------------------------------- # Name: download CBOE futures # Purpose: get VIX futures data from CBOE and save to user directory # # # Created: 15-10-2011 # Copyright: (c) Jev Kuznetsov 2011 # Licence: GPL v2 #------------------------------------------------------------------------------- #!/usr/bin/env python from urllib import urlretrieve import os m_codes = ['F','G','H','J','K','M','N','Q','U','V','X','Z'] #month codes of the futures dataDir = os.getenv("USERPROFILE")+'\\twpData\\vixFutures' # data directory def saveVixFutureData(year,month, path): ''' Get future from CBOE and save to file ''' fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:]) urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName) try: urlretrieve(urlStr,path+'\\'+fName) except Exception as e: print e if __name__ == '__main__': if not os.path.exists(dataDir): os.makedirs(dataDir) for year in range(2004,2012): for month in range(12): print 'Getting data for {0}/{1}'.format(year,month) saveVixFutureData(year,month,dataDir) print 'Data was saved to {0}'.format(dataDir)
bsd-3-clause
tushar7795/MicroBlog
flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/gb2312prober.py
2994
1681
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import GB2312DistributionAnalysis from .mbcssm import GB2312SMModel class GB2312Prober(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(GB2312SMModel) self._mDistributionAnalyzer = GB2312DistributionAnalysis() self.reset() def get_charset_name(self): return "GB2312"
bsd-3-clause
yoer/hue
desktop/core/ext-py/Django-1.6.10/tests/m2m_through_regress/tests.py
51
9138
from __future__ import absolute_import

from django.core import management
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils.six import StringIO

from .models import (Person, Group, Membership, UserMembership, Car, Driver,
    CarDriver)


class M2MThroughTestCase(TestCase):
    # Regression tests for many-to-many relations that go through an explicit
    # intermediate ("through") model.

    def test_everything(self):
        bob = Person.objects.create(name="Bob")
        jim = Person.objects.create(name="Jim")

        rock = Group.objects.create(name="Rock")
        roll = Group.objects.create(name="Roll")

        frank = User.objects.create_user("frank", "frank@example.com", "password")
        jane = User.objects.create_user("jane", "jane@example.com", "password")

        Membership.objects.create(person=bob, group=rock)
        Membership.objects.create(person=bob, group=roll)
        Membership.objects.create(person=jim, group=rock)

        self.assertQuerysetEqual(
            bob.group_set.all(), [
                "<Group: Rock>",
                "<Group: Roll>",
            ],
            ordered=False
        )

        self.assertQuerysetEqual(
            roll.members.all(), [
                "<Person: Bob>",
            ]
        )

        # Direct assignment and .create() are disallowed on m2m relations
        # that use a through model; membership rows must be created explicitly.
        self.assertRaises(AttributeError, setattr, bob, "group_set", [])
        self.assertRaises(AttributeError, setattr, roll, "members", [])

        self.assertRaises(AttributeError, rock.members.create, name="Anne")
        self.assertRaises(AttributeError, bob.group_set.create, name="Funk")

        UserMembership.objects.create(user=frank, group=rock)
        UserMembership.objects.create(user=frank, group=roll)
        UserMembership.objects.create(user=jane, group=rock)

        self.assertQuerysetEqual(
            frank.group_set.all(), [
                "<Group: Rock>",
                "<Group: Roll>",
            ],
            ordered=False
        )

        self.assertQuerysetEqual(
            roll.user_members.all(), [
                "<User: frank>",
            ]
        )

    def test_serialization(self):
        "m2m-through models aren't serialized as m2m fields. Refs #8134"
        p = Person.objects.create(name="Bob")
        g = Group.objects.create(name="Roll")
        m = Membership.objects.create(person=p, group=g)

        pks = {"p_pk": p.pk, "g_pk": g.pk, "m_pk": m.pk}

        out = StringIO()
        management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
        self.assertJSONEqual(out.getvalue().strip(), """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]""" % pks)

        out = StringIO()
        management.call_command("dumpdata", "m2m_through_regress", format="xml",
            indent=2, stdout=out)
        self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
  <object pk="%(m_pk)s" model="m2m_through_regress.membership">
    <field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
    <field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
    <field type="IntegerField" name="price">100</field>
  </object>
  <object pk="%(p_pk)s" model="m2m_through_regress.person">
    <field type="CharField" name="name">Bob</field>
  </object>
  <object pk="%(g_pk)s" model="m2m_through_regress.group">
    <field type="CharField" name="name">Roll</field>
  </object>
</django-objects>
        """.strip() % pks)

    def test_join_trimming(self):
        "Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254"
        bob = Person.objects.create(name="Bob")
        jim = Person.objects.create(name="Jim")

        rock = Group.objects.create(name="Rock")
        roll = Group.objects.create(name="Roll")

        Membership.objects.create(person=bob, group=rock)
        Membership.objects.create(person=jim, group=rock, price=50)
        Membership.objects.create(person=bob, group=roll, price=50)

        self.assertQuerysetEqual(
            rock.members.filter(membership__price=50), [
                "<Person: Jim>",
            ]
        )

        self.assertQuerysetEqual(
            bob.group_set.filter(membership__price=50), [
                "<Group: Roll>",
            ]
        )


class ToFieldThroughTests(TestCase):
    # Tests for through-model m2m relations whose foreign keys use to_field
    # (i.e. join on a non-pk column).

    def setUp(self):
        self.car = Car.objects.create(make="Toyota")
        self.driver = Driver.objects.create(name="Ryan Briscoe")
        CarDriver.objects.create(car=self.car, driver=self.driver)
        # We are testing if wrong objects get deleted due to using wrong
        # field value in m2m queries. So, it is essential that the pk
        # numberings do not match.
        # Create one intentionally unused driver to mix up the autonumbering
        self.unused_driver = Driver.objects.create(name="Barney Gumble")
        # And two intentionally unused cars.
        self.unused_car1 = Car.objects.create(make="Trabant")
        self.unused_car2 = Car.objects.create(make="Wartburg")

    def test_to_field(self):
        self.assertQuerysetEqual(
            self.car.drivers.all(),
            ["<Driver: Ryan Briscoe>"]
        )

    def test_to_field_reverse(self):
        self.assertQuerysetEqual(
            self.driver.car_set.all(),
            ["<Car: Toyota>"]
        )

    def test_to_field_clear_reverse(self):
        self.driver.car_set.clear()
        self.assertQuerysetEqual(
            self.driver.car_set.all(), [])

    def test_to_field_clear(self):
        self.car.drivers.clear()
        self.assertQuerysetEqual(
            self.car.drivers.all(), [])

    # Low level tests for _add_items and _remove_items. We test these methods
    # because .add/.remove aren't available for m2m fields with through, but
    # through is the only way to set to_field currently. We do want to make
    # sure these methods are ready if the ability to use .add or .remove with
    # to_field relations is added some day.
    def test_add(self):
        self.assertQuerysetEqual(
            self.car.drivers.all(),
            ["<Driver: Ryan Briscoe>"]
        )
        # Yikes - barney is going to drive...
        self.car.drivers._add_items('car', 'driver', self.unused_driver)
        self.assertQuerysetEqual(
            self.car.drivers.all(),
            ["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
        )

    def test_add_null(self):
        # A null to_field value on either side must be rejected, not matched.
        nullcar = Car.objects.create(make=None)
        with self.assertRaises(ValueError):
            nullcar.drivers._add_items('car', 'driver', self.unused_driver)

    def test_add_related_null(self):
        nulldriver = Driver.objects.create(name=None)
        with self.assertRaises(ValueError):
            self.car.drivers._add_items('car', 'driver', nulldriver)

    def test_add_reverse(self):
        car2 = Car.objects.create(make="Honda")
        self.assertQuerysetEqual(
            self.driver.car_set.all(),
            ["<Car: Toyota>"]
        )
        self.driver.car_set._add_items('driver', 'car', car2)
        self.assertQuerysetEqual(
            self.driver.car_set.all(),
            ["<Car: Toyota>", "<Car: Honda>"],
            ordered=False
        )

    def test_add_null_reverse(self):
        nullcar = Car.objects.create(make=None)
        with self.assertRaises(ValueError):
            self.driver.car_set._add_items('driver', 'car', nullcar)

    def test_add_null_reverse_related(self):
        nulldriver = Driver.objects.create(name=None)
        with self.assertRaises(ValueError):
            nulldriver.car_set._add_items('driver', 'car', self.car)

    def test_remove(self):
        self.assertQuerysetEqual(
            self.car.drivers.all(),
            ["<Driver: Ryan Briscoe>"]
        )
        self.car.drivers._remove_items('car', 'driver', self.driver)
        self.assertQuerysetEqual(
            self.car.drivers.all(), [])

    def test_remove_reverse(self):
        self.assertQuerysetEqual(
            self.driver.car_set.all(),
            ["<Car: Toyota>"]
        )
        self.driver.car_set._remove_items('driver', 'car', self.car)
        self.assertQuerysetEqual(
            self.driver.car_set.all(), [])


class ThroughLoadDataTestCase(TestCase):
    fixtures = ["m2m_through"]

    def test_sequence_creation(self):
        "Check that sequences on an m2m_through are created for the through model, not a phantom auto-generated m2m table. Refs #11107"
        out = StringIO()
        management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
        self.assertJSONEqual(out.getvalue().strip(), """[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user": 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, "model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]""")
apache-2.0
lepistone/account-financial-reporting
__unported__/account_financial_report_webkit_xls/report/__init__.py
28
1165
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # # Copyright (c) 2013 Noviat nv/sa (www.noviat.com). All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import general_ledger_xls from . import trial_balance_xls from . import partners_balance_xls from . import partner_ledger_xls from . import open_invoices_xls
agpl-3.0
jacebrowning/gdm
gitman/common.py
2
5221
"""Common exceptions, classes, and functions.""" import argparse import logging import os import sys import time import log from . import settings class WideHelpFormatter(argparse.HelpFormatter): """Command-line help text formatter with wider help text.""" def __init__(self, *args, **kwargs): kwargs['max_help_position'] = 40 super().__init__(*args, **kwargs) class WarningFormatter(logging.Formatter): """Logging formatter that displays verbose formatting for WARNING+.""" def __init__(self, default_format, verbose_format, *args, **kwargs): super().__init__(*args, **kwargs) self.default_format = default_format self.verbose_format = verbose_format def format(self, record): # pylint: disable=protected-access if record.levelno > log.INFO: self._style._fmt = self.verbose_format else: self._style._fmt = self.default_format return super().format(record) def positive_int(value): value = int(value) if value < 1: raise TypeError return value class _Config: """Share logging options.""" MAX_VERBOSITY = 4 verbosity = 0 indent_level = 0 def configure_logging(count=0): """Configure logging using the provided verbosity count.""" if count == -1: level = settings.QUIET_LOGGING_LEVEL default_format = settings.DEFAULT_LOGGING_FORMAT verbose_format = settings.LEVELED_LOGGING_FORMAT elif count == 0: level = settings.DEFAULT_LOGGING_LEVEL default_format = settings.DEFAULT_LOGGING_FORMAT verbose_format = settings.LEVELED_LOGGING_FORMAT elif count == 1: level = settings.VERBOSE_LOGGING_LEVEL default_format = settings.VERBOSE_LOGGING_FORMAT verbose_format = settings.VERBOSE_LOGGING_FORMAT elif count == 2: level = settings.VERBOSE2_LOGGING_LEVEL default_format = settings.VERBOSE_LOGGING_FORMAT verbose_format = settings.VERBOSE_LOGGING_FORMAT elif count == 3: level = settings.VERBOSE2_LOGGING_LEVEL default_format = settings.VERBOSE2_LOGGING_FORMAT verbose_format = settings.VERBOSE2_LOGGING_FORMAT else: level = settings.VERBOSE2_LOGGING_LEVEL - 1 default_format = settings.VERBOSE2_LOGGING_FORMAT 
verbose_format = settings.VERBOSE2_LOGGING_FORMAT # Set a custom formatter log.init(level=level) log.silence('datafiles', allow_warning=True) logging.captureWarnings(True) formatter = WarningFormatter( default_format, verbose_format, datefmt=settings.LOGGING_DATEFMT ) logging.root.handlers[0].setFormatter(formatter) # Warn about excessive verbosity if count > _Config.MAX_VERBOSITY: msg = "Maximum verbosity level is {}".format(_Config.MAX_VERBOSITY) log.warning(msg) _Config.verbosity = _Config.MAX_VERBOSITY else: _Config.verbosity = count def indent(): """Increase the indent of future output lines.""" _Config.indent_level += 1 def dedent(level=None): """Decrease (or reset) the indent of future output lines.""" if level is None: _Config.indent_level = max(0, _Config.indent_level - 1) else: _Config.indent_level = level def newline(): """Write a new line to standard output.""" show("") def show( *messages, file=sys.stdout, log=log, # pylint: disable=redefined-outer-name **kwargs, ): """Write to standard output or error if enabled.""" if any(messages): assert 'color' in kwargs, "Color is required" color = kwargs.pop('color', None) if color == 'message': time.sleep(settings.RECORDING_DELAY) for message in messages: if _Config.verbosity == 0: text = ' ' * 2 * _Config.indent_level + style(message, color) print(text, file=file) elif _Config.verbosity >= 1: message = message.strip() if message and log: if color == 'error': log.error(message) else: log.info(message) if color == 'message': time.sleep(settings.RECORDING_DELAY) BOLD = '\033[1m' RED = '\033[31m' GREEN = '\033[32m' YELLOW = '\033[33m' BLUE = '\033[34m' MAGENTA = '\033[35m' CYAN = '\033[36m' WHITE = '\033[37m' RESET = '\033[0m' COLORS = dict( path='', git_rev=BOLD + BLUE, git_dirty=BOLD + MAGENTA, git_changes=YELLOW, shell=BOLD + GREEN, shell_info=MAGENTA, shell_output=CYAN, shell_error=YELLOW, message=BOLD + WHITE, success=BOLD + GREEN, error=BOLD + RED, ) def style(msg, name=None, *, _color_support=False): is_tty 
= hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() supports_ansi = sys.platform != 'win32' or 'ANSICON' in os.environ if not (is_tty and supports_ansi) and not _color_support: return msg if name == 'shell': return msg.replace("$ ", COLORS[name] + "$ " + RESET) color = COLORS.get(name) if color: return color + msg + RESET if msg: assert color is not None, "Unknown style name requested: {!r}".format(name) return msg
mit
Arable/old-www-do-not-use
lib/python2.7/site-packages/pip/vendor/distlib/scripts.py
79
9291
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import logging
import os
import re
import struct
import sys

from . import DistlibException
from .compat import sysconfig, fsencode, detect_encoding
from .resources import finder
from .util import FileOperator, get_export_entry, convert_path, get_executable

logger = logging.getLogger(__name__)

# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')

SCRIPT_TEMPLATE = '''%(shebang)s
if __name__ == '__main__':
    import sys, re

    def _resolve(module, func):
        __import__(module)
        mod = sys.modules[module]
        parts = func.split('.')
        result = getattr(mod, parts.pop(0))
        for p in parts:
            result = getattr(result, p)
        return result

    try:
        sys.argv[0] = re.sub('-script.pyw?$', '', sys.argv[0])

        func = _resolve('%(module)s', '%(func)s')
        rc = func() # None interpreted as 0
    except Exception as e:  # only supporting Python >= 2.6
        sys.stderr.write('%%s\\n' %% e)
        rc = 1
    sys.exit(rc)
'''


class ScriptMaker(object):
    """
    A class to copy or create scripts from source scripts or callable
    specifications.
    """
    script_template = SCRIPT_TEMPLATE

    executable = None  # for shebangs

    def __init__(self, source_dir, target_dir, add_launchers=True,
                 dry_run=False, fileop=None):
        self.source_dir = source_dir
        self.target_dir = target_dir
        self.add_launchers = add_launchers
        self.force = False
        self.set_mode = False
        self._fileop = fileop or FileOperator(dry_run)

    def _get_alternate_executable(self, executable, flags):
        """Return the 'pythonw' variant of `executable` for GUI scripts on
        Windows; otherwise return `executable` unchanged."""
        if 'gui' in flags and os.name == 'nt':
            dn, fn = os.path.split(executable)
            fn = fn.replace('python', 'pythonw')
            executable = os.path.join(dn, fn)
        return executable

    def _get_shebang(self, encoding, post_interp=b'', flags=None):
        """Build the shebang line (as bytes) for a generated script.

        Raises ValueError if the interpreter path cannot be represented in
        UTF-8 or in the script's own encoding.
        """
        if self.executable:
            executable = self.executable
        elif not sysconfig.is_python_build():
            executable = get_executable()
        elif hasattr(sys, 'base_prefix') and sys.prefix != sys.base_prefix:
            # A venv of an installed Python: use its scripts directory.
            executable = os.path.join(
                sysconfig.get_path('scripts'),
                'python%s' % sysconfig.get_config_var('EXE'))
        else:
            # Running from a source build: point at the built interpreter.
            executable = os.path.join(
                sysconfig.get_config_var('BINDIR'),
                'python%s%s' % (sysconfig.get_config_var('VERSION'),
                                sysconfig.get_config_var('EXE')))
        if flags:
            executable = self._get_alternate_executable(executable, flags)

        executable = fsencode(executable)
        shebang = b'#!' + executable + post_interp + b'\n'
        # Python parser starts to read a script using UTF-8 until
        # it gets a #coding:xxx cookie. The shebang has to be the
        # first line of a file, the #coding:xxx cookie cannot be
        # written before. So the shebang has to be decodable from
        # UTF-8.
        try:
            shebang.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError(
                'The shebang (%r) is not decodable from utf-8' % shebang)
        # If the script is encoded to a custom encoding (use a
        # #coding:xxx cookie), the shebang has to be decodable from
        # the script encoding too.
        if encoding != 'utf-8':
            try:
                shebang.decode(encoding)
            except UnicodeDecodeError:
                raise ValueError(
                    'The shebang (%r) is not decodable '
                    'from the script encoding (%r)' % (shebang, encoding))
        return shebang

    def _get_script_text(self, shebang, entry):
        """Render the wrapper-script source for an export entry."""
        return self.script_template % dict(shebang=shebang,
                                           module=entry.prefix,
                                           func=entry.suffix)

    def _make_script(self, entry, filenames):
        """Generate a wrapper script for `entry`, appending the written
        paths to `filenames`.  On Windows an .exe launcher is added."""
        shebang = self._get_shebang('utf-8', flags=entry.flags).decode('utf-8')
        script = self._get_script_text(shebang, entry)
        outname = os.path.join(self.target_dir, entry.name)
        use_launcher = self.add_launchers and os.name == 'nt'
        if use_launcher:
            exename = '%s.exe' % outname
            if 'gui' in entry.flags:
                ext = 'pyw'
                launcher = self._get_launcher('w')
            else:
                ext = 'py'
                launcher = self._get_launcher('t')
            outname = '%s-script.%s' % (outname, ext)
        self._fileop.write_text_file(outname, script, 'utf-8')
        if self.set_mode:
            self._fileop.set_executable_mode([outname])
        filenames.append(outname)
        if use_launcher:
            self._fileop.write_binary_file(exename, launcher)
            filenames.append(exename)

    def _copy_script(self, script, filenames):
        """Copy an existing script, rewriting its shebang when the first
        line invokes Python, and appending written paths to `filenames`."""
        adjust = False
        script = convert_path(script)
        outname = os.path.join(self.target_dir, os.path.basename(script))
        filenames.append(outname)
        script = os.path.join(self.source_dir, script)
        if not self.force and not self._fileop.newer(script, outname):
            logger.debug('not copying %s (up-to-date)', script)
            return

        # Always open the file, but ignore failures in dry-run mode --
        # that way, we'll get accurate feedback if we can read the
        # script.
        try:
            f = open(script, 'rb')
        except IOError:
            if not self.dry_run:
                raise
            f = None
        else:
            encoding, lines = detect_encoding(f.readline)
            f.seek(0)
            first_line = f.readline()
            if not first_line:
                # BUG FIX: this used to call self.get_command_name(), which
                # does not exist on ScriptMaker (AttributeError), and also
                # leaked the open file handle on this early return.
                logger.warning('%s is an empty file (skipping)', script)
                f.close()
                return

            match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
            if match:
                adjust = True
                post_interp = match.group(1) or b''

        if not adjust:
            if f:
                f.close()
            self._fileop.copy_file(script, outname)
        else:
            logger.info('copying and adjusting %s -> %s', script,
                        self.target_dir)
            if not self._fileop.dry_run:
                shebang = self._get_shebang(encoding, post_interp)
                use_launcher = self.add_launchers and os.name == 'nt'
                if use_launcher:
                    n, e = os.path.splitext(outname)
                    exename = n + '.exe'
                    if b'pythonw' in first_line:
                        launcher = self._get_launcher('w')
                        suffix = '-script.pyw'
                    else:
                        launcher = self._get_launcher('t')
                        suffix = '-script.py'
                    outname = n + suffix
                    filenames[-1] = outname
                self._fileop.write_binary_file(outname, shebang + f.read())
                if use_launcher:
                    self._fileop.write_binary_file(exename, launcher)
                    filenames.append(exename)
            if f:
                f.close()
            if self.set_mode:
                self._fileop.set_executable_mode([outname])

    @property
    def dry_run(self):
        return self._fileop.dry_run

    @dry_run.setter
    def dry_run(self, value):
        self._fileop.dry_run = value

    if os.name == 'nt':
        # Executable launcher support.
        # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/

        def _get_launcher(self, kind):
            """Return the launcher executable bytes for `kind` ('t' for
            console, 'w' for GUI), matching the interpreter's bitness."""
            if struct.calcsize('P') == 8:   # 64-bit
                bits = '64'
            else:
                bits = '32'
            name = '%s%s.exe' % (kind, bits)
            result = finder('distlib').find(name).bytes
            return result

    # Public API follows

    def make(self, specification):
        """
        Make a script.

        :param specification: The specification, which is either a valid
                              export entry specification (to make a script
                              from a callable) or a filename (to make a
                              script by copying from a source location).
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        entry = get_export_entry(specification)
        if entry is None:
            self._copy_script(specification, filenames)
        else:
            self._make_script(entry, filenames)
        return filenames

    def make_multiple(self, specifications):
        """
        Take a list of specifications and make scripts from them.

        :param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        for specification in specifications:
            filenames.extend(self.make(specification))
        return filenames
mit
AutorestCI/azure-sdk-for-python
azure-cognitiveservices-search-imagesearch/azure/cognitiveservices/search/imagesearch/models/trending_images.py
2
1999
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .response import Response class TrendingImages(Response): """The top-level object that the response includes when a trending images request succeeds. Variables are only populated by the server, and will be ignored when sending a request. :param _type: Constant filled by server. :type _type: str :ivar id: A String identifier. :vartype id: str :ivar read_link: The URL that returns this resource. :vartype read_link: str :ivar web_search_url: The URL To Bing's search result for this item. :vartype web_search_url: str :param categories: A list that identifies categories of images and a list of trending images in that category. :type categories: list[~azure.cognitiveservices.search.imagesearch.models.TrendingImagesCategory] """ _validation = { '_type': {'required': True}, 'id': {'readonly': True}, 'read_link': {'readonly': True}, 'web_search_url': {'readonly': True}, 'categories': {'required': True}, } _attribute_map = { '_type': {'key': '_type', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'read_link': {'key': 'readLink', 'type': 'str'}, 'web_search_url': {'key': 'webSearchUrl', 'type': 'str'}, 'categories': {'key': 'categories', 'type': '[TrendingImagesCategory]'}, } def __init__(self, categories): super(TrendingImages, self).__init__() self.categories = categories self._type = 'TrendingImages'
mit
h3biomed/ansible
test/units/modules/source_control/test_gitlab_deploy_key.py
6
4130
# -*- coding: utf-8 -*-

# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import

import pytest

from ansible.modules.source_control.gitlab_deploy_key import GitLabDeployKey


def _dummy(x):
    """Dummy function.  Only used as a placeholder for toplevel definitions
    when the test is going to be skipped anyway"""
    return x


pytestmark = []
try:
    from .gitlab import (GitlabModuleTestCase,
                         python_version_match_requirement,
                         resp_get_project, resp_find_project_deploy_key,
                         resp_create_project_deploy_key,
                         resp_delete_project_deploy_key)

    # Gitlab module requirements
    if python_version_match_requirement():
        from gitlab.v4.objects import ProjectKey
except ImportError:
    pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
    # Need to set these to something so that we don't fail when parsing
    GitlabModuleTestCase = object
    resp_get_project = _dummy
    resp_find_project_deploy_key = _dummy
    resp_create_project_deploy_key = _dummy
    resp_delete_project_deploy_key = _dummy

# Unit tests requirements
try:
    from httmock import with_httmock  # noqa
except ImportError:
    pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
    with_httmock = _dummy


class TestGitlabDeployKey(GitlabModuleTestCase):
    def setUp(self):
        super(TestGitlabDeployKey, self).setUp()

        # Object under test, bound to the mocked gitlab instance.
        self.moduleUtil = GitLabDeployKey(module=self.mock_module,
                                          gitlab_instance=self.gitlab_instance)

    @with_httmock(resp_get_project)
    @with_httmock(resp_find_project_deploy_key)
    def test_deploy_key_exist(self):
        """Only the key titled 'Public key' exists on the mocked project."""
        project = self.gitlab_instance.projects.get(1)

        found = self.moduleUtil.existsDeployKey(project, "Public key")
        self.assertEqual(found, True)

        found = self.moduleUtil.existsDeployKey(project, "Private key")
        self.assertEqual(found, False)

    @with_httmock(resp_get_project)
    @with_httmock(resp_create_project_deploy_key)
    def test_create_deploy_key(self):
        """Creating a key returns a ProjectKey with the requested title."""
        project = self.gitlab_instance.projects.get(1)
        deploy_key = self.moduleUtil.createDeployKey(project, {"title": "Public key",
                                                     "key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM"
                                                            "4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc"
                                                            "KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfD"
                                                            "zpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="})

        self.assertEqual(type(deploy_key), ProjectKey)
        self.assertEqual(deploy_key.title, "Public key")

    @with_httmock(resp_get_project)
    @with_httmock(resp_find_project_deploy_key)
    @with_httmock(resp_create_project_deploy_key)
    def test_update_deploy_key(self):
        """Updating reports changed=True only when a field actually changes."""
        project = self.gitlab_instance.projects.get(1)
        existing_key = self.moduleUtil.findDeployKey(project, "Public key")

        changed, updated_key = self.moduleUtil.updateDeployKey(existing_key, {"title": "Private key"})
        self.assertEqual(changed, True)
        self.assertEqual(type(updated_key), ProjectKey)
        self.assertEqual(updated_key.title, "Private key")

        # A second identical update is a no-op.
        changed, updated_key = self.moduleUtil.updateDeployKey(existing_key, {"title": "Private key"})
        self.assertEqual(changed, False)
        self.assertEqual(updated_key.title, "Private key")

    @with_httmock(resp_get_project)
    @with_httmock(resp_find_project_deploy_key)
    @with_httmock(resp_delete_project_deploy_key)
    def test_delete_deploy_key(self):
        """Deleting the previously-found key succeeds (returns None)."""
        project = self.gitlab_instance.projects.get(1)

        self.moduleUtil.existsDeployKey(project, "Public key")

        result = self.moduleUtil.deleteDeployKey()
        self.assertEqual(result, None)
gpl-3.0
benedictpaten/cactus
src/cactus/progressive/multiCactusProject.py
1
5516
#!/usr/bin/env python

#Copyright (C) 2011 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt

"""Basic interface to the multi cactus project xml file.
"""

import xml.etree.ElementTree as ET
from xml.dom import minidom

from cactus.progressive.multiCactusTree import MultiCactusTree
from cactus.shared.experimentWrapper import ExperimentWrapper
from sonLib.nxnewick import NXNewick
from toil.lib.bioio import logger


class MultiCactusProject:
    """In-memory representation of the multi-cactus project XML file."""

    def __init__(self):
        self.mcTree = None               # MultiCactusTree for the project
        self.expMap = dict()             # event name -> experiment XML path
        self.expIDMap = None             # event name -> file-store ID
        self.inputSequences = []         # input sequence paths, in file order
        self.inputSequenceIDs = None     # file-store IDs, parallel to leaves
        self.outputSequenceIDMap = None  # event name -> output sequence ID
        self.configID = None             # file-store ID of the config XML

    def readXML(self, path):
        """Populate this project from the XML file at `path`."""
        xmlRoot = ET.parse(path).getroot()
        treeElem = xmlRoot.find("tree")
        self.mcTree = MultiCactusTree(NXNewick().parseString(
            treeElem.text, addImpliedRoots=False))
        self.expMap = dict()
        self.expIDMap = dict()
        for cactusPathElem in xmlRoot.findall("cactus"):
            name = cactusPathElem.attrib["name"]
            self.expMap[name] = cactusPathElem.attrib["experiment_path"]
            if "experiment_id" in cactusPathElem.attrib:
                self.expIDMap[name] = cactusPathElem.attrib["experiment_id"]
        self.inputSequences = xmlRoot.attrib["inputSequences"].split()
        if "inputSequenceIDs" in xmlRoot.attrib:
            self.inputSequenceIDs = xmlRoot.attrib["inputSequenceIDs"].split()
        if "outputSequenceIDs" in xmlRoot.attrib:
            self.outputSequenceIDMap = dict(
                zip(xmlRoot.attrib["outputSequenceIDs"].split(),
                    xmlRoot.attrib["outputSequenceNames"].split()))
        logger.info("xmlRoot = %s" % ET.tostring(xmlRoot))
        if "configID" in xmlRoot.attrib:
            self.configID = xmlRoot.attrib["configID"]

        self.mcTree.assignSubtreeRootNames(self.expMap)

    def writeXML(self, path):
        """Serialize this project to `path` as pretty-printed XML."""
        xmlRoot = ET.Element("multi_cactus")
        treeElem = ET.Element("tree")
        treeElem.text = NXNewick().writeString(self.mcTree)
        xmlRoot.append(treeElem)
        for name, expPath in self.expMap.items():
            cactusPathElem = ET.Element("cactus")
            cactusPathElem.attrib["name"] = name
            cactusPathElem.attrib["experiment_path"] = expPath
            if self.expIDMap:
                cactusPathElem.attrib["experiment_id"] = self.expIDMap[name]
            xmlRoot.append(cactusPathElem)
        #We keep track of all the input sequences at the top level
        xmlRoot.attrib["inputSequences"] = " ".join(self.inputSequences)
        if self.inputSequenceIDs:
            xmlRoot.attrib["inputSequenceIDs"] = " ".join(self.inputSequenceIDs)
        if self.outputSequenceIDMap:
            xmlRoot.attrib["outputSequenceIDs"] = \
                " ".join(self.outputSequenceIDMap.values())
            xmlRoot.attrib["outputSequenceNames"] = \
                " ".join(self.outputSequenceIDMap.keys())
        if self.configID:
            xmlRoot.attrib["configID"] = self.configID

        xmlString = minidom.parseString(ET.tostring(xmlRoot)).toprettyxml()
        # Use a context manager so the file is closed even on write errors.
        with open(path, "w") as xmlFile:
            xmlFile.write(xmlString)

    def syncToFileStore(self, toil):
        """Import every experiment file (and its config/constraints) into
        the Toil file store, recording the resulting IDs in expIDMap."""
        self.expIDMap = dict()
        for name, expPath in self.expMap.items():
            expWrapper = ExperimentWrapper(ET.parse(expPath).getroot())
            expWrapper.setConfigID(toil.importFile("file://" + expWrapper.getConfig()))
            if expWrapper.getConstraintsFilePath():
                expWrapper.setConstraintsID(toil.importFile("file://" + expWrapper.getConstraintsFilePath()))
            expWrapper.writeXML(expPath)
            self.expIDMap[name] = toil.importFile("file://" + expPath)

    def _leafEventNames(self):
        """Yield the leaf event names of the tree in post-order (the order
        in which the parallel ID lists are stored)."""
        for node in self.mcTree.postOrderTraversal():
            if self.mcTree.isLeaf(node) is True:
                yield self.mcTree.getName(node)

    def getInputSequenceIDMap(self):
        """Return a map between event names and sequence IDs.
        """
        names = list(self._leafEventNames())
        assert len(names) == len(self.inputSequenceIDs)
        return dict(zip(names, self.inputSequenceIDs))

    def getInputSequenceIDs(self):
        """Get the set of input sequences for the multicactus tree
        """
        return self.inputSequenceIDs

    def getInputSequencePaths(self):
        return self.inputSequences

    def setOutputSequenceIDs(self, outputSequenceIDs):
        """Record output sequence IDs, keyed by leaf event name."""
        names = list(self._leafEventNames())
        assert len(names) == len(outputSequenceIDs)
        self.outputSequenceIDMap = dict(zip(names, outputSequenceIDs))

    def getOutputSequenceIDMap(self):
        return self.outputSequenceIDMap

    def getConfigPath(self):
        """Return the config path recorded in any one experiment file.

        BUG FIX: ``dict.values()[0]`` fails on Python 3 (dict views are not
        indexable); use ``next(iter(...))`` instead.
        """
        expPath = next(iter(self.expMap.values()))
        return ExperimentWrapper(ET.parse(expPath).getroot()).getConfigPath()

    def setConfigID(self, configID):
        self.configID = configID

    def getConfigID(self):
        return self.configID

    def setInputSequenceIDs(self, inputSequenceIDs):
        self.inputSequenceIDs = inputSequenceIDs

# NOTE: this module previously ended with ``if __name__ == '__main__':
# main()``, but no main() was ever defined here, so running it as a script
# could only raise NameError.  The dead entry point has been removed.
mit
pytorch/fairseq
fairseq/optim/adagrad.py
1
1279
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adagrad") class Adagrad(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return False
mit
g-vidal/upm
examples/python/adxrs610.py
6
2289
#!/usr/bin/env python # Author: Jon Trulson <jtrulson@ics.com> # Copyright (c) 2015 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
from __future__ import print_function import time, sys, signal, atexit from upm import pyupm_adxrs610 as sensorObj def main(): # Instantiate a ADXRS610 sensor on analog pin A0 (dataout), and # analog A1 (temp out) with an analog reference voltage of # 5.0 sensor = sensorObj.ADXRS610(0, 1, 5.0) ## Exit handlers ## # This function stops python from printing a stacktrace when you hit control-C def SIGINTHandler(signum, frame): raise SystemExit # This function lets you run code on exit def exitHandler(): print("Exiting") sys.exit(0) # Register exit handlers atexit.register(exitHandler) signal.signal(signal.SIGINT, SIGINTHandler) # set a deadband region around the zero point to report 0.0 (optional) sensor.setDeadband(0.015); # Every tenth of a second, sample the ADXRS610 and output it's # corresponding temperature and angular velocity while (1): print("Vel (deg/s):", sensor.getAngularVelocity()) print("Temp (C):", sensor.getTemperature()) time.sleep(.1) if __name__ == '__main__': main()
mit
jkandasa/integration_tests
cfme/tests/intelligence/test_chargeback_assignments.py
1
4594
# -*- coding: utf-8 -*-
import cfme.intelligence.chargeback.assignments as cb
import pytest
import random

from cfme.intelligence.chargeback.assignments import AssignmentsView
from cfme import test_requirements
from cfme.utils.appliance.implementations.ui import navigate_to

pytestmark = [
    pytest.mark.tier(3),
    test_requirements.chargeback
]


def _assert_default_rate(view, row_name):
    """Assert that the assignments table row *row_name* shows rate 'Default'.

    Shared verification step for every test below: re-reads the assignments
    view from the browser and checks the selected rate for the given row.
    """
    assign_view = view.browser.create_view(AssignmentsView)
    row = assign_view.selections.row(name=row_name)
    selected_option = row.rate.widget.selected_option
    assert selected_option == "Default", 'Selection does not match'


@pytest.mark.meta(blockers=[1273654])
def test_assign_compute_enterprise(appliance, virtualcenter_provider):
    """Assign the default compute chargeback rate at the enterprise level."""
    view = navigate_to(appliance.server, 'Chargeback')
    enterprise = cb.Assign(
        assign_to="The Enterprise",
        selections={
            'Enterprise': {'Rate': 'Default'}
        })
    enterprise.computeassign()
    # Assert that the selection made is listed on the UI
    _assert_default_rate(view, 'Enterprise')


def test_assign_compute_provider(appliance, virtualcenter_provider):
    """Assign the default compute chargeback rate to a selected provider."""
    view = navigate_to(appliance.server, 'Chargeback')
    compute_provider = cb.Assign(
        assign_to='Selected Providers',
        selections={
            virtualcenter_provider.name: {'Rate': 'Default'}
        })
    compute_provider.computeassign()
    _assert_default_rate(view, virtualcenter_provider.name)


def test_assign_compute_cluster(appliance, virtualcenter_provider):
    """Assign the default compute chargeback rate to a random cluster."""
    view = navigate_to(appliance.server, 'Chargeback')
    cluster_name = random.choice(virtualcenter_provider.get_yaml_data()["clusters"])
    cluster = cb.Assign(
        assign_to='Selected Cluster / Deployment Roles',
        selections={
            cluster_name: {'Rate': 'Default'}
        })
    cluster.computeassign()
    _assert_default_rate(view, cluster_name)


def test_assign_compute_taggedvm(appliance, virtualcenter_provider):
    """Assign the default compute chargeback rate to Location-tagged VMs."""
    view = navigate_to(appliance.server, 'Chargeback')
    tagged_vm = cb.Assign(
        assign_to="Tagged VMs and Instances",
        tag_category="Location",
        selections={
            'Chicago': {'Rate': 'Default'}
        })
    tagged_vm.computeassign()
    _assert_default_rate(view, 'Chicago')


@pytest.mark.meta(blockers=[1273654])
def test_assign_storage_enterprise(appliance, virtualcenter_provider):
    """Assign the default storage chargeback rate at the enterprise level."""
    view = navigate_to(appliance.server, 'Chargeback')
    enterprise = cb.Assign(
        assign_to="The Enterprise",
        selections={
            'Enterprise': {'Rate': 'Default'}
        })
    enterprise.storageassign()
    _assert_default_rate(view, 'Enterprise')


def test_assign_storage_datastores(appliance, virtualcenter_provider):
    """Assign the default storage chargeback rate to a random datastore."""
    view = navigate_to(appliance.server, 'Chargeback')
    datastore = random.choice(virtualcenter_provider.get_yaml_data()["datastores"])["name"]
    sel_datastore = cb.Assign(
        assign_to="Selected Datastores",
        selections={
            datastore: {'Rate': 'Default'}
        })
    sel_datastore.storageassign()
    _assert_default_rate(view, datastore)


def test_assign_storage_tagged_datastores(appliance, virtualcenter_provider):
    """Assign the default storage chargeback rate to Location-tagged datastores."""
    view = navigate_to(appliance.server, 'Chargeback')
    tagged_datastore = cb.Assign(
        assign_to="Tagged Datastores",
        tag_category="Location",
        selections={
            'Chicago': {'Rate': 'Default'}
        })
    tagged_datastore.storageassign()
    _assert_default_rate(view, 'Chicago')
gpl-2.0