repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
azverkan/scons
src/engine/SCons/Memoize.py
5
9546
# # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" __doc__ = """Memoizer A metaclass implementation to count hits and misses of the computed values that various methods cache in memory. Use of this modules assumes that wrapped methods be coded to cache their values in a consistent way. 
Here is an example of wrapping a method that returns a computed value, with no input parameters: memoizer_counters = [] # Memoization memoizer_counters.append(SCons.Memoize.CountValue('foo')) # Memoization def foo(self): try: # Memoization return self._memo['foo'] # Memoization except KeyError: # Memoization pass # Memoization result = self.compute_foo_value() self._memo['foo'] = result # Memoization return result Here is an example of wrapping a method that will return different values based on one or more input arguments: def _bar_key(self, argument): # Memoization return argument # Memoization memoizer_counters.append(SCons.Memoize.CountDict('bar', _bar_key)) # Memoization def bar(self, argument): memo_key = argument # Memoization try: # Memoization memo_dict = self._memo['bar'] # Memoization except KeyError: # Memoization memo_dict = {} # Memoization self._memo['dict'] = memo_dict # Memoization else: # Memoization try: # Memoization return memo_dict[memo_key] # Memoization except KeyError: # Memoization pass # Memoization result = self.compute_bar_value(argument) memo_dict[memo_key] = result # Memoization return result At one point we avoided replicating this sort of logic in all the methods by putting it right into this module, but we've moved away from that at present (see the "Historical Note," below.). Deciding what to cache is tricky, because different configurations can have radically different performance tradeoffs, and because the tradeoffs involved are often so non-obvious. Consequently, deciding whether or not to cache a given method will likely be more of an art than a science, but should still be based on available data from this module. Here are some VERY GENERAL guidelines about deciding whether or not to cache return values from a method that's being called a lot: -- The first question to ask is, "Can we change the calling code so this method isn't called so often?" Sometimes this can be done by changing the algorithm. 
Sometimes the *caller* should be memoized, not the method you're looking at. -- The memoized function should be timed with multiple configurations to make sure it doesn't inadvertently slow down some other configuration. -- When memoizing values based on a dictionary key composed of input arguments, you don't need to use all of the arguments if some of them don't affect the return values. Historical Note: The initial Memoizer implementation actually handled the caching of values for the wrapped methods, based on a set of generic algorithms for computing hashable values based on the method's arguments. This collected caching logic nicely, but had two drawbacks: Running arguments through a generic key-conversion mechanism is slower (and less flexible) than just coding these things directly. Since the methods that need memoized values are generally performance-critical, slowing them down in order to collect the logic isn't the right tradeoff. Use of the memoizer really obscured what was being called, because all the memoized methods were wrapped with re-used generic methods. This made it more difficult, for example, to use the Python profiler to figure out how to optimize the underlying methods. """ import types # A flag controlling whether or not we actually use memoization. use_memoizer = None CounterList = [] class Counter(object): """ Base class for counting memoization hits and misses. We expect that the metaclass initialization will have filled in the .name attribute that represents the name of the function being counted. """ def __init__(self, method_name): """ """ self.method_name = method_name self.hit = 0 self.miss = 0 CounterList.append(self) def display(self): fmt = " %7d hits %7d misses %s()" print fmt % (self.hit, self.miss, self.name) def __cmp__(self, other): try: return cmp(self.name, other.name) except AttributeError: return 0 class CountValue(Counter): """ A counter class for simple, atomic memoized values. 
A CountValue object should be instantiated in a class for each of the class's methods that memoizes its return value by simply storing the return value in its _memo dictionary. We expect that the metaclass initialization will fill in the .underlying_method attribute with the method that we're wrapping. We then call the underlying_method method after counting whether its memoized value has already been set (a hit) or not (a miss). """ def __call__(self, *args, **kw): obj = args[0] if self.method_name in obj._memo: self.hit = self.hit + 1 else: self.miss = self.miss + 1 return self.underlying_method(*args, **kw) class CountDict(Counter): """ A counter class for memoized values stored in a dictionary, with keys based on the method's input arguments. A CountDict object is instantiated in a class for each of the class's methods that memoizes its return value in a dictionary, indexed by some key that can be computed from one or more of its input arguments. We expect that the metaclass initialization will fill in the .underlying_method attribute with the method that we're wrapping. We then call the underlying_method method after counting whether the computed key value is already present in the memoization dictionary (a hit) or not (a miss). 
""" def __init__(self, method_name, keymaker): """ """ Counter.__init__(self, method_name) self.keymaker = keymaker def __call__(self, *args, **kw): obj = args[0] try: memo_dict = obj._memo[self.method_name] except KeyError: self.miss = self.miss + 1 else: key = self.keymaker(*args, **kw) if key in memo_dict: self.hit = self.hit + 1 else: self.miss = self.miss + 1 return self.underlying_method(*args, **kw) class Memoizer(object): """Object which performs caching of method calls for its 'primary' instance.""" def __init__(self): pass def Dump(title=None): if title: print title CounterList.sort() for counter in CounterList: counter.display() class Memoized_Metaclass(type): def __init__(cls, name, bases, cls_dict): super(Memoized_Metaclass, cls).__init__(name, bases, cls_dict) for counter in cls_dict.get('memoizer_counters', []): method_name = counter.method_name counter.name = cls.__name__ + '.' + method_name counter.underlying_method = cls_dict[method_name] replacement_method = types.MethodType(counter, None, cls) setattr(cls, method_name, replacement_method) def EnableMemoization(): global use_memoizer use_memoizer = 1 # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
mit
bbbenja/SickRage
lib/html5lib/treewalkers/dom.py
1229
1457
from __future__ import absolute_import, division, unicode_literals from xml.dom import Node import gettext _ = gettext.gettext from . import _base class TreeWalker(_base.NonRecursiveTreeWalker): def getNodeDetails(self, node): if node.nodeType == Node.DOCUMENT_TYPE_NODE: return _base.DOCTYPE, node.name, node.publicId, node.systemId elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): return _base.TEXT, node.nodeValue elif node.nodeType == Node.ELEMENT_NODE: attrs = {} for attr in list(node.attributes.keys()): attr = node.getAttributeNode(attr) if attr.namespaceURI: attrs[(attr.namespaceURI, attr.localName)] = attr.value else: attrs[(None, attr.name)] = attr.value return (_base.ELEMENT, node.namespaceURI, node.nodeName, attrs, node.hasChildNodes()) elif node.nodeType == Node.COMMENT_NODE: return _base.COMMENT, node.nodeValue elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): return (_base.DOCUMENT,) else: return _base.UNKNOWN, node.nodeType def getFirstChild(self, node): return node.firstChild def getNextSibling(self, node): return node.nextSibling def getParentNode(self, node): return node.parentNode
gpl-3.0
proxysh/Safejumper-for-Mac
buildmac/Resources/env/lib/python2.7/site-packages/twisted/plugins/twisted_words.py
19
1029
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. from zope.interface import provider from twisted.plugin import IPlugin from twisted.application.service import ServiceMaker from twisted.words import iwords NewTwistedWords = ServiceMaker( "New Twisted Words", "twisted.words.tap", "A modern words server", "words") TwistedXMPPRouter = ServiceMaker( "XMPP Router", "twisted.words.xmpproutertap", "An XMPP Router server", "xmpp-router") @provider(IPlugin, iwords.IProtocolPlugin) class RelayChatInterface(object): name = 'irc' def getFactory(cls, realm, portal): from twisted.words import service return service.IRCFactory(realm, portal) getFactory = classmethod(getFactory) @provider(IPlugin, iwords.IProtocolPlugin) class PBChatInterface(object): name = 'pb' def getFactory(cls, realm, portal): from twisted.spread import pb return pb.PBServerFactory(portal, True) getFactory = classmethod(getFactory)
gpl-2.0
sfcl/ancon
index/api/views.py
2
69572
# -*- coding:utf-8 -*- import json, pytz from datetime import datetime from django.shortcuts import get_object_or_404 from django.db import transaction from django.db import models from django.utils import timezone from django.http import JsonResponse, Http404, HttpResponse from django.conf import settings from django.core.urlresolvers import reverse from django.template.loader import render_to_string from django.utils.translation import ugettext as _ from django.contrib import messages from django.db.models import Q from events.models import Events from index.forms.tr_to_firm import TransfeToFirmScanner from events.helpers import events_decoder from index.models import ( City, CartridgeItem, CartridgeType, OrganizationUnits, CartridgeItemName, FirmTonerRefill, Storages, STATUS ) from index.helpers import LastNumber, str2int from index.signals import ( sign_turf_cart, sign_add_full_to_stock, sign_tr_empty_cart_to_stock, sign_tr_cart_to_basket, sign_add_empty_to_stock, sign_tr_cart_to_uses, sign_tr_empty_cart_to_firm, sign_change_number, sign_tr_from_basket_to_stock, sign_faulty_transmission, sign_change_number_refills, ) from index.forms.add_items import AddItems from index.forms.add_items_from_barcode import AddItemsFromBarCodeScanner from index.forms.tr_to_firm import TransfeToFirm from docs.models import SCDoc, RefillingCart from service.helpers import SevercartConfigs from common.helpers import attachment_departament from accounts.decorators import permissions import logging logger = logging.getLogger(__name__) @permissions('dict') def del_firm(request): """ """ resp_dict = dict() firm_id = request.POST.get('selected', '') firm_id = str2int(firm_id) try: firm = FirmTonerRefill.objects.get(pk=firm_id) except FirmTonerRefill.DoesNotExist: resp_dict['text'] = _('Firm not found') resp_dict['error'] = '1' return JsonResponse(resp_dict) else: cart_in_firm = CartridgeItem.objects.filter(filled_firm=firm).count() if cart_in_firm == 0: firm.delete() resp_dict['text'] = 
_('Firm deleted!') resp_dict['error'] = '0' else: resp_dict['text'] = _('You can not delete, because have cartridges at a gas station.') resp_dict['error'] = '1' return JsonResponse(resp_dict) def search_number(cart_number, request): """Хелпер функция для поиска дублей номеров внутри представительства. Обработчиком запроса не является. """ cart_items = CartridgeItem.objects.filter(cart_number=cart_number) try: root_ou = request.user.departament des = root_ou.get_descendants(include_self=True) except: cart_items = [] else: cart_items = cart_items.filter(departament__in=des) return cart_items @permissions('cart') def ajax_add_session_items(request): """Довляем новые картриджи на склад через Аякс """ # если пришёл запрос то пополняем сессионную переменную # результаты отображаем на странице tmp_dict = dict() try: m1 = request.user.departament.pk except AttributeError: tmp_dict['mes'] = _('User not assosiate with organization unit!<br/>Error code: 101.') tmp_dict['error'] = '1' return JsonResponse(tmp_dict) tmp_dict['error'] = '0' form = AddItems(request.POST) if form.is_valid(): data_in_post = form.cleaned_data cart_name_id = data_in_post.get('cartName').pk cart_name = data_in_post.get('cartName').cart_itm_name storages = data_in_post.get('storages') cart_name = str(cart_name) cart_type = request.POST.get('cart_type') cart_doc_id = data_in_post.get('doc') cart_count = int(data_in_post.get('cartCount')) # фича добавленна после обращения пользователя из Новосибирска tumbler = request.POST.get('tumbler', 0) # отвечает за ручной ввод номера РМ tumbler_2 = request.POST.get('tumbler_2', 0) # отвечает за ручноую установку даты добавления РМ на склад tumbler = str2int(tumbler) tumbler_2 = str2int(tumbler_2) if tumbler_2: conf = SevercartConfigs() date_added = data_in_post.get('set_date') time_added = data_in_post.get('time') set_date = datetime(year=date_added['year'], month=date_added['month'], day=date_added['day'], hour=time_added['hours'], minute=time_added['minutes'], 
second=0, microsecond=0 ) #, tzinfo=pytz.timezone(conf.time_zone)) # d = datetime.datetime.now() local = pytz.timezone(conf.time_zone) local_dt = local.localize(set_date, is_dst=None) date_time_added = local_dt.astimezone(pytz.utc) else: date_time_added = timezone.now() # чтобы не плодить лишние сущности зделано одно вью для добавления разных картриджей if cart_type == 'full': cart_status = 1 elif cart_type == 'empty': cart_status = 3 else: tmp_dict['error'] ='1' tmp_dict['mes'] = _('Error in attrib "data" in input button add_item') return JsonResponse(tmp_dict) list_cplx = list() if tumbler: # если переключатель ручного ввода номера включен cart_number = request.POST.get('cart_number') cart_number = cart_number.strip() # далее выполняем проверку на дубли, только внутри своего представительства cart_items = search_number(cart_number, request) if len(cart_items): tmp_dict['error'] = '1' tmp_dict['mes'] = _('An object with this number has already been registered.') return JsonResponse(tmp_dict) else: # если тумблер ручного ввода номера РМ НЕ установлен, то генерируем новый свободный номер # находим нужный номер для отсчёта добавления новых картриджей num_obj = LastNumber(request) cart_number = num_obj.get_num() # перед тем как выполняется сохранение, производим поиск дубля # выполняем генерацию новых номеров пока не найдём свободный while len(search_number(cart_number, request)): cart_number += 1 # Добавляем картриджи в БД with transaction.atomic(): for i in range(cart_count): m1 = CartridgeItem(stor=storages, cart_number=str(cart_number), cart_itm_name=data_in_post.get('cartName'), cart_date_added=date_time_added, cart_date_change=date_time_added, cart_number_refills=0, departament=request.user.departament, cart_status=cart_status, delivery_doc=cart_doc_id, ) m1.save() list_cplx.append((m1.id, str(cart_number), cart_name)) if not(tumbler): try: # перестрахуемся cart_number = int(cart_number) except ValueError: cart_number = 0 cart_number += 1 # перед тем как 
выполняется сохранение, производим поиск дубля # выполняем генерацию новых номеров пока не найдём свободный while len(search_number(cart_number, request)): cart_number += 1 num_obj.last_number = str(cart_number) num_obj.commit() if cart_count == 1: tmpl_message = _('Cartridge successfully added.') elif cart_count > 1: tmpl_message = _('Cartridges successfully added.') # запускаем сигнал добавления событий if cart_status == 1: sign_add_full_to_stock.send(sender=None, list_cplx=list_cplx, request=request) elif cart_status == 3: sign_add_empty_to_stock.send(sender=None, list_cplx=list_cplx, request=request) else: pass # заполняем тупой кэш нужными данными названий картриджей и их айдишников, это минимизирует обращения к базе # в будующем simple_cache = dict() list_names = CartridgeItemName.objects.all() for elem in list_names: if elem.manufacturer: simple_cache[elem.pk] = (elem.cart_itm_name, elem.manufacturer,) else: simple_cache[elem.pk] = (elem.cart_itm_name, None,) numbers = [ i[1] for i in list_cplx ] # для экономного расходования дискового пространства будем использовать идешники tmp_list = [cart_name_id, cart_doc_id, numbers] if cart_status == 1: # наполняем сессионную переменную cumulative_list если производится # добавление новых картриджей на склад if request.session.get('cumulative_list', False): # если в сессионной переменной уже что-то есть session_data = request.session.get('cumulative_list') session_data = json.loads(session_data) session_data.append(tmp_list) use2var = session_data session_data = json.dumps(session_data) # перезаписываем переменную в сессии новыми значениями request.session['cumulative_list'] = session_data else: # если сессионная cumulative_list пуста use2var = [ tmp_list ] tmp_list = json.dumps(use2var) request.session['cumulative_list'] = tmp_list elif cart_status == 3: # наполняем сессионную переменную empty_cart_list если производится # добавление БУшных картриджей на склад if request.session.get('empty_cart_list', False): # если 
в сессионной переменной уже что-то есть session_data = request.session.get('empty_cart_list') session_data = json.loads(session_data) session_data.append(tmp_list) use2var = session_data session_data = json.dumps(session_data) # перезаписываем переменную в сессии новыми значениями request.session['empty_cart_list'] = session_data else: # если сессионная empty_cart_list пуста use2var = [ tmp_list ] tmp_list = json.dumps(use2var) request.session['empty_cart_list'] = tmp_list # формируем http ответ # формат [ [name, title, numbers=[1,2,3,4]] ... ] list_items = list() for elem in use2var: try: title = str(SCDoc.objects.get(pk=elem[1])) except SCDoc.DoesNotExist: title = '' list_items.append({'name': simple_cache.get(elem[0])[0], 'manufacturer': simple_cache.get(elem[0])[1], 'numbers': str(elem[2])[1:-1].replace('\'',''), 'title': title}) html = render_to_string('index/add_over_ajax.html', context={'list_items': list_items}) tmp_dict['html'] = html tmp_dict['mes'] = tmpl_message else: #form.errors pass return JsonResponse(tmp_dict, safe=False) @permissions('cart') def ajax_add_session_items_from_barcode(request): """Добавляем картриджи в сессионную переменную с помощью сканера штрих кодов. 
""" # если пришёл запрос то пополняем сессионную переменную # результаты отображаем на странице context = dict() try: request.user.departament.pk except AttributeError: context['mes'] = _('User not assosiate with organization unit!<br/>Error code: 101.') context['error'] = '1' return JsonResponse(context) context['error'] = '0' form = AddItemsFromBarCodeScanner(request.POST) if not(form.is_valid()): # если в БД уже есть РМ дубль с аналогичным номером, то # прекращаем выполнение программы и сообщаем об ошибке context['mes'] = form.errors.as_text() context['error'] = '1' return JsonResponse(context) if form.is_valid(): data_in_post = form.cleaned_data cart_number = data_in_post.get('cartNumber') cart_name = data_in_post.get('cartName').cart_itm_name manufacturer = data_in_post.get('cartName').manufacturer storages = data_in_post.get('storages') cart_name = str(cart_name) cart_name_id = data_in_post.get('cartName').pk cart_type = request.POST.get('cart_type') cart_doc_id = data_in_post.get('doc') tumbler = data_in_post.get('tumbler') # чтобы не плодить лишние сущности зделано одно вью для добавления разных картриджей if cart_type == 'full': #cart_status = 1 session_var = 'add_cartridges_full_in_stock' elif cart_type == 'empty': #cart_status = 3 session_var = 'add_cartridges_empty_in_stock' else: context['error'] ='1' context['mes'] = _('Error in attrib "data" in input button add_item') return JsonResponse(context) conf = SevercartConfigs() # проверяем на дубли имеющихся номеров cart_items = search_number(cart_number, request) if len(cart_items): context['error'] = '1' context['mes'] = _('An object with this number has already been registered.') return JsonResponse(context) if tumbler: date_added = data_in_post.get('set_date') time_added = data_in_post.get('time') set_date = datetime(year=date_added['year'], month=date_added['month'], day=date_added['day'], hour=time_added['hours'], minute=time_added['minutes'], second=0, microsecond=0 ) #, 
tzinfo=pytz.timezone(conf.time_zone)) # d = datetime.datetime.now() local = pytz.timezone(conf.time_zone) local_dt = local.localize(set_date, is_dst=None) date_time_added = local_dt.astimezone(pytz.utc) date_time_added_show = set_date else: date_time_added = timezone.now() date_time_added_show = '' # собираем очект РМ на основе полученных данных cart_obj = dict() cart_obj['cart_number'] = cart_number cart_obj['cart_name'] = cart_name cart_obj['cart_name_id'] = cart_name_id cart_obj['storages'] = storages cart_obj['cart_doc_id'] = cart_doc_id cart_obj['cart_type'] = cart_type cart_obj['manufacturer'] = manufacturer #local = pytz.timezone(conf.time_zone) #date_time_added = local.localize(date_time_added, is_dst=None) cart_obj['date_time_added'] = date_time_added cart_obj['date_time_added_show'] = date_time_added_show # Добавляем отсканированный картридж в БД if request.session.get(session_var, False): # если в сессионной переменной уже что-то есть session_data = request.session.get(session_var) # проверяем добавляем элемент на дубль в сессионной казине exist = False for elem in session_data: if elem['cart_number'] in cart_number: exist = True break if exist: message = _('Cartridge %(cart_number)s is already in the basket session.') % {'cart_number': cart_number} context['mes'] = message context['error'] = '1' return JsonResponse(context) #session_data.append(cart_obj) session_data.insert(0, cart_obj) else: # если сессионная basket_to_transfer_firm пуста или её нет вообще session_data = list() #session_data.append(cart_obj) session_data.insert(0, cart_obj) request.session[session_var] = session_data message = _('Cartridge %(cart_number)s successfully added in session basket.') % {'cart_number': cart_number} html = render_to_string('index/add_items_barcode_ajax.html', context={'list_items': session_data}) context['html'] = html context['mes'] = message context['error'] = '0' return JsonResponse(context) @permissions('cart') def 
add_items_in_stock_from_session_basket(request): """Добавление объектов на склад, в соответствии с содержанием сессионных переменных. Требуется доробтка. """ ansver = dict() session_var = request.POST.get('session_var', '') session_var = session_var.strip() if session_var == 'add_cartridges_full_in_stock': cart_status = 1 elif session_var == 'add_cartridges_empty_in_stock': cart_status = 3 else: ansver['error'] = '1' ansver['text'] = _('Session varible not emplimented.') return JsonResponse(ansver) if request.session.get(session_var, False): session_data = request.session.get(session_var) else: ansver['error'] = '1' ansver['text'] = _('Cart is empty. Add nothing to the warehouse.') return JsonResponse(ansver) list_cplx = list() number_list = list() with transaction.atomic(): for cartridge in session_data: cart_name = CartridgeItemName.objects.get(pk=cartridge['cart_name_id']) m1 = CartridgeItem( stor=cartridge['storages'], cart_number=cartridge['cart_number'], cart_itm_name=cart_name, cart_date_added=cartridge['date_time_added'], cart_date_change=cartridge['date_time_added'], cart_number_refills=0, departament=request.user.departament, cart_status=cart_status, delivery_doc=cartridge['cart_doc_id'] ) m1.save() number_list.append(cartridge['cart_number']) list_cplx.append((m1.id, cartridge['cart_number'], cart_name)) ansver['text'] = _('Cartridges %(cart_numbers)s successfully added.') % {'cart_numbers': number_list} ansver['error'] = '0' # очищаем сессионную переменную request.session[session_var] = None # запускаем сигнал добавления событий if cart_status == 1: sign_add_full_to_stock.send(sender=None, list_cplx=list_cplx, request=request) elif cart_status == 3: sign_add_empty_to_stock.send(sender=None, list_cplx=list_cplx, request=request) else: pass return JsonResponse(ansver) @permissions('cart') def transfer_to_stock(request): """Возврат исчерпаного картриджа от пользователя обратно на склад. 
""" checked_cartr = request.POST.getlist('selected[]') list_cplx = [] checked_cartr = [ str2int(i) for i in checked_cartr] ansver = dict() with transaction.atomic(): for inx in checked_cartr: m1 = CartridgeItem.objects.get(pk=inx) # проверяем принадлежность перемещаемого РМ департаменту # пользователя. if m1.departament in request.user.departament.get_descendants(): m1.cart_status = 3 # пустой объект на складе tmp_dept = m1.departament m1.departament = request.user.departament m1.cart_date_change = timezone.now() m1.save() list_cplx.append((m1.id, str(m1.cart_itm_name), str(tmp_dept), m1.cart_number)) if list_cplx: sign_tr_empty_cart_to_stock.send(sender=None, list_cplx=list_cplx, request=request) ansver['error'] = '0' ansver['text'] = _('Cartridges successfully moved.') return JsonResponse(ansver, safe=False) @permissions('cart') def transfer_to_firm(request): """Передача картриджей на обслуживание. """ ansver = dict() form = TransfeToFirm(request.POST) if form.is_valid(): data_in_post = form.cleaned_data numbers = data_in_post.get('numbers') firm = data_in_post.get('firm') doc_id = data_in_post.get('doc') price = data_in_post.get('price') firm = FirmTonerRefill.objects.get(pk=firm) # генерируем запись о заправке jsoning_list = [] for inx in numbers: cart_number = CartridgeItem.objects.get(pk=inx).cart_number cart_name = CartridgeItem.objects.get(pk=inx).cart_itm_name jsoning_list.append([cart_number, str(cart_name)]) jsoning_list = json.dumps(jsoning_list) doc_id = str2int(doc_id) try: doc = SCDoc.objects.get(pk=doc_id) except SCDoc.DoesNotExist: doc = None # генерируем номер акта передачи на основе даты и его порядкового номера sender_acts = RefillingCart.objects.filter(departament=request.user.departament).count() if sender_acts: act_number = sender_acts + 1 act_number = str(timezone.now().year) + '_' + str(sender_acts) else: act_number = str(timezone.now().year) + '_1' # сохраняем в БД акт передачи РМ на заправку act_doc = RefillingCart( doc_type = 1, # 
документ передачи на запраку number = act_number, date_created = timezone.now(), firm = firm, user = str(request.user), json_content = jsoning_list, money = price, parent_doc = doc, departament = request.user.departament ) act_doc.save() list_cplx = list() show_numbers = list() for inx in numbers: m1 = CartridgeItem.objects.get(pk=inx) # проверяем принадлежность перемещаемого РМ департаменту # пользователя. if m1.departament == request.user.departament: m1.cart_status = 4 # находится на заправке m1.filled_firm = firm m1.cart_date_change = timezone.now() m1.save() list_cplx.append((m1.pk, str(m1.cart_itm_name), m1.cart_number)) show_numbers.append(m1.cart_number) if list_cplx: sign_tr_empty_cart_to_firm.send(sender=None, list_cplx=list_cplx, request=request, firm=str(firm) + ':' + str(firm.pk) # сохраняем в логах событий имя и Id фирмы контрагента. ) show_numbers = str(show_numbers) # Убираем лишние авпострофы из списка с номерами show_numbers = show_numbers.replace('\'', '') if len(show_numbers): msg = _('Cartridges %(cart_nums)s successfully moved to firm.') % {'cart_nums': show_numbers} else: msg = _('No transmission facilities') ansver['success'] = '1' ansver['url'] = reverse('index:empty') messages.success(request, msg) else: # если форма содержит ошибки, то сообщаем о них пользователю. error_messages = dict([(key, [error for error in value]) for key, value in form.errors.items()]) ansver['errors'] = error_messages return JsonResponse(ansver) @permissions('cart') def clear_session(request): """Очищаем сессионные переменные """ cart_type = request.POST.get('cart_type') if cart_type == 'full': request.session['cumulative_list'] = None elif cart_type == 'empty': request.session['empty_cart_list'] = None else: pass return HttpResponse(_('Session cleared')) @permissions('dict') def city_list(request): """Возвращает список городов полученных из базы в ввиде json. 
""" cites = City.objects.all() tmp_dict = {} for elem in cites: tmp_dict[elem.id] = elem.city_name return JsonResponse(tmp_dict, safe=False) @permissions('dict') def del_node(request): """Удаляем нод(у)(ы) из структуры организации """ ansver = dict() ar = request.POST.getlist('selected[]') ar = [int(i) for i in ar ] if settings.DEMO: ansver['error'] = '1' ansver['text'] = _('In demo remove nodes not allow.') return JsonResponse(ansver) try: for ind in ar: node = OrganizationUnits.objects.get(pk=ind) node.delete() except models.ProtectedError: ansver['error'] = '1' ansver['text'] = _('But it can not be removed because other objects reference it.<br/>Error code: 102') else: ansver['error'] = '0' if len(ar) == 1: ansver['text'] = _('Name deleted successfully.') else: ansver['text'] = _('Names deleted successfully.') return JsonResponse(ansver) @permissions('cart') def turf_cartridge(request): """Безвозвратное удаление картриджа из БД. Списание расходника. """ ar = request.POST.getlist('selected[]') ar = [int(i) for i in ar ] list_cplx = [] for ind in ar: node = CartridgeItem.objects.get(pk=ind) # проверяем принадлежность перемещаемого РМ департаменту # пользователя. if node.departament == request.user.departament: list_cplx.append((node.id, str(node.cart_itm_name), node.cart_number)) node.delete() if list_cplx: sign_turf_cart.send(sender=None, list_cplx=list_cplx, request=request) return HttpResponse(_('Cartridjes deleted!')) @permissions('cart') def transfer_to_basket(request): """Перемещение картриджей в корзину. 
""" ansver = dict() checked_cartr = request.POST.getlist('selected[]') action_type = request.POST.get('atype', '') try: checked_cartr = [int(i) for i in checked_cartr ] except ValueError: ansver['error'] = '1' ansver['text'] = _('Error converting string numbers to int.') return JsonResponse(ansver) if action_type == '5': # перемещаем заправленный картридж в корзину cart_status = 5 elif action_type == '6': # перемещаем пустой картридж в корзину cart_status = 6 else: ansver['error'] = '1' ansver['text'] = _('This action type not implemented.') return JsonResponse(ansver) list_cplx = [] for inx in checked_cartr: m1 = CartridgeItem.objects.get(pk=inx) # проверяем принадлежность перемещаемого РМ департаменту # пользователя. if (m1.departament in request.user.departament.get_descendants()) or \ (m1.departament == request.user.departament): m1.cart_status = cart_status m1.departament = request.user.departament m1.cart_date_change = timezone.now() m1.save() list_cplx.append((m1.id, str(m1.cart_itm_name), m1.cart_number)) if list_cplx: sign_tr_cart_to_basket.send(sender=None, list_cplx=list_cplx, request=request) ansver['error'] = '0' ansver['text'] = _('Cartridges successfully transferred to basket.') return JsonResponse(ansver) @permissions('cart') def names_suggests(request): """ """ ansver = dict() tmp_list = list() cart_name = request.POST.get('cart_name') cart_name = cart_name.strip() if cart_name: names_list = CartridgeItemName.objects.filter(cart_itm_name__icontains=cart_name) for name_item in names_list: if name_item.manufacturer: tmp_list.append([name_item.pk, str(name_item.manufacturer) + ' ' + str(name_item.cart_itm_name)]) else: tmp_list.append([name_item.pk, str(name_item.cart_itm_name)]) ansver['res'] = tmp_list else: ansver['res'] = [['', '']] return JsonResponse(ansver) @permissions('cart') def get_cart_ou(request): """Получение списка установленных РМ у пользователя """ ansver = dict() context = dict() id_ou = request.POST.get('id_ou', '') departament = 
OrganizationUnits.objects.get(pk=id_ou) context['checked'] = request.CHECKBOX_RETURN context['list_items'] = CartridgeItem.objects.filter(departament=departament) ansver['html'] = render_to_string('index/get_cart_ou.html', context) return JsonResponse(ansver) @permissions('cart') def move_to_use(request): """Передача РМ в пользование. """ ansver = dict() data_in_post = request.POST moved = request.POST.getlist('moved[]') id_ou = data_in_post['id_ou'] installed = request.POST.getlist('installed[]') try: installed = [int(i) for i in installed] moved = [int(i) for i in moved] id_ou = int(id_ou) except ValueError as e: ansver['error'] = '1' ansver['text'] = str(e) return JsonResponse(ansver) get = lambda node_id: OrganizationUnits.objects.get(pk=node_id) list_cplx = list() show_numbers = list() # используется для информационных сообщений try: root_ou = request.user.departament des = root_ou.get_descendants() except: pass with transaction.atomic(): for inx in moved: m1 = CartridgeItem.objects.get(pk=inx) # проверяем принадлежность перемещаемого РМ департаменту # пользователя. if m1.departament == request.user.departament: m1.cart_status = 2 # объект находится в пользовании m1.departament = get(id_ou) m1.cart_date_change = timezone.now() show_numbers.append(m1.cart_number) m1.save() list_cplx.append((m1.id, str(m1.cart_itm_name), m1.cart_number)) if list_cplx: sign_tr_cart_to_uses.send(sender=None, list_cplx=list_cplx, request=request, org=str(get(id_ou)), org_id=id_ou) list_cplx = [] if installed: # если выбрано возврат РМ от пользователя обратно на склад with transaction.atomic(): for inx in installed: # проверяем принадлежность перемещаемого РМ департаменту # пользователя. 
m1 = CartridgeItem.objects.get(pk=inx) if m1.departament in des: m1.cart_status = 3 # пустой объект на складе tmp_dept = m1.departament m1.departament = request.user.departament m1.cart_date_change = timezone.now() m1.save() list_cplx.append((m1.id, str(m1.cart_itm_name), str(tmp_dept), m1.cart_number)) if list_cplx: sign_tr_empty_cart_to_stock.send(sender=None, list_cplx=list_cplx, request=request) ansver['error'] = '0' ansver['url'] = reverse('index:stock') show_numbers = str(show_numbers) show_numbers = show_numbers.replace('\'', '') msg = _('Cartridges %(cart_list)s successfully transferred for use') % {'cart_list': show_numbers} messages.success(request, msg) return JsonResponse(ansver) @permissions('login') def view_events(request): """Загрузка блока с событиями для страницы dashboard """ ansver = dict() context = dict() detail = request.POST.get('detail', 0) try: dept_id = request.user.departament.pk except AttributeError: dept_id = 0 detail = str2int(detail) MAX_EVENT_LIST = settings.DASHBOARD_EVENT_LIST if detail: events_list = Events.objects.filter(departament=dept_id).order_by('-pk')[:MAX_EVENT_LIST] context['count_events'] = len(events_list) if events_list.count() >= MAX_EVENT_LIST: context['show_more'] = True else: context['show_more'] = False context['events_list'] = events_decoder(events_list, simple=False) html = render_to_string('events/show_all_events.html', context=context, request=request) else: events_list = Events.objects.filter(departament=dept_id).order_by('-pk')[:MAX_EVENT_LIST] if events_list.count() >= MAX_EVENT_LIST: context['show_more'] = True else: context['show_more'] = False context['events_list'] = events_decoder(events_list, simple=False) html = render_to_string('index/events.html', context=context, request=request) ansver['html'] = html return JsonResponse(ansver) @permissions('cart') def from_basket_to_stock(request): """Возврат обратно картриджей на склад из корзины """ ansver = dict() list_cplx = list() ar = 
request.POST.getlist('selected[]') try: ar = [int(i) for i in ar ] except: # если пользователь сфальсифицировал запрос то # ничего не делаем и возвращаем пустой ответ raise Http404 for inx in ar: m1 = CartridgeItem.objects.get(pk=inx) # проверяем принадлежность перемещаемого РМ департаменту # пользователя. if m1.departament == request.user.departament: if m1.cart_status == 5: m1.cart_status = 1 # возвращаем обратно на склад заполненным elif m1.cart_status == 6: m1.cart_status = 3 # возвращаем обратно на склад пустым else: raise Http404 m1.cart_date_change = timezone.now() list_cplx.append((m1.pk, str(m1.cart_itm_name), m1.cart_number)) m1.save() if list_cplx: sign_tr_from_basket_to_stock.send(sender=None, list_cplx=list_cplx, request=request) return JsonResponse(ansver) @permissions('dict') def change_ou_name(request): """Изменение имени организационного подразделения. """ ansver = dict() ansver['error'] = 1 ouid = request.POST.getlist('ouid', []) ou_name = request.POST.getlist('ou_name', []) comment = request.POST.get('comment', '') comment = comment.strip() try: ouid = ouid[0] ou_name = ou_name[0] except: raise Http404 ouid = str2int(ouid) try: ou = OrganizationUnits.objects.get(pk=ouid) except: raise Http404 ou.name = ou_name ou.comment = comment ou.save() ansver['error'] = 0 return JsonResponse(ansver) @permissions('cart') def add_object_to_basket_for_firm(request): """Подготавливаем списки РМ, передаваемых контрагентам на обслуживание. """ ansver = dict() cart_barcode = request.POST.get('barcode', '') cart_barcode = cart_barcode.strip() try: root_ou = request.user.departament des = root_ou.get_descendants() except: ansver['error'] ='1' ansver['mes'] = _('Error: 101. Not set organization utint.') return JsonResponse(ansver) else: # выполняем поиск РМ обладающие разными статусами # например, пуст и на складе, задействованн, ... 
m1 = CartridgeItem.objects.filter( Q(cart_number=cart_barcode) & (Q(departament__in=des) | Q(departament=root_ou)) ) if len(m1) >= 1: cartridge = m1[0] m1 = None else: # если картридж с данным неомером не найденн ansver['error'] ='1' ansver['mes'] = _('Consumables with the number %(cart_barcode)s was not found.') % {'cart_barcode' : cart_barcode} return JsonResponse(ansver) session_data = request.session.get('basket_to_transfer_firm', False) if session_data and (str(cartridge.pk) in session_data): ansver['error'] ='1' ansver['mes'] = _('The object is already in the lists on the move.') return JsonResponse(ansver) if cartridge.cart_status == 3: # если картридж с нужным номером найденн и у него код статуса "Пустой и на складе" # добавляем информауию в сессионную переменную пользователя if request.session.get('basket_to_transfer_firm', False): # если в сессионной переменной уже что-то есть session_data = request.session.get('basket_to_transfer_firm') # если в сессионной переменной данные уже есть то РМ в список не добавляем try: session_data.index(cartridge.pk) except ValueError: session_data.append(cartridge.pk) request.session['basket_to_transfer_firm'] = session_data else: ansver['error'] ='1' ansver['mes'] = _('The object number %(cart_barcode)s is already present in the lists on the move.') % {'cart_barcode': cart_barcode} return JsonResponse(ansver) else: # если сессионная basket_to_transfer_firm пуста или её нет вообще session_data = [ cartridge.pk ] request.session['basket_to_transfer_firm'] = session_data ansver['error'] ='0' ansver['mes'] = _('Consumable material is successfully prepared for transfer') ansver['cart_name'] = str(cartridge.cart_itm_name) ansver['cart_num'] = str(cartridge.cart_number) ansver['pk'] = str(cartridge.pk) ansver['moved_list'] = str(session_data)[1:-1] return JsonResponse(ansver) else: cart_status = STATUS[cartridge.cart_status-1][1] ansver['error'] ='2' ansver['mes'] = _('This consumable is in the state \"%(cart_status)s\". 
Are you sure you want to place in the lists transmitted?') % {'cart_status': cart_status} return JsonResponse(ansver) return JsonResponse(ansver) @permissions('cart') def force_move_to_transfer(request): """Усиленная попытка перемещения РМ в "списки пустых и на скдале" """ ansver = dict() cart_barcode = request.POST.get('barcode', '') cart_barcode = cart_barcode.strip() try: root_ou = request.user.departament des = root_ou.get_descendants() except: ansver['error'] ='1' ansver['mes'] = _('Error: 101. Not set organization utint.') return JsonResponse(ansver) # выполняем поиск РМ обладающие разными статусами # например, пуст и на складе, задействованн, ... m1 = CartridgeItem.objects.filter( Q(cart_number=cart_barcode) & (Q(departament__in=des) | Q(departament=root_ou)) ) if len(m1) >= 1: cartridge = m1[0] m1 = None else: # если картридж с данным неомером не найденн ansver['error'] ='1' ansver['mes'] = _('Consumables with the number %(cart_barcode)s was not found.') % {'cart_barcode' : cart_barcode} return JsonResponse(ansver) list_cplx = list() # проверяем принадлежность перемещаемого РМ департаменту # пользователя. 
if cartridge.departament in request.user.departament.get_descendants(): cartridge.cart_status = 3 # пустой объект на складе tmp_dept = cartridge.departament cartridge.departament = request.user.departament cartridge.cart_date_change = timezone.now() cartridge.save() list_cplx.append((cartridge.id, str(cartridge.cart_itm_name), str(tmp_dept), cartridge.cart_number)) if list_cplx: sign_tr_empty_cart_to_stock.send(sender=None, list_cplx=list_cplx, request=request) if request.session.get('basket_to_transfer_firm', False): # если в сессионной переменной уже что-то есть session_data = request.session.get('basket_to_transfer_firm') session_data.append(cartridge.pk) request.session['basket_to_transfer_firm'] = session_data else: # если сессионная basket_to_transfer_firm пуста или её нет вообще session_data = [ cartridge.pk, ] request.session['basket_to_transfer_firm'] = session_data ansver['error'] ='0' ansver['mes'] = _('Consumable material is successfully prepared for transfer') ansver['cart_name'] = str(cartridge.cart_itm_name) ansver['cart_num'] = str(cartridge.cart_number) ansver['pk'] = str(cartridge.pk) return JsonResponse(ansver) @permissions('cart') def remove_session_item(request): """Удаление элемента из сессионной переменной перемещения РМ на обслуживание. 
""" ansver = dict() selected = request.POST.getlist('selected[]') session_key = request.POST.get('session_key' ,'') if session_key == 'basket_to_transfer_firm': session_key = 'basket_to_transfer_firm' elif session_key == 'basket_to_transfer_stock': session_key = 'basket_to_transfer_stock' else: ansver['error'] = '1' ansver['mes'] = _('The session key is not recognized.') return JsonResponse(ansver) if request.session.get(session_key, []): session_data = request.session.get(session_key) if session_data: for select in selected: try: select = int(select) except ValueError: select = 0 session_data = list(item for item in session_data if select != item) request.session[session_key] = session_data ansver['error'] = '0' ansver['show_remove_session_button'] = True if session_data: ansver['show_remove_session_button'] = False ansver['moved_list'] = str(session_data)[1:-1] return JsonResponse(ansver) @permissions('cart') def move_objects_to_firm_with_barcode(request): """Аякс обработчик перемещения РМ на обслуживание контрагенту на основе подготовленных списков с сканером штрих кодов. 
""" ansver = dict() form = TransfeToFirmScanner(request.POST) if form.is_valid(): data_in_post = form.cleaned_data numbers = data_in_post.get('numbers') firm = data_in_post.get('firm') doc = data_in_post.get('doc') try: firm = FirmTonerRefill.objects.get(pk=firm) except FirmTonerRefill.DoesNotExist: firm = None # меняем статусы РМ в БД на основании запросов # генерируем запись о заправке jsoning_list = [] for inx in numbers: cart_number = CartridgeItem.objects.get(pk=inx).cart_number cart_name = CartridgeItem.objects.get(pk=inx).cart_itm_name jsoning_list.append([cart_number, str(cart_name)]) jsoning_list = json.dumps(jsoning_list) doc = str2int(doc) try: doc = SCDoc.objects.get(pk=doc) except SCDoc.DoesNotExist: doc = None # генерируем номер акта передачи на основе даты и его порядкового номера sender_acts = RefillingCart.objects.filter(departament=request.user.departament).count() if sender_acts: act_number = sender_acts + 1 act_number = str(timezone.now().year) + '_' + str(sender_acts) else: act_number = str(timezone.now().year) + '_1' # сохраняем в БД акт передачи РМ на заправку act_doc = RefillingCart(number = act_number, date_created = timezone.now(), firm = firm, user = str(request.user), json_content = jsoning_list, parent_doc = doc, departament = request.user.departament ) act_doc.save() list_cplx = list() show_numbers = list() with transaction.atomic(): for inx in numbers: m1 = CartridgeItem.objects.get(pk=inx) # проверяем принадлежность перемещаемого РМ департаменту # пользователя. 
if m1.departament == request.user.departament: m1.cart_status = 4 # находится на заправке m1.filled_firm = firm m1.cart_date_change = timezone.now() m1.save() list_cplx.append((m1.pk, str(m1.cart_itm_name), m1.cart_number)) show_numbers.append(m1.cart_number) if list_cplx: sign_tr_empty_cart_to_firm.send(sender=None, list_cplx=list_cplx, request=request, firm=str(firm) ) ansver['error'] = '0' ansver['url'] = reverse('index:empty') msg = _('Objects %(numbers)s moved successfully.') % {'numbers': show_numbers} messages.success(request, msg) # очищаем сессионную переменную basket_to_transfer_firm request.session['basket_to_transfer_firm'] = [] else: ansver['error'] = '1' ansver['text'] = form.errors.as_text return JsonResponse(ansver) @permissions('cart') def add_object_to_basket_from_firm_to_stock(request): """Перемещение объектов в сессию для реализации передачи РМ из обслуживания обратно на склад. """ ansver = dict() cart_barcode = request.POST.get('barcode') cart_barcode = cart_barcode.strip() try: root_ou = request.user.departament des = root_ou.get_descendants() except: ansver['error'] ='1' ansver['mes'] = _('Error: 101. Not set organization utint.') return JsonResponse(ansver) else: # выполняем поиск РМ обладающие разными статусами # например, пуст и на складе, задействованн, ... 
m1 = CartridgeItem.objects.filter( Q(cart_number=cart_barcode) & (Q(departament__in=des) | Q(departament=root_ou)) ) if len(m1) >= 1: cartridge = m1[0] m1 = None else: # если картридж с данным неомером не найденн ansver['error'] ='1' ansver['mes'] = _('Consumables with the number %(cart_barcode)s was not found.') % {'cart_barcode' : cart_barcode} return JsonResponse(ansver) session_data = request.session.get('basket_to_transfer_stock', False) if session_data and (str(cartridge.pk) in session_data): ansver['error'] ='1' ansver['mes'] = _('The object is already in the lists on the move.') return JsonResponse(ansver) if cartridge.cart_status == 4: # если картридж с нужным номером найденн и у него код статуса "На обслуживании" # добавляем информауию в сессионную переменную пользователя if request.session.get('basket_to_transfer_stock', False): # если в сессионной переменной уже что-то есть session_data = request.session.get('basket_to_transfer_stock') # если в сессионной переменной данные уже есть то РМ в список не добавляем try: session_data.index(cartridge.pk) except ValueError: session_data.append(cartridge.pk) request.session['basket_to_transfer_stock'] = session_data else: ansver['error'] ='1' ansver['mes'] = _('The object number %(cart_barcode)s is already present in the lists on the move.') % {'cart_barcode': cart_barcode} return JsonResponse(ansver) else: # если сессионная basket_to_transfer_stock пуста или её нет вообще session_data = [ cartridge.pk ] request.session['basket_to_transfer_stock'] = session_data ansver['error'] ='0' ansver['mes'] = _('Consumable material is successfully prepared for transfer') ansver['cart_name'] = str(cartridge.cart_itm_name) ansver['cart_num'] = str(cartridge.cart_number) ansver['pk'] = str(cartridge.pk) ansver['moved_list'] = str(session_data)[1:-1] return JsonResponse(ansver) else: cart_status = STATUS[cartridge.cart_status-1][1] ansver['error'] ='2' ansver['mes'] = _('This consumable is in the state \"%(cart_status)s\". 
Return to the warehouse is impossible.') % {'cart_status': cart_status} return JsonResponse(ansver) return JsonResponse(ansver) @permissions('cart') def rate(request): """установка оценки обслуживающей организации, по конкретной единице картриджа. """ ansver = dict() action = request.POST.getlist('action')[0] firm_id = request.POST.getlist('firm_id')[0] cart_id = request.POST.getlist('cart_id')[0] firm_id = str2int(firm_id) cart_id = str2int(cart_id) node = get_object_or_404(CartridgeItem, pk=cart_id) firm = get_object_or_404(FirmTonerRefill, pk=firm_id) # проверяем принадлежность перемещаемого РМ департаменту # пользователя. try: root_ou = request.user.departament des = root_ou.get_descendants() except: pass if not(node.departament in des): ansver['error'] = '1' ansver['msg'] = _('An object with number %(cart_num)s belong to a different organizational unit.') % {'cart_num': node.cart_number} if action == 'set_good': rating = firm.vote_plus rating += 1 firm.vote_plus = rating elif action == 'set_bad': rating = firm.vote_minus rating += 1 firm.vote_minus = rating else: ansver['error'] = '1' ansver['msg'] = _('Action not supported') return JsonResponse(ansver) firm.save() node.vote = True node.save() ansver['error'] = '0' ansver['msg'] = _('Your score is accepted.') return JsonResponse(ansver) @permissions('cart') def change_cart_number(request): """ """ ansver = dict() try: root_ou = request.user.departament des = root_ou.get_descendants() except: ansver['error'] ='1' ansver['mes'] = _('Error: 101. 
Not set organization utint.') return JsonResponse(ansver) cart_id = request.POST.get('cart_id') cart_id = str2int(cart_id) cart_number = request.POST.get('cart_number', '') cart_number = cart_number.strip() if not len(cart_number): ansver['error'] ='1' ansver['mes'] = _('The number must not be empty.') return JsonResponse(ansver) try: m1 = CartridgeItem.objects.get(pk=cart_id) except CartridgeItem.DoesNotExist: ansver['error'] ='1' ansver['mes'] = _('Not found.') return JsonResponse(ansver) cart_index = m1.pk if not((m1.departament in des) or (m1.departament == request.user.departament)): ansver['error'] ='1' ansver['mes'] = _('An object with number %(cart_num)s belong to a different organizational unit.') % {'cart_num': cart_number} return JsonResponse(ansver) try: m2 = CartridgeItem.objects.filter(Q(cart_number=cart_number) & (Q(departament__in=des) | Q(departament=request.user.departament))) except CartridgeItem.DoesNotExist: ansver['error'] ='1' ansver['mes'] = _('Not found.') return JsonResponse(ansver) if m2.count(): ansver['error'] ='1' ansver['mes'] = _('Number is already in use.') return JsonResponse(ansver) else: old_cart_number = m1.cart_number m1.cart_number = cart_number m1.save() sign_change_number.send(sender=None, cart_index=cart_index, old_number=old_cart_number, new_number=cart_number, request=request) ansver['error'] = '0' return JsonResponse(ansver) @permissions('cart') def clear_basket_session(request): """Очистка сессионной корзины (добавление РМ через сканер штрихкода). Поэлементное удаление элементов из сессионной корзины, или очистка её целиком. 
""" ansver = dict() selected = request.POST.getlist('selected[]', '') session_var = request.POST.get('session_var', '') select_all = request.POST.get('select_all', '') session_var = session_var.strip() selected = [str2int(elem) for elem in selected] if session_var == 'add_cartridges_full_in_stock': session_data = request.session.get(session_var) elif session_var == 'add_cartridges_empty_in_stock': session_data = request.session.get(session_var) else: ansver['error'] = '1' ansver['text'] = _('Session varible not use.') return JsonResponse(ansver) if select_all == '1': request.session[session_var] = None ansver['error'] = '0' ansver['text'] = '' return JsonResponse(ansver) tmp_session_data = list() inx = 0 for elem in session_data: if not(inx in selected): tmp_session_data.append(elem) inx += 1 request.session[session_var] = tmp_session_data ansver['error'] = '0' ansver['text'] = selected return JsonResponse(ansver) @permissions('dict') def linked_name_objects(request): """Выбор всех объектов РМ для заданного имени. """ ansver = dict() name_id = request.POST.get('name_id', 0) action = request.POST.get('action', 0) name_id = str2int(name_id) if action == 'name': name_obj = get_object_or_404(CartridgeItemName, pk=name_id) list_items = CartridgeItem.objects.filter(cart_itm_name=name_obj) ansver['text'] = render_to_string('index/linked_name_objects.html', context={'list_items': list_items}) elif action == 'type': name_obj = get_object_or_404(CartridgeType, pk=name_id) list_items = CartridgeItemName.objects.filter(cart_itm_type=name_obj) ansver['text'] = render_to_string('index/linked_types_objects.html', context={'list_items': list_items}) else: ansver['error'] = 1 ansver['text'] = _('Action not found') return JsonResponse(ansver) ansver['error'] = 0 return JsonResponse(ansver) @permissions('cart') def push_to_bufer(request): """ Изменить статус РМ с буферизирован или нет. 
""" ansver = dict() try: root_ou = request.user.departament des = root_ou.get_descendants() except: ansver['error'] ='1' ansver['mes'] = _('Error: 101. Not set organization utint.') return JsonResponse(ansver) cart_id = request.POST.get('cart_id', '') cart_id = str2int(cart_id) try: m1 = CartridgeItem.objects.get(pk=cart_id) except CartridgeItem.DoesNotExist: ansver['error'] ='1' ansver['mes'] = _('Not found.') return JsonResponse(ansver) if not((m1.departament in des) or (m1.departament == request.user.departament)): ansver['error'] ='1' ansver['mes'] = _('An object with number %(cart_num)s belong to a different organizational unit.') % {'cart_num': m1.cart_number} return JsonResponse(ansver) # переключаем статус с буферизован или нет cart_status = m1.bufer if cart_status: m1.bufer = False else: m1.bufer = True m1.save() ansver['error'] = '0' return JsonResponse(ansver) @permissions('cart') def docs_firm_suggests(request): """Динамическое формирование списка договоров обслуживания для заданной фирмы. """ ansver = dict() firm_id = request.POST.get('firm_id', 0) firm_id = str2int(firm_id) firm = get_object_or_404(FirmTonerRefill, pk=firm_id) m1 = SCDoc.objects.filter(firm=firm).filter(doc_type=2).filter(departament=request.user.departament) res = list() for item in m1: res.append([item.pk, item.title]) ansver['res'] = res return JsonResponse(ansver) @permissions('cart') def faulty_transmission(request): """Возвращение РМ на склад, с удалением записи в списке событий, связанной с установкой РМ пользователю. 
""" ansver = dict() select = request.POST.get('select', 0) try: m1 = CartridgeItem.objects.get(pk=select) except CartridgeItem.DoesNotExist: ansver['error'] = '1' ansver['text'] = _('Not found.') return JsonResponse(ansver) if not(attachment_departament(request, m1.departament)): ansver['error'] = '1' ansver['text'] = _('Not found.') return JsonResponse(ansver) with transaction.atomic(): # устанавливаем параметры РМ (полон и на складе) m1.cart_status = 1 m1.cart_date_change = timezone.now() old_dept = str(m1.departament) m1.departament = request.user.departament m1.save() # удаляем последнее во времени событие передачи в пользование transfer_event = Events.objects.filter(cart_index=m1.pk). \ filter(departament=request.user.departament.pk). \ filter(event_type='TR').order_by('-pk')[0] try: transfer_event.delete() except: ansver['error'] = '1' ansver['text'] = _('Not found event Transfer.') return JsonResponse(ansver) else: sign_faulty_transmission.send(sender=None, \ old_dept=old_dept, \ request=request, \ cart_number=m1.cart_number, cart_index=m1.pk, cart_type=m1.cart_itm_name ) ansver['error'] = '0' ansver['text'] = _('The %(cart_num)s is successfully returned to the warehouse.') % {'cart_num': m1.cart_number} return JsonResponse(ansver) @permissions('cart') def api_tr_to_other_stock(request): """Меняем склад хранения РМ """ ansver = dict() stock_id = request.POST.get('stock', '') moved = request.POST.get('moved', '') moved = moved.split(',') moved = [ str2int(i) for i in moved] stock_id = str2int(stock_id) try: stock = Storages.objects.get(pk=stock_id) except Storages.DoesNotExist: ansver['error'] = '1' ansver['text'] = _('Storage not found.') return JsonResponse(ansver) if not(attachment_departament(request, stock.departament)): ansver['error'] = '1' ansver['text'] = _('The warehouse is not owned by the department.') return JsonResponse(ansver) with transaction.atomic(): for item_id in moved: try: cart_obj = CartridgeItem.objects.get(pk=item_id) except 
CartridgeItem.DoesNotExist: continue if not(attachment_departament(request, cart_obj.departament)): continue cart_obj.stor = stock cart_obj.save() ansver['error'] = '0' ansver['text'] = _('Successfully moved.') return JsonResponse(ansver) @permissions('cart') def api_del_comment(request): """ """ ansver = dict() indexitm = request.POST.get('indexitm') pk = request.POST.get('pk') indexitm = str2int(indexitm) pk = str2int(pk) try: cartridge = CartridgeItem.objects.get(pk=pk) except CartridgeItem.DoesNotExist: ansver['error'] = '1' ansver['text'] = _('Error. Not found.') return JsonResponse(ansver) if not(attachment_departament(request, cartridge.departament)): ansver['error'] = '1' ansver['text'] = _('Error. Not departament.') return JsonResponse(ansver) load_json_list = cartridge.ext_comment if load_json_list: if isinstance(load_json_list, type('str')): load_json_list = json.loads(load_json_list) if isinstance(load_json_list, type([])): pass else: load_json_list = list() try: load_json_list[indexitm] except IndexError: ansver['error'] = '1' ansver['text'] = _('Error. 
Index error.') return JsonResponse(ansver) user_comment_id = load_json_list[indexitm].get('user_id') user_current_id = request.user.pk current_user_is_admin = request.user.is_admin if (user_comment_id == user_current_id) or current_user_is_admin: del load_json_list[indexitm] cartridge.ext_comment = str(json.dumps(load_json_list)) cartridge.save() ansver['error'] = '0' ansver['text'] = _('Success.') else: ansver['error'] = '1' ansver['text'] = _('Insufficient rights.') return JsonResponse(ansver) @permissions('cart') def api_change_number_refills(request): """Изменяем количество перезаправок картриджа """ ansver = dict() num = request.POST.get('num') pk = request.POST.get('pk') pk = str2int(pk) try: num = int(num) except ValueError: ansver['error'] = '1' return JsonResponse(ansver) if num < 0: ansver['error'] = '1' return JsonResponse(ansver) try: cartridge = CartridgeItem.objects.get(pk=pk) except CartridgeItem.DoesNotExist: ansver['error'] = '1' return JsonResponse(ansver) last_number_refills = cartridge.cart_number_refills if last_number_refills == num: pass else: with transaction.atomic(): cartridge.cart_number_refills = num cartridge.save() sign_change_number_refills.send(sender=None, old=last_number_refills, \ new=num, pk=pk, request=request, \ cart_number=cartridge.cart_number, cart_type=cartridge.cart_itm_name) ansver['error'] = '0' return JsonResponse(ansver)
gpl-2.0
pchaigno/grr
lib/flows/general/administrative.py
3
29149
#!/usr/bin/env python """Administrative flows for managing the clients state.""" import shlex import threading import time import urllib import logging # pylint: disable=unused-import from grr.gui import django_lib # pylint: enable=unused-import from grr.lib import access_control from grr.lib import aff4 from grr.lib import config_lib from grr.lib import data_store from grr.lib import email_alerts from grr.lib import flow from grr.lib import queues from grr.lib import rdfvalue from grr.lib import registry from grr.lib import rendering from grr.lib import stats from grr.lib import utils from grr.lib.aff4_objects import collections from grr.lib.aff4_objects import reports from grr.lib.rdfvalues import client as rdf_client from grr.lib.rdfvalues import flows as rdf_flows from grr.lib.rdfvalues import structs as rdf_structs from grr.proto import flows_pb2 class AdministrativeInit(registry.InitHook): """Initialize the Django environment.""" pre = ["StatsInit"] def RunOnce(self): stats.STATS.RegisterCounterMetric("grr_client_crashes") class ClientCrashEventListener(flow.EventListener): """EventListener with additional helper methods to save crash details.""" def _AppendCrashDetails(self, path, crash_details): collection = aff4.FACTORY.Create( path, "PackedVersionedCollection", mode="rw", token=self.token) collection.Add(crash_details) collection.Close(sync=False) def WriteAllCrashDetails(self, client_id, crash_details, flow_session_id=None, hunt_session_id=None): # Update last crash attribute of the client. client_obj = aff4.FACTORY.Create(client_id, "VFSGRRClient", token=self.token) client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details)) client_obj.Close(sync=False) # Duplicate the crash information in a number of places so we can find it # easily. 
self._AppendCrashDetails(client_id.Add("crashes"), crash_details) self._AppendCrashDetails(aff4.ROOT_URN.Add("crashes"), crash_details) if flow_session_id: aff4_flow = aff4.FACTORY.Open(flow_session_id, "GRRFlow", mode="rw", age=aff4.NEWEST_TIME, token=self.token) aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details)) aff4_flow.Close(sync=False) hunt_str, hunt_id, _ = flow_session_id.Split(3) if hunt_str == "hunts": hunt_session_id = aff4.ROOT_URN.Add("hunts").Add(hunt_id) if hunt_session_id != flow_session_id: self._AppendCrashDetails( hunt_session_id.Add("crashes"), crash_details) class GetClientStatsProcessResponseMixin(object): """Mixin defining ProcessReponse() that writes client stats to datastore.""" def ProcessResponse(self, client_id, response): """Actually processes the contents of the response.""" urn = client_id.Add("stats") with aff4.FACTORY.Create(urn, "ClientStats", token=self.token, mode="w") as stats_fd: # Only keep the average of all values that fall within one minute. 
stats_fd.AddAttribute(stats_fd.Schema.STATS, response.DownSample()) class GetClientStats(flow.GRRFlow, GetClientStatsProcessResponseMixin): """This flow retrieves information about the GRR client process.""" category = "/Administrative/" @flow.StateHandler(next_state=["StoreResults"]) def Start(self): self.CallClient("GetClientStats", next_state="StoreResults") @flow.StateHandler() def StoreResults(self, responses): """Stores the responses.""" if not responses.success: self.Error("Failed to retrieve client stats.") return for response in responses: self.ProcessResponse(self.client_id, response) class GetClientStatsAuto(flow.WellKnownFlow, GetClientStatsProcessResponseMixin): """This action pushes client stats to the server automatically.""" category = None well_known_session_id = rdfvalue.SessionID(flow_name="Stats", queue=queues.STATS) def ProcessMessage(self, message): """Processes a stats response from the client.""" client_stats = rdf_client.ClientStats(message.payload) self.ProcessResponse(message.source, client_stats) class DeleteGRRTempFilesArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.DeleteGRRTempFilesArgs class DeleteGRRTempFiles(flow.GRRFlow): """Delete all the GRR temp files in path. If path is a directory, look in the top level for filenames beginning with Client.tempfile_prefix, and delete them. If path is a regular file and starts with Client.tempfile_prefix, delete it. 
""" category = "/Administrative/" args_type = DeleteGRRTempFilesArgs @flow.StateHandler(next_state="Done") def Start(self): """Issue a request to delete tempfiles in directory.""" self.CallClient("DeleteGRRTempFiles", self.args.pathspec, next_state="Done") @flow.StateHandler() def Done(self, responses): if not responses.success: raise flow.FlowError(str(responses.status)) for response in responses: self.Log(response.data) class UninstallArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.UninstallArgs class Uninstall(flow.GRRFlow): """Removes the persistence mechanism which the client uses at boot. For Windows and OSX, this will disable the service, and then stop the service. For Linux this flow will fail as we haven't implemented it yet :) """ category = "/Administrative/" args_type = UninstallArgs @flow.StateHandler(next_state=["Kill"]) def Start(self): """Start the flow and determine OS support.""" client = aff4.FACTORY.Open(self.client_id, token=self.token) system = client.Get(client.Schema.SYSTEM) if system == "Darwin" or system == "Windows": self.CallClient("Uninstall", next_state="Kill") else: self.Log("Unsupported platform for Uninstall") @flow.StateHandler(next_state="Confirmation") def Kill(self, responses): """Call the kill function on the client.""" if not responses.success: self.Log("Failed to uninstall client.") elif self.args.kill: self.CallClient("Kill", next_state="Confirmation") @flow.StateHandler(next_state="End") def Confirmation(self, responses): """Confirmation of kill.""" if not responses.success: self.Log("Kill failed on the client.") class Kill(flow.GRRFlow): """Terminate a running client (does not disable, just kill).""" category = "/Administrative/" @flow.StateHandler(next_state=["Confirmation"]) def Start(self): """Call the kill function on the client.""" self.CallClient("Kill", next_state="Confirmation") @flow.StateHandler(next_state="End") def Confirmation(self, responses): """Confirmation of kill.""" if not responses.success: 
self.Log("Kill failed on the client.") class UpdateConfigurationArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.UpdateConfigurationArgs class UpdateConfiguration(flow.GRRFlow): """Update the configuration of the client. Note: This flow is somewhat dangerous, so we don't expose it in the UI. """ # Still accessible (e.g. via ajax but not visible in the UI.) category = None args_type = UpdateConfigurationArgs @flow.StateHandler(next_state=["Confirmation"]) def Start(self): """Call the UpdateConfiguration function on the client.""" self.CallClient("UpdateConfiguration", request=self.args.config, next_state="Confirmation") @flow.StateHandler(next_state="End") def Confirmation(self, responses): """Confirmation.""" if not responses.success: raise flow.FlowError("Failed to write config. Err: {0}".format( responses.status)) class ExecutePythonHackArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.ExecutePythonHackArgs class ExecutePythonHack(flow.GRRFlow): """Execute a signed python hack on a client.""" category = "/Administrative/" args_type = ExecutePythonHackArgs @flow.StateHandler(next_state=["Done"]) def Start(self): python_hack_root_urn = config_lib.CONFIG.Get("Config.python_hack_root") fd = aff4.FACTORY.Open(python_hack_root_urn.Add(self.args.hack_name), token=self.token) if not isinstance(fd, aff4.GRRSignedBlob): raise RuntimeError("Python hack %s not found." % self.args.hack_name) # TODO(user): This will break if someone wants to execute lots of Python. for python_blob in fd: self.CallClient("ExecutePython", python_code=python_blob, py_args=self.args.py_args, next_state="Done") @flow.StateHandler() def Done(self, responses): response = responses.First() if not responses.success: raise flow.FlowError("Execute Python hack failed: %s" % responses.status) if response: result = utils.SmartStr(response.return_val) # Send reply with full data, but only log the first 200 bytes. 
str_result = result[0:200] if len(result) >= 200: str_result += "...[truncated]" self.Log("Result: %s" % str_result) self.SendReply(rdfvalue.RDFBytes(utils.SmartStr(response.return_val))) class ExecuteCommandArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.ExecuteCommandArgs class ExecuteCommand(flow.GRRFlow): """Execute a predefined command on the client.""" args_type = ExecuteCommandArgs @flow.StateHandler(next_state=["Confirmation"]) def Start(self): """Call the execute function on the client.""" self.CallClient("ExecuteCommand", cmd=self.args.cmd, args=shlex.split(self.args.command_line), time_limit=self.args.time_limit, next_state="Confirmation") @flow.StateHandler(next_state="End") def Confirmation(self, responses): """Confirmation.""" if responses.success: response = responses.First() self.Log(("Execution of %s %s (return value %d, " "ran for %f seconds):"), response.request.cmd, " ".join(response.request.command_line), response.exit_status, # time_used is returned in microseconds. response.time_used / 1e6) try: # We don't want to overflow the log so we just save 100 bytes each. logout = response.stdout[:100] if len(response.stdout) > 100: logout += "..." logerr = response.stderr[:100] if len(response.stderr) > 100: logerr += "..." self.Log("Output: %s, %s", logout, logerr) except ValueError: # The received byte buffer does not convert to unicode. self.Log("Received output not convertible to unicode.") else: self.Log("Execute failed.") class Foreman(flow.WellKnownFlow): """The foreman assigns new flows to clients based on their type. Clients periodically call the foreman flow to ask for new flows that might be scheduled for them based on their types. This allows the server to schedule flows for entire classes of machines based on certain criteria. """ well_known_session_id = rdfvalue.SessionID(flow_name="Foreman") foreman_cache = None # How often we refresh the rule set from the data store. 
cache_refresh_time = 60 lock = threading.Lock() def ProcessMessage(self, message): """Run the foreman on the client.""" # Only accept authenticated messages if (message.auth_state != rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED): return now = time.time() # Maintain a cache of the foreman with self.lock: if (self.foreman_cache is None or now > self.foreman_cache.age + self.cache_refresh_time): self.foreman_cache = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token) self.foreman_cache.age = now if message.source: self.foreman_cache.AssignTasksToClient(message.source) class OnlineNotificationArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.OnlineNotificationArgs class OnlineNotification(flow.GRRFlow): """Notifies by email when a client comes online in GRR.""" category = "/Administrative/" behaviours = flow.GRRFlow.behaviours + "BASIC" template = """ <html><body><h1>GRR Client Online Notification.</h1> <p> Client %(client_id)s (%(hostname)s) just came online. Click <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine. <br />This notification was created by %(creator)s. </p> <p>Thanks,</p> <p>%(signature)s</p> </body></html>""" args_type = OnlineNotificationArgs @classmethod def GetDefaultArgs(cls, token=None): return cls.args_type(email="%s@%s" % ( token.username, config_lib.CONFIG.Get("Logging.domain"))) @flow.StateHandler(next_state="SendMail") def Start(self): """Starts processing.""" if self.args.email is None: self.args.email = self.token.username self.CallClient("Echo", data="Ping", next_state="SendMail") @flow.StateHandler() def SendMail(self, responses): """Sends a mail when the client has responded.""" if responses.success: client = aff4.FACTORY.Open(self.client_id, token=self.token) hostname = client.Get(client.Schema.HOSTNAME) url = urllib.urlencode((("c", self.client_id), ("main", "HostInformation"))) subject = "GRR Client on %s became available." 
% hostname email_alerts.SendEmail( self.args.email, "grr-noreply", subject, self.template % dict( client_id=self.client_id, admin_ui=config_lib.CONFIG["AdminUI.url"], hostname=hostname, urn=url, creator=self.token.username, signature=config_lib.CONFIG["Email.signature"]), is_html=True) else: flow.FlowError("Error while pinging client.") class UpdateClientArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.UpdateClientArgs class UpdateClient(flow.GRRFlow): """Updates the GRR client to a new version replacing the current client. This will execute the specified installer on the client and then run an Interrogate flow. The new installer needs to be loaded into the database, generally in /config/executables/<platform>/installers and must be signed using the exec signing key. Signing and upload of the file is done with config_updater. """ category = "/Administrative/" AUTHORIZED_LABELS = ["admin"] system_platform_mapping = { "Darwin": "darwin", "Linux": "linux", "Windows": "windows"} args_type = UpdateClientArgs @flow.StateHandler(next_state="Interrogate") def Start(self): """Start.""" blob_path = self.args.blob_path if not blob_path: # No explicit path was given, we guess a reasonable default here. 
client = aff4.FACTORY.Open(self.client_id, token=self.token) client_platform = client.Get(client.Schema.SYSTEM) if not client_platform: raise RuntimeError("Can't determine client platform, please specify.") blob_urn = "aff4:/config/executables/%s/agentupdates" % ( self.system_platform_mapping[client_platform]) blob_dir = aff4.FACTORY.Open(blob_urn, token=self.token) updates = sorted(list(blob_dir.ListChildren())) if not updates: raise RuntimeError( "No matching updates found, please specify one manually.") blob_path = updates[-1] if not ("windows" in utils.SmartStr(self.args.blob_path) or "darwin" in utils.SmartStr(self.args.blob_path) or "linux" in utils.SmartStr(self.args.blob_path)): raise RuntimeError("Update not supported for this urn, use aff4:/config" "/executables/<platform>/agentupdates/<version>") aff4_blobs = aff4.FACTORY.Open(blob_path, token=self.token) if not isinstance(aff4_blobs, aff4.GRRSignedBlob): raise RuntimeError("%s is not a valid GRRSignedBlob." % blob_path) offset = 0 write_path = "%d_%s" % (time.time(), aff4_blobs.urn.Basename()) for i, blob in enumerate(aff4_blobs): self.CallClient( "UpdateAgent", executable=blob, more_data=i < aff4_blobs.chunks - 1, offset=offset, write_path=write_path, next_state="Interrogate", use_client_env=False) offset += len(blob.data) @flow.StateHandler(next_state="Done") def Interrogate(self, responses): if not responses.success: self.Log("Installer reported an error: %s" % responses.status) else: self.Log("Installer completed.") self.CallFlow("Interrogate", next_state="Done") @flow.StateHandler() def Done(self): client = aff4.FACTORY.Open(self.client_id, token=self.token) info = client.Get(client.Schema.CLIENT_INFO) self.Log("Client update completed, new version: %s" % info.client_version) class NannyMessageHandler(ClientCrashEventListener): """A listener for nanny messages.""" EVENTS = ["NannyMessage"] well_known_session_id = rdfvalue.SessionID(flow_name="NannyMessage") mail_template = """ <html><body><h1>GRR 
nanny message received.</h1> The nanny for client %(client_id)s (%(hostname)s) just sent a message:<br> <br> %(message)s <br> Click <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine. <p>%(signature)s</p> </body></html>""" subject = "GRR nanny message received from %s." logline = "Nanny for client %s sent: %s" @flow.EventHandler(allow_client_access=True) def ProcessMessage(self, message=None, event=None): """Processes this event.""" _ = event client_id = message.source message = message.payload.string logging.info(self.logline, client_id, message) # Write crash data to AFF4. client = aff4.FACTORY.Open(client_id, token=self.token) client_info = client.Get(client.Schema.CLIENT_INFO) crash_details = rdf_client.ClientCrash( client_id=client_id, client_info=client_info, crash_message=message, timestamp=long(time.time() * 1e6), crash_type=self.well_known_session_id) self.WriteAllCrashDetails(client_id, crash_details) # Also send email. if config_lib.CONFIG["Monitoring.alert_email"]: client = aff4.FACTORY.Open(client_id, token=self.token) hostname = client.Get(client.Schema.HOSTNAME) url = urllib.urlencode((("c", client_id), ("main", "HostInformation"))) email_alerts.SendEmail( config_lib.CONFIG["Monitoring.alert_email"], "GRR server", self.subject % client_id, self.mail_template % dict( client_id=client_id, admin_ui=config_lib.CONFIG["AdminUI.url"], hostname=hostname, signature=config_lib.CONFIG["Email.signature"], urn=url, message=message), is_html=True) class ClientAlertHandler(NannyMessageHandler): """A listener for client messages.""" EVENTS = ["ClientAlert"] well_known_session_id = rdfvalue.SessionID(flow_name="ClientAlert") mail_template = """ <html><body><h1>GRR client message received.</h1> The client %(client_id)s (%(hostname)s) just sent a message:<br> <br> %(message)s <br> Click <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine. <p>%(signature)s</p> </body></html>""" subject = "GRR client message received from %s." 
logline = "Client message from %s: %s" class ClientCrashHandler(ClientCrashEventListener): """A listener for client crashes.""" EVENTS = ["ClientCrash"] well_known_session_id = rdfvalue.SessionID(flow_name="CrashHandler") mail_template = """ <html><body><h1>GRR client crash report.</h1> Client %(client_id)s (%(hostname)s) just crashed while executing an action. Click <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine. <p>Thanks,</p> <p>%(signature)s</p> <p> P.S. The state of the failing flow was: %(state)s %(nanny_msg)s </body></html>""" @flow.EventHandler(allow_client_access=True) def ProcessMessage(self, message=None, event=None): """Processes this event.""" _ = event client_id = message.source nanny_msg = "" flow_obj = aff4.FACTORY.Open(message.session_id, token=self.token) # Log. logging.info("Client crash reported, client %s.", client_id) # Export. stats.STATS.IncrementCounter("grr_client_crashes") # Write crash data to AFF4. client = aff4.FACTORY.Open(client_id, token=self.token) client_info = client.Get(client.Schema.CLIENT_INFO) status = rdf_flows.GrrStatus(message.payload) crash_details = rdf_client.ClientCrash( client_id=client_id, session_id=message.session_id, client_info=client_info, crash_message=status.error_message, timestamp=rdfvalue.RDFDatetime().Now(), crash_type=self.well_known_session_id) self.WriteAllCrashDetails(client_id, crash_details, flow_session_id=message.session_id) # Also send email. if config_lib.CONFIG["Monitoring.alert_email"]: if status.nanny_status: nanny_msg = "Nanny status: %s" % status.nanny_status client = aff4.FACTORY.Open(client_id, token=self.token) hostname = client.Get(client.Schema.HOSTNAME) url = urllib.urlencode((("c", client_id), ("main", "HostInformation"))) renderer = rendering.FindRendererForObject(flow_obj.state) email_alerts.SendEmail( config_lib.CONFIG["Monitoring.alert_email"], "GRR server", "Client %s reported a crash." 
% client_id, self.mail_template % dict( client_id=client_id, admin_ui=config_lib.CONFIG["AdminUI.url"], hostname=hostname, state=renderer.RawHTML(), urn=url, nanny_msg=nanny_msg, signature=config_lib.CONFIG["Email.signature"] ), is_html=True) if nanny_msg: msg = "Client crashed, " + nanny_msg else: msg = "Client crashed." # Now terminate the flow. flow.GRRFlow.TerminateFlow(message.session_id, reason=msg, token=self.token, force=True) class ClientStartupHandler(flow.EventListener): well_known_session_id = rdfvalue.SessionID(flow_name="Startup") @flow.EventHandler(allow_client_access=True, auth_required=False) def ProcessMessage(self, message=None, event=None): """Handle a startup event.""" _ = event # We accept unauthenticated messages so there are no errors but we don't # store the results. if (message.auth_state != rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED): return client_id = message.source client = aff4.FACTORY.Create(client_id, "VFSGRRClient", mode="rw", token=self.token) old_info = client.Get(client.Schema.CLIENT_INFO) old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0) startup_info = rdf_client.StartupInfo(message.payload) info = startup_info.client_info # Only write to the datastore if we have new information. new_data = (info.client_name, info.client_version, info.revision, info.build_time, info.client_description) old_data = (old_info.client_name, old_info.client_version, old_info.revision, old_info.build_time, old_info.client_description) if new_data != old_data: client.Set(client.Schema.CLIENT_INFO(info)) client.AddLabels(*info.labels, owner="GRR") # Allow for some drift in the boot times (5 minutes). 
if abs(int(old_boot) - int(startup_info.boot_time)) > 300 * 1e6: client.Set(client.Schema.LAST_BOOT_TIME(startup_info.boot_time)) client.Close() flow.Events.PublishEventInline("ClientStartup", message, token=self.token) class IgnoreResponses(flow.WellKnownFlow): """This flow exists so other well known flows can delegate their responses.""" category = None well_known_session_id = rdfvalue.SessionID(flow_name="DevNull") def ProcessMessage(self, message): pass class KeepAliveArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.KeepAliveArgs class KeepAlive(flow.GRRFlow): """Requests that the clients stays alive for a period of time.""" # We already want to run this flow while waiting for a client approval. # Note that this can potentially be abused to launch a DDOS attack against # the frontend server(s) by putting all clients into fastpoll mode. The load # of idle polling messages is not that high though and this can only be done # by users that have a GRR account already so the risk is acceptable. 
ACL_ENFORCED = False category = "/Administrative/" behaviours = flow.GRRFlow.behaviours + "BASIC" sleep_time = 60 args_type = KeepAliveArgs @flow.StateHandler(next_state="SendMessage") def Start(self): self.state.Register("end_time", self.args.duration.Expiry()) self.CallState(next_state="SendMessage") @flow.StateHandler(next_state="Sleep") def SendMessage(self, responses): if not responses.success: self.Log(responses.status.error_message) raise flow.FlowError(responses.status.error_message) self.CallClient("Echo", data="Wake up!", next_state="Sleep") @flow.StateHandler(next_state="SendMessage") def Sleep(self, responses): if not responses.success: self.Log(responses.status.error_message) raise flow.FlowError(responses.status.error_message) if rdfvalue.RDFDatetime().Now() < self.state.end_time - self.sleep_time: start_time = rdfvalue.RDFDatetime().Now() + self.sleep_time self.CallState(next_state="SendMessage", start_time=start_time) class TerminateFlowArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.TerminateFlowArgs class TerminateFlow(flow.GRRFlow): """Terminate a flow with a given URN.""" # This flow can run on any client without ACL enforcement (an SUID flow). ACL_ENFORCED = False args_type = TerminateFlowArgs @flow.StateHandler() def Start(self): """Terminate a flow. User has to have access to the flow.""" # We have to create special token here, because within the flow # token has supervisor access. check_token = access_control.ACLToken(username=self.token.username, reason=self.token.reason) # If we can read the flow, we're allowed to terminate it. 
data_store.DB.security_manager.CheckDataStoreAccess( check_token, [self.args.flow_urn], "r") flow.GRRFlow.TerminateFlow(self.args.flow_urn, reason=self.args.reason, token=self.token, force=True) class LaunchBinaryArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.LaunchBinaryArgs class LaunchBinary(flow.GRRFlow): """Launch a signed binary on a client.""" category = "/Administrative/" AUTHORIZED_LABELS = ["admin"] args_type = LaunchBinaryArgs @flow.StateHandler(next_state=["End"]) def Start(self): fd = aff4.FACTORY.Open(self.args.binary, token=self.token) if not isinstance(fd, collections.GRRSignedBlob): raise RuntimeError("Executable binary %s not found." % self.args.binary) offset = 0 write_path = "%d" % time.time() for i, blob in enumerate(fd): self.CallClient( "ExecuteBinaryCommand", executable=blob, more_data=i < fd.chunks - 1, args=shlex.split(self.args.command_line), offset=offset, write_path=write_path, next_state="End") offset += len(blob.data) def _TruncateResult(self, data): if len(data) > 2000: result = data[:2000] + "... [truncated]" else: result = data return result @flow.StateHandler() def End(self, responses): if not responses.success: raise IOError(responses.status) response = responses.First() if response: self.Log("Stdout: %s" % self._TruncateResult(response.stdout)) self.Log("Stderr: %s" % self._TruncateResult(response.stderr)) self.SendReply(response) class RunReportFlowArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.RunReportFlowArgs class RunReport(flow.GRRGlobalFlow): """Run a report and send the result via email.""" category = "/Reporting/" args_type = RunReportFlowArgs behaviours = flow.GRRGlobalFlow.behaviours + "BASIC" ACL_ENFORCED = False # Only admins are allows to run reports. 
AUTHORIZED_LABELS = ["admin"] @flow.StateHandler(next_state="RunReport") def Start(self): if self.state.args.report_name not in reports.Report.classes: raise flow.FlowError("No such report %s" % self.state.args.report_name) else: self.CallState(next_state="RunReport") @flow.StateHandler(next_state="EmailReport") def RunReport(self): """Run the report.""" report_cls = reports.Report.GetPlugin(self.state.args.report_name) report_obj = report_cls(token=self.token) report_obj.Run() report_obj.MailReport(self.state.args.email)
apache-2.0
tiagochiavericosta/edx-platform
cms/djangoapps/contentstore/management/commands/git_export.py
164
2816
""" This command exports a course from CMS to a git repository. It takes as arguments the course id to export (i.e MITx/999/2020 ) and the repository to commit too. It takes username as an option for identifying the commit, as well as a directory path to place the git repository. By default it will use settings.GIT_REPO_EXPORT_DIR/repo_name as the cloned directory. It is branch aware, but will reset all local changes to the repository before attempting to export the XML, add, and commit changes if any have taken place. This functionality is also available as an export view in studio if the giturl attribute is set and the FEATURE['ENABLE_EXPORT_GIT'] is set. """ import logging from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.utils.translation import ugettext as _ import contentstore.git_export_utils as git_export_utils from opaque_keys.edx.locations import SlashSeparatedCourseKey from opaque_keys import InvalidKeyError from contentstore.git_export_utils import GitExportError from opaque_keys.edx.keys import CourseKey log = logging.getLogger(__name__) class Command(BaseCommand): """ Take a course from studio and export it to a git repository. """ option_list = BaseCommand.option_list + ( make_option('--username', '-u', dest='user', help=('Specify a username from LMS/Studio to be used ' 'as the commit author.')), make_option('--repo_dir', '-r', dest='repo', help='Specify existing git repo directory.'), ) help = _('Take the specified course and attempt to ' 'export it to a git repository\n. Course directory ' 'must already be a git repository. 
Usage: ' ' git_export <course_loc> <git_url>') def handle(self, *args, **options): """ Checks arguments and runs export function if they are good """ if len(args) != 2: raise CommandError('This script requires exactly two arguments: ' 'course_loc and git_url') # Rethrow GitExportError as CommandError for SystemExit try: course_key = CourseKey.from_string(args[0]) except InvalidKeyError: try: course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0]) except InvalidKeyError: raise CommandError(unicode(GitExportError.BAD_COURSE)) try: git_export_utils.export_to_git( course_key, args[1], options.get('user', ''), options.get('rdir', None) ) except git_export_utils.GitExportError as ex: raise CommandError(unicode(ex.message))
agpl-3.0
kayak/fireant
fireant/tests/database/test_base_database.py
2
2218
from unittest import TestCase from unittest.mock import ( Mock, patch, ) from pypika import Field from fireant.database import Database from fireant.middleware.decorators import connection_middleware @connection_middleware def test_fetch(database, query, **kwargs): return kwargs.get('connection') def test_connect(): mock_connection = Mock() mock_connection.__enter__ = Mock() mock_connection.__exit__ = Mock() return mock_connection class TestBaseDatabase(TestCase): def test_database_api(self): db = Database() with self.assertRaises(NotImplementedError): db.connect() with self.assertRaises(NotImplementedError): db.trunc_date(Field('abc'), 'day') def test_to_char(self): db = Database() to_char = db.to_char(Field('field')) self.assertEqual(str(to_char), 'CAST("field" AS VARCHAR)') def test_no_custom_middlewares_specified_still_gives_connection_middleware(self): db = Database() self.assertEqual(1, len(db.middlewares)) self.assertIs(db.middlewares[0], connection_middleware) @patch.object(Database, 'fetch') @patch.object(Database, 'connect') def test_database_reuse_passed_connection(self, mock_connect, mock_fetch): db = Database() mock_connect.side_effect = test_connect mock_fetch.side_effect = test_fetch with db.connect() as connection: connection_1 = db.fetch(db, 'SELECT a from abc', connection=connection) connection_2 = db.fetch(db, 'SELECT b from def', connection=connection) self.assertEqual(1, mock_connect.call_count) self.assertEqual(connection_1, connection_2) @patch.object(Database, 'fetch') @patch.object(Database, 'connect') def test_database_opens_new_connection(self, mock_connect, mock_fetch): db = Database() mock_connect.side_effect = test_connect mock_fetch.side_effect = test_fetch connection_1 = db.fetch(db, 'SELECT a from abc') connection_2 = db.fetch(db, 'SELECT b from def') self.assertEqual(2, mock_connect.call_count) self.assertNotEqual(connection_1, connection_2)
apache-2.0
cactusbin/nyt
matplotlib/examples/pylab_examples/line_collection2.py
9
1327
from pylab import * from matplotlib.collections import LineCollection # In order to efficiently plot many lines in a single set of axes, # Matplotlib has the ability to add the lines all at once. Here is a # simple example showing how it is done. N = 50 x = arange(N) # Here are many sets of y to plot vs x ys = [x+i for i in x] # We need to set the plot limits, they will not autoscale ax = axes() ax.set_xlim((amin(x),amax(x))) ax.set_ylim((amin(amin(ys)),amax(amax(ys)))) # colors is sequence of rgba tuples # linestyle is a string or dash tuple. Legal string values are # solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq) # where onoffseq is an even length tuple of on and off ink in points. # If linestyle is omitted, 'solid' is used # See matplotlib.collections.LineCollection for more information line_segments = LineCollection([list(zip(x,y)) for y in ys], # Make a sequence of x,y pairs linewidths = (0.5,1,1.5,2), linestyles = 'solid') line_segments.set_array(x) ax.add_collection(line_segments) fig = gcf() axcb = fig.colorbar(line_segments) axcb.set_label('Line Number') ax.set_title('Line Collection with mapped colors') sci(line_segments) # This allows interactive changing of the colormap. show()
unlicense
S-Bahrasemani/hhana
mva/plotting/scatter.py
5
13001
def draw_scatter(fields, category, region, output_name,
                 backgrounds, signals=None, data=None,
                 signal_scale=1., signal_colors=cm.spring,
                 classifier=None, cuts=None, unblind=False):
    """Draw 2D scatter plots for every pair of variables in ``fields``.

    One panel is drawn for the backgrounds, plus optional panels for
    data and for the signals.  When ``classifier`` is given, its score
    is added as an extra field and always placed on the x axis; data
    above the score midpoint is hidden unless ``unblind`` is True.
    Plots are written to PLOTS_DIR as PNG files.
    """
    nplots = 1
    figheight = 6.
    figwidth = 6.
    background_arrays = []
    background_clf_arrays = []
    for background in backgrounds:
        background_arrays.append(
            background.merged_records(
                category, region,
                fields=fields,
                cuts=cuts))
        if classifier is not None:
            background_clf_arrays.append(
                background.scores(
                    classifier, category, region,
                    cuts=cuts,
                    systematics=False)['NOMINAL'][0])
    if data is not None:
        nplots += 1
        figwidth += 6.
        data_array = data.merged_records(
            category, region,
            fields=fields,
            cuts=cuts)
        if classifier is not None:
            data_clf_array = data.scores(
                classifier, category, region, cuts=cuts)[0]
    if signals is not None:
        nplots += 1
        figwidth += 6.
        if data is not None:
            signal_index = 3
        else:
            signal_index = 2
        signal_arrays = []
        signal_clf_arrays = []
        for signal in signals:
            signal_arrays.append(
                signal.merged_records(
                    category, region,
                    fields=fields,
                    cuts=cuts))
            if classifier is not None:
                signal_clf_arrays.append(
                    signal.scores(
                        classifier, category, region,
                        cuts=cuts,
                        systematics=False)['NOMINAL'][0])
    if classifier is not None:
        fields = fields + [classifier]
    all_pairs = list(itertools.combinations(fields, 2))
    for x, y in all_pairs:
        # always place the classifier (a non-string field) on the x axis
        if not isinstance(y, basestring):
            x, y = y, x
        with_classifier = not isinstance(x, basestring)
        plt.figure(figsize=(figwidth, figheight), dpi=200)
        axes = []
        ax_bkg = plt.subplot(1, nplots, 1)
        axes.append(ax_bkg)
        # BUG FIX: y is always a plain variable, so yscale is needed for
        # every pair.  It was previously defined only when the x axis was
        # not the classifier, raising NameError for classifier pairs.
        yscale = VARIABLES[y].get('scale', 1.)
        if not with_classifier:
            xscale = VARIABLES[x].get('scale', 1.)
        xmin, xmax = float('inf'), float('-inf')
        ymin, ymax = float('inf'), float('-inf')
        for i, (array, background) in enumerate(zip(background_arrays,
                                                    backgrounds)):
            if with_classifier:
                x_array = background_clf_arrays[i]
            else:
                x_array = array[x] * xscale
            y_array = array[y] * yscale
            # update max and min bounds
            lxmin, lxmax = x_array.min(), x_array.max()
            lymin, lymax = y_array.min(), y_array.max()
            if lxmin < xmin:
                xmin = lxmin
            if lxmax > xmax:
                xmax = lxmax
            if lymin < ymin:
                ymin = lymin
            if lymax > ymax:
                ymax = lymax
            weight = array['weight']
            ax_bkg.scatter(
                x_array, y_array,
                c=background.hist_decor['color'],
                label=background.label,
                s=weight * 10,
                #edgecolors='',
                linewidths=1,
                marker='o',
                alpha=0.75)
        if data is not None:
            data_ax = plt.subplot(1, nplots, 2)
            axes.append(data_ax)
            if with_classifier:
                x_array = data_clf_array
            else:
                x_array = data_array[x] * xscale
            y_array = data_array[y] * yscale
            weight = data_array['weight']
            # if blinded don't show above the midpoint of the BDT score
            if with_classifier and not unblind:
                midpoint = (x_array.max() + x_array.min()) / 2.
                blind_mask = data_clf_array < midpoint
                x_array = x_array[blind_mask]
                y_array = y_array[blind_mask]
                # BUG FIX: the weights must be masked along with x and y,
                # otherwise scatter() receives a size array of a different
                # length than the coordinates.
                weight = weight[blind_mask]
                data_ax.text(0.9, 0.2, 'BLINDED',
                             verticalalignment='center',
                             horizontalalignment='right',
                             transform=data_ax.transAxes,
                             fontsize=20)
            # update max and min bounds
            lxmin, lxmax = x_array.min(), x_array.max()
            lymin, lymax = y_array.min(), y_array.max()
            if lxmin < xmin:
                xmin = lxmin
            if lxmax > xmax:
                xmax = lxmax
            if lymin < ymin:
                ymin = lymin
            if lymax > ymax:
                ymax = lymax
            data_ax.scatter(
                x_array, y_array,
                c='black',
                label=data.label,
                s=weight * 10,
                #edgecolors='',
                linewidths=0,
                marker='.')
        # BUG FIX: this previously tested `signal` (the loop variable of
        # the signals loop above), which is undefined when signals is None.
        if signals is not None:
            sig_ax = plt.subplot(1, nplots, signal_index)
            axes.append(sig_ax)
            for i, (array, signal) in enumerate(zip(signal_arrays,
                                                    signals)):
                if with_classifier:
                    x_array = signal_clf_arrays[i]
                else:
                    x_array = array[x] * xscale
                y_array = array[y] * yscale
                # update max and min bounds
                lxmin, lxmax = x_array.min(), x_array.max()
                lymin, lymax = y_array.min(), y_array.max()
                if lxmin < xmin:
                    xmin = lxmin
                if lxmax > xmax:
                    xmax = lxmax
                if lymin < ymin:
                    ymin = lymin
                if lymax > ymax:
                    ymax = lymax
                color = signal_colors((i + 1) / float(len(signals) + 1))
                weight = array['weight']
                sig_ax.scatter(
                    x_array, y_array,
                    c=color,
                    label=signal.label,
                    s=weight * 10 * signal_scale,
                    #edgecolors='',
                    linewidths=0,
                    marker='o',
                    alpha=0.75)
        xwidth = xmax - xmin
        ywidth = ymax - ymin
        xpad = xwidth * .1
        ypad = ywidth * .1
        if with_classifier:
            x_name = "BDT Score"
            x_filename = "bdt_score"
            x_units = None
        else:
            x_name = VARIABLES[x]['title']
            x_filename = VARIABLES[x]['filename']
            x_units = VARIABLES[x].get('units', None)
        y_name = VARIABLES[y]['title']
        y_filename = VARIABLES[y]['filename']
        y_units = VARIABLES[y].get('units', None)
        for ax in axes:
            ax.set_xlim(xmin - xpad, xmax + xpad)
            ax.set_ylim(ymin - ypad, ymax + ypad)
            ax.legend(loc='upper right')
            if x_units is not None:
                ax.set_xlabel('%s [%s]' % (x_name, x_units))
            else:
                ax.set_xlabel(x_name)
            if y_units is not None:
                ax.set_ylabel('%s [%s]' % (y_name, y_units))
            else:
                ax.set_ylabel(y_name)
        plt.suptitle(category.label)
        plt.savefig(
            os.path.join(PLOTS_DIR, 'scatter_%s_%s_%s%s.png') % (
                category.name, x_filename, y_filename, output_name),
            bbox_inches='tight')


"""
Romain Madar:

Display the 1D histogram of (x_i - <x>)(y_i - <y>) over the events {i}.
The mean of this distribution will be the "usual correlation" but this
plot allows to look at the tails and asymmetry, for data and MC.
"""


def get_2d_field_hist(var):
    """Return an empty 2D histogram template for ``var`` vs BDT score.

    The x axis covers the variable's configured range; the y axis covers
    the normalized score range [-1, 1].
    """
    var_info = VARIABLES[var]
    # NOTE: renamed from min/max to avoid shadowing the builtins; the
    # previously-bound (and unused) 'bins' lookup has been dropped.
    lo, hi = var_info['range']
    return Hist2D(100, lo, hi, 100, -1, 1)


def draw_2d_hist(classifier, category, region, backgrounds,
                 signals=None, data=None, cuts=None, y=MMC_MASS,
                 output_suffix=''):
    """Fill and write 2D (y vs normalized BDT score) histograms.

    One histogram per sample and systematic is written to a ROOT file
    named after the category.  Scores are divided by the largest
    absolute score observed so they fall within [-1, 1].
    """
    fields = [y]
    background_arrays = []
    background_clf_arrays = []
    for background in backgrounds:
        sys_mass = {}
        for systematic in iter_systematics(True):
            sys_mass[systematic] = (
                background.merged_records(
                    category, region,
                    fields=fields,
                    cuts=cuts,
                    systematic=systematic))
        background_arrays.append(sys_mass)
        background_clf_arrays.append(
            background.scores(
                classifier, category, region,
                cuts=cuts,
                systematics=True))
    # BUG FIX: always bind these so the concatenation below cannot raise
    # NameError when signals is None.
    signal_arrays = []
    signal_clf_arrays = []
    if signals is not None:
        for signal in signals:
            sys_mass = {}
            for systematic in iter_systematics(True):
                sys_mass[systematic] = (
                    signal.merged_records(
                        category, region,
                        fields=fields,
                        cuts=cuts,
                        systematic=systematic))
            signal_arrays.append(sys_mass)
            signal_clf_arrays.append(
                signal.scores(
                    classifier, category, region,
                    cuts=cuts,
                    systematics=True))
    xmin, xmax = float('inf'), float('-inf')
    if data is not None:
        data_array = data.merged_records(
            category, region,
            fields=fields,
            cuts=cuts)
        data_clf_array = data.scores(
            classifier, category, region, cuts=cuts)[0]
        lxmin, lxmax = data_clf_array.min(), data_clf_array.max()
        if lxmin < xmin:
            xmin = lxmin
        if lxmax > xmax:
            xmax = lxmax
    for array_dict in background_clf_arrays + signal_clf_arrays:
        # renamed loop variables to avoid shadowing the sys module
        for sys_name, (scores, _) in array_dict.items():
            lxmin, lxmax = scores.min(), scores.max()
            if lxmin < xmin:
                xmin = lxmin
            if lxmax > xmax:
                xmax = lxmax
    yscale = VARIABLES[y].get('scale', 1.)
    if cuts:
        output_suffix += '_' + cuts.safe()
    output_name = "histos_2d_" + category.name + output_suffix + ".root"
    hist_template = get_2d_field_hist(y)
    # scale BDT scores such that they are between -1 and 1
    xscale = max(abs(xmax), abs(xmin))
    with root_open(output_name, 'recreate') as f:
        for background, array_dict, clf_dict in zip(
                backgrounds, background_arrays, background_clf_arrays):
            for systematic in iter_systematics(True):
                x_array = clf_dict[systematic][0] / xscale
                y_array = array_dict[systematic][y] * yscale
                weight = array_dict[systematic]['weight']
                hist = hist_template.Clone(
                    name=background.name +
                        ('_%s' % systematic_name(systematic)))
                hist.fill_array(np.c_[y_array, x_array], weights=weight)
                hist.Write()
        # BUG FIX: previously tested `signal` (an undefined loop variable
        # when signals is None) instead of `signals`.
        if signals is not None:
            for signal, array_dict, clf_dict in zip(
                    signals, signal_arrays, signal_clf_arrays):
                for systematic in iter_systematics(True):
                    x_array = clf_dict[systematic][0] / xscale
                    y_array = array_dict[systematic][y] * yscale
                    weight = array_dict[systematic]['weight']
                    hist = hist_template.Clone(
                        name=signal.name +
                            ('_%s' % systematic_name(systematic)))
                    hist.fill_array(np.c_[y_array, x_array], weights=weight)
                    hist.Write()
        if data is not None:
            x_array = data_clf_array / xscale
            y_array = data_array[y] * yscale
            weight = data_array['weight']
            hist = hist_template.Clone(name=data.name)
            hist.fill_array(np.c_[y_array, x_array], weights=weight)
            hist.Write()
gpl-3.0
hassoon3/odoo
addons/account/wizard/account_report_aged_partner_balance.py
378
4012
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time
from datetime import datetime
from dateutil.relativedelta import relativedelta

from openerp.osv import fields, osv
from openerp.tools.translate import _


class account_aged_trial_balance(osv.osv_memory):
    """Wizard that configures and launches the Aged Partner Balance report.

    The wizard splits the analysis window into five buckets of
    ``period_length`` days each, going either into the past or into the
    future from the chosen start date.
    """
    _inherit = 'account.common.partner.report'
    _name = 'account.aged.trial.balance'
    _description = 'Account Aged Trial balance Report'

    _columns = {
        'period_length': fields.integer('Period Length (days)', required=True),
        'direction_selection': fields.selection([('past', 'Past'),
                                                 ('future', 'Future')],
                                                'Analysis Direction', required=True),
        'journal_ids': fields.many2many('account.journal',
                                        'account_aged_trial_balance_journal_rel',
                                        'account_id', 'journal_id',
                                        'Journals', required=True),
    }
    _defaults = {
        'period_length': 30,
        'date_from': lambda *a: time.strftime('%Y-%m-%d'),
        'direction_selection': 'past',
    }

    def _print_report(self, cr, uid, ids, data, context=None):
        """Build the five ageing buckets and return the report action."""
        res = {}
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form'].update(
            self.read(cr, uid, ids,
                      ['period_length', 'direction_selection'])[0])

        # Validate the wizard inputs before doing any date arithmetic.
        period_length = data['form']['period_length']
        if period_length <= 0:
            raise osv.except_osv(_('User Error!'), _('You must set a period length greater than 0.'))
        if not data['form']['date_from']:
            raise osv.except_osv(_('User Error!'), _('You must set a start date.'))

        start = datetime.strptime(data['form']['date_from'], "%Y-%m-%d")

        if data['form']['direction_selection'] == 'past':
            # Walk backwards: bucket '4' is the most recent period and
            # bucket '0' is open-ended ("+N days" and older).
            for i in range(5)[::-1]:
                stop = start - relativedelta(days=period_length)
                res[str(i)] = {
                    'name': (i != 0 and (str((5 - (i + 1)) * period_length) + '-' + str((5 - i) * period_length)) or ('+' + str(4 * period_length))),
                    'stop': start.strftime('%Y-%m-%d'),
                    'start': (i != 0 and stop.strftime('%Y-%m-%d') or False),
                }
                start = stop - relativedelta(days=1)
        else:
            # Walk forwards: the last bucket is open-ended.
            for i in range(5):
                stop = start + relativedelta(days=period_length)
                res[str(5 - (i + 1))] = {
                    'name': (i != 4 and str(i * period_length) + '-' + str((i + 1) * period_length) or ('+' + str(4 * period_length))),
                    'start': start.strftime('%Y-%m-%d'),
                    'stop': (i != 4 and stop.strftime('%Y-%m-%d') or False),
                }
                start = stop + relativedelta(days=1)

        data['form'].update(res)
        if data.get('form', False):
            data['ids'] = [data['form'].get('chart_account_id', False)]
        return self.pool['report'].get_action(
            cr, uid, [], 'account.report_agedpartnerbalance',
            data=data, context=context)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
JiscPER/jper
OAUtils/src/utils/exception/handler.py
2
2160
'''
Created on 26 Oct 2015

@author: Ruben.Alonso
@Description: Module created to manage all different errors and exceptions.
'''
import utils.logger.handler as LH


class Error(Exception):
    """Base class for exceptions in this module."""
    pass


class _LoggedError(Error):
    """Shared implementation for the module's concrete exceptions.

    The five concrete classes previously duplicated an identical
    ``__init__`` body and never called ``super().__init__``, so
    ``str(exc)`` and ``exc.args`` were empty.  This base logs the error
    once, stores the attributes and forwards the message to
    ``Exception.__init__`` so the exception renders correctly.

    Attributes:
        message -- explanation of the error
        error   -- actual error information
    """

    # Per-class default used when no explicit message is given;
    # overridden by every subclass.
    default_message = "Generic exception"

    def __init__(self, message=None, error=""):
        if message is None:
            message = self.default_message
        # Every exception is logged at creation time, as before.
        LH.logger.error(str(message) + ". " + str(error))
        super(_LoggedError, self).__init__(message)
        self.error = error
        self.message = message


class InputError(_LoggedError):
    """Exception raised for errors in the input."""
    default_message = "Incorrect input exception"


class IncorrectFormatError(_LoggedError):
    """Exception raised for data that does not match the expected format."""
    default_message = "Incorrect format exception"


class DBConnectionError(_LoggedError):
    """Exception raised when the database connection fails."""
    default_message = "DB connection exception"


class GenericError(_LoggedError):
    """Exception raised for generic errors."""
    default_message = "Generic exception"


class DeleteDocException(_LoggedError):
    """Exception raised when deleting documents from the DB fails."""
    default_message = "DeleteDoc exception"
apache-2.0
kuri65536/python-for-android
python-modules/twisted/twisted/conch/checkers.py
59
9838
# -*- test-case-name: twisted.conch.test.test_checkers -*- # Copyright (c) 2001-2010 Twisted Matrix Laboratories. # See LICENSE for details. """ Provide L{ICredentialsChecker} implementations to be used in Conch protocols. """ import os, base64, binascii, errno try: import pwd except ImportError: pwd = None else: import crypt try: # get this from http://www.twistedmatrix.com/users/z3p/files/pyshadow-0.2.tar.gz import shadow except: shadow = None try: from twisted.cred import pamauth except ImportError: pamauth = None from zope.interface import implements, providedBy from twisted.conch import error from twisted.conch.ssh import keys from twisted.cred.checkers import ICredentialsChecker from twisted.cred.credentials import IUsernamePassword, ISSHPrivateKey from twisted.cred.error import UnauthorizedLogin, UnhandledCredentials from twisted.internet import defer from twisted.python import failure, reflect, log from twisted.python.util import runAsEffectiveUser from twisted.python.filepath import FilePath def verifyCryptedPassword(crypted, pw): if crypted[0] == '$': # md5_crypt encrypted salt = '$1$' + crypted.split('$')[2] else: salt = crypted[:2] return crypt.crypt(pw, salt) == crypted class UNIXPasswordDatabase: credentialInterfaces = IUsernamePassword, implements(ICredentialsChecker) def requestAvatarId(self, credentials): if pwd: try: cryptedPass = pwd.getpwnam(credentials.username)[1] except KeyError: return defer.fail(UnauthorizedLogin("invalid username")) else: if cryptedPass not in ['*', 'x'] and \ verifyCryptedPassword(cryptedPass, credentials.password): return defer.succeed(credentials.username) if shadow: gid = os.getegid() uid = os.geteuid() os.setegid(0) os.seteuid(0) try: shadowPass = shadow.getspnam(credentials.username)[1] except KeyError: os.setegid(gid) os.seteuid(uid) return defer.fail(UnauthorizedLogin("invalid username")) os.setegid(gid) os.seteuid(uid) if verifyCryptedPassword(shadowPass, credentials.password): return 
defer.succeed(credentials.username) return defer.fail(UnauthorizedLogin("invalid password")) return defer.fail(UnauthorizedLogin("unable to verify password")) class SSHPublicKeyDatabase: """ Checker that authenticates SSH public keys, based on public keys listed in authorized_keys and authorized_keys2 files in user .ssh/ directories. """ credentialInterfaces = ISSHPrivateKey, implements(ICredentialsChecker) def requestAvatarId(self, credentials): d = defer.maybeDeferred(self.checkKey, credentials) d.addCallback(self._cbRequestAvatarId, credentials) d.addErrback(self._ebRequestAvatarId) return d def _cbRequestAvatarId(self, validKey, credentials): """ Check whether the credentials themselves are valid, now that we know if the key matches the user. @param validKey: A boolean indicating whether or not the public key matches a key in the user's authorized_keys file. @param credentials: The credentials offered by the user. @type credentials: L{ISSHPrivateKey} provider @raise UnauthorizedLogin: (as a failure) if the key does not match the user in C{credentials}. Also raised if the user provides an invalid signature. @raise ValidPublicKey: (as a failure) if the key matches the user but the credentials do not include a signature. See L{error.ValidPublicKey} for more information. @return: The user's username, if authentication was successful. 
""" if not validKey: return failure.Failure(UnauthorizedLogin("invalid key")) if not credentials.signature: return failure.Failure(error.ValidPublicKey()) else: try: pubKey = keys.Key.fromString(credentials.blob) if pubKey.verify(credentials.signature, credentials.sigData): return credentials.username except: # any error should be treated as a failed login log.err() return failure.Failure(UnauthorizedLogin('error while verifying key')) return failure.Failure(UnauthorizedLogin("unable to verify key")) def getAuthorizedKeysFiles(self, credentials): """ Return a list of L{FilePath} instances for I{authorized_keys} files which might contain information about authorized keys for the given credentials. On OpenSSH servers, the default location of the file containing the list of authorized public keys is U{$HOME/.ssh/authorized_keys<http://www.openbsd.org/cgi-bin/man.cgi?query=sshd_config>}. I{$HOME/.ssh/authorized_keys2} is also returned, though it has been U{deprecated by OpenSSH since 2001<http://marc.info/?m=100508718416162>}. @return: A list of L{FilePath} instances to files with the authorized keys. """ pwent = pwd.getpwnam(credentials.username) root = FilePath(pwent.pw_dir).child('.ssh') files = ['authorized_keys', 'authorized_keys2'] return [root.child(f) for f in files] def checkKey(self, credentials): """ Retrieve files containing authorized keys and check against user credentials. 
""" uid, gid = os.geteuid(), os.getegid() ouid, ogid = pwd.getpwnam(credentials.username)[2:4] for filepath in self.getAuthorizedKeysFiles(credentials): if not filepath.exists(): continue try: lines = filepath.open() except IOError, e: if e.errno == errno.EACCES: lines = runAsEffectiveUser(ouid, ogid, filepath.open) else: raise for l in lines: l2 = l.split() if len(l2) < 2: continue try: if base64.decodestring(l2[1]) == credentials.blob: return True except binascii.Error: continue return False def _ebRequestAvatarId(self, f): if not f.check(UnauthorizedLogin): log.msg(f) return failure.Failure(UnauthorizedLogin("unable to get avatar id")) return f class SSHProtocolChecker: """ SSHProtocolChecker is a checker that requires multiple authentications to succeed. To add a checker, call my registerChecker method with the checker and the interface. After each successful authenticate, I call my areDone method with the avatar id. To get a list of the successful credentials for an avatar id, use C{SSHProcotolChecker.successfulCredentials[avatarId]}. If L{areDone} returns True, the authentication has succeeded. """ implements(ICredentialsChecker) def __init__(self): self.checkers = {} self.successfulCredentials = {} def get_credentialInterfaces(self): return self.checkers.keys() credentialInterfaces = property(get_credentialInterfaces) def registerChecker(self, checker, *credentialInterfaces): if not credentialInterfaces: credentialInterfaces = checker.credentialInterfaces for credentialInterface in credentialInterfaces: self.checkers[credentialInterface] = checker def requestAvatarId(self, credentials): """ Part of the L{ICredentialsChecker} interface. Called by a portal with some credentials to check if they'll authenticate a user. We check the interfaces that the credentials provide against our list of acceptable checkers. If one of them matches, we ask that checker to verify the credentials. If they're valid, we call our L{_cbGoodAuthentication} method to continue. 
@param credentials: the credentials the L{Portal} wants us to verify """ ifac = providedBy(credentials) for i in ifac: c = self.checkers.get(i) if c is not None: d = defer.maybeDeferred(c.requestAvatarId, credentials) return d.addCallback(self._cbGoodAuthentication, credentials) return defer.fail(UnhandledCredentials("No checker for %s" % \ ', '.join(map(reflect.qual, ifac)))) def _cbGoodAuthentication(self, avatarId, credentials): """ Called if a checker has verified the credentials. We call our L{areDone} method to see if the whole of the successful authentications are enough. If they are, we return the avatar ID returned by the first checker. """ if avatarId not in self.successfulCredentials: self.successfulCredentials[avatarId] = [] self.successfulCredentials[avatarId].append(credentials) if self.areDone(avatarId): del self.successfulCredentials[avatarId] return avatarId else: raise error.NotEnoughAuthentication() def areDone(self, avatarId): """ Override to determine if the authentication is finished for a given avatarId. @param avatarId: the avatar returned by the first checker. For this checker to function correctly, all the checkers must return the same avatar ID. """ return True
apache-2.0
Comcast/neutron
neutron/tests/unit/cisco/test_network_db.py
17
13079
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Unit tests for the Cisco plugin's QoS and credential database models.

The tests exercise the QoS/credential database access layer indirectly,
through the Cisco network plugin's thin API methods, and directly through
the ``network_db_v2`` module where the plugin exposes no wrapper.
"""

import collections

import mock
import testtools

from neutron.db import api as db
from neutron.plugins.cisco.common import cisco_constants
from neutron.plugins.cisco.common import cisco_credentials_v2
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.common import config as config
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco import network_plugin
from neutron.tests import base


class CiscoNetworkDbTest(base.BaseTestCase):

    """Base class for Cisco network database unit tests."""

    def setUp(self):
        super(CiscoNetworkDbTest, self).setUp()
        db.configure_db()

        # The Cisco network plugin includes a thin layer of QoS and
        # credential API methods which indirectly call Cisco QoS and
        # credential database access methods. For better code coverage,
        # this test suite will make calls to the QoS and credential database
        # access methods indirectly through the network plugin. The network
        # plugin's init function can be mocked out for this purpose.
        def new_network_plugin_init(instance):
            pass
        with mock.patch.object(network_plugin.PluginV2, '__init__',
                               new=new_network_plugin_init):
            self._network_plugin = network_plugin.PluginV2()

        # Ensure per-test DB isolation regardless of test outcome.
        self.addCleanup(db.clear_db)


class CiscoNetworkQosDbTest(CiscoNetworkDbTest):

    """Unit tests for Cisco network QoS database model."""

    # Lightweight stand-in for the expected attributes of a QoS row.
    QosObj = collections.namedtuple('QosObj', 'tenant qname desc')

    def _qos_test_obj(self, tnum, qnum, desc=None):
        """Create a Qos test object from a pair of numbers."""
        if desc is None:
            desc = 'test qos %s-%s' % (str(tnum), str(qnum))
        tenant = 'tenant_%s' % str(tnum)
        qname = 'qos_%s' % str(qnum)
        return self.QosObj(tenant, qname, desc)

    def _assert_equal(self, qos, qos_obj):
        # Compare a DB model instance against the expected namedtuple.
        self.assertEqual(qos.tenant_id, qos_obj.tenant)
        self.assertEqual(qos.qos_name, qos_obj.qname)
        self.assertEqual(qos.qos_desc, qos_obj.desc)

    def test_qos_add_remove(self):
        """Add then remove a QoS entry; a second delete returns None."""
        qos11 = self._qos_test_obj(1, 1)
        qos = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
                                              qos11.desc)
        self._assert_equal(qos, qos11)
        qos_id = qos.qos_id
        qos = self._network_plugin.delete_qos(qos11.tenant, qos_id)
        self._assert_equal(qos, qos11)
        # Deleting an already-deleted entry is a no-op that yields None.
        qos = self._network_plugin.delete_qos(qos11.tenant, qos_id)
        self.assertIsNone(qos)

    def test_qos_add_dup(self):
        """Creating a duplicate QoS name for a tenant must be rejected."""
        qos22 = self._qos_test_obj(2, 2)
        qos = self._network_plugin.create_qos(qos22.tenant, qos22.qname,
                                              qos22.desc)
        self._assert_equal(qos, qos22)
        qos_id = qos.qos_id
        with testtools.ExpectedException(c_exc.QosNameAlreadyExists):
            self._network_plugin.create_qos(qos22.tenant, qos22.qname,
                                            "duplicate 22")
        qos = self._network_plugin.delete_qos(qos22.tenant, qos_id)
        self._assert_equal(qos, qos22)
        qos = self._network_plugin.delete_qos(qos22.tenant, qos_id)
        self.assertIsNone(qos)

    def test_qos_get(self):
        """Lookups are scoped per tenant; wrong tenant/id raises QosNotFound."""
        qos11 = self._qos_test_obj(1, 1)
        qos11_id = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
                                                   qos11.desc).qos_id
        qos21 = self._qos_test_obj(2, 1)
        qos21_id = self._network_plugin.create_qos(qos21.tenant, qos21.qname,
                                                   qos21.desc).qos_id
        qos22 = self._qos_test_obj(2, 2)
        qos22_id = self._network_plugin.create_qos(qos22.tenant, qos22.qname,
                                                   qos22.desc).qos_id

        qos = self._network_plugin.get_qos_details(qos11.tenant, qos11_id)
        self._assert_equal(qos, qos11)
        qos = self._network_plugin.get_qos_details(qos21.tenant, qos21_id)
        self._assert_equal(qos, qos21)
        # qos21 and qos22 share tenant_2, so either id resolves for it.
        qos = self._network_plugin.get_qos_details(qos21.tenant, qos22_id)
        self._assert_equal(qos, qos22)

        # Unknown id, or an id that belongs to a different tenant, raises.
        with testtools.ExpectedException(c_exc.QosNotFound):
            self._network_plugin.get_qos_details(qos11.tenant, "dummyQosId")
        with testtools.ExpectedException(c_exc.QosNotFound):
            self._network_plugin.get_qos_details(qos11.tenant, qos21_id)
        with testtools.ExpectedException(c_exc.QosNotFound):
            self._network_plugin.get_qos_details(qos21.tenant, qos11_id)

        # Per-tenant listing: tenant_1 has 1 entry, tenant_2 has 2, and an
        # unknown tenant has none.
        qos_all_t1 = self._network_plugin.get_all_qoss(qos11.tenant)
        self.assertEqual(len(qos_all_t1), 1)
        qos_all_t2 = self._network_plugin.get_all_qoss(qos21.tenant)
        self.assertEqual(len(qos_all_t2), 2)
        qos_all_t3 = self._network_plugin.get_all_qoss("tenant3")
        self.assertEqual(len(qos_all_t3), 0)

    def test_qos_update(self):
        """Renaming a QoS entry persists; unknown ids raise QosNotFound."""
        qos11 = self._qos_test_obj(1, 1)
        qos11_id = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
                                                   qos11.desc).qos_id
        # A rename with new_name=None must be accepted (no-op rename).
        self._network_plugin.rename_qos(qos11.tenant, qos11_id,
                                        new_name=None)
        new_qname = "new qos name"
        new_qos = self._network_plugin.rename_qos(qos11.tenant, qos11_id,
                                                  new_qname)
        expected_qobj = self.QosObj(qos11.tenant, new_qname, qos11.desc)
        self._assert_equal(new_qos, expected_qobj)
        # Re-read from the DB to confirm the rename was persisted.
        new_qos = self._network_plugin.get_qos_details(qos11.tenant, qos11_id)
        self._assert_equal(new_qos, expected_qobj)
        with testtools.ExpectedException(c_exc.QosNotFound):
            self._network_plugin.rename_qos(qos11.tenant, "dummyQosId",
                                            new_name=None)


class CiscoNetworkCredentialDbTest(CiscoNetworkDbTest):

    """Unit tests for Cisco network credentials database model."""

    # Lightweight stand-in for the expected attributes of a credential row.
    CredObj = collections.namedtuple('CredObj', 'cname usr pwd ctype')

    def _cred_test_obj(self, tnum, cnum):
        """Create a Credential test object from a pair of numbers."""
        cname = 'credential_%s_%s' % (str(tnum), str(cnum))
        usr = 'User_%s_%s' % (str(tnum), str(cnum))
        pwd = 'Password_%s_%s' % (str(tnum), str(cnum))
        ctype = 'ctype_%s' % str(tnum)
        return self.CredObj(cname, usr, pwd, ctype)

    def _assert_equal(self, credential, cred_obj):
        # Compare a DB model instance against the expected namedtuple.
        self.assertEqual(credential.type, cred_obj.ctype)
        self.assertEqual(credential.credential_name, cred_obj.cname)
        self.assertEqual(credential.user_name, cred_obj.usr)
        self.assertEqual(credential.password, cred_obj.pwd)

    def test_credential_add_remove(self):
        """Add then remove a credential; a second remove returns None."""
        cred11 = self._cred_test_obj(1, 1)
        cred = cdb.add_credential(
            cred11.cname, cred11.usr, cred11.pwd, cred11.ctype)
        self._assert_equal(cred, cred11)
        cred_id = cred.credential_id
        cred = cdb.remove_credential(cred_id)
        self._assert_equal(cred, cred11)
        # Removing an already-removed credential is a no-op yielding None.
        cred = cdb.remove_credential(cred_id)
        self.assertIsNone(cred)

    def test_credential_add_dup(self):
        """Adding a credential with a duplicate name must be rejected."""
        cred22 = self._cred_test_obj(2, 2)
        cred = cdb.add_credential(
            cred22.cname, cred22.usr, cred22.pwd, cred22.ctype)
        self._assert_equal(cred, cred22)
        cred_id = cred.credential_id
        with testtools.ExpectedException(c_exc.CredentialAlreadyExists):
            cdb.add_credential(
                cred22.cname, cred22.usr, cred22.pwd, cred22.ctype)
        cred = cdb.remove_credential(cred_id)
        self._assert_equal(cred, cred22)
        cred = cdb.remove_credential(cred_id)
        self.assertIsNone(cred)

    def test_credential_get_id(self):
        """Credentials are retrievable by id; unknown ids raise."""
        cred11 = self._cred_test_obj(1, 1)
        cred11_id = cdb.add_credential(
            cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
        cred21 = self._cred_test_obj(2, 1)
        cred21_id = cdb.add_credential(
            cred21.cname, cred21.usr, cred21.pwd, cred21.ctype).credential_id
        cred22 = self._cred_test_obj(2, 2)
        cred22_id = cdb.add_credential(
            cred22.cname, cred22.usr, cred22.pwd, cred22.ctype).credential_id

        cred = self._network_plugin.get_credential_details(cred11_id)
        self._assert_equal(cred, cred11)
        cred = self._network_plugin.get_credential_details(cred21_id)
        self._assert_equal(cred, cred21)
        cred = self._network_plugin.get_credential_details(cred22_id)
        self._assert_equal(cred, cred22)

        with testtools.ExpectedException(c_exc.CredentialNotFound):
            self._network_plugin.get_credential_details("dummyCredentialId")

        # All three credentials should be listed.
        cred_all_t1 = self._network_plugin.get_all_credentials()
        self.assertEqual(len(cred_all_t1), 3)

    def test_credential_get_name(self):
        """Credentials are retrievable by name; ids are distinct."""
        cred11 = self._cred_test_obj(1, 1)
        cred11_id = cdb.add_credential(
            cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
        cred21 = self._cred_test_obj(2, 1)
        cred21_id = cdb.add_credential(
            cred21.cname, cred21.usr, cred21.pwd, cred21.ctype).credential_id
        cred22 = self._cred_test_obj(2, 2)
        cred22_id = cdb.add_credential(
            cred22.cname, cred22.usr, cred22.pwd, cred22.ctype).credential_id
        # Every credential must have been assigned a unique id.
        self.assertNotEqual(cred11_id, cred21_id)
        self.assertNotEqual(cred11_id, cred22_id)
        self.assertNotEqual(cred21_id, cred22_id)

        cred = cdb.get_credential_name(cred11.cname)
        self._assert_equal(cred, cred11)
        cred = cdb.get_credential_name(cred21.cname)
        self._assert_equal(cred, cred21)
        cred = cdb.get_credential_name(cred22.cname)
        self._assert_equal(cred, cred22)

        with testtools.ExpectedException(c_exc.CredentialNameNotFound):
            cdb.get_credential_name("dummyCredentialName")

    def test_credential_update(self):
        """Renaming a credential persists; unknown ids raise."""
        cred11 = self._cred_test_obj(1, 1)
        cred11_id = cdb.add_credential(
            cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
        # Renaming with both fields None must be accepted (no-op rename).
        self._network_plugin.rename_credential(cred11_id, new_name=None,
                                               new_password=None)
        new_usr = "new user name"
        new_pwd = "new password"
        new_credential = self._network_plugin.rename_credential(
            cred11_id, new_usr, new_pwd)
        expected_cred = self.CredObj(
            cred11.cname, new_usr, new_pwd, cred11.ctype)
        self._assert_equal(new_credential, expected_cred)
        # Re-read from the DB to confirm the update was persisted.
        new_credential = self._network_plugin.get_credential_details(
            cred11_id)
        self._assert_equal(new_credential, expected_cred)
        with testtools.ExpectedException(c_exc.CredentialNotFound):
            self._network_plugin.rename_credential(
                "dummyCredentialId", new_usr, new_pwd)

    def test_get_credential_not_found_exception(self):
        """get_credential_details raises for an unknown credential id."""
        self.assertRaises(c_exc.CredentialNotFound,
                          self._network_plugin.get_credential_details,
                          "dummyCredentialId")


class CiscoCredentialStoreTest(base.BaseTestCase):

    """Cisco Credential Store unit tests."""

    def setUp(self):
        super(CiscoCredentialStoreTest, self).setUp()
        db.configure_db()
        self.addCleanup(db.clear_db)

    def test_cred_store_init_duplicate_creds_ignored(self):
        """Check that with multi store instances, dup creds are ignored."""
        # Create a device dictionary containing credentials for 1 switch.
        dev_dict = {
            ('dev_id', '1.1.1.1', cisco_constants.USERNAME): 'user_1',
            ('dev_id', '1.1.1.1', cisco_constants.PASSWORD): 'password_1',
            ('dev_id', '1.1.1.1', 'host_a'): '1/1',
            ('dev_id', '1.1.1.1', 'host_b'): '1/2',
            ('dev_id', '1.1.1.1', 'host_c'): '1/3',
        }
        with mock.patch.object(config, 'get_device_dictionary',
                               return_value=dev_dict):
            # Create and initialize 2 instances of credential store.
            cisco_credentials_v2.Store().initialize()
            cisco_credentials_v2.Store().initialize()
            # There should be only 1 switch credential in the database.
            self.assertEqual(len(cdb.get_all_credentials()), 1)
apache-2.0
nbareil/python-misp
mispy/misp.py
1
49690
#! /usr/bin/env python """ misp ~~~~ Interface module to MISP servers (https://github.com/MISP/MISP). :copyright: Nicolas Bareil :license: Apache Licence, Version 2.0 """ # This file is part of python-misp. # # Copyright 2015 Nicolas Bareil <nicolas.bareil@airbus.com> # while at Airbus Group CERT <http://www.airbus.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import datetime import uuid import os import lxml # type:ignore from lxml import objectify # type:ignore import requests import json from typing import Any, Iterator, Optional, List # Fix Python 3.x. try: UNICODE_EXISTS = bool(type(unicode)) # type:ignore except NameError: unicode = lambda s: str(s) TEST_NEEDLE = '68b329da9893e34099c7d8ad5cb9c940' TEST_EVT_ID = 540 TEST_ATTR_ID = 87516 TEST_LAST_EVT_ID = 534 DEFAULT_MISP_URL = 'https://misp.internal' DEFAULT_ORG = 'Default ACME Corp' DEFAULT_ORGC = DEFAULT_ORG try: MISP_API_KEY = open(os.path.join(os.environ['HOME'], '.misp_api_key')).read().strip() except (IOError, KeyError): MISP_API_KEY = 'abcdefghighklmnopqrst' MISP_SSL_CHAIN = '/etc/ssl/certs/ca-certificates.crt' # To remove this deprecation warning: # SecurityWarning: Certificate has no `subjectAltName`, falling back to check # for a `commonName` for now. This feature is being removed by major browsers # and deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 # for details.) 
requests.packages.urllib3.disable_warnings() class MispBaseObject(object): """ Inherited module regrouping shared variables. """ def __init__(self): self._uuid = None self._timestamp = None self._comment = None self._distribution = None self._threat_level_id = None self._analysis = None def to_xml(self): obj = self.to_xml_object() lxml.objectify.deannotate(obj, xsi_nil=True) lxml.etree.cleanup_namespaces(obj) return lxml.etree.tostring(obj) @property def uuid(self): return self._uuid @uuid.setter def uuid(self, value): self._uuid = value @property def comment(self): return self._comment @comment.setter def comment(self, value): self._comment = value @property def timestamp(self): if self._timestamp: return self._timestamp return int(time.time()) @timestamp.setter def timestamp(self, value): val = None if type(value) is int or type(value) is objectify.IntElement: val = int(value) elif type(value) is datetime.datetime: val = int(time.mktime(value.timetuple())) else: raise ValueError('Invalid date type: %s' % type(value)) self._timestamp = val @property def distribution(self): return self._distribution or 0 @distribution.setter def distribution(self, value): if int(value) not in [0, 1, 2, 3, 4, 5]: raise ValueError('Invalid distribution value for an attribute') self._distribution = value @property def threat_level_id(self): return self._threat_level_id or 1 @threat_level_id.setter def threat_level_id(self, value): if int(value) not in [1, 2, 3, 4]: raise ValueError('Invalid threat_level_id value for an attribute') self._threat_level_id = value @property def analysis(self): return self._analysis or 0 @analysis.setter def analysis(self, value): if value and int(value) not in [0, 1, 2]: raise ValueError('Invalid analysis value for an attribute') self._analysis = value or 0 class MispTag(MispBaseObject): """ Object for handling MISP tags in events """ def __init__(self): super(MispTag, self).__init__() self._id = None self._name = None self._colour = None self._org_id = 
None self._exportable = None @property def id(self): return self._id @id.setter def id(self, value): if value: self._id = int(value) @property def name(self): return self._name @name.setter def name(self, value): if value: self._name = value @property def colour(self): return self._colour @colour.setter def colour(self, value): if value: self._colour = value @property def org_id(self): return self._org_id @org_id.setter def org_id(self, value): if value is not None: self._org_id = int(value) @property def exportable(self): return self._exportable @exportable.setter def exportable(self, value): if value: self._exportable = (int(value) == 1) @staticmethod def from_xml(s): """ Static method converting a serialized XML string into a :class:`MispTag` object. :example: >>> s = '<Tag><id>3</id><name>TLP:GREEN</name><colour>#04cc18</colour><exportable>1</exportable><org_id>0</org_id></Tag>' >>> a = MispTag.from_xml(s) >>> type(a) <class 'misp.MispTag'> """ attr = objectify.fromstring(s) return MispTag.from_xml_object(attr) @staticmethod def from_xml_object(obj): if obj.tag.lower() != 'tag': raise ValueError('Invalid Tag XML') attr = MispTag() for field in ['id', 'name', 'colour', 'exportable', 'org_id']: val = getattr(obj, field) setattr(attr, field, val) return attr class MispObject(MispBaseObject): class Attributes(object): """ The module that provides glue between :class:`MispObject` and :class:`MispAttribute` """ def __init__(self, obj): self.object = obj self._attributes = [] def add(self, attr): """This function adds an attribute to the current object. It takes care of updating Object's timestamp and filling the blanks in the attribute object (timestamp, uuid, event id). 
:param attr: a :class:`MispAttribute`'s instance to be added to the Object """ if type(attr) is not MispAttribute: raise ValueError("object.attributes.add() only takes MispAttribute instance") self.event.timestamp = datetime.datetime.now() if not attr.uuid: attr.uuid = uuid.uuid4() attr.event_id = self.object.event_id attr.timestamp = self.object.timestamp+1 self._attributes.append(attr) def remove(self, attribute): """This function removes an attribute from the current object. :param attr: `MispAttribute` to be removed to the Object .. todo:: Implement it. """ raise NotImplementedError('Cannot remove attribute yet') def __iter__(self): return self._attributes.__iter__() def __len__(self): return len(self._attributes) def set(self, val): self._attributes = val def __init__(self): super(MispObject, self).__init__() self._id = None self._event_id = None self._name = None self._description = None self._comment = None self._timestamp = None self._meta_category = None self.attributes = MispObject.Attributes(self) self.shadowattributes = [] @property def id(self): return self._id @id.setter def id(self, value): if value is not None: self._id = int(value) @property def event_id(self): return self._event_id @event_id.setter def event_id(self, value): if value is not None: self._event_id = int(value) @property def name(self): return self._name @name.setter def name(self, value): if value is not None: self._name = value @property def description(self): return self._description @description.setter def description(self, value): if value is not None: self._description = value @property def comment(self): return self._comment @comment.setter def comment(self, value): if value is not None: self._comment = value @property def timestamp(self): return self._timestamp @timestamp.setter def timestamp(self, value): if value is not None: self._timestamp = int(value) @property def meta_category(self): return self._meta_category @meta_category.setter def meta_category(self, value): if 
value is not None: self._meta_category = value @staticmethod def from_xml(s): """ Static method converting a serialized XML string into a :class:`MispObject` object. :example: >>> s = 'updateMe' >>> a = MispObject.from_xml(s) >>> type(a) <class 'misp.MispObject'> """ attr = objectify.fromstring(s) return MispObject.from_xml_object(attr) @staticmethod def from_xml_object(xml_obj): if xml_obj.tag.lower() != 'object': raise ValueError('Invalid Tag XML') obj = MispObject() for field in ['id', 'event_id', 'name', 'description', 'comment', 'timestamp']: val = getattr(xml_obj, field) setattr(obj, field, val) obj.meta_category = getattr(xml_obj, "meta-category") attributes = [] for attr in xml_obj.Attribute: try: attr_obj = MispAttribute.from_xml_object(attr) attributes.append(attr_obj) except: # error creating attribute. It could mean the type is # invalid, or something else continue obj.attributes.set(attributes) if hasattr(xml_obj, 'ShadowAttribute'): for shadowattribute in xml_obj.ShadowAttribute: shadowattribute_obj = MispShadowAttribute.from_xml_object(shadowattribute) obj.shadowattributes.append(shadowattribute_obj) return obj def to_xml_object(self): obj = objectify.Element("Object") for field in ['id', 'event_id', 'name', 'description', 'comment', 'timestamp']: value = getattr(self, field) setattr(obj, field, value) setattr(obj, "meta-category", self.meta_category) for attr in self.attributes: attr_xml = attr.to_xml_object() obj.append(attr_xml) for shadow in self.shadowattributes: shadow_xml = shadow.to_xml_object() obj.append(shadow_xml) return obj class MispEvent(MispBaseObject): class Attributes(object): """ The module that provides glue between :class:`MispEvent` and :class:`MispAttribute` """ def __init__(self, event): self.event = event self._attributes = [] def add(self, attr): """This function adds an attribute to the current event. It takes care of updating Event's timestamp and filling the blanks in the attribute object (timestamp, uuid, event id). 
:param attr: a :class:`MispAttribute`'s instance to be added to the Event :example: >>> new_attr = MispAttribute() >>> new_attr.value = 'foobar.com' >>> new_attr.category = 'Network activity' >>> new_attr.type = 'domain' >>> server = MispServer() >>> event = server.events.get(12) >>> event.attributes.add(new_attr) >>> server.events.update(event) """ if type(attr) is not MispAttribute: raise ValueError("event.attributes.add() only takes MispAttribute instance") self.event.timestamp = datetime.datetime.now() if not attr.uuid: attr.uuid = uuid.uuid4() attr.event_id = self.event.id attr.timestamp = self.event.timestamp+1 self._attributes.append(attr) def remove(self, attribute): """This function removes an attribute from the current event. :param attr: `MispAttribute` to be removed to the Event .. todo:: Implement it. """ raise NotImplementedError('Cannot remove attribute yet') def __iter__(self): return self._attributes.__iter__() def __len__(self): return len(self._attributes) def set(self, val): self._attributes = val class Tags(object): """ Module that provides glue between :class:`MispEvent` and :class:`MispTag` """ def __init__(self, event): self.event = event self._tags = [] def __iter__(self): return self._tags.__iter__() def __len__(self): return len(self._tags) def set(self, val): self._tags = val class Objects(object): """ Module that provides glue between :class:`MispEvent` and :class:`MispObject` """ def __init__(self, event): self.event = event self._objects = [] def __iter__(self): return self._objects.__iter__() def __len__(self): return len(self._objects) def set(self, val): self._objects = val def __init__(self): super(MispEvent, self).__init__() self._id = None self._info = None self._org = None self._orgc = None self._publish = None self._proposal_email_lock = None self._locked = None self._date = None self._publish_timestamp = None self._published = None self.attributes = MispEvent.Attributes(self) self.tags = MispEvent.Tags(self) self.objects = 
MispEvent.Objects(self) self.shadowattributes = [] def __repr__(self): return "'%i: %s'" % (self._id or '-', self._info or '-') @property def attribute_count(self): """Read-only variable that counts the number of attributes""" return len(self.attributes) @property def id(self): return self._id @id.setter def id(self, value): if value: self._id = int(value) @property def info(self): return self._info @info.setter def info(self, value): self._info = unicode(value) @property def orgc(self): return self._orgc @orgc.setter def orgc(self, value): self._orgc = value @property def published(self): return self._published or 0 @published.setter def published(self, value): self._published = value @property def locked(self): return self._locked or 0 @locked.setter def locked(self, value): self._locked = value @property def proposal_email_lock(self): return self._proposal_email_lock or 0 @proposal_email_lock.setter def proposal_email_lock(self, value): self._proposal_email_lock = value @property def org(self): return self._org or DEFAULT_ORG @org.setter def org(self, value): self._org = value @property def date(self): """ Getter/setter for the date member. The setter can take a string or a :meth:`datetime.datetime` and will do the appropriate transformation. """ if self._date: return self._date return datetime.datetime.now().strftime('%Y-%m-%d') @date.setter def date(self, value): val = None if type(value) is str or type(value) is objectify.StringElement: val = value elif type(value) is datetime.datetime: val = value.strftime('%Y-%m-%d') else: raise ValueError('Invalid date type: %s' % type(value)) self._date = val @property def publish_timestamp(self): """ Getter/setter. The setter can take an integer (as an epoch timestamp) or a :class:`datetime.datetime`. instance. 
""" if self._publish_timestamp: return self._publish_timestamp return int(time.time()) @publish_timestamp.setter def publish_timestamp(self, value): val = None if type(value) is int or type(value) is objectify.IntElement: val = value elif type(value) is datetime.datetime: val = int(time.mktime(value.timetuple())) self._publish_timestamp = val @staticmethod def from_xml(s: str): """ Static method converting a serialized XML string into a :class:`MispEvent` object. :example: >>> s = '<Event><id>42</id><Org><name>ACME and bro.<name><uuid>564d9146-2c34-43df-906a-7bc40a3ac101</uuid><id>12</id></Org><Orgc><name>ACME and bro bis.<name><uuid>164d9146-2c34-43df-906a-7bc40a3ac101</uuid><id>13</id></Orgc><date>2015-10-20</date><threat_level_id>3</threat_level_id><info>AGNOSTIC PANDA</info><published>1</published><uuid>56278fd8-f2c0-4907-bcca-594e0a3ac101</uuid><attribute_count>8</attribute_count><analysis>2</analysis><timestamp>1445434988</timestamp><distribution>1</distribution><publish_timestamp>1445435155</publish_timestamp></Event>' >>> m = MispEvent.from_xml(s) >>> type(m) <class 'misp.MispEvent'> """ event = objectify.fromstring(s) return MispEvent.from_xml_object(event) @staticmethod def from_xml_object(obj): if obj.tag.lower() != 'event': raise ValueError('Invalid Event XML') event = MispEvent() for field in ['uuid', 'distribution', 'threat_level_id', 'date', 'info', 'published', 'analysis', 'timestamp', 'distribution', 'publish_timestamp', 'id']: val = getattr(obj, field) setattr(event, field, val) attributes = [] if hasattr(obj, 'Attribute'): for attr in obj.Attribute: attr_obj = MispAttribute.from_xml_object(attr) attributes.append(attr_obj) event.attributes.set(attributes) try: objects = [] for cur_obj in obj.Object: obj_obj = MispObject.from_xml_object(cur_obj) objects.append(obj_obj) event.objects.set(objects) except AttributeError: # No objects pass try: tags = [] for tag in obj.Tag: tag_obj = MispTag.from_xml_object(tag) tags.append(tag_obj) 
event.tags.set(tags) except AttributeError: # No tags pass try: event.org = obj.Org.name event.orgc = obj.Orgc.name except Exception as err: pass if hasattr(obj, 'ShadowAttribute'): for shadowattribute in obj.ShadowAttribute: shadowattribute_obj = MispShadowAttribute.from_xml_object(shadowattribute) event.shadowattributes.append(shadowattribute_obj) return event def to_xml_object(self): event = objectify.Element('Event') for field in ['uuid', 'distribution', 'threat_level_id', 'org', 'orgc', 'date', 'info', 'published', 'analysis', 'timestamp', 'distribution', 'proposal_email_lock', 'locked', 'publish_timestamp', 'id', 'attribute_count']: val = getattr(self, field) setattr(event, field, val) try: for shadowattribute in event.shadowattributes: event.append(shadowattribute.to_xml_object()) except Exception: pass for attr in self.attributes: event.append(attr.to_xml_object()) for obj in self.objects: event.append(obj.to_xml_object()) org = objectify.Element('Org') org.name = self.org event.append(org) orgc = objectify.Element('Orgc') orgc.name = self.orgc event.append(orgc) return event class MispTransportError(Exception): def __init__(self, message, path, status_code): super(MispTransportError, self).__init__(message, path, status_code) self.path = path self.status_code = status_code class MispServer(object): """ Module to communicate with the MISP instance. :members: .. automethod:: __init__ """ def __init__(self, url=DEFAULT_MISP_URL, apikey=MISP_API_KEY, ssl_chain=MISP_SSL_CHAIN): """Initializes a MispServer instance. 
:param url: Fully qualified URL to the MISP instance :param apikey: MISP API key :param ssl_chain: SSL certificate chain """ self.url = url self.headers = { 'Content-Type': 'application/xml', 'Accept': 'application/xml', 'Authorization': apikey, } self.events = MispServer.Events(self) self.attributes = MispServer.Attributes(self) self.shadowattributes = MispServer.ShadowAttributes(self) self.sightings = MispServer.Sightings(self) self.verify_ssl = ssl_chain def _absolute_url(self, path): return self.url + path def POST(self, path: str, body: Any, xml=True) -> bytes: """ Raw POST to the MISP server :param path: URL fragment (ie /events/) :param body: HTTP Body (raw bytes) :returns: HTTP raw content (as seen by :class:`requests.Response`) """ url = self._absolute_url(path) headers = dict(self.headers) if xml: headers['Content-Type'] = 'application/xml' headers['Accept'] = 'application/xml' else: headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' resp = requests.post(url, data=body, headers=headers, verify=self.verify_ssl) if resp.status_code != 200: raise MispTransportError('POST %s: returned status=%d', path, resp.status_code) return resp.content def GET(self, path: str) -> bytes: """ Raw GET to the MISP server :param path: URL fragment (ie /events/) :returns: HTTP raw content (as seen by :class:`requests.Response`) """ url = self._absolute_url(path) resp = requests.get(url, headers=self.headers, verify=self.verify_ssl) if resp.status_code != 200: raise MispTransportError('GET %s: returned status=%d', path, resp.status_code) return resp.content def tag(self, attr, tag: str) -> bool: """ Add a tag to an attribute. 
:param attr: Attribute to be modified :param tag: tag to be added :returns: success status """ data = { 'uuid': attr.uuid, 'tag': tag } raw = self.POST('/tags/attachTagToObject/', data, xml=False) return b'successfully' in raw def download(self, attr) -> bytes: """ Download an attribute attachment (if type is malware-sample or attachment only) :param attr: attribute (should be MispAttribute instance) :returns: value of the attachment """ if attr.type not in ['malware-sample', 'attachment']: raise ValueError('Only malware-sample and attachment can be downloaded') return self.GET('/attributes/downloadAttachment/download/%i' % attr.id) class ShadowAttributes(object): """ This module communicates shadow attributes with the MISP server """ def __init__(self, server): self.server = server def get(self, shadowattributeid: int): """ Fetches a shadow attribute from the MISP server. :param shadowattributeid: Shadow attribute id (as an integer) :returns: :class:`MispShadowAttribute` object """ raw = self.server.GET('/shadow_attributes/view/%d' % shadowattributeid) response = objectify.fromstring(raw) return MispShadowAttribute.from_xml_object(response.ShadowAttribute) def add(self, event: MispEvent, shadowattribute): """ Adds a shadow attribute to an event and send it to the MISP server. :param event: :class:`MispEvent` object to add :param shadowattribute: :class:`MispShadowAttribute` object to add :returns: The :class:`MispShadowAttribute` object as seen by the server. 
:example: >>> proposal = MispShadowAttribute() >>> proposal.value = 'foobar.com' >>> proposal.category = 'Network activity' >>> proposal.type = 'domain' >>> server = MispServer() >>> event = server.events.get(12) >>> event.attributes.add(new_attr) >>> server.shadowattributes.add(event, proposal) """ assert shadowattribute is not MispShadowAttribute assert event is not MispEvent raw = shadowattribute.to_xml() raw = self.server.POST('/shadow_attributes/add/%d' % event.id, raw) response = objectify.fromstring(raw) return MispShadowAttribute.from_xml_object(response.ShadowAttribute) def update(self, attr): """ Updates a shadow attribute on the MISP server. :param shadowattribute: :class:`MispShadowAttribute` object to update :returns: The :class:`MispShadowAttribute` object as seen by the server. :example: >>> server = MispServer() >>> proposal = server.shadowattributes.get(12) >>> proposal.to_ids = 0 >>> server.shadowattributes.update(proposal) """ assert attr is not MispShadowAttribute raw = attr.to_xml() raw = self.server.POST('/shadow_attributes/edit/%d' % attr.id, raw) response = objectify.fromstring(raw) return MispShadowAttribute.from_xml_object(response.ShadowAttribute) def accept(self, shadowattribute): """ Accepts a shadow attribute on the MISP server. :param shadowattribute: :class:`MispShadowAttribute` object to accept :example: >>> server = MispServer() >>> proposal = server.shadowattributes.get(12) >>> server.shadowattributes.accept(proposal) """ assert shadowattribute is not MispShadowAttribute raw = self.server.POST('/shadow_attributes/accept/%d' % shadowattribute.id, '') def discard(self, shadowattribute): """ Discards a shadow attribute on the MISP server. 
:param shadowattribute: :class:`MispShadowAttribute` object to discard :example: >>> server = MispServer() >>> proposal = server.shadowattributes.get(12) >>> server.shadowattributes.discard(proposal) """ assert shadowattribute is not MispShadowAttribute self.server.POST('/shadow_attributes/discard/%d' % shadowattribute.id, '') class Attributes(object): """ This modules communicates Attributes with the MISP server. """ def __init__(self, server): self.server = server def get(self, id: int): """ Fetches an attribute from the MISP server. :param id: Attribute id (as an integer) :returns: :class:`MispAttribute` object """ response = self.server.GET('/attributes/%d' % id) response = objectify.fromstring(response.content) return MispAttribute.from_xml_object(response.Attribute) def update(self, attr): """ Updates an attribute on the MISP server. :param attr: :class:`MispAttribute` object to update :example: >>> server = MispServer() >>> attr = server.attributes.get(12) >>> attr.comment = 'foobar' >>> server.attributes.update(attr) """ assert attr is not MispAttribute attr.timestamp = datetime.datetime.now() raw = attr.to_xml() raw = self.server.POST('/attributes/%d' % attr.id, raw) return MispAttribute.from_xml(raw) def search(self, value: Optional[str] = None, type: Optional[str] = None, category: Optional[str] = None, tag: Optional[str] = None, fromd: Optional[str] = None, tod: Optional[str] = None, last: Optional[str] = None) -> List[MispEvent]: """ Searches an attribute on the MISP server :param value: value of the attribute to be searched (as a string) :param type: Type of the attribute to be searched (as a string) :param category: Category of the attribute to be searched (as a string) :param tag: To include a tag in the results just write its names into this parameter. To exclude a tag prepend it with a '!'. You can also chain several tag commands together with the '&&' operator. Please be aware the colons (:) cannot be used in the tag search. 
Use semicolons instead (the search will automatically search for colons instead). :param fromd: Events with the date set to a date after the one specified in the from field (format: 2015-02-15). This filter will use the date of the event. :param tod: Events with the date set to a date before the one specified in the to field (format: 2015-02-15). This filter will use the date of the event. :param last: Events published within the last x amount of time, where x can be defined in days, hours, minutes (for example 5d or 12h or 30m). This filter will use the published timestamp of the event. .. todo:: support by type/category/tags :example: >>> server = MispServer() >>> attr = server.attributes.search("google.com") [MispEvent, MispEvent...] """ request = objectify.Element('request') if value: request.value = value if type: request.type = type if category: request.category = category if tag: request.tag = tag if fromd: setattr(request, 'from', fromd) if tod: request.to = tod if last: request.last = last lxml.objectify.deannotate(request, xsi_nil=True) lxml.etree.cleanup_namespaces(request) raw = lxml.etree.tostring(request) try: raw = self.server.POST( '/events/restSearch/download', raw ) except MispTransportError as err: if err.status_code == 404: # 404 not found return [] else: # Other problem keep the exception raise err response = objectify.fromstring(raw) events = [] try: for evtobj in response.Event: events.append(MispEvent.from_xml_object(evtobj)) except AttributeError: # No Event pass return events class Sightings: def __init__(self, server): self.server = server def report_sighting(self, *args, **kwargs): '''Reports a sighting. See :function:`add()` function for more details about the parameters.''' return self.add(type=0, *args, **kwargs) def report_false_positive(self, *args, **kwargs): '''Reports a false-positive finding. 
See :function:`add()` function for more details about the parameters.''' return self.add(type=1, *args, **kwargs) def add(self, id=None, uuid=None, value=None, timestamp=None, type=0): '''Adds a sighthing to an attribute. It can be selected using its id, uuid, or value(s). :param id: Attribute's id :param uuid: Attribute's UUID :param value: Attribute's value (can be a list) :param timestamp: The date/time of the sighting, if None, it will be set to now() :returns: Nothing ''' req = dict(type=type) if id is not None: req.update(id=id) elif uuid is not None: req.update(uuid=uuid) elif value is not None: req.update(value=value) else: raise Exception('No attribute selector, use id, uuid or value') if not timestamp: timestamp = time.mktime(datetime.datetime.now().timetuple()) req.update(timestamp=int(timestamp)) body = json.dumps(req) self.server.POST('/sightings/add/', body, xml=False) class Events(object): """ This modules communicates Events with the MISP server. """ def __init__(self, server): self.server = server def get(self, evtid: int) -> MispEvent: """Fetches an event from the MISP server. :param evtid: Event ID (as an integer) :returns: :class:`MispEvent` object >>> server = MispServer() >>> event = server.events.get(12) """ raw_evt = self.server.GET('/events/%d' % evtid) response = objectify.fromstring(raw_evt) return MispEvent.from_xml_object(response.Event) def update(self, event: MispEvent) -> None: """Modifies an event and propagate a change to the MISP server. It will update the event's timestamp and reset the publishing state (set to false). 
:param event: The modified :class:`MispEvent` object >>> server = MispServer() >>> event = server.events.get(12) >>> event.distribution = 2 >>> server.events.update(event) """ event.timestamp = datetime.datetime.now() event.published=0 raw_evt = event.to_xml() self.server.POST('/events/%d' % event.id, raw_evt) def publish(self, event: MispEvent, with_email: Optional[bool]=False) -> bytes: if type(event) is MispEvent: evtid = event.id elif type(event) is int: evtid = event if with_email: uri = '/events/alert/%d' % (evtid) else: uri = '/events/publish/%d' % (evtid) return self.server.POST(uri, '') def put(self, event: MispEvent) -> None: """Creates an event on the MISP server. It will find an Event ID for you. :param event: The :class:`MispEvent` object to push """ if not event.id: lastevent = self.last() event.id = lastevent.id+1 # XXX: race-condition possible raw_evt = event.to_xml() self.server.POST('/events', raw_evt) def last(self) -> MispEvent: """Returns the last event published on the MISP server. :returns: Last :class:`MispEvent` object published """ return self.list(limit=1, direction='desc')[0] def list(self, limit: int=10, sort: Optional[str]='date', direction: Optional[str] ='asc') -> List[MispEvent]: """List events on the MISP servers according to the given criteria. :param limit: Maximum number of events to fetch :param sort: Sorting criteria (can be: date) :returns: Last :class:`MispEvent` object published """ url = '/events/index/sort:%s/direction:%s/limit:%d' % (sort, direction, limit) raw = self.server.GET(url) response = objectify.fromstring(raw) events = [] for evtobj in response.Event: events.append(MispEvent.from_xml_object(evtobj)) return events def search(self, attr_type=None, tags=None, value=None, category=None, org=None, date_from=None, date_to=None, last=None, quickfilter=None, evtid=None) -> List[MispEvent]: """Search events on the MISP server. 
Searching criteria: :param attr_type: The attribute type, any valid MISP attribute type is accepted. :param tags: To include a tag in the results just write its names into this parameter. To exclude a tag prepend it with a '!'. You can also chain several tag commands together with the '&&' operator. Please be aware the colons (:) cannot be used in the tag search. Use semicolons instead (the search will automatically search for colons instead). :param value: Search for the given value in the attributes' value field. :param category: The attribute category, any valid MISP attribute category is accepted. :param org: Search by the creator organisation by supplying the organisation idenfitier. :param date_from: Events with the date set to a date after the one specified in the from field (format: 2015-02-15) :param date_to: Events with the date set to a date before the one specified in the to field (format: 2015-02-15) :param last: Events published within the last x amount of time, where x can be defined in days, hours, minutes (for example 5d or 12h or 30m) :param quickfilter: Enabling this (by passing "1" as the argument) will make the search ignore all of the other arguments, except for the auth key and value. 
:param evtid: :returns: List of :class:`MispEvent` objects """ request = objectify.Element('request') #request.searchall = 1 if attr_type: request.type = attr_type if evtid: request.evtid = evtid if tags: request.tags = tags if value: request.value = value if category: request.category = category if org: request.org = org if date_to: request.date_to = date_to if date_from: request.date_from = date_from if last: request.last = last if quickfilter: request.quickfilter = quickfilter lxml.objectify.deannotate(request, xsi_nil=True) lxml.etree.cleanup_namespaces(request) raw = lxml.etree.tostring(request) raw = self.server.POST('/events/restSearch/download', raw) response = objectify.fromstring(raw) events = [] for evtobj in response.Event: events.append(MispEvent.from_xml_object(evtobj)) return events attr_categories = ['Internal reference', 'Targeting data', 'Antivirus detection', 'Payload delivery', 'Payload installation', 'Artifacts dropped', 'Persistence mechanism', 'Network activity', 'Payload type', 'Attribution', 'External analysis', 'Other', 'Advisory PDF', 'Advisory YAML', 'Financial fraud', 'Person', 'Social network', 'Support Tool'] attr_types = ['AS', 'aba-rtn', 'anonymised', 'attachment', 'authentihash', 'bank-account-nr', 'bic', 'bin', 'boolean', 'bro', 'btc', 'campaign-id', 'campaign-name', 'cc-number', 'cdhash', 'comment', 'cookie', 'cortex', 'counter', 'country-of-residence', 'cpe', 'date-of-birth', 'datetime', 'dns-soa-email', 'domain', 'domain|ip', 'email-attachment', 'email-body', 'email-dst', 'email-dst-display-name', 'email-header', 'email-message-id', 'email-mime-boundary', 'email-reply-to', 'email-src', 'email-src-display-name', 'email-subject', 'email-thread-index', 'email-x-mailer', 'filename', 'filename|authentihash', 'filename|impfuzzy', 'filename|imphash', 'filename|md5', 'filename|pehash', 'filename|sha1', 'filename|sha224', 'filename|sha256', 'filename|sha384', 'filename|sha512', 'filename|sha512/224', 'filename|sha512/256', 
'filename|ssdeep', 'filename|tlsh', 'first-name', 'float', 'frequent-flyer-number', 'gender', 'gene', 'github-organisation', 'github-repository', 'github-username', 'hassh-md5', 'hasshserver-md5', 'hex', 'hostname', 'hostname|port', 'http-method', 'iban', 'identity-card-number', 'impfuzzy', 'imphash', 'ip-dst', 'ip-dst|port', 'ip-src', 'ip-src|port', 'issue-date-of-the-visa', 'ja3-fingerprint-md5', 'jabber-id', 'last-name', 'link', 'mac-address', 'mac-eui-64', 'malware-sample', 'malware-type', 'md5', 'middle-name', 'mime-type', 'mobile-application-id', 'mutex', 'named', 'nationality', 'other', 'passenger-name-record-locator-number', 'passport-country', 'passport-expiration', 'passport-number', 'pattern-in-file', 'pattern-in-memory', 'pattern-in-traffic', 'payment-details', 'pdb', 'pehash', 'phone-number', 'place-of-birth', 'place-port-of-clearance', 'place-port-of-onward-foreign-destination', 'place-port-of-original-embarkation', 'port', 'primary-residence', 'prtn', 'redress-number', 'regkey', 'regkey|value', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'sha512/224', 'sha512/256', 'sigma', 'size-in-bytes', 'snort', 'special-service-request', 'ssdeep', 'stix2-pattern', 'target-email', 'target-external', 'target-location', 'target-machine', 'target-org', 'target-user', 'text', 'threat-actor', 'tlsh', 'travel-details', 'twitter-id', 'uri', 'url', 'user-agent', 'visa-number', 'vulnerability', 'whois-creation-date', 'whois-registrant-email', 'whois-registrant-name', 'whois-registrant-org', 'whois-registrant-phone', 'whois-registrar', 'windows-scheduled-task', 'windows-service-displayname', 'windows-service-name', 'x509-fingerprint-md5', 'x509-fingerprint-sha1', 'x509-fingerprint-sha256', 'xmr', 'yara', 'zeek'] class MispAttribute(MispBaseObject): def __init__(self): super(MispAttribute, self).__init__() self._value = None self._category = None self._type = None self._comment = None self._to_ids = None self._ShadowAttribute = None self._id = None self._event_id = None 
self.uuid = str(uuid.uuid1()) @property def id(self) -> int: return self._id or 0 @id.setter def id(self, value: int) -> None: self._id = value @property def comment(self) -> str: return self._comment or '' @comment.setter def comment(self, value: str): self._comment = value @property def event_id(self) -> int: return self._event_id @event_id.setter def event_id(self, value: int) -> None: self._event_id = value @property def value(self) -> str: return self._value @value.setter def value(self, value: str) -> None: """The value of the IOC. .. todo:: Note that no check is performed on the format of this value, we delegate this verification to the MISP server. """ self._value = value @property def category(self) -> str: return self._category @category.setter def category(self, value: str): if value not in attr_categories: raise ValueError('Invalid category for an attribute') self._category = value @property def type(self) -> str: """Getter/setter The setter will verify that the given value is valid. """ return self._type @type.setter def type(self, value: str): if value not in attr_types: raise ValueError('Invalid type for an attribute: ' + str(value)) self._type = value @property def to_ids(self) -> bool: """Boolean variable """ return self._to_ids @to_ids.setter def to_ids(self, value: bool): self._to_ids = int(value) @property def ShadowAttribute(self): return None @staticmethod def from_xml(s: str): """ Static method converting a serialized XML string into a :class:`MispAttribute` object. 
:example: >>> s = '<Attribute><id>87183</id><type>regkey|value</type><category>Persistencemechanism</category><to_ids>1</to_ids><uuid>562795f9-5723-4b96-8940-599b0a3ac101</uuid><event_id>486</event_id><distribution>1</distribution><timestamp>1445434872</timestamp><comment>loooool</comment><value>lol</value><ShadowAttribute/></Attribute>' >>> a = MispAttribute.from_xml(s) >>> type(a) <class 'misp.MispAttribute'> """ attr = objectify.fromstring(s) return MispAttribute.from_xml_object(attr) @staticmethod def from_xml_object(obj): if obj.tag.lower() != 'attribute': raise ValueError('Invalid Attribute XML') attr = MispAttribute() # String fields for field in ['uuid', 'type', 'category', 'comment', 'value']: val = getattr(obj, field) setattr(attr, field, str(val)) # Integer fields for field in ['distribution', 'to_ids', 'event_id', 'id', 'timestamp']: val = getattr(obj, field) setattr(attr, field, val) return attr def to_xml_object(self): attr = objectify.Element('Attribute') for field in ['distribution', 'type', 'category', 'to_ids', 'comment', 'value', 'event_id', 'timestamp', 'uuid', 'id']: val = getattr(self, field) setattr(attr, field, val) return attr class MispShadowAttribute(MispAttribute): """A shadow attribute is what human beings call proposal :) It is the same thing than a :class:`MispAttribute`. This module basically inherits everything from it. """ def __init__(self): super(MispShadowAttribute, self).__init__() @staticmethod def from_xml(s): """ Static method converting a serialized XML string into a :class:`MispShadowAttribute` object. :example: >>> s = '<ShadowAttribute>...</ShadowAttribute> >>> a = MispShadowAttribute.from_xml(s) >>> type(a) <class 'misp.MispShadowAttribute'> """ attr = objectify.fromstring(s) return MispShadowAttribute.from_xml_object(attr) @staticmethod def from_attribute(attr): """ Converts an attribute into a shadow attribute. 
:param attr: :class:`MispAttribute` instance to be converted :returns: Converted :class:`MispShadowAttribute` :example: >>> server = MispServer() >>> event = server.events.get(12) >>> attr = event.attributes[0] >>> prop = MispShadowAttribute.from_attribute(attr) """ assert attr is not MispAttribute prop = MispShadowAttribute() prop.distribution = attr.distribution prop.type = attr.type prop.comment = attr.comment prop.value = attr.value prop.category = attr.category prop.to_ids = attr.to_ids return prop @staticmethod def from_xml_object(obj): if obj.tag.lower() != 'shadowattribute': raise ValueError('Invalid ShadowAttribute XML (tag="%s")' % obj.tag.lower()) shadowattribute = MispShadowAttribute() for field in ['type', 'category', 'to_ids', 'comment', 'value', 'id']: try: val = getattr(obj, field) setattr(shadowattribute, field, val) except AttributeError: pass return shadowattribute def to_xml_object(self): attr = objectify.Element('ShadowAttribute') for field in ['type', 'category', 'to_ids', 'comment', 'value']: val = getattr(self, field) setattr(attr, field, val) return attr
apache-2.0
epitron/youtube-dl
youtube_dl/extractor/sohu.py
50
6911
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse_urlencode,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    try_get,
)


class SohuIE(InfoExtractor):
    """Extractor for tv.sohu.com and my.tv.sohu.com video pages."""

    # The 'mytv' group marks user-hosted videos (my.tv.sohu.com); the
    # conditional group (?(mytv)|n) requires a leading 'n' before the id
    # only for non-"my" URLs.
    _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'

    # Sohu videos give different MD5 sums on Travis CI and my machine
    _TESTS = [{
        'note': 'This video is available only in Mainland China',
        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
        'info_dict': {
            'id': '382479172',
            'ext': 'mp4',
            'title': 'MV:Far East Movement《The Illest》',
        },
        'skip': 'On available in China',
    }, {
        'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
        'info_dict': {
            'id': '409385080',
            'ext': 'mp4',
            'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
        }
    }, {
        'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
        'info_dict': {
            'id': '78693464',
            'ext': 'mp4',
            'title': '【爱范品】第31期:MWC见不到的奇葩手机',
        }
    }, {
        'note': 'Multipart video',
        'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
        'info_dict': {
            'id': '78910339',
            'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
        },
        'playlist': [{
            'info_dict': {
                'id': '78910339_part1',
                'ext': 'mp4',
                'duration': 294,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }, {
            'info_dict': {
                'id': '78910339_part2',
                'ext': 'mp4',
                'duration': 300,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }, {
            'info_dict': {
                'id': '78910339_part3',
                'ext': 'mp4',
                'duration': 150,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }]
    }, {
        'note': 'Video with title containing dash',
        'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
        'info_dict': {
            'id': '78932792',
            'ext': 'mp4',
            'title': 'youtube-dl testing video',
        },
        'params': {
            'skip_download': True
        }
    }]

    def _real_extract(self, url):
        # Fetch the JSON metadata for a video id; user ("my") videos use a
        # different API endpoint than videos on the main site.
        def _fetch_data(vid_id, mytv=False):
            if mytv:
                base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
            else:
                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='

            return self._download_json(
                base_data_url + vid_id, video_id,
                'Downloading JSON data for %s' % vid_id,
                headers=self.geo_verification_headers())

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        mytv = mobj.group('mytv') is not None

        webpage = self._download_webpage(url, video_id)

        # Strip the trailing " - 搜狐视频" (Sohu Video) site suffix from the
        # Open Graph title.
        title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage))

        vid = self._html_search_regex(
            r'var vid ?= ?["\'](\d+)["\']',
            webpage, 'video path')
        vid_data = _fetch_data(vid, mytv)
        if vid_data['play'] != 1:
            # play != 1: video is not playable; status 12 marks a broken
            # video, anything else is treated as a geo restriction.
            if vid_data.get('status') == 12:
                raise ExtractorError(
                    '%s said: There\'s something wrong in the video.' % self.IE_NAME,
                    expected=True)
            else:
                self.raise_geo_restricted(
                    '%s said: The video is only licensed to users in Mainland China.' % self.IE_NAME)

        # Collect per-quality metadata; the already-fetched vid_data is
        # reused for the quality that matches the page's default vid.
        formats_json = {}
        for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
            vid_id = vid_data['data'].get('%sVid' % format_id)
            if not vid_id:
                continue
            vid_id = compat_str(vid_id)
            formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)

        part_count = vid_data['data']['totalBlocks']

        playlist = []
        for i in range(part_count):
            formats = []
            for format_id, format_data in formats_json.items():
                allot = format_data['allot']

                data = format_data['data']
                clips_url = data['clipsURL']
                su = data['su']

                # 'newflv.sohu.ccgslb.net' acts as a placeholder returned
                # until a real CDN node is assigned; retry (feeding back the
                # 'nid' node id) until a concrete URL comes back, capped at
                # 5 retries.
                video_url = 'newflv.sohu.ccgslb.net'
                cdnId = None
                retries = 0

                while 'newflv.sohu.ccgslb.net' in video_url:
                    params = {
                        'prot': 9,
                        'file': clips_url[i],
                        'new': su[i],
                        'prod': 'flash',
                        'rb': 1,
                    }

                    if cdnId is not None:
                        params['idc'] = cdnId

                    download_note = 'Downloading %s video URL part %d of %d' % (
                        format_id, i + 1, part_count)

                    if retries > 0:
                        download_note += ' (retry #%d)' % retries
                    part_info = self._parse_json(self._download_webpage(
                        'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
                        video_id, download_note), video_id)

                    video_url = part_info['url']
                    cdnId = part_info.get('nid')

                    retries += 1
                    if retries > 5:
                        raise ExtractorError('Failed to get video URL')

                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'filesize': int_or_none(
                        try_get(data, lambda x: x['clipsBytes'][i])),
                    'width': int_or_none(data.get('width')),
                    'height': int_or_none(data.get('height')),
                    'fps': int_or_none(data.get('fps')),
                })
            self._sort_formats(formats)

            playlist.append({
                'id': '%s_part%d' % (video_id, i + 1),
                'title': title,
                'duration': vid_data['data']['clipsDuration'][i],
                'formats': formats,
            })

        if len(playlist) == 1:
            # Single-part video: flatten to a plain info dict.
            info = playlist[0]
            info['id'] = video_id
        else:
            # Multipart video: expose the parts as a multi_video playlist.
            info = {
                '_type': 'multi_video',
                'entries': playlist,
                'id': video_id,
                'title': title,
            }

        return info
unlicense
ROGUE-JCTD/geonode
geonode/maps/tests.py
26
22426
# -*- coding: utf-8 -*- ######################################################################### # # Copyright (C) 2012 OpenPlans # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### from lxml import etree from django.core.urlresolvers import reverse from django.test import TestCase from django.utils import simplejson as json from django.contrib.contenttypes.models import ContentType from agon_ratings.models import OverallRating from django.contrib.auth import get_user_model from geonode.layers.models import Layer from geonode.maps.models import Map from geonode.utils import default_map_config from geonode.base.populate_test_data import create_models from geonode.maps.tests_populate_maplayers import create_maplayers class MapsTest(TestCase): """Tests geonode.maps app/module """ fixtures = ['initial_data.json', 'bobby'] def setUp(self): self.user = 'admin' self.passwd = 'admin' create_models(type='map') create_models(type='layer') create_maplayers() default_abstract = "This is a demonstration of GeoNode, an application \ for assembling and publishing web based maps. After adding layers to the map, \ use the Save Map button above to contribute your map to the GeoNode \ community." default_title = "GeoNode Default Map" # This is a valid map viewer config, based on the sample data provided # by andreas in issue 566. 
-dwins viewer_config = """ { "defaultSourceType": "gx_wmssource", "about": { "title": "Title", "abstract": "Abstract" }, "sources": { "capra": { "url":"http://localhost:8080/geoserver/wms" } }, "map": { "projection":"EPSG:900913", "units":"m", "maxResolution":156543.0339, "maxExtent":[-20037508.34,-20037508.34,20037508.34,20037508.34], "center":[-9428760.8688778,1436891.8972581], "layers":[{ "source":"capra", "buffer":0, "wms":"capra", "name":"base:nic_admin" }], "keywords":["saving", "keywords"], "zoom":7 } } """ viewer_config_alternative = """ { "defaultSourceType": "gx_wmssource", "about": { "title": "Title2", "abstract": "Abstract2" }, "sources": { "capra": { "url":"http://localhost:8080/geoserver/wms" } }, "map": { "projection":"EPSG:900913", "units":"m", "maxResolution":156543.0339, "maxExtent":[-20037508.34,-20037508.34,20037508.34,20037508.34], "center":[-9428760.8688778,1436891.8972581], "layers":[{ "source":"capra", "buffer":0, "wms":"capra", "name":"base:nic_admin" }], "zoom":7 } } """ perm_spec = { "users": { "admin": [ "change_resourcebase", "change_resourcebase_permissions", "view_resourcebase"]}, "groups": {}} def test_map_json(self): # Test that saving a map when not logged in gives 401 response = self.client.put( reverse( 'map_json', args=( '1', )), data=self.viewer_config, content_type="text/json") self.assertEqual(response.status_code, 401) self.client.login(username=self.user, password=self.passwd) response = self.client.put( reverse( 'map_json', args=( '1', )), data=self.viewer_config_alternative, content_type="text/json") self.assertEqual(response.status_code, 200) map_obj = Map.objects.get(id=1) self.assertEquals(map_obj.title, "Title2") self.assertEquals(map_obj.abstract, "Abstract2") self.assertEquals(map_obj.layer_set.all().count(), 1) def test_map_save(self): """POST /maps/new/data -> Test saving a new map""" new_map = reverse("new_map_json") # Test that saving a map when not logged in gives 401 response = self.client.post( new_map, 
data=self.viewer_config, content_type="text/json") self.assertEqual(response.status_code, 401) # Test successful new map creation self.client.login(username=self.user, password=self.passwd) response = self.client.post( new_map, data=self.viewer_config, content_type="text/json") self.assertEquals(response.status_code, 200) map_id = int(json.loads(response.content)['id']) self.client.logout() # We have now 9 maps and 8 layers so the next pk will be 18 self.assertEquals(map_id, 18) map_obj = Map.objects.get(id=map_id) self.assertEquals(map_obj.title, "Title") self.assertEquals(map_obj.abstract, "Abstract") self.assertEquals(map_obj.layer_set.all().count(), 1) self.assertEquals(map_obj.keyword_list(), [u"keywords", u"saving"]) self.assertNotEquals(map_obj.bbox_x0, None) # Test an invalid map creation request self.client.login(username=self.user, password=self.passwd) response = self.client.post( new_map, data="not a valid viewer config", content_type="text/json") self.assertEquals(response.status_code, 400) self.client.logout() def test_map_fetch(self): """/maps/[id]/data -> Test fetching a map in JSON""" map_obj = Map.objects.get(id=1) map_obj.set_default_permissions() response = self.client.get(reverse('map_json', args=(map_obj.id,))) self.assertEquals(response.status_code, 200) cfg = json.loads(response.content) self.assertEquals( cfg["about"]["abstract"], 'GeoNode default map abstract') self.assertEquals(cfg["about"]["title"], 'GeoNode Default Map') self.assertEquals(len(cfg["map"]["layers"]), 5) def test_map_to_json(self): """ Make some assertions about the data structure produced for serialization to a JSON map configuration""" map_obj = Map.objects.get(id=1) cfg = map_obj.viewer_json(None) self.assertEquals( cfg['about']['abstract'], 'GeoNode default map abstract') self.assertEquals(cfg['about']['title'], 'GeoNode Default Map') def is_wms_layer(x): return cfg['sources'][x['source']]['ptype'] == 'gxp_wmscsource' layernames = [x['name'] for x in 
cfg['map']['layers'] if is_wms_layer(x)] self.assertEquals(layernames, ['geonode:CA', ]) def test_map_to_wmc(self): """ /maps/1/wmc -> Test map WMC export Make some assertions about the data structure produced for serialization to a Web Map Context Document """ map_obj = Map.objects.get(id=1) map_obj.set_default_permissions() response = self.client.get(reverse('map_wmc', args=(map_obj.id,))) self.assertEquals(response.status_code, 200) # check specific XPaths wmc = etree.fromstring(response.content) namespace = '{http://www.opengis.net/context}' title = '{ns}General/{ns}Title'.format(ns=namespace) abstract = '{ns}General/{ns}Abstract'.format(ns=namespace) self.assertEquals(wmc.attrib.get('id'), '1') self.assertEquals(wmc.find(title).text, 'GeoNode Default Map') self.assertEquals( wmc.find(abstract).text, 'GeoNode default map abstract') def test_newmap_to_json(self): """ Make some assertions about the data structure produced for serialization to a new JSON map configuration""" response = self.client.get(reverse('new_map_json')) cfg = json.loads(response.content) self.assertEquals(cfg['defaultSourceType'], "gxp_wmscsource") def test_map_details(self): """/maps/1 -> Test accessing the map browse view function""" map_obj = Map.objects.get(id=1) map_obj.set_default_permissions() response = self.client.get(reverse('map_detail', args=(map_obj.id,))) self.assertEquals(response.status_code, 200) def test_new_map_without_layers(self): # TODO: Should this test have asserts in it? 
self.client.get(reverse('new_map')) def test_new_map_with_layer(self): layer = Layer.objects.all()[0] self.client.get(reverse('new_map') + '?layer=' + layer.typename) def test_new_map_with_empty_bbox_layer(self): layer = Layer.objects.all()[0] self.client.get(reverse('new_map') + '?layer=' + layer.typename) def test_ajax_map_permissions(self): """Verify that the ajax_layer_permissions view is behaving as expected """ # Setup some layer names to work with mapid = Map.objects.all()[0].pk invalid_mapid = "42" def url(id): return reverse('resource_permissions', args=[id]) # Test that an invalid layer.typename is handled for properly response = self.client.post( url(invalid_mapid), data=json.dumps(self.perm_spec), content_type="application/json") self.assertEquals(response.status_code, 404) # Test that GET returns permissions response = self.client.get(url(mapid)) assert('permissions' in response.content) # Test that a user is required to have permissions # First test un-authenticated response = self.client.post( url(mapid), data=json.dumps(self.perm_spec), content_type="application/json") self.assertEquals(response.status_code, 401) # Next Test with a user that does NOT have the proper perms logged_in = self.client.login(username='bobby', password='bob') self.assertEquals(logged_in, True) response = self.client.post( url(mapid), data=json.dumps(self.perm_spec), content_type="application/json") self.assertEquals(response.status_code, 401) # Login as a user with the proper permission and test the endpoint logged_in = self.client.login(username='admin', password='admin') self.assertEquals(logged_in, True) response = self.client.post( url(mapid), data=json.dumps(self.perm_spec), content_type="application/json") # Test that the method returns 200 self.assertEquals(response.status_code, 200) # Test that the permissions specification is applied def test_map_metadata(self): """Test that map metadata can be properly rendered """ # first create a map # Test successful new map 
creation self.client.login(username=self.user, password=self.passwd) new_map = reverse('new_map_json') response = self.client.post( new_map, data=self.viewer_config, content_type="text/json") self.assertEquals(response.status_code, 200) map_id = int(json.loads(response.content)['id']) self.client.logout() url = reverse('map_metadata', args=(map_id,)) # test unauthenticated user to modify map metadata response = self.client.post(url) self.assertEquals(response.status_code, 302) # test a user without metadata modify permission self.client.login(username='norman', password='norman') response = self.client.post(url) self.assertEquals(response.status_code, 302) self.client.logout() # Now test with a valid user using GET method self.client.login(username=self.user, password=self.passwd) response = self.client.get(url) self.assertEquals(response.status_code, 200) # Now test with a valid user using POST method self.client.login(username=self.user, password=self.passwd) response = self.client.post(url) self.assertEquals(response.status_code, 200) # TODO: only invalid mapform is tested def test_map_remove(self): """Test that map can be properly removed """ # first create a map # Test successful new map creation self.client.login(username=self.user, password=self.passwd) new_map = reverse('new_map_json') response = self.client.post( new_map, data=self.viewer_config, content_type="text/json") self.assertEquals(response.status_code, 200) map_id = int(json.loads(response.content)['id']) self.client.logout() url = reverse('map_remove', args=(map_id,)) # test unauthenticated user to remove map response = self.client.post(url) self.assertEquals(response.status_code, 302) # test a user without map removal permission self.client.login(username='norman', password='norman') response = self.client.post(url) self.assertEquals(response.status_code, 302) self.client.logout() # Now test with a valid user using GET method self.client.login(username=self.user, password=self.passwd) response = 
self.client.get(url) self.assertEquals(response.status_code, 200) # Now test with a valid user using POST method, # which removes map and associated layers, and redirects webpage response = self.client.post(url) self.assertEquals(response.status_code, 302) self.assertEquals(response['Location'], 'http://testserver/maps/') # After removal, map is not existent response = self.client.get(url) self.assertEquals(response.status_code, 404) # Prepare map object for later test that if it is completely removed # map_obj = Map.objects.get(id=1) # TODO: Also associated layers are not existent # self.assertEquals(map_obj.layer_set.all().count(), 0) def test_map_embed(self): """Test that map can be properly embedded """ # first create a map # Test successful new map creation self.client.login(username=self.user, password=self.passwd) new_map = reverse('new_map_json') response = self.client.post( new_map, data=self.viewer_config, content_type="text/json") self.assertEquals(response.status_code, 200) map_id = int(json.loads(response.content)['id']) self.client.logout() url = reverse('map_embed', args=(map_id,)) url_no_id = reverse('map_embed') # Now test with a map id self.client.login(username=self.user, password=self.passwd) response = self.client.get(url) self.assertEquals(response.status_code, 200) # The embedded map is exempt from X-FRAME-OPTIONS restrictions. 
if hasattr(response, 'xframe_options_exempt'): self.assertTrue(response.xframe_options_exempt) # Config equals to that of the map whose id is given map_obj = Map.objects.get(id=map_id) config_map = map_obj.viewer_json(None) response_config_dict = json.loads(response.context['config']) self.assertEquals( config_map['about']['abstract'], response_config_dict['about']['abstract']) self.assertEquals( config_map['about']['title'], response_config_dict['about']['title']) # Now test without a map id response = self.client.get(url_no_id) self.assertEquals(response.status_code, 200) # Config equals to that of the default map config_default = default_map_config()[0] response_config_dict = json.loads(response.context['config']) self.assertEquals( config_default['about']['abstract'], response_config_dict['about']['abstract']) self.assertEquals( config_default['about']['title'], response_config_dict['about']['title']) def test_map_view(self): """Test that map view can be properly rendered """ # first create a map # Test successful new map creation self.client.login(username=self.user, password=self.passwd) new_map = reverse('new_map_json') response = self.client.post( new_map, data=self.viewer_config, content_type="text/json") self.assertEquals(response.status_code, 200) map_id = int(json.loads(response.content)['id']) self.client.logout() url = reverse('map_view', args=(map_id,)) # test unauthenticated user to view map response = self.client.get(url) self.assertEquals(response.status_code, 200) # TODO: unauthenticated user can still access the map view # test a user without map view permission self.client.login(username='norman', password='norman') response = self.client.get(url) self.assertEquals(response.status_code, 200) self.client.logout() # TODO: the user can still access the map view without permission # Now test with a valid user using GET method self.client.login(username=self.user, password=self.passwd) response = self.client.get(url) 
self.assertEquals(response.status_code, 200) # Config equals to that of the map whose id is given map_obj = Map.objects.get(id=map_id) config_map = map_obj.viewer_json(None) response_config_dict = json.loads(response.context['config']) self.assertEquals( config_map['about']['abstract'], response_config_dict['about']['abstract']) self.assertEquals( config_map['about']['title'], response_config_dict['about']['title']) def test_new_map_config(self): """Test that new map config can be properly assigned """ self.client.login(username='admin', password='admin') # Test successful new map creation m = Map() admin_user = get_user_model().objects.get(username='admin') layer_name = Layer.objects.all()[0].typename m.create_from_layer_list(admin_user, [layer_name], "title", "abstract") map_id = m.id url = reverse('new_map_json') # Test GET method with COPY response = self.client.get(url, {'copy': map_id}) self.assertEquals(response.status_code, 200) map_obj = Map.objects.get(id=map_id) config_map = map_obj.viewer_json(None) response_config_dict = json.loads(response.content) self.assertEquals( config_map['map']['layers'], response_config_dict['map']['layers']) # Test GET method no COPY and no layer in params response = self.client.get(url) self.assertEquals(response.status_code, 200) config_default = default_map_config()[0] response_config_dict = json.loads(response.content) self.assertEquals( config_default['about']['abstract'], response_config_dict['about']['abstract']) self.assertEquals( config_default['about']['title'], response_config_dict['about']['title']) # Test GET method no COPY but with layer in params response = self.client.get(url, {'layer': layer_name}) self.assertEquals(response.status_code, 200) response_dict = json.loads(response.content) self.assertEquals(response_dict['fromLayer'], True) # Test POST method without authentication self.client.logout() response = self.client.post(url, {'layer': layer_name}) self.assertEquals(response.status_code, 401) # Test 
POST method with authentication and a layer in params self.client.login(username='admin', password='admin') response = self.client.post(url, {'layer': layer_name}) # Should not accept the request self.assertEquals(response.status_code, 400) # Test POST method with map data in json format response = self.client.post( url, data=self.viewer_config, content_type="text/json") self.assertEquals(response.status_code, 200) map_id = int(json.loads(response.content)['id']) # Test methods other than GET or POST and no layer in params response = self.client.put(url) self.assertEquals(response.status_code, 405) def test_rating_map_remove(self): """Test map rating is removed on map remove """ self.client.login(username=self.user, password=self.passwd) new_map = reverse('new_map_json') # Create the map response = self.client.post( new_map, data=self.viewer_config, content_type="text/json") map_id = int(json.loads(response.content)['id']) # Create the rating with the correct content type ctype = ContentType.objects.get(model='map') OverallRating.objects.create( category=1, object_id=map_id, content_type=ctype, rating=3) # Remove the map response = self.client.post(reverse('map_remove', args=(map_id,))) self.assertEquals(response.status_code, 302) # Check there are no ratings matching the removed map rating = OverallRating.objects.filter(category=1, object_id=map_id) self.assertEquals(rating.count(), 0)
gpl-3.0
ypu/UATFramework
steps/common.py
3
3982
'''Common test methods''' from behave import * @given(u'"{host}" hosts from dynamic inventory') def step_impl(context, host): context.inventory = "dynamic" context.target_host = host @given(u'"{host}" hosts from static inventory') def step_impl(context, host): context.inventory = "static" context.target_host = host @given(u'"{rpm}" is already installed on "{host}"') def step_impl(context, rpm, host): '''Install RPM on host but fail if not already installed''' r = context.remote_cmd("yum", host, remote_user="root", module_args='name=%s state=present' % rpm) if r: for i in r: assert i['msg'] == '' and i['results'] != [] else: assert False @given(u'"{rpm}" is already installed') def step_impl(context, rpm): '''Install RPM on host but fail if not already installed''' context.execute_steps(u""" given "{package_name}" is already installed on "{host}" """.format(package_name=rpm,host=context.target_host)) @given(u'"{rpms}" are already installed on "{host}"') def step_impl(context, rpms, host): '''Install RPM on host but fail if not already installed''' r = context.remote_cmd("yum", host, remote_user="root", module_args='name=%s' % rpms) if r: for i in r: assert i['msg'] == '' and i['results'] != [] else: assert False @given(u'"{rpms}" are already installed') def step_impl(context, rpms): '''Install RPM on host but fail if not already installed''' context.execute_steps(u""" "given {package_names}" are already installed on "{host}" """.format(package_names=rpms,host=context.target_host)) @given(u'"{unit}" is already running on "{host}"') def step_impl(context, unit, host): '''Ensure service is running but fail if not''' r = context.remote_cmd("service", host, module_args='name=%s state=running enabled=yes' % unit) if r: for i in r: assert i['changed'] is False else: assert False @then(u'"{unit}" is started and enabled on "{host}"') def step_impl(context, unit, host): '''Start service but fail if already running''' r = context.remote_cmd('service', host, module_args='name=%s 
state=running enabled=yes' % unit) if r: for i in r: assert i['state'] == 'started' and i['enabled'] is True else: assert False @then(u'"{unit}" is restarted on "{host}"') def step_impl(context, unit, host): '''Restart service''' r = context.remote_cmd('service', host, module_args='name=%s state=restarted' % unit) if r: for i in r: assert i['state'] == 'started' and i['changed'] is True else: assert False @given(u'"{host}" hosts can be pinged') @given('"{host}" host') def step(context, host): '''Verify we can ping the host host: a host from the ansible inventory file''' assert context.remote_cmd('ping', host) @given('run command "{cmd}" on "{host}"') @when('run command "{cmd}" on "{host}"') @then('run command "{cmd}" on "{host}"') def step(context, cmd, host): '''Run an Ansible module on a host directly from scenario cmd: a module name plus arguments <module> key=value [key=value ...] or... <module> <param> host: a host from the inventory file''' module, args = None, None if ' ' in cmd: # we only split on the first space to get the module name # since module_args are also space-delimited module, args = cmd.split(' ', 1) else: module = cmd assert context.remote_cmd(module, host, module_args=args)
gpl-2.0
andrewyoung1991/abjad
abjad/tools/scoretools/apply_full_measure_tuplets_to_contents_of_measures_in_expr.py
2
1842
# -*- encoding: utf-8 -*- import copy from abjad.tools.topleveltools import iterate from abjad.tools.topleveltools import mutate def apply_full_measure_tuplets_to_contents_of_measures_in_expr( expr, supplement=None): r'''Applies full-measure tuplets to contents of measures in `expr`: :: >>> staff = Staff([ ... Measure((2, 8), "c'8 d'8"), ... Measure((3, 8), "e'8 f'8 g'8")]) >>> show(staff) # doctest: +SKIP .. doctest:: >>> print(format(staff)) \new Staff { { \time 2/8 c'8 d'8 } { \time 3/8 e'8 f'8 g'8 } } :: >>> scoretools.apply_full_measure_tuplets_to_contents_of_measures_in_expr(staff) >>> show(staff) # doctest: +SKIP .. doctest:: >>> print(format(staff)) \new Staff { { \time 2/8 { c'8 d'8 } } { \time 3/8 { e'8 f'8 g'8 } } } Returns none. ''' from abjad.tools import selectiontools from abjad.tools import scoretools supplement = selectiontools.ContiguousSelection(supplement) assert isinstance(supplement, selectiontools.ContiguousSelection) for measure in iterate(expr).by_class(scoretools.Measure): target_duration = measure._preprolated_duration tuplet = scoretools.FixedDurationTuplet(target_duration, measure[:]) if supplement: new_supplement = mutate(supplement).copy() tuplet.extend(new_supplement)
gpl-3.0
le9i0nx/ansible
lib/ansible/modules/network/sros/sros_rollback.py
20
6906
#!/usr/bin/python # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: sros_rollback version_added: "2.2" author: "Peter Sprygada (@privateip)" short_description: Configure Nokia SR OS rollback description: - Configure the rollback feature on remote Nokia devices running the SR OS operating system. this module provides a stateful implementation for managing the configuration of the rollback feature extends_documentation_fragment: sros options: rollback_location: description: - The I(rollback_location) specifies the location and filename of the rollback checkpoint files. This argument supports any valid local or remote URL as specified in SR OS required: false default: null remote_max_checkpoints: description: - The I(remote_max_checkpoints) argument configures the maximum number of rollback files that can be transferred and saved to a remote location. Valid values for this argument are in the range of 1 to 50 required: false default: null local_max_checkpoints: description: - The I(local_max_checkpoints) argument configures the maximum number of rollback files that can be saved on the devices local compact flash. Valid values for this argument are in the range of 1 to 50 required: false default: null rescue_location: description: - The I(rescue_location) specifies the location of the rescue file. This argument supports any valid local or remote URL as specified in SR OS required: false default: null state: description: - The I(state) argument specifies the state of the configuration entries in the devices active configuration. When the state value is set to C(true) the configuration is present in the devices active configuration. 
When the state value is set to C(false) the configuration values are removed from the devices active configuration. required: false default: present choices: ['present', 'absent'] """ EXAMPLES = """ # Note: examples below use the following provider dict to handle # transport and authentication to the node. --- vars: cli: host: "{{ inventory_hostname }}" username: admin password: admin transport: cli --- - name: configure rollback location sros_rollback: rollback_location: "cb3:/ansible" provider: "{{ cli }}" - name: remove all rollback configuration sros_rollback: state: absent provider: "{{ cli }}" """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device returned: always type: list sample: ['...', '...'] """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.common.config import NetworkConfig, dumps from ansible.module_utils.network.sros.sros import load_config, get_config, sros_argument_spec, check_args def invoke(name, *args, **kwargs): func = globals().get(name) if func: return func(*args, **kwargs) def sanitize_config(lines): commands = list() for line in lines: for index, entry in enumerate(commands): if line.startswith(entry): del commands[index] break commands.append(line) return commands def present(module, commands): setters = set() for key, value in module.argument_spec.items(): if module.params[key] is not None: setter = value.get('setter') or 'set_%s' % key if setter not in setters: setters.add(setter) invoke(setter, module, commands) def absent(module, commands): config = get_config(module) if 'rollback-location' in config: commands.append('configure system rollback no rollback-location') if 'rescue-location' in config: commands.append('configure system rollback no rescue-location') if 'remote-max-checkpoints' in config: commands.append('configure system rollback no remote-max-checkpoints') if 'local-max-checkpoints' in config: commands.append('configure system rollback no 
remote-max-checkpoints') def set_rollback_location(module, commands): value = module.params['rollback_location'] commands.append('configure system rollback rollback-location "%s"' % value) def set_local_max_checkpoints(module, commands): value = module.params['local_max_checkpoints'] if not 1 <= value <= 50: module.fail_json(msg='local_max_checkpoints must be between 1 and 50') commands.append('configure system rollback local-max-checkpoints %s' % value) def set_remote_max_checkpoints(module, commands): value = module.params['remote_max_checkpoints'] if not 1 <= value <= 50: module.fail_json(msg='remote_max_checkpoints must be between 1 and 50') commands.append('configure system rollback remote-max-checkpoints %s' % value) def set_rescue_location(module, commands): value = module.params['rescue_location'] commands.append('configure system rollback rescue-location "%s"' % value) def get_device_config(module): contents = get_config(module) return NetworkConfig(indent=4, contents=contents) def main(): """ main entry point for module execution """ argument_spec = dict( rollback_location=dict(), local_max_checkpoints=dict(type='int'), remote_max_checkpoints=dict(type='int'), rescue_location=dict(), state=dict(default='present', choices=['present', 'absent']) ) argument_spec.update(sros_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) state = module.params['state'] result = dict(changed=False) commands = list() invoke(state, module, commands) candidate = NetworkConfig(indent=4, contents='\n'.join(commands)) config = get_device_config(module) configobjs = candidate.difference(config) if configobjs: # commands = dumps(configobjs, 'lines') commands = dumps(configobjs, 'commands') commands = sanitize_config(commands.split('\n')) result['updates'] = commands result['commands'] = commands # send the configuration commands to the device and merge # them with the current running config if not module.check_mode: load_config(module, 
commands) result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
tushartushar/Puppeteer
SmellDetector/HieSmellDectector.py
1
1705
import os import SourceModel.SM_File from SmellDetector import Constants as CONSTS, Utilities def detectSmells(folder, outputFile): detectBrokenHie(folder, outputFile) def detectBrokenHie(folder, outputFile): modulesFolder = getModulesFolder(folder) if modulesFolder: for dir in os.listdir(modulesFolder): if os.path.isdir(os.path.join(modulesFolder, dir)): detectBroHierarchy(os.path.join(modulesFolder, dir), outputFile) def detectBroHierarchy(folder, outputFile): classNames, superClassNames = collectClassNames(folder) #print("classNames: " + str(classNames)) #print("superClassNames: " + str(superClassNames)) for superClass in superClassNames: if not classNames.__contains__(superClass): Utilities.reportSmell(outputFile, folder, CONSTS.SMELL_BRO_HIE, CONSTS.MODULE_RES) def getModulesFolder(folder): for aFile in os.listdir(folder): if os.path.isdir(os.path.join(folder,aFile)): if aFile.__contains__(CONSTS.MODULES): return os.path.join(folder, aFile) return "" def collectClassNames(folder): classNames = [] parentClassNames = [] for root, dirs, files in os.walk(folder): for file in files: if file.endswith(".pp") and not os.path.islink(os.path.join(root, file)): fileObj = SourceModel.SM_File.SM_File(os.path.join(root, file)) classes, pClasses = fileObj.getClassHierarchyInfo() if len(classes) > 0: classNames.extend(classes) if len(pClasses) > 0: parentClassNames.extend(pClasses) return classNames, parentClassNames
apache-2.0
smishenk/blink-crosswalk
Tools/Scripts/webkitpy/layout_tests/port/config.py
68
2993
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # FIXME: Remove this file altogether. It's useless in a Blink checkout. 
import logging from webkitpy.common import webkit_finder _log = logging.getLogger(__name__) class Config(object): _FLAGS_FROM_CONFIGURATIONS = { "Debug": "--debug", "Release": "--release", } def __init__(self, executive, filesystem, port_implementation=None): self._executive = executive self._filesystem = filesystem self._webkit_finder = webkit_finder.WebKitFinder(self._filesystem) self._default_configuration = None self._build_directories = {} self._port_implementation = port_implementation def build_directory(self, configuration): """Returns the path to the build directory for the configuration.""" if configuration: flags = ["--configuration", self.flag_for_configuration(configuration)] else: configuration = "" flags = [] if self._port_implementation: flags.append('--' + self._port_implementation) if not self._build_directories.get(configuration): self._build_directories[configuration] = self._webkit_finder.path_from_webkit_base('out', configuration) return self._build_directories[configuration] def flag_for_configuration(self, configuration): return self._FLAGS_FROM_CONFIGURATIONS[configuration] def default_configuration(self): return 'Release'
bsd-3-clause
jackru/pybrain
pybrain/tests/unittests/datasets/test_datasets_datasets.py
25
4534
# -*- coding: utf-8 -*- """ >>> from scipy import array >>> from pybrain import datasets >>> from copy import deepcopy >>> d = datasets.dataset.DataSet() >>> d.addField('input', 2) >>> type(d.data['input']) <type 'numpy.ndarray'> >>> len(d.data['input']) 0 >>> x, y = d.data['input'].shape >>> str(x) 0 >>> str(y) 2 Build up a DataSet for testing: >>> d.append('input', (array((0, 0)))) >>> d.append('input', (array((1, 1)))) >>> d.append('input', (array((2, 2)))) >>> d.append('input', (array((3, 3)))) >>> d.append('input', (array((4, 4)))) >>> d.append('input', (array((5, 5)))) >>> d.append('input', (array((6, 6)))) >>> d.append('input', (array((7, 7)))) >>> list(d.batches('input', 3)) [array([[ 0., 0.], [ 1., 1.], [ 2., 2.]]), array([[ 3., 3.], [ 4., 4.], [ 5., 5.]]), array([[ 6., 6.], [ 7., 7.]])] >>> list(d.batches('input', 2)) [array([[ 0., 0.], [ 1., 1.]]), array([[ 2., 2.], [ 3., 3.]]), array([[ 4., 4.], [ 5., 5.]]), array([[ 6., 6.], [ 7., 7.]])] >>> p = reversed(range(4)) >>> print('\\n'.join(repr(b) for b in d.batches('input', 2, p))) array([[ 6., 6.], [ 7., 7.]]) array([[ 4., 4.], [ 5., 5.]]) array([[ 2., 2.], [ 3., 3.]]) array([[ 0., 0.], [ 1., 1.]]) Serialization ============= >>> from cStringIO import StringIO UnsupervisedDataSet ----------------- >>> d = datasets.UnsupervisedDataSet(2) >>> d.addSample([0,0]) >>> d.addSample([0,1]) >>> d.addSample([1,0]) >>> d.addSample([1,1]) >>> for sample in d: ... print(sample) ... 
[array([ 0., 0.])] [array([ 0., 1.])] [array([ 1., 0.])] [array([ 1., 1.])] ClassificationDataSet --------------------- >>> class_labels = 'Urd', 'Verdandi', 'Skuld' >>> d = datasets.ClassificationDataSet(2,1, class_labels=class_labels) >>> d.appendLinked( [ 0.1, 0.5 ] , [0] ) >>> d.appendLinked( [ 1.2, 1.2 ] , [1] ) >>> d.appendLinked( [ 1.4, 1.6 ] , [1] ) >>> d.appendLinked( [ 1.6, 1.8 ] , [1] ) >>> d.appendLinked( [ 0.10, 0.80 ] , [2] ) >>> d.appendLinked( [ 0.20, 0.90 ] , [2] ) >>> saveInvariant(d) True ImportanceDataSet ----------------- SequentialDataSet ----------------- >>> d = datasets.SequentialDataSet(0, 1) >>> d.addSample([],[0]) >>> d.addSample([],[1]) >>> d.addSample([],[0]) >>> d.addSample([],[1]) >>> d.addSample([],[0]) >>> d.addSample([],[1]) >>> d.newSequence() >>> d.addSample([],[0]) >>> d.addSample([],[1]) >>> d.addSample([],[0]) >>> d.addSample([],[1]) >>> d.addSample([],[0]) >>> d.addSample([],[1]) >>> saveInvariant(d) True ReinforcementDataSet -------------------- >>> d = datasets.ReinforcementDataSet(1, 1) >>> d.addSample([1,], [1,], [1,]) >>> d.addSample([1,], [1,], [1,]) >>> d.addSample([1,], [1,], [1,]) >>> saveInvariant(d) True """ __author__ = 'Justin Bayer, bayerj@in.tum.de' from io import StringIO from pybrain.tests import runModuleTestSuite def saveInvariant(dataset): # Save and reconstruct s = StringIO() dataset.saveToFileLike(s) s.seek(0) reconstructed = dataset.__class__.loadFromFileLike(s) orig_array_data = sorted(dataset.data.items()) rec_array_data = sorted(reconstructed.data.items()) equal = True for (k, v), (k_, v_) in zip(orig_array_data, rec_array_data): if k != k_: print(("Differing keys: %s <=> %s" % (list(dataset.dataset.keys()), list(rec_array_data.dataset.keys())))) equal = False break if not (v == v_).all(): print(("Differing values for %s" % k)) print(v) print(v_) equal = False break if not equal: return False rec_dict = reconstructed.__dict__ orig_dict = dataset.__dict__ del rec_dict['_convert'] del 
orig_dict['_convert'] del rec_dict['data'] del orig_dict['data'] if rec_dict == orig_dict: return True else: print(rec_dict) print(orig_dict) return False if __name__ == "__main__": runModuleTestSuite(__import__('__main__'))
bsd-3-clause
olemis/brython
www/src/Lib/encodings/iso8859_4.py
37
13683
""" Python Character Mapping Codec iso8859_4 generated from 'MAPPINGS/ISO8859/8859-4.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-4', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 
0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET 
'^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> <control> 
'\xa0' # 0xA0 -> NO-BREAK SPACE '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK '\u0138' # 0xA2 -> LATIN SMALL LETTER KRA '\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA '\xa4' # 0xA4 -> CURRENCY SIGN '\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE '\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA '\xa7' # 0xA7 -> SECTION SIGN '\xa8' # 0xA8 -> DIAERESIS '\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON '\u0112' # 0xAA -> LATIN CAPITAL LETTER E WITH MACRON '\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA '\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE '\xad' # 0xAD -> SOFT HYPHEN '\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON '\xaf' # 0xAF -> MACRON '\xb0' # 0xB0 -> DEGREE SIGN '\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK '\u02db' # 0xB2 -> OGONEK '\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA '\xb4' # 0xB4 -> ACUTE ACCENT '\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE '\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA '\u02c7' # 0xB7 -> CARON '\xb8' # 0xB8 -> CEDILLA '\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON '\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON '\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA '\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE '\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG '\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON '\u014b' # 0xBF -> LATIN SMALL LETTER ENG '\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE '\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK '\xcb' # 0xCB -> LATIN CAPITAL 
LETTER E WITH DIAERESIS '\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE '\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA '\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON '\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xd7' # 0xD7 -> MULTIPLICATION SIGN '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE '\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE '\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S '\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe6' # 0xE6 -> LATIN SMALL LETTER AE '\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS '\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE '\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA 
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON '\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS '\xf7' # 0xF7 -> DIVISION SIGN '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE '\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS '\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE '\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON '\u02d9' # 0xFF -> DOT ABOVE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
bcorbet/SickRage
lib/hachoir_parser/program/prc.py
90
2778
""" PRC (Palm resource) parser. Author: Sebastien Ponce Creation date: 29 october 2008 """ from lib.hachoir_parser import Parser from lib.hachoir_core.field import (FieldSet, UInt16, UInt32, TimestampMac32, String, RawBytes) from lib.hachoir_core.endian import BIG_ENDIAN class PRCHeader(FieldSet): static_size = 78*8 def createFields(self): yield String(self, "name", 32, "Name") yield UInt16(self, "flags", "Flags") yield UInt16(self, "version", "Version") yield TimestampMac32(self, "create_time", "Creation time") yield TimestampMac32(self, "mod_time", "Modification time") yield TimestampMac32(self, "backup_time", "Backup time") yield UInt32(self, "mod_num", "mod num") yield UInt32(self, "app_info", "app info") yield UInt32(self, "sort_info", "sort info") yield UInt32(self, "type", "type") yield UInt32(self, "id", "id") yield UInt32(self, "unique_id_seed", "unique_id_seed") yield UInt32(self, "next_record_list", "next_record_list") yield UInt16(self, "num_records", "num_records") class ResourceHeader(FieldSet): static_size = 10*8 def createFields(self): yield String(self, "name", 4, "Name of the resource") yield UInt16(self, "flags", "ID number of the resource") yield UInt32(self, "offset", "Pointer to the resource data") def createDescription(self): return "Resource Header (%s)" % self["name"] class PRCFile(Parser): PARSER_TAGS = { "id": "prc", "category": "program", "file_ext": ("prc", ""), "min_size": ResourceHeader.static_size, # At least one program header "mime": ( u"application/x-pilot-prc", u"application/x-palmpilot"), "description": "Palm Resource File" } endian = BIG_ENDIAN def validate(self): # FIXME: Implement the validation function! 
return False def createFields(self): # Parse header and program headers yield PRCHeader(self, "header", "Header") lens = [] firstOne = True poff = 0 for index in xrange(self["header/num_records"].value): r = ResourceHeader(self, "res_header[]") if firstOne: firstOne = False else: lens.append(r["offset"].value - poff) poff = r["offset"].value yield r lens.append(self.size/8 - poff) yield UInt16(self, "placeholder", "Place holder bytes") for i in range(len(lens)): yield RawBytes(self, "res[]", lens[i], '"'+self["res_header["+str(i)+"]/name"].value+"\" Resource") def createDescription(self): return "Palm Resource file"
gpl-3.0
paolodedios/tensorflow
tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py
6
7110
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized
import numpy as np

from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class FromSparseTensorSlicesTest(test_base.DatasetTestBase,
                                 parameterized.TestCase):
  # These tests run under TF1 graph mode only: `from_sparse_tensor_slices`
  # is absent from the v2 Dataset API (see testFromSparseTensorSlicesError).

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=1, mode=["graph"]),
          combinations.combine(slices=[[
              [1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
          ], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
  def testFromSparseTensorSlices(self, slices):
    """Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer
    # get_next yields (indices, values, dense_shape) components; rewrap
    # them as a SparseTensor for the assertions below.
    get_next = sparse_tensor.SparseTensor(*iterator.get_next())

    with self.cached_session() as sess:
      # Test with sparse tensor in the appropriate order.
      # pylint: disable=g-complex-comprehension
      indices = np.array(
          [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
      values = np.array([val for s in slices for val in s])
      # pylint: enable=g-complex-comprehension
      # The +1 makes the dense shape one column wider than the longest
      # slice; the sliced dense_shape is compared against it below.
      dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
      sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
                                                    dense_shape)
      sess.run(init_op, feed_dict={st: sparse_feed})
      for i, s in enumerate(slices):
        results = sess.run(get_next)
        self.assertAllEqual(s, results.values)
        expected_indices = np.array(
            [[j] for j in range(len(slices[i]))]).reshape([-1, 1])
        self.assertAllEqual(expected_indices, results.indices)
        self.assertAllEqual(dense_shape[1:], results.dense_shape)
      # After len(slices) elements the iterator must be exhausted.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=1, mode=["graph"]),
          combinations.combine(slices=[[
              [1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
          ], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
  def testFromSparseTensorSlicesInReverse(self, slices):
    """Test a dataset based on slices of a `tf.sparse.SparseTensor` in reverse order."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer

    with self.cached_session() as sess:
      # pylint: disable=g-complex-comprehension
      indices = np.array(
          [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
      values = np.array([val for s in slices for val in s])
      # pylint: enable=g-complex-comprehension
      dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])

      # Test with sparse tensor in the reverse order, which is not
      # currently supported.
      reverse_order_indices = indices[::-1, :]
      reverse_order_values = values[::-1]
      sparse_feed = sparse_tensor.SparseTensorValue(
          reverse_order_indices, reverse_order_values, dense_shape)
      # Initialization itself must reject the out-of-order input.
      with self.assertRaises(errors.UnimplementedError):
        sess.run(init_op, feed_dict={st: sparse_feed})

  @combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
  def testEmptySparseTensorSlices(self):
    """Test a dataset based on slices of an empty `tf.sparse.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer
    get_next = sparse_tensor.SparseTensor(*iterator.get_next())

    with self.cached_session() as sess:
      # Test with an empty sparse tensor.
      empty_indices = np.empty((0, 4), dtype=np.int64)
      empty_values = np.empty((0,), dtype=np.float64)
      empty_dense_shape = [0, 4, 37, 9]
      sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
                                                    empty_dense_shape)
      # A zero-length leading dimension means the dataset yields nothing.
      sess.run(init_op, feed_dict={st: sparse_feed})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  @combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"]))
  def testFromSparseTensorSlicesError(self):
    # In TF2 the method no longer exists on Dataset at all.
    with self.assertRaises(AttributeError):
      dataset_ops.Dataset.from_sparse_tensor_slices(None)


class FromSparseTensorSlicesCheckpointTest(
    checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
  # Verifies the dataset can be checkpointed and restored mid-iteration.

  def _build_sparse_tensor_slice_dataset(self, slices):
    # pylint: disable=g-complex-comprehension
    indices = np.array(
        [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
        dtype=np.int64)
    values = np.array([val for s in slices for val in s], dtype=np.float64)
    # pylint: enable=g-complex-comprehension
    dense_shape = np.array(
        [len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
    sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape)
    return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)

  @combinations.generate(
      combinations.times(test_base.v1_only_combinations(),
                         checkpoint_test_base.default_test_combinations()))
  def test(self, verify_fn):
    slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
    # num_outputs=9 matches len(slices): one element per row of the tensor.
    verify_fn(
        self,
        lambda: self._build_sparse_tensor_slice_dataset(slices),
        num_outputs=9,
        sparse_tensors=True)


if __name__ == "__main__":
  test.main()
apache-2.0
bguillot/OpenUpgrade
addons/base_action_rule/test_models.py
40
1088
from openerp.osv import fields, osv

# Workflow states offered by the test model's ``state`` selection field.
AVAILABLE_STATES = [
    ('draft', 'New'),
    ('cancel', 'Cancelled'),
    ('open', 'In Progress'),
    ('pending', 'Pending'),
    ('done', 'Closed')
]


class lead_test(osv.Model):
    # Minimal lead-like model used to exercise base.action.rule in tests.
    _name = "base.action.rule.lead.test"
    _columns = {
        'name': fields.char('Subject', size=64, required=True, select=1),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'state': fields.selection(AVAILABLE_STATES, string="Status", readonly=True),
        'active': fields.boolean('Active', required=False),
        'partner_id': fields.many2one('res.partner', 'Partner', ondelete='set null'),
        'date_action_last': fields.datetime('Last Action', readonly=1),
    }

    # New records start active and in the 'draft' state.
    _defaults = {
        'state' : 'draft',
        'active' : True,
    }

    def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
        # No-op stub: presumably called by action-rule machinery on this
        # model; this test model has no mail.thread behavior — TODO confirm.
        pass

    def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
        # No-op stub for the same reason as message_post.
        pass
agpl-3.0
QuickSander/CouchPotatoServer
libs/requests/packages/chardet/mbcssm.py
982
19608
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .constants import eStart, eError, eItsMe # BIG5 BIG5_cls = ( 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 1,1,1,0,1,1,1,1, # 18 - 1f 1,1,1,1,1,1,1,1, # 20 - 27 1,1,1,1,1,1,1,1, # 28 - 2f 1,1,1,1,1,1,1,1, # 30 - 37 1,1,1,1,1,1,1,1, # 38 - 3f 2,2,2,2,2,2,2,2, # 40 - 47 2,2,2,2,2,2,2,2, # 48 - 4f 2,2,2,2,2,2,2,2, # 50 - 57 2,2,2,2,2,2,2,2, # 58 - 5f 2,2,2,2,2,2,2,2, # 60 - 67 2,2,2,2,2,2,2,2, # 68 - 6f 2,2,2,2,2,2,2,2, # 70 - 77 2,2,2,2,2,2,2,1, # 78 - 7f 4,4,4,4,4,4,4,4, # 80 - 87 4,4,4,4,4,4,4,4, # 88 - 8f 4,4,4,4,4,4,4,4, # 90 - 97 4,4,4,4,4,4,4,4, # 98 - 9f 4,3,3,3,3,3,3,3, # a0 - a7 3,3,3,3,3,3,3,3, # a8 - af 3,3,3,3,3,3,3,3, # b0 - b7 3,3,3,3,3,3,3,3, # b8 - bf 3,3,3,3,3,3,3,3, # c0 - c7 3,3,3,3,3,3,3,3, # c8 - cf 3,3,3,3,3,3,3,3, # d0 - d7 3,3,3,3,3,3,3,3, 
# d8 - df 3,3,3,3,3,3,3,3, # e0 - e7 3,3,3,3,3,3,3,3, # e8 - ef 3,3,3,3,3,3,3,3, # f0 - f7 3,3,3,3,3,3,3,0 # f8 - ff ) BIG5_st = ( eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07 eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17 ) Big5CharLenTable = (0, 1, 1, 2, 0) Big5SMModel = {'classTable': BIG5_cls, 'classFactor': 5, 'stateTable': BIG5_st, 'charLenTable': Big5CharLenTable, 'name': 'Big5'} # CP949 CP949_cls = ( 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f 1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f 1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f 4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f 1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f 5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f 0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f 6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f 6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af 7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf 7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff ) CP949_st = ( #cls= 0 1 2 3 4 5 6 7 8 9 # previous state = eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3 eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4 eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5 eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6 ) CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2) CP949SMModel = {'classTable': CP949_cls, 'classFactor': 10, 'stateTable': CP949_st, 'charLenTable': CP949CharLenTable, 'name': 'CP949'} # EUC-JP EUCJP_cls 
= ( 4,4,4,4,4,4,4,4, # 00 - 07 4,4,4,4,4,4,5,5, # 08 - 0f 4,4,4,4,4,4,4,4, # 10 - 17 4,4,4,5,4,4,4,4, # 18 - 1f 4,4,4,4,4,4,4,4, # 20 - 27 4,4,4,4,4,4,4,4, # 28 - 2f 4,4,4,4,4,4,4,4, # 30 - 37 4,4,4,4,4,4,4,4, # 38 - 3f 4,4,4,4,4,4,4,4, # 40 - 47 4,4,4,4,4,4,4,4, # 48 - 4f 4,4,4,4,4,4,4,4, # 50 - 57 4,4,4,4,4,4,4,4, # 58 - 5f 4,4,4,4,4,4,4,4, # 60 - 67 4,4,4,4,4,4,4,4, # 68 - 6f 4,4,4,4,4,4,4,4, # 70 - 77 4,4,4,4,4,4,4,4, # 78 - 7f 5,5,5,5,5,5,5,5, # 80 - 87 5,5,5,5,5,5,1,3, # 88 - 8f 5,5,5,5,5,5,5,5, # 90 - 97 5,5,5,5,5,5,5,5, # 98 - 9f 5,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,2,2,2, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,2,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 0,0,0,0,0,0,0,0, # e0 - e7 0,0,0,0,0,0,0,0, # e8 - ef 0,0,0,0,0,0,0,0, # f0 - f7 0,0,0,0,0,0,0,5 # f8 - ff ) EUCJP_st = ( 3, 4, 3, 5,eStart,eError,eError,eError,#00-07 eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17 eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f 3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27 ) EUCJPCharLenTable = (2, 2, 2, 3, 1, 0) EUCJPSMModel = {'classTable': EUCJP_cls, 'classFactor': 6, 'stateTable': EUCJP_st, 'charLenTable': EUCJPCharLenTable, 'name': 'EUC-JP'} # EUC-KR EUCKR_cls = ( 1,1,1,1,1,1,1,1, # 00 - 07 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 1,1,1,0,1,1,1,1, # 18 - 1f 1,1,1,1,1,1,1,1, # 20 - 27 1,1,1,1,1,1,1,1, # 28 - 2f 1,1,1,1,1,1,1,1, # 30 - 37 1,1,1,1,1,1,1,1, # 38 - 3f 1,1,1,1,1,1,1,1, # 40 - 47 1,1,1,1,1,1,1,1, # 48 - 4f 1,1,1,1,1,1,1,1, # 50 - 57 1,1,1,1,1,1,1,1, # 58 - 5f 1,1,1,1,1,1,1,1, # 60 - 67 1,1,1,1,1,1,1,1, # 68 - 6f 1,1,1,1,1,1,1,1, # 70 - 77 1,1,1,1,1,1,1,1, # 78 - 7f 0,0,0,0,0,0,0,0, # 80 - 87 0,0,0,0,0,0,0,0, # 88 - 8f 0,0,0,0,0,0,0,0, # 90 - 97 0,0,0,0,0,0,0,0, # 98 - 9f 0,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,3,3,3, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 
2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,3,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 2,2,2,2,2,2,2,2, # e0 - e7 2,2,2,2,2,2,2,2, # e8 - ef 2,2,2,2,2,2,2,2, # f0 - f7 2,2,2,2,2,2,2,0 # f8 - ff ) EUCKR_st = ( eError,eStart, 3,eError,eError,eError,eError,eError,#00-07 eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f ) EUCKRCharLenTable = (0, 1, 2, 0) EUCKRSMModel = {'classTable': EUCKR_cls, 'classFactor': 4, 'stateTable': EUCKR_st, 'charLenTable': EUCKRCharLenTable, 'name': 'EUC-KR'} # EUC-TW EUCTW_cls = ( 2,2,2,2,2,2,2,2, # 00 - 07 2,2,2,2,2,2,0,0, # 08 - 0f 2,2,2,2,2,2,2,2, # 10 - 17 2,2,2,0,2,2,2,2, # 18 - 1f 2,2,2,2,2,2,2,2, # 20 - 27 2,2,2,2,2,2,2,2, # 28 - 2f 2,2,2,2,2,2,2,2, # 30 - 37 2,2,2,2,2,2,2,2, # 38 - 3f 2,2,2,2,2,2,2,2, # 40 - 47 2,2,2,2,2,2,2,2, # 48 - 4f 2,2,2,2,2,2,2,2, # 50 - 57 2,2,2,2,2,2,2,2, # 58 - 5f 2,2,2,2,2,2,2,2, # 60 - 67 2,2,2,2,2,2,2,2, # 68 - 6f 2,2,2,2,2,2,2,2, # 70 - 77 2,2,2,2,2,2,2,2, # 78 - 7f 0,0,0,0,0,0,0,0, # 80 - 87 0,0,0,0,0,0,6,0, # 88 - 8f 0,0,0,0,0,0,0,0, # 90 - 97 0,0,0,0,0,0,0,0, # 98 - 9f 0,3,4,4,4,4,4,4, # a0 - a7 5,5,1,1,1,1,1,1, # a8 - af 1,1,1,1,1,1,1,1, # b0 - b7 1,1,1,1,1,1,1,1, # b8 - bf 1,1,3,1,3,3,3,3, # c0 - c7 3,3,3,3,3,3,3,3, # c8 - cf 3,3,3,3,3,3,3,3, # d0 - d7 3,3,3,3,3,3,3,3, # d8 - df 3,3,3,3,3,3,3,3, # e0 - e7 3,3,3,3,3,3,3,3, # e8 - ef 3,3,3,3,3,3,3,3, # f0 - f7 3,3,3,3,3,3,3,0 # f8 - ff ) EUCTW_st = ( eError,eError,eStart, 3, 3, 3, 4,eError,#00-07 eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17 eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f 5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27 eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f ) EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3) EUCTWSMModel = {'classTable': EUCTW_cls, 'classFactor': 7, 'stateTable': EUCTW_st, 'charLenTable': EUCTWCharLenTable, 'name': 'x-euc-tw'} # GB2312 
GB2312_cls = ( 1,1,1,1,1,1,1,1, # 00 - 07 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 1,1,1,0,1,1,1,1, # 18 - 1f 1,1,1,1,1,1,1,1, # 20 - 27 1,1,1,1,1,1,1,1, # 28 - 2f 3,3,3,3,3,3,3,3, # 30 - 37 3,3,1,1,1,1,1,1, # 38 - 3f 2,2,2,2,2,2,2,2, # 40 - 47 2,2,2,2,2,2,2,2, # 48 - 4f 2,2,2,2,2,2,2,2, # 50 - 57 2,2,2,2,2,2,2,2, # 58 - 5f 2,2,2,2,2,2,2,2, # 60 - 67 2,2,2,2,2,2,2,2, # 68 - 6f 2,2,2,2,2,2,2,2, # 70 - 77 2,2,2,2,2,2,2,4, # 78 - 7f 5,6,6,6,6,6,6,6, # 80 - 87 6,6,6,6,6,6,6,6, # 88 - 8f 6,6,6,6,6,6,6,6, # 90 - 97 6,6,6,6,6,6,6,6, # 98 - 9f 6,6,6,6,6,6,6,6, # a0 - a7 6,6,6,6,6,6,6,6, # a8 - af 6,6,6,6,6,6,6,6, # b0 - b7 6,6,6,6,6,6,6,6, # b8 - bf 6,6,6,6,6,6,6,6, # c0 - c7 6,6,6,6,6,6,6,6, # c8 - cf 6,6,6,6,6,6,6,6, # d0 - d7 6,6,6,6,6,6,6,6, # d8 - df 6,6,6,6,6,6,6,6, # e0 - e7 6,6,6,6,6,6,6,6, # e8 - ef 6,6,6,6,6,6,6,6, # f0 - f7 6,6,6,6,6,6,6,0 # f8 - ff ) GB2312_st = ( eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07 eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17 4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27 eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f ) # To be accurate, the length of class 6 can be either 2 or 4. # But it is not necessary to discriminate between the two since # it is used for frequency analysis only, and we are validing # each code range there as well. So it is safe to set it to be # 2 here. 
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2) GB2312SMModel = {'classTable': GB2312_cls, 'classFactor': 7, 'stateTable': GB2312_st, 'charLenTable': GB2312CharLenTable, 'name': 'GB2312'} # Shift_JIS SJIS_cls = ( 1,1,1,1,1,1,1,1, # 00 - 07 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 1,1,1,0,1,1,1,1, # 18 - 1f 1,1,1,1,1,1,1,1, # 20 - 27 1,1,1,1,1,1,1,1, # 28 - 2f 1,1,1,1,1,1,1,1, # 30 - 37 1,1,1,1,1,1,1,1, # 38 - 3f 2,2,2,2,2,2,2,2, # 40 - 47 2,2,2,2,2,2,2,2, # 48 - 4f 2,2,2,2,2,2,2,2, # 50 - 57 2,2,2,2,2,2,2,2, # 58 - 5f 2,2,2,2,2,2,2,2, # 60 - 67 2,2,2,2,2,2,2,2, # 68 - 6f 2,2,2,2,2,2,2,2, # 70 - 77 2,2,2,2,2,2,2,1, # 78 - 7f 3,3,3,3,3,3,3,3, # 80 - 87 3,3,3,3,3,3,3,3, # 88 - 8f 3,3,3,3,3,3,3,3, # 90 - 97 3,3,3,3,3,3,3,3, # 98 - 9f #0xa0 is illegal in sjis encoding, but some pages does #contain such byte. We need to be more error forgiven. 2,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,2,2,2, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,2,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 3,3,3,3,3,3,3,3, # e0 - e7 3,3,3,3,3,4,4,4, # e8 - ef 4,4,4,4,4,4,4,4, # f0 - f7 4,4,4,4,4,0,0,0 # f8 - ff ) SJIS_st = ( eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07 eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17 ) SJISCharLenTable = (0, 1, 1, 2, 0, 0) SJISSMModel = {'classTable': SJIS_cls, 'classFactor': 6, 'stateTable': SJIS_st, 'charLenTable': SJISCharLenTable, 'name': 'Shift_JIS'} # UCS2-BE UCS2BE_cls = ( 0,0,0,0,0,0,0,0, # 00 - 07 0,0,1,0,0,2,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,3,0,0,0,0, # 18 - 1f 0,0,0,0,0,0,0,0, # 20 - 27 0,3,3,3,3,3,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 0,0,0,0,0,0,0,0, # 40 - 47 0,0,0,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 
0,0,0,0,0,0,0,0, # 78 - 7f 0,0,0,0,0,0,0,0, # 80 - 87 0,0,0,0,0,0,0,0, # 88 - 8f 0,0,0,0,0,0,0,0, # 90 - 97 0,0,0,0,0,0,0,0, # 98 - 9f 0,0,0,0,0,0,0,0, # a0 - a7 0,0,0,0,0,0,0,0, # a8 - af 0,0,0,0,0,0,0,0, # b0 - b7 0,0,0,0,0,0,0,0, # b8 - bf 0,0,0,0,0,0,0,0, # c0 - c7 0,0,0,0,0,0,0,0, # c8 - cf 0,0,0,0,0,0,0,0, # d0 - d7 0,0,0,0,0,0,0,0, # d8 - df 0,0,0,0,0,0,0,0, # e0 - e7 0,0,0,0,0,0,0,0, # e8 - ef 0,0,0,0,0,0,0,0, # f0 - f7 0,0,0,0,0,0,4,5 # f8 - ff ) UCS2BE_st = ( 5, 7, 7,eError, 4, 3,eError,eError,#00-07 eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17 6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f 6, 6, 6, 6, 5, 7, 7,eError,#20-27 5, 8, 6, 6,eError, 6, 6, 6,#28-2f 6, 6, 6, 6,eError,eError,eStart,eStart #30-37 ) UCS2BECharLenTable = (2, 2, 2, 0, 2, 2) UCS2BESMModel = {'classTable': UCS2BE_cls, 'classFactor': 6, 'stateTable': UCS2BE_st, 'charLenTable': UCS2BECharLenTable, 'name': 'UTF-16BE'} # UCS2-LE UCS2LE_cls = ( 0,0,0,0,0,0,0,0, # 00 - 07 0,0,1,0,0,2,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,3,0,0,0,0, # 18 - 1f 0,0,0,0,0,0,0,0, # 20 - 27 0,3,3,3,3,3,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 0,0,0,0,0,0,0,0, # 40 - 47 0,0,0,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,0,0,0,0,0, # 78 - 7f 0,0,0,0,0,0,0,0, # 80 - 87 0,0,0,0,0,0,0,0, # 88 - 8f 0,0,0,0,0,0,0,0, # 90 - 97 0,0,0,0,0,0,0,0, # 98 - 9f 0,0,0,0,0,0,0,0, # a0 - a7 0,0,0,0,0,0,0,0, # a8 - af 0,0,0,0,0,0,0,0, # b0 - b7 0,0,0,0,0,0,0,0, # b8 - bf 0,0,0,0,0,0,0,0, # c0 - c7 0,0,0,0,0,0,0,0, # c8 - cf 0,0,0,0,0,0,0,0, # d0 - d7 0,0,0,0,0,0,0,0, # d8 - df 0,0,0,0,0,0,0,0, # e0 - e7 0,0,0,0,0,0,0,0, # e8 - ef 0,0,0,0,0,0,0,0, # f0 - f7 0,0,0,0,0,0,4,5 # f8 - ff ) UCS2LE_st = ( 6, 6, 7, 6, 4, 3,eError,eError,#00-07 eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f eItsMe,eItsMe, 5, 5, 
5,eError,eItsMe,eError,#10-17 5, 5, 5,eError, 5,eError, 6, 6,#18-1f 7, 6, 8, 8, 5, 5, 5,eError,#20-27 5, 5, 5,eError,eError,eError, 5, 5,#28-2f 5, 5, 5,eError, 5,eError,eStart,eStart #30-37 ) UCS2LECharLenTable = (2, 2, 2, 2, 2, 2) UCS2LESMModel = {'classTable': UCS2LE_cls, 'classFactor': 6, 'stateTable': UCS2LE_st, 'charLenTable': UCS2LECharLenTable, 'name': 'UTF-16LE'} # UTF-8 UTF8_cls = ( 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 1,1,1,0,1,1,1,1, # 18 - 1f 1,1,1,1,1,1,1,1, # 20 - 27 1,1,1,1,1,1,1,1, # 28 - 2f 1,1,1,1,1,1,1,1, # 30 - 37 1,1,1,1,1,1,1,1, # 38 - 3f 1,1,1,1,1,1,1,1, # 40 - 47 1,1,1,1,1,1,1,1, # 48 - 4f 1,1,1,1,1,1,1,1, # 50 - 57 1,1,1,1,1,1,1,1, # 58 - 5f 1,1,1,1,1,1,1,1, # 60 - 67 1,1,1,1,1,1,1,1, # 68 - 6f 1,1,1,1,1,1,1,1, # 70 - 77 1,1,1,1,1,1,1,1, # 78 - 7f 2,2,2,2,3,3,3,3, # 80 - 87 4,4,4,4,4,4,4,4, # 88 - 8f 4,4,4,4,4,4,4,4, # 90 - 97 4,4,4,4,4,4,4,4, # 98 - 9f 5,5,5,5,5,5,5,5, # a0 - a7 5,5,5,5,5,5,5,5, # a8 - af 5,5,5,5,5,5,5,5, # b0 - b7 5,5,5,5,5,5,5,5, # b8 - bf 0,0,6,6,6,6,6,6, # c0 - c7 6,6,6,6,6,6,6,6, # c8 - cf 6,6,6,6,6,6,6,6, # d0 - d7 6,6,6,6,6,6,6,6, # d8 - df 7,8,8,8,8,8,8,8, # e0 - e7 8,8,8,8,8,9,8,8, # e8 - ef 10,11,11,11,11,11,11,11, # f0 - f7 12,13,13,13,14,15,0,0 # f8 - ff ) UTF8_st = ( eError,eStart,eError,eError,eError,eError, 12, 10,#00-07 9, 11, 8, 7, 6, 5, 4, 3,#08-0f eError,eError,eError,eError,eError,eError,eError,eError,#10-17 eError,eError,eError,eError,eError,eError,eError,eError,#18-1f eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27 eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f eError,eError, 5, 5, 5, 5,eError,eError,#30-37 eError,eError,eError,eError,eError,eError,eError,eError,#38-3f eError,eError,eError, 5, 5, 5,eError,eError,#40-47 eError,eError,eError,eError,eError,eError,eError,eError,#48-4f eError,eError, 7, 7, 7, 7,eError,eError,#50-57 eError,eError,eError,eError,eError,eError,eError,eError,#58-5f 
eError,eError,eError,eError, 7, 7,eError,eError,#60-67 eError,eError,eError,eError,eError,eError,eError,eError,#68-6f eError,eError, 9, 9, 9, 9,eError,eError,#70-77 eError,eError,eError,eError,eError,eError,eError,eError,#78-7f eError,eError,eError,eError,eError, 9,eError,eError,#80-87 eError,eError,eError,eError,eError,eError,eError,eError,#88-8f eError,eError, 12, 12, 12, 12,eError,eError,#90-97 eError,eError,eError,eError,eError,eError,eError,eError,#98-9f eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7 eError,eError,eError,eError,eError,eError,eError,eError,#a8-af eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7 eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7 eError,eError,eError,eError,eError,eError,eError,eError #c8-cf ) UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) UTF8SMModel = {'classTable': UTF8_cls, 'classFactor': 16, 'stateTable': UTF8_st, 'charLenTable': UTF8CharLenTable, 'name': 'UTF-8'} # flake8: noqa
gpl-3.0
crosswalk-project/chromium-crosswalk-efl
tools/safely-roll-deps.py
60
5639
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generate a CL to roll a DEPS entry to the specified revision number and post it to Rietveld so that the CL will land automatically if it passes the commit-queue's checks. """ import logging import optparse import os import re import sys import find_depot_tools import scm import subprocess2 def die_with_error(msg): print >> sys.stderr, msg sys.exit(1) def process_deps(path, project, new_rev, is_dry_run): """Update project_revision to |new_issue|. A bit hacky, could it be made better? """ content = open(path).read() # Hack for Blink to get the AutoRollBot running again. if project == "blink": project = "webkit" old_line = r"(\s+)'%s_revision': '([0-9a-f]{2,40})'," % project new_line = r"\1'%s_revision': '%s'," % (project, new_rev) new_content = re.sub(old_line, new_line, content, 1) old_rev = re.search(old_line, content).group(2) if not old_rev or new_content == content: die_with_error('Failed to update the DEPS file') if not is_dry_run: open(path, 'w').write(new_content) return old_rev class PrintSubprocess(object): """Wrapper for subprocess2 which prints out every command.""" def __getattr__(self, attr): def _run_subprocess2(cmd, *args, **kwargs): print cmd sys.stdout.flush() return getattr(subprocess2, attr)(cmd, *args, **kwargs) return _run_subprocess2 prnt_subprocess = PrintSubprocess() def main(): tool_dir = os.path.dirname(os.path.abspath(__file__)) parser = optparse.OptionParser(usage='%prog [options] <project> <new rev>', description=sys.modules[__name__].__doc__) parser.add_option('-v', '--verbose', action='count', default=0) parser.add_option('--dry-run', action='store_true') parser.add_option('-f', '--force', action='store_true', help='Make destructive changes to the local checkout if ' 'necessary.') parser.add_option('--commit', action='store_true', default=True, 
help='(default) Put change in commit queue on upload.') parser.add_option('--no-commit', action='store_false', dest='commit', help='Don\'t put change in commit queue on upload.') parser.add_option('-r', '--reviewers', default='', help='Add given users as either reviewers or TBR as' ' appropriate.') parser.add_option('--upstream', default='origin/master', help='(default "%default") Use given start point for change' ' to upload. For instance, if you use the old git workflow,' ' you might set it to "origin/trunk".') parser.add_option('--cc', help='CC email addresses for issue.') parser.add_option('-m', '--message', help='Custom commit message.') options, args = parser.parse_args() logging.basicConfig( level= [logging.WARNING, logging.INFO, logging.DEBUG][ min(2, options.verbose)]) if len(args) != 2: parser.print_help() exit(0) root_dir = os.path.dirname(tool_dir) os.chdir(root_dir) project = args[0] new_rev = args[1] # Silence the editor. os.environ['EDITOR'] = 'true' if options.force and not options.dry_run: prnt_subprocess.check_call(['git', 'clean', '-d', '-f']) prnt_subprocess.call(['git', 'rebase', '--abort']) old_branch = scm.GIT.GetBranch(root_dir) new_branch = '%s_roll' % project if options.upstream == new_branch: parser.error('Cannot set %s as its own upstream.' 
% new_branch) if old_branch == new_branch: if options.force: if not options.dry_run: prnt_subprocess.check_call(['git', 'checkout', options.upstream, '-f']) prnt_subprocess.call(['git', 'branch', '-D', old_branch]) else: parser.error('Please delete the branch %s and move to a different branch' % new_branch) if not options.dry_run: prnt_subprocess.check_call(['git', 'fetch', 'origin']) prnt_subprocess.call(['git', 'svn', 'fetch']) branch_cmd = ['git', 'checkout', '-b', new_branch, options.upstream] if options.force: branch_cmd.append('-f') prnt_subprocess.check_output(branch_cmd) try: old_rev = process_deps(os.path.join(root_dir, 'DEPS'), project, new_rev, options.dry_run) print '%s roll %s:%s' % (project.title(), old_rev, new_rev) review_field = 'TBR' if options.commit else 'R' commit_msg = options.message or '%s roll %s:%s\n' % (project.title(), old_rev, new_rev) commit_msg += '\n%s=%s\n' % (review_field, options.reviewers) if options.dry_run: print 'Commit message: ' + commit_msg return 0 prnt_subprocess.check_output(['git', 'commit', '-m', commit_msg, 'DEPS']) prnt_subprocess.check_call(['git', 'diff', '--no-ext-diff', options.upstream]) upload_cmd = ['git', 'cl', 'upload', '--bypass-hooks'] if options.commit: upload_cmd.append('--use-commit-queue') if options.reviewers: upload_cmd.append('--send-mail') if options.cc: upload_cmd.extend(['--cc', options.cc]) prnt_subprocess.check_call(upload_cmd) finally: if not options.dry_run: prnt_subprocess.check_output(['git', 'checkout', old_branch]) prnt_subprocess.check_output(['git', 'branch', '-D', new_branch]) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
andmos/ansible
lib/ansible/modules/cloud/amazon/redshift.py
21
19859
#!/usr/bin/python # Copyright 2014 Jens Carl, Hothead Games Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- author: - "Jens Carl (@j-carl), Hothead Games Inc." - "Rafael Driutti (@rafaeldriutti)" module: redshift version_added: "2.2" short_description: create, delete, or modify an Amazon Redshift instance description: - Creates, deletes, or modifies amazon Redshift cluster instances. options: command: description: - Specifies the action to take. required: true choices: [ 'create', 'facts', 'delete', 'modify' ] identifier: description: - Redshift cluster identifier. required: true node_type: description: - The node type of the cluster. Must be specified when command=create. choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dc2.large', 'dc2.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'] username: description: - Master database username. Used only when command=create. password: description: - Master database password. Used only when command=create. cluster_type: description: - The type of cluster. choices: ['multi-node', 'single-node' ] default: 'single-node' db_name: description: - Name of the database. availability_zone: description: - availability zone in which to launch cluster aliases: ['zone', 'aws_zone'] number_of_nodes: description: - Number of nodes. Only used when cluster_type=multi-node. 
cluster_subnet_group_name: description: - which subnet to place the cluster aliases: ['subnet'] cluster_security_groups: description: - in which security group the cluster belongs aliases: ['security_groups'] vpc_security_group_ids: description: - VPC security group aliases: ['vpc_security_groups'] skip_final_cluster_snapshot: description: - skip a final snapshot before deleting the cluster. Used only when command=delete. aliases: ['skip_final_snapshot'] default: 'no' version_added: "2.4" final_cluster_snapshot_identifier: description: - identifier of the final snapshot to be created before deleting the cluster. If this parameter is provided, final_cluster_snapshot_identifier must be false. Used only when command=delete. aliases: ['final_snapshot_id'] version_added: "2.4" preferred_maintenance_window: description: - maintenance window aliases: ['maintance_window', 'maint_window'] cluster_parameter_group_name: description: - name of the cluster parameter group aliases: ['param_group_name'] automated_snapshot_retention_period: description: - period when the snapshot take place aliases: ['retention_period'] port: description: - which port the cluster is listining cluster_version: description: - which version the cluster should have aliases: ['version'] choices: ['1.0'] allow_version_upgrade: description: - flag to determinate if upgrade of version is possible aliases: ['version_upgrade'] default: 'yes' publicly_accessible: description: - if the cluster is accessible publicly or not default: 'no' encrypted: description: - if the cluster is encrypted or not default: 'no' elastic_ip: description: - if the cluster has an elastic IP or not new_cluster_identifier: description: - Only used when command=modify. aliases: ['new_identifier'] wait: description: - When command=create, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated. 
type: bool default: 'no' wait_timeout: description: - how long before wait gives up, in seconds default: 300 requirements: [ 'boto3' ] extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Basic cluster provisioning example - redshift: > command=create node_type=ds1.xlarge identifier=new_cluster username=cluster_admin password=1nsecure # Cluster delete example - redshift: command: delete identifier: new_cluster skip_final_cluster_snapshot: true wait: true ''' RETURN = ''' cluster: description: dictionary containing all the cluster information returned: success type: complex contains: identifier: description: Id of the cluster. returned: success type: str sample: "new_redshift_cluster" create_time: description: Time of the cluster creation as timestamp. returned: success type: float sample: 1430158536.308 status: description: Stutus of the cluster. returned: success type: str sample: "available" db_name: description: Name of the database. returned: success type: str sample: "new_db_name" availability_zone: description: Amazon availability zone where the cluster is located. "None" until cluster is available. returned: success type: str sample: "us-east-1b" maintenance_window: description: Time frame when maintenance/upgrade are done. returned: success type: str sample: "sun:09:30-sun:10:00" private_ip_address: description: Private IP address of the main node. returned: success type: str sample: "10.10.10.10" public_ip_address: description: Public IP address of the main node. "None" when enhanced_vpc_routing is enabled. returned: success type: str sample: "0.0.0.0" port: description: Port of the cluster. "None" until cluster is available. returned: success type: int sample: 5439 url: description: FQDN of the main cluster node. "None" until cluster is available. returned: success type: str sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com" enhanced_vpc_routing: description: status of the enhanced vpc routing feature. 
returned: success type: boolean ''' try: import botocore except ImportError: pass # handled by AnsibleAWSModule from ansible.module_utils.ec2 import ec2_argument_spec, snake_dict_to_camel_dict from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code def _collect_facts(resource): """Transfrom cluster information to dict.""" facts = { 'identifier': resource['ClusterIdentifier'], 'status': resource['ClusterStatus'], 'username': resource['MasterUsername'], 'db_name': resource['DBName'], 'maintenance_window': resource['PreferredMaintenanceWindow'], 'enhanced_vpc_routing': resource['EnhancedVpcRouting'] } for node in resource['ClusterNodes']: if node['NodeRole'] in ('SHARED', 'LEADER'): facts['private_ip_address'] = node['PrivateIPAddress'] if facts['enhanced_vpc_routing'] is False: facts['public_ip_address'] = node['PublicIPAddress'] else: facts['public_ip_address'] = None break # Some parameters are not ready instantly if you don't wait for available # cluster status facts['create_time'] = None facts['url'] = None facts['port'] = None facts['availability_zone'] = None if resource['ClusterStatus'] != "creating": facts['create_time'] = resource['ClusterCreateTime'] facts['url'] = resource['Endpoint']['Address'] facts['port'] = resource['Endpoint']['Port'] facts['availability_zone'] = resource['AvailabilityZone'] return facts def create_cluster(module, redshift): """ Create a new cluster module: AnsibleModule object redshift: authenticated redshift connection object Returns: """ identifier = module.params.get('identifier') node_type = module.params.get('node_type') username = module.params.get('username') password = module.params.get('password') d_b_name = module.params.get('db_name') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') changed = True # Package up the optional parameters params = {} for p in ('cluster_type', 'cluster_security_groups', 'vpc_security_group_ids', 'cluster_subnet_group_name', 
'availability_zone', 'preferred_maintenance_window', 'cluster_parameter_group_name', 'automated_snapshot_retention_period', 'port', 'cluster_version', 'allow_version_upgrade', 'number_of_nodes', 'publicly_accessible', 'encrypted', 'elastic_ip', 'enhanced_vpc_routing'): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) if d_b_name: params['d_b_name'] = d_b_name try: redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] changed = False except is_boto3_error_code('ClusterNotFound'): try: redshift.create_cluster(ClusterIdentifier=identifier, NodeType=node_type, MasterUsername=username, MasterUserPassword=password, **snake_dict_to_camel_dict(params, capitalize_first=True)) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create cluster") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe cluster") if wait: attempts = wait_timeout // 60 waiter = redshift.get_waiter('cluster_available') try: waiter.wait( ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout waiting for the cluster creation") try: resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe cluster") return(changed, _collect_facts(resource)) def describe_cluster(module, redshift): """ Collect data about the cluster. 
module: Ansible module object redshift: authenticated redshift connection object """ identifier = module.params.get('identifier') try: resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Error describing cluster") return(True, _collect_facts(resource)) def delete_cluster(module, redshift): """ Delete a cluster. module: Ansible module object redshift: authenticated redshift connection object """ identifier = module.params.get('identifier') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') params = {} for p in ('skip_final_cluster_snapshot', 'final_cluster_snapshot_identifier'): if p in module.params: # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) try: redshift.delete_cluster( ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) ) except is_boto3_error_code('ClusterNotFound'): return(False, {}) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete cluster") if wait: attempts = wait_timeout // 60 waiter = redshift.get_waiter('cluster_deleted') try: waiter.wait( ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout deleting the cluster") return(True, {}) def modify_cluster(module, redshift): """ Modify an existing cluster. 
module: Ansible module object redshift: authenticated redshift connection object """ identifier = module.params.get('identifier') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') # Package up the optional parameters params = {} for p in ('cluster_type', 'cluster_security_groups', 'vpc_security_group_ids', 'cluster_subnet_group_name', 'availability_zone', 'preferred_maintenance_window', 'cluster_parameter_group_name', 'automated_snapshot_retention_period', 'port', 'cluster_version', 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'): # https://github.com/boto/boto3/issues/400 if module.params.get(p) is not None: params[p] = module.params.get(p) # enhanced_vpc_routing parameter change needs an exclusive request if module.params.get('enhanced_vpc_routing') is not None: try: redshift.modify_cluster(ClusterIdentifier=identifier, EnhancedVpcRouting=module.params.get('enhanced_vpc_routing')) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) if wait: attempts = wait_timeout // 60 waiter = redshift.get_waiter('cluster_available') try: waiter.wait( ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout waiting for cluster enhanced vpc routing modification" ) # change the rest try: redshift.modify_cluster(ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True)) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) if module.params.get('new_cluster_identifier'): identifier = module.params.get('new_cluster_identifier') if wait: attempts = wait_timeout // 60 waiter2 = redshift.get_waiter('cluster_available') try: waiter2.wait( 
ClusterIdentifier=identifier, WaiterConfig=dict(MaxAttempts=attempts) ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Timeout waiting for cluster modification") try: resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json(e, msg="Couldn't modify redshift cluster %s " % identifier) return(True, _collect_facts(resource)) def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True), identifier=dict(required=True), node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc2.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False), username=dict(required=False), password=dict(no_log=True, required=False), db_name=dict(require=False), cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'), cluster_security_groups=dict(aliases=['security_groups'], type='list'), vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'), skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False), final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False), cluster_subnet_group_name=dict(aliases=['subnet']), availability_zone=dict(aliases=['aws_zone', 'zone']), preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']), cluster_parameter_group_name=dict(aliases=['param_group_name']), automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'), port=dict(type='int'), cluster_version=dict(aliases=['version'], choices=['1.0']), allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True), number_of_nodes=dict(type='int'), publicly_accessible=dict(type='bool', default=False), 
encrypted=dict(type='bool', default=False), elastic_ip=dict(required=False), new_cluster_identifier=dict(aliases=['new_identifier']), enhanced_vpc_routing=dict(type='bool', default=False), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300), )) required_if = [ ('command', 'delete', ['skip_final_cluster_snapshot']), ('command', 'create', ['node_type', 'username', 'password']) ] module = AnsibleAWSModule( argument_spec=argument_spec, required_if=required_if ) command = module.params.get('command') skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot') final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier') # can't use module basic required_if check for this case if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None: module.fail_json(msg="Need to specifiy final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False") conn = module.client('redshift') changed = True if command == 'create': (changed, cluster) = create_cluster(module, conn) elif command == 'facts': (changed, cluster) = describe_cluster(module, conn) elif command == 'delete': (changed, cluster) = delete_cluster(module, conn) elif command == 'modify': (changed, cluster) = modify_cluster(module, conn) module.exit_json(changed=changed, cluster=cluster) if __name__ == '__main__': main()
gpl-3.0
chushao/Gradesource-Uploader
requests/utils.py
3
17843
# -*- coding: utf-8 -*- """ requests.utils ~~~~~~~~~~~~~~ This module provides utility functions that are used within Requests that are also useful for external consumption. """ import cgi import codecs import os import platform import re import sys import zlib from netrc import netrc, NetrcParseError from . import __version__ from .compat import parse_http_list as _parse_list_header from .compat import quote, urlparse, bytes, str, OrderedDict from .cookies import RequestsCookieJar, cookiejar_from_dict _hush_pyflakes = (RequestsCookieJar,) CERTIFI_BUNDLE_PATH = None try: # see if requests's own CA certificate bundle is installed from . import certs path = certs.where() if os.path.exists(path): CERTIFI_BUNDLE_PATH = certs.where() except ImportError: pass NETRC_FILES = ('.netrc', '_netrc') # common paths for the OS's CA certificate bundle POSSIBLE_CA_BUNDLE_PATHS = [ # Red Hat, CentOS, Fedora and friends (provided by the ca-certificates package): '/etc/pki/tls/certs/ca-bundle.crt', # Ubuntu, Debian, and friends (provided by the ca-certificates package): '/etc/ssl/certs/ca-certificates.crt', # FreeBSD (provided by the ca_root_nss package): '/usr/local/share/certs/ca-root-nss.crt', # openSUSE (provided by the ca-certificates package), the 'certs' directory is the # preferred way but may not be supported by the SSL module, thus it has 'ca-bundle.pem' # as a fallback (which is generated from pem files in the 'certs' directory): '/etc/ssl/ca-bundle.pem', ] def get_os_ca_bundle_path(): """Try to pick an available CA certificate bundle provided by the OS.""" for path in POSSIBLE_CA_BUNDLE_PATHS: if os.path.exists(path): return path return None # if certifi is installed, use its CA bundle; # otherwise, try and use the OS bundle DEFAULT_CA_BUNDLE_PATH = CERTIFI_BUNDLE_PATH or get_os_ca_bundle_path() def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" if hasattr(d, 'items'): d = d.items() return d def get_netrc_auth(url): """Returns the Requests 
tuple auth for a given url from netrc.""" try: locations = (os.path.expanduser('~/{0}'.format(f)) for f in NETRC_FILES) netrc_path = None for loc in locations: if os.path.exists(loc) and not netrc_path: netrc_path = loc # Abort early if there isn't one. if netrc_path is None: return netrc_path ri = urlparse(url) # Strip port numbers from netloc host = ri.netloc.split(':')[0] try: _netrc = netrc(netrc_path).authenticators(host) if _netrc: # Return with login / password login_i = (0 if _netrc[0] else 1) return (_netrc[login_i], _netrc[2]) except (NetrcParseError, IOError): # If there was a parsing error or a permissions issue reading the file, # we'll just skip netrc auth pass # AppEngine hackiness. except (ImportError, AttributeError): pass def guess_filename(obj): """Tries to guess the filename of the given object.""" name = getattr(obj, 'name', None) if name and name[0] != '<' and name[-1] != '>': return name def from_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. Unless it can not be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') ValueError: need more than 1 value to unpack >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') return OrderedDict(value) def to_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples. 
""" if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(value, dict): value = value.items() return list(value) # From mitsuhiko/werkzeug (used with permission). def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list` """ result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result # From mitsuhiko/werkzeug (used with permission). def parse_dict_header(value): """Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict: >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. 
:return: :class:`dict` """ result = {} for item in _parse_list_header(value): if '=' not in item: result[item] = None continue name, value = item.split('=', 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value return result # From mitsuhiko/werkzeug (used with permission). def unquote_header_value(value, is_filename=False): r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] # if this is a filename and the starting characters look like # a UNC path, then just return the value without quotes. Using the # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. if not is_filename or value[:2] != '\\\\': return value.replace('\\\\', '\\').replace('\\"', '"') return value def dict_from_cookiejar(cj): """Returns a key/value dictionary from a CookieJar. :param cj: CookieJar object to extract cookies from. """ cookie_dict = {} for cookie in cj: cookie_dict[cookie.name] = cookie.value return cookie_dict def add_dict_to_cookiejar(cj, cookie_dict): """Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. """ cj2 = cookiejar_from_dict(cookie_dict) for cookie in cj2: cj.set_cookie(cookie) return cj def get_encodings_from_content(content): """Returns encodings from given content string. :param content: bytestring to extract encodings from. 
""" charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) return charset_re.findall(content) def get_encoding_from_headers(headers): """Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from. """ content_type = headers.get('content-type') if not content_type: return None content_type, params = cgi.parse_header(content_type) if 'charset' in params: return params['charset'].strip("'\"") if 'text' in content_type: return 'ISO-8859-1' def stream_decode_response_unicode(iterator, r): """Stream decodes a iterator.""" if r.encoding is None: for item in iterator: yield item return decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') for chunk in iterator: rv = decoder.decode(chunk) if rv: yield rv rv = decoder.decode('', final=True) if rv: yield rv def iter_slices(string, slice_length): """Iterate over slices of a string.""" pos = 0 while pos < len(string): yield string[pos:pos + slice_length] pos += slice_length def get_unicode_from_response(r): """Returns the requested content back in unicode. :param r: Response object to get unicode content from. Tried: 1. charset from content-type 2. every encodings from ``<meta ... charset=XXX>`` 3. 
fall back and replace all unicode characters """ tried_encodings = [] # Try charset from content-type encoding = get_encoding_from_headers(r.headers) if encoding: try: return str(r.content, encoding) except UnicodeError: tried_encodings.append(encoding) # Fall back: try: return str(r.content, encoding, errors='replace') except TypeError: return r.content def stream_decompress(iterator, mode='gzip'): """Stream decodes an iterator over compressed data :param iterator: An iterator over compressed data :param mode: 'gzip' or 'deflate' :return: An iterator over decompressed data """ if mode not in ['gzip', 'deflate']: raise ValueError('stream_decompress mode must be gzip or deflate') zlib_mode = 16 + zlib.MAX_WBITS if mode == 'gzip' else -zlib.MAX_WBITS dec = zlib.decompressobj(zlib_mode) try: for chunk in iterator: rv = dec.decompress(chunk) if rv: yield rv except zlib.error: # If there was an error decompressing, just return the raw chunk yield chunk # Continue to return the rest of the raw data for chunk in iterator: yield chunk else: # Make sure everything has been returned from the decompression object buf = dec.decompress(bytes()) rv = buf + dec.flush() if rv: yield rv def stream_untransfer(gen, resp): ce = resp.headers.get('content-encoding', '').lower() if 'gzip' in ce: gen = stream_decompress(gen, mode='gzip') elif 'deflate' in ce: gen = stream_decompress(gen, mode='deflate') return gen # The unreserved URI characters (RFC 3986) UNRESERVED_SET = frozenset( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") def unquote_unreserved(uri): """Un-escape any percent-escape sequences in a URI that are unreserved characters. This leaves all reserved, illegal and non-ASCII bytes encoded. 
""" parts = uri.split('%') for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): c = chr(int(h, 16)) if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: parts[i] = '%' + parts[i] else: parts[i] = '%' + parts[i] return ''.join(parts) def requote_uri(uri): """Re-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. """ # Unquote only the unreserved characters # Then quote only illegal characters (do not quote reserved, unreserved, # or '%') return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~") def get_environ_proxies(url): """Return a dict of environment proxies.""" proxy_keys = [ 'all', 'http', 'https', 'ftp', 'socks' ] get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy = get_proxy('no_proxy') if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the netloc, both with and without the port. no_proxy = no_proxy.split(',') netloc = urlparse(url).netloc for host in no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # The URL does match something in no_proxy, so we don't want # to apply the proxies on this URL. return {} # If we get here, we either didn't have no_proxy set or we're not going # anywhere that no_proxy applies to. 
proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys] return dict([(key, val) for (key, val) in proxies if val]) def default_user_agent(): """Return a string representing the default user agent.""" _implementation = platform.python_implementation() if _implementation == 'CPython': _implementation_version = platform.python_version() elif _implementation == 'PyPy': _implementation_version = '%s.%s.%s' % ( sys.pypy_version_info.major, sys.pypy_version_info.minor, sys.pypy_version_info.micro ) if sys.pypy_version_info.releaselevel != 'final': _implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel]) elif _implementation == 'Jython': _implementation_version = platform.python_version() # Complete Guess elif _implementation == 'IronPython': _implementation_version = platform.python_version() # Complete Guess else: _implementation_version = 'Unknown' try: p_system = platform.system() p_release = platform.release() except IOError: p_system = 'Unknown' p_release = 'Unknown' return " ".join([ 'python-requests/%s' % __version__, '%s/%s' % (_implementation, _implementation_version), '%s/%s' % (p_system, p_release), ]) def default_headers(): return { 'User-Agent': default_user_agent(), 'Accept-Encoding': ', '.join(('gzip', 'deflate', 'compress')), 'Accept': '*/*' } def parse_header_links(value): """Return a dict of parsed link headers proxies. i.e. 
Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" """ links = [] replace_chars = " '\"" for val in value.split(","): try: url, params = val.split(";", 1) except ValueError: url, params = val, '' link = {} link["url"] = url.strip("<> '\"") for param in params.split(";"): try: key,value = param.split("=") except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links.append(link) return links # Null bytes; no need to recreate these on each call to guess_json_utf _null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 _null2 = _null * 2 _null3 = _null * 3 def guess_json_utf(data): # JSON always starts with two ASCII characters, so detection is as # easy as counting the nulls and from their location and count # determine the encoding. Also detect a BOM, if present. sample = data[:4] if sample in (codecs.BOM_UTF32_LE, codecs.BOM32_BE): return 'utf-32' # BOM included if sample[:3] == codecs.BOM_UTF8: return 'utf-8-sig' # BOM included, MS style (discouraged) if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): return 'utf-16' # BOM included nullcount = sample.count(_null) if nullcount == 0: return 'utf-8' if nullcount == 2: if sample[::2] == _null2: # 1st and 3rd are null return 'utf-16-be' if sample[1::2] == _null2: # 2nd and 4th are null return 'utf-16-le' # Did not detect 2 valid UTF-16 ascii-range characters if nullcount == 3: if sample[:3] == _null3: return 'utf-32-be' if sample[1:] == _null3: return 'utf-32-le' # Did not detect a valid UTF-32 ascii-range character return None
mit
mxOBS/deb-pkg_trusty_chromium-browser
native_client/build/update_pnacl_tool_revisions.py
1
16612
#!/usr/bin/python # Copyright (c) 2013 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import argparse import collections import datetime import email.mime.text import getpass import os import re import smtplib import subprocess import sys import tempfile BUILD_DIR = os.path.dirname(__file__) NACL_DIR = os.path.dirname(BUILD_DIR) TOOLCHAIN_REV_DIR = os.path.join(NACL_DIR, 'toolchain_revisions') PKG_VER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py') PNACL_PACKAGE = 'pnacl_newlib' def ParseArgs(args): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="""Update pnacl_newlib.json PNaCl version. LLVM and other projects are checked-in to the NaCl repository, but their head isn't necessarily the one that we currently use in PNaCl. The pnacl_newlib.json and pnacl_translator.json files point at subversion revisions to use for tools such as LLVM. Our build process then downloads pre-built tool tarballs from the toolchain build waterfall. git repository before running this script: ______________________ | | v | ...----A------B------C------D------ NaCl HEAD ^ ^ ^ ^ | | | |__ Latest pnacl_{newlib,translator}.json update. | | | | | |__ A newer LLVM change (LLVM repository HEAD). | | | |__ Oldest LLVM change since this PNaCl version. | |__ pnacl_{newlib,translator}.json points at an older LLVM change. git repository after running this script: _______________ | | v | ...----A------B------C------D------E------ NaCl HEAD Note that there could be any number of non-PNaCl changes between each of these changelists, and that the user can also decide to update the pointer to B instead of C. There is further complication when toolchain builds are merged. 
""") parser.add_argument('--email', metavar='ADDRESS', type=str, default=getpass.getuser()+'@chromium.org', help="Email address to send errors to.") parser.add_argument('--svn-id', metavar='SVN_ID', type=int, default=0, help="Update to a specific SVN ID instead of the most " "recent SVN ID with a PNaCl change. This value must " "be more recent than the one in the current " "pnacl_newlib.json. This option is useful when multiple " "changelists' toolchain builds were merged, or when " "too many PNaCl changes would be pulled in at the " "same time.") parser.add_argument('--dry-run', default=False, action='store_true', help="Print the changelist that would be sent, but " "don't actually send anything to review.") # TODO(jfb) The following options come from download_toolchain.py and # should be shared in some way. parser.add_argument('--filter_out_predicates', default=[], help="Toolchains to filter out.") return parser.parse_args() def ExecCommand(command): try: return subprocess.check_output(command, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: sys.stderr.write('\nRunning `%s` returned %i, got:\n%s\n' % (' '.join(e.cmd), e.returncode, e.output)) raise def GetCurrentRevision(): return ExecCommand([sys.executable, PKG_VER, 'getrevision', '--revision-package', PNACL_PACKAGE]).strip() def SetCurrentRevision(revision_num): ExecCommand([sys.executable, PKG_VER, 'setrevision', '--revision-set', PNACL_PACKAGE, '--revision', str(revision_num)]) def GetRevisionPackageFiles(): out = ExecCommand([sys.executable, PKG_VER, 'revpackages', '--revision-set', PNACL_PACKAGE]) package_list = [package.strip() for package in out.strip().split('\n')] return [os.path.join(TOOLCHAIN_REV_DIR, '%s.json' % package) for package in package_list] def GitCurrentBranch(): return ExecCommand(['git', 'symbolic-ref', 'HEAD', '--short']).strip() def GitStatus(): """List of statuses, one per path, of paths in the current git branch. 
Ignores untracked paths.""" out = ExecCommand(['git', 'status', '--porcelain']).strip().split('\n') return [f.strip() for f in out if not re.match('^\?\? (.*)$', f.strip())] def SyncSources(): """Assumes a git-svn checkout of NaCl. See: www.chromium.org/nativeclient/how-tos/how-to-use-git-svn-with-native-client """ ExecCommand(['gclient', 'sync']) def GitCommitInfo(info='', obj=None, num=None, extra=[]): """Commit information, where info is one of the shorthands in git_formats. obj can be a path or a hash. num is the number of results to return. extra is a list of optional extra arguments.""" # Shorthands for git's pretty formats. # See PRETTY FORMATS format:<string> in `git help log`. git_formats = { '': '', 'hash': '%H', 'date': '%ci', 'author': '%aN', 'subject': '%s', 'body': '%b', } cmd = ['git', 'log', '--format=format:%s' % git_formats[info]] + extra if num: cmd += ['-n'+str(num)] if obj: cmd += [obj] return ExecCommand(cmd).strip() def GitCommitsSince(date): """List of commit hashes since a particular date, in reverse chronological order.""" return GitCommitInfo(info='hash', extra=['--since="%s"' % date]).split('\n') def GitFilesChanged(commit_hash): """List of files changed in a commit.""" return GitCommitInfo(obj=commit_hash, num=1, extra=['--name-only']).split('\n') def GitChangesPath(commit_hash, path): """Returns True if the commit changes a file under the given path.""" return any([ re.search('^' + path, f.strip()) for f in GitFilesChanged(commit_hash)]) def GitBranchExists(name): return len(ExecCommand(['git', 'branch', '--list', name]).strip()) != 0 def GitCheckout(branch, force=False): """Checkout an existing branch. 
force throws away local changes.""" ExecCommand(['git', 'checkout'] + (['--force'] if force else []) + [branch]) def GitCheckoutNewBranch(branch): """Create and checkout a new git branch.""" ExecCommand(['git', 'checkout', '-b', branch]) def GitDeleteBranch(branch, force=False): """Force-delete a branch.""" ExecCommand(['git', 'branch', '-D' if force else '-d', branch]) def GitAdd(file): ExecCommand(['git', 'add', file]) def GitCommit(message): with tempfile.NamedTemporaryFile() as tmp: tmp.write(message) tmp.flush() ExecCommand(['git', 'commit', '--file=%s' % tmp.name]) def UploadChanges(): """Upload changes, don't prompt.""" # TODO(jfb) Using the commit queue and avoiding git try + manual commit # would be much nicer. See '--use-commit-queue' return ExecCommand(['git', 'cl', 'upload', '--send-mail', '-f']) def GitTry(): return ExecCommand(['git', 'cl', 'try']) def FindCommitWithGitSvnId(git_svn_id): while True: # This command needs to retry because git-svn partially rebuild its # revision map for every commit. Asking it a second time fixes the # issue. out = ExecCommand(['git', 'svn', 'find-rev', 'r' + git_svn_id]).strip() if not re.match('^Partial-rebuilding ', out): break return out def CommitMessageToCleanDict(commit_message): """Extract and clean commit message fields that follow the NaCl commit message convention. 
Don't repeat them as-is, to avoid confusing our infrastructure.""" res = {} fields = [ ['git svn id', ('\s*git-svn-id: ' 'svn://[^@]+@([0-9]+) [a-f0-9\-]+'), '<none>'], ['reviewers tbr', '\s*TBR=([^\n]+)', ''], ['reviewers', '\s*R=([^\n]+)', ''], ['review url', '\s*Review URL: *([^\n]+)', '<none>'], ['bug', '\s*BUG=([^\n]+)', '<none>'], ['test', '\s*TEST=([^\n]+)', '<none>'], ] for key, regex, none in fields: found = re.search(regex, commit_message) if found: commit_message = commit_message.replace(found.group(0), '') res[key] = found.group(1).strip() else: res[key] = none res['body'] = commit_message.strip() return res def SendEmail(user_email, out): if user_email: sys.stderr.write('\nSending email to %s.\n' % user_email) msg = email.mime.text.MIMEText(out) msg['Subject'] = '[PNaCl revision updater] failure!' msg['From'] = 'tool_revisions-bot@chromium.org' msg['To'] = user_email s = smtplib.SMTP('localhost') s.sendmail(msg['From'], [msg['To']], msg.as_string()) s.quit() else: sys.stderr.write('\nNo email address specified.') def DryRun(out): sys.stdout.write("DRY RUN: " + out + "\n") def Done(out): sys.stdout.write(out) sys.exit(0) class CLInfo: """Changelist information: sorted dictionary of NaCl-standard fields.""" def __init__(self, desc): self._desc = desc self._vals = collections.OrderedDict([ ('git svn id', None), ('hash', None), ('author', None), ('date', None), ('subject', None), ('commits since', None), ('bug', None), ('test', None), ('review url', None), ('reviewers tbr', None), ('reviewers', None), ('body', None), ]) def __getitem__(self, key): return self._vals[key] def __setitem__(self, key, val): assert key in self._vals.keys() self._vals[key] = str(val) def __str__(self): """Changelist to string. A short description of the change, e.g.: r12345: (tom@example.com) Subject of the change. If the change is itself pulling in other changes from sub-repositories then take its relevant description and append it to the string. 
These sub-directory updates are also script-generated and therefore have a predictable format. e.g.: r12345: (tom@example.com) Subject of the change. | dead123: (dick@example.com) Other change in another repository. | beef456: (harry@example.com) Yet another cross-repository change. """ desc = (' r' + self._vals['git svn id'] + ': (' + self._vals['author'] + ') ' + self._vals['subject']) if GitChangesPath(self._vals['hash'], 'pnacl/COMPONENT_REVISIONS'): git_hash_abbrev = '[0-9a-fA-F]{7}' email = '[^@)]+@[^)]+\.[^)]+' desc = '\n'.join([desc] + [ ' | ' + line for line in self._vals['body'].split('\n') if re.match('^ *%s: \(%s\) .*$' % (git_hash_abbrev, email), line)]) return desc def FmtOut(tr_points_at, pnacl_changes, new_svn_id, err=[], msg=[]): assert isinstance(err, list) assert isinstance(msg, list) old_svn_id = tr_points_at['git svn id'] changes = '\n'.join([str(cl) for cl in pnacl_changes]) bugs = '\n'.join(list(set( ['BUG= ' + cl['bug'].strip() if cl['bug'] else '' for cl in pnacl_changes]) - set(['']))) reviewers = ', '.join(list(set( [r.strip() for r in (','.join([ cl['author'] + ',' + cl['reviewers tbr'] + ',' + cl['reviewers'] for cl in pnacl_changes])).split(',')]) - set(['']))) return (('*** ERROR ***\n' if err else '') + '\n\n'.join(err) + '\n\n'.join(msg) + ('\n\n' if err or msg else '') + ('Update revision for PNaCl r%s->r%s\n\n' 'Pull the following PNaCl changes into NaCl:\n%s\n\n' '%s\n' 'R= %s\n' 'TEST=git try\n' 'NOTRY=true\n' '(Please LGTM this change and tick the "commit" box)\n' % (old_svn_id, new_svn_id, changes, bugs, reviewers))) def Main(): args = ParseArgs(sys.argv[1:]) tr_points_at = CLInfo('revision update points at PNaCl version') pnacl_changes = [] msg = [] branch = GitCurrentBranch() assert branch == 'master', ('Must be on branch master, currently on %s' % branch) try: status = GitStatus() assert len(status) == 0, ("Repository isn't clean:\n %s" % '\n '.join(status)) SyncSources() # The current revision file points at a specific 
PNaCl LLVM # version. LLVM is checked-in to the NaCl repository, but its head # isn't necessarily the one that we currently use in PNaCl. pnacl_revision = GetCurrentRevision() tr_points_at['git svn id'] = pnacl_revision tr_points_at['hash'] = FindCommitWithGitSvnId(tr_points_at['git svn id']) tr_points_at['date'] = GitCommitInfo( info='date', obj=tr_points_at['hash'], num=1) tr_points_at['subject'] = GitCommitInfo( info='subject', obj=tr_points_at['hash'], num=1) recent_commits = GitCommitsSince(tr_points_at['date']) tr_points_at['commits since'] = len(recent_commits) assert len(recent_commits) > 1 if args.svn_id and args.svn_id <= int(tr_points_at['git svn id']): Done(FmtOut(tr_points_at, pnacl_changes, args.svn_id, err=["Can't update to SVN ID r%s, the current " "PNaCl revision's SVN ID (r%s) is more recent." % (args.svn_id, tr_points_at['git svn id'])])) # Find the commits changing PNaCl files that follow the previous # PNaCl revision pointer. pnacl_pathes = ['pnacl/', 'toolchain_build/'] pnacl_hashes = list(set(reduce( lambda acc, lst: acc + lst, [[cl for cl in recent_commits[:-1] if GitChangesPath(cl, path)] for path in pnacl_pathes]))) for hash in pnacl_hashes: cl = CLInfo('PNaCl change ' + hash) cl['hash'] = hash for i in ['author', 'date', 'subject']: cl[i] = GitCommitInfo(info=i, obj=hash, num=1) for k,v in CommitMessageToCleanDict( GitCommitInfo(info='body', obj=hash, num=1)).iteritems(): cl[k] = v pnacl_changes.append(cl) # The PNaCl hashes weren't ordered chronologically, make sure the # changes are. pnacl_changes.sort(key=lambda x: int(x['git svn id'])) if args.svn_id: pnacl_changes = [cl for cl in pnacl_changes if int(cl['git svn id']) <= args.svn_id] if len(pnacl_changes) == 0: Done(FmtOut(tr_points_at, pnacl_changes, pnacl_revision, msg=['No PNaCl change since r%s.' 
% tr_points_at['git svn id']])) new_pnacl_revision = args.svn_id or pnacl_changes[-1]['git svn id'] new_branch_name = ('pnacl-revision-update-to-%s' % new_pnacl_revision) if GitBranchExists(new_branch_name): # TODO(jfb) Figure out if git-try succeeded, checkout the branch # and dcommit. raise Exception("Branch %s already exists, the change hasn't " "landed yet.\nPlease check trybots and dcommit it " "manually." % new_branch_name) if args.dry_run: DryRun("Would check out branch: " + new_branch_name) else: GitCheckoutNewBranch(new_branch_name) if args.dry_run: DryRun("Would update PNaCl revision to: %s" % new_pnacl_revision) else: SetCurrentRevision(new_pnacl_revision) for f in GetRevisionPackageFiles(): GitAdd(f) GitCommit(FmtOut(tr_points_at, pnacl_changes, new_pnacl_revision)) upload_res = UploadChanges() msg += ['Upload result:\n%s' % upload_res] try_res = GitTry() msg += ['Try result:\n%s' % try_res] GitCheckout('master', force=False) Done(FmtOut(tr_points_at, pnacl_changes, new_pnacl_revision, msg=msg)) except SystemExit as e: # Normal exit. raise except (BaseException, Exception) as e: # Leave the branch around, if any was created: it'll prevent next # runs of the cronjob from succeeding until the failure is fixed. out = FmtOut(tr_points_at, pnacl_changes, new_pnacl_revision, msg=msg, err=['Failed at %s: %s' % (datetime.datetime.now(), e)]) sys.stderr.write(out) if not args.dry_run: SendEmail(args.email, out) GitCheckout('master', force=True) raise if __name__ == '__main__': Main()
bsd-3-clause
carljm/django
django/utils/version.py
115
2455
from __future__ import unicode_literals

import datetime
import os
import subprocess

from django.utils.lru_cache import lru_cache


def get_version(version=None):
    "Returns a PEP 440-compliant version number from VERSION."
    version = get_complete_version(version)

    # Now build the two parts of the version number:
    # main = X.Y[.Z]
    # sub = .devN - for pre-alpha releases
    #     | {a|b|rc}N - for alpha, beta, and rc releases

    main = get_main_version(version)

    sub = ''
    if version[3] == 'alpha' and version[4] == 0:
        # Pre-alpha: suffix with the git changeset timestamp when available,
        # producing e.g. "1.9.dev20150401123456".
        git_changeset = get_git_changeset()
        if git_changeset:
            sub = '.dev%s' % git_changeset

    elif version[3] != 'final':
        # Alpha/beta/rc: map the stage name to its PEP 440 abbreviation
        # and append the stage serial, e.g. ('alpha', 1) -> "a1".
        mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
        sub = mapping[version[3]] + str(version[4])

    return str(main + sub)


def get_main_version(version=None):
    "Returns main version (X.Y[.Z]) from VERSION."
    version = get_complete_version(version)
    # Drop a zero micro component: (1, 8, 0, ...) -> "1.8", (1, 8, 1, ...) -> "1.8.1".
    parts = 2 if version[2] == 0 else 3
    return '.'.join(str(x) for x in version[:parts])


def get_complete_version(version=None):
    """Returns a tuple of the django version. If version argument is non-empty,
    then checks for correctness of the tuple provided.

    The tuple has the shape (major, minor, micro, stage, serial) where stage
    is one of 'alpha', 'beta', 'rc', 'final'.
    """
    if version is None:
        # Fall back to the canonical 5-tuple declared in django/__init__.py.
        from django import VERSION as version
    else:
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')

    return version


def get_docs_version(version=None):
    # Docs for any unreleased version live under "dev" on the docs site;
    # final releases use the "X.Y" series directory.
    version = get_complete_version(version)
    if version[3] != 'final':
        return 'dev'
    else:
        return '%d.%d' % version[:2]


@lru_cache()
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    """
    # Repository root: two directory levels up from this file
    # (django/utils/version.py -> django -> repo root).
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    git_log = subprocess.Popen(
        'git log --pretty=format:%ct --quiet -1 HEAD',
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        shell=True, cwd=repo_dir, universal_newlines=True,
    )
    timestamp = git_log.communicate()[0]
    try:
        timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
    except ValueError:
        # Not a git checkout (or git unavailable): communicate() yielded
        # something that isn't an integer timestamp, so report no changeset.
        return None
    return timestamp.strftime('%Y%m%d%H%M%S')
bsd-3-clause
Venturi/cms
env/lib/python2.7/site-packages/phonenumbers/shortdata/region_JE.py
11
1065
"""Auto-generated file, do not edit by hand. JE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata

# NOTE(review): generated short-number metadata for Jersey (region "JE");
# regenerate via the phonenumbers metadata build rather than editing here,
# or changes will be clobbered on the next generation run.
PHONE_METADATA_JE = PhoneMetadata(id='JE', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='[129]\\d{2,5}', possible_number_pattern='\\d{3,6}'),
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    emergency=PhoneNumberDesc(national_number_pattern='112|999', possible_number_pattern='\\d{3}', example_number='999'),
    short_code=PhoneNumberDesc(national_number_pattern='1(?:00|1(?:2|8\\d{3})|23|4(?:[14]|28|7\\d)|5\\d|7(?:0[12]|[128]|35?)|808|9[135])|23[234]|999', possible_number_pattern='\\d{3,6}', example_number='150'),
    standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    short_data=True)
gpl-2.0
BoraALAP/ercu_site
node_modules/node-gyp/gyp/pylib/gyp/win_tool.py
1417
12751
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions for Windows builds. These functions are executed via gyp-win-tool when using the ninja generator. """ import os import re import shutil import subprocess import stat import string import sys BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # A regex matching an argument corresponding to the output filename passed to # link.exe. _LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE) def main(args): executor = WinTool() exit_code = executor.Dispatch(args) if exit_code is not None: sys.exit(exit_code) class WinTool(object): """This class performs all the Windows tooling steps. The methods can either be executed directly, or dispatched from an argument list.""" def _UseSeparateMspdbsrv(self, env, args): """Allows to use a unique instance of mspdbsrv.exe per linker instead of a shared one.""" if len(args) < 1: raise Exception("Not enough arguments") if args[0] != 'link.exe': return # Use the output filename passed to the linker to generate an endpoint name # for mspdbsrv.exe. endpoint_name = None for arg in args: m = _LINK_EXE_OUT_ARG.match(arg) if m: endpoint_name = re.sub(r'\W+', '', '%s_%d' % (m.group('out'), os.getpid())) break if endpoint_name is None: return # Adds the appropriate environment variable. This will be read by link.exe # to know which instance of mspdbsrv.exe it should connect to (if it's # not set then the default endpoint is used). 
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) return getattr(self, method)(*args[1:]) def _CommandifyName(self, name_string): """Transforms a tool name like recursive-mirror to RecursiveMirror.""" return name_string.title().replace('-', '') def _GetEnv(self, arch): """Gets the saved environment from a file for a given architecture.""" # The environment is saved as an "environment block" (see CreateProcess # and msvs_emulation for details). We convert to a dict here. # Drop last 2 NULs, one for list terminator, one for trailing vs. separator. pairs = open(arch).read()[:-2].split('\0') kvs = [item.split('=', 1) for item in pairs] return dict(kvs) def ExecStamp(self, path): """Simple stamp command.""" open(path, 'w').close() def ExecRecursiveMirror(self, source, dest): """Emulation of rm -rf out && cp -af in out.""" if os.path.exists(dest): if os.path.isdir(dest): def _on_error(fn, path, excinfo): # The operation failed, possibly because the file is set to # read-only. If that's why, make it writable and try the op again. if not os.access(path, os.W_OK): os.chmod(path, stat.S_IWRITE) fn(path) shutil.rmtree(dest, onerror=_on_error) else: if not os.access(dest, os.W_OK): # Attempt to make the file writable before deleting it. os.chmod(dest, stat.S_IWRITE) os.unlink(dest) if os.path.isdir(source): shutil.copytree(source, dest) else: shutil.copy2(source, dest) def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args): """Filter diagnostic output from link that looks like: ' Creating library ui.dll.lib and object ui.dll.exp' This happens when there are exports from the dll or exe. 
""" env = self._GetEnv(arch) if use_separate_mspdbsrv == 'True': self._UseSeparateMspdbsrv(env, args) link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]), shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = link.communicate() for line in out.splitlines(): if (not line.startswith(' Creating library ') and not line.startswith('Generating code') and not line.startswith('Finished generating code')): print line return link.returncode def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname, mt, rc, intermediate_manifest, *manifests): """A wrapper for handling creating a manifest resource and then executing a link command.""" # The 'normal' way to do manifests is to have link generate a manifest # based on gathering dependencies from the object files, then merge that # manifest with other manifests supplied as sources, convert the merged # manifest to a resource, and then *relink*, including the compiled # version of the manifest resource. This breaks incremental linking, and # is generally overly complicated. Instead, we merge all the manifests # provided (along with one that includes what would normally be in the # linker-generated one, see msvs_emulation.py), and include that into the # first and only link. We still tell link to generate a manifest, but we # only use that to assert that our simpler process did not miss anything. 
variables = { 'python': sys.executable, 'arch': arch, 'out': out, 'ldcmd': ldcmd, 'resname': resname, 'mt': mt, 'rc': rc, 'intermediate_manifest': intermediate_manifest, 'manifests': ' '.join(manifests), } add_to_ld = '' if manifests: subprocess.check_call( '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' '-manifest %(manifests)s -out:%(out)s.manifest' % variables) if embed_manifest == 'True': subprocess.check_call( '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest' ' %(out)s.manifest.rc %(resname)s' % variables) subprocess.check_call( '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s ' '%(out)s.manifest.rc' % variables) add_to_ld = ' %(out)s.manifest.res' % variables subprocess.check_call(ldcmd + add_to_ld) # Run mt.exe on the theoretically complete manifest we generated, merging # it with the one the linker generated to confirm that the linker # generated one does not add anything. This is strictly unnecessary for # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not # used in a #pragma comment. if manifests: # Merge the intermediate one with ours to .assert.manifest, then check # that .assert.manifest is identical to ours. subprocess.check_call( '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' '-manifest %(out)s.manifest %(intermediate_manifest)s ' '-out:%(out)s.assert.manifest' % variables) assert_manifest = '%(out)s.assert.manifest' % variables our_manifest = '%(out)s.manifest' % variables # Load and normalize the manifests. mt.exe sometimes removes whitespace, # and sometimes doesn't unfortunately. 
with open(our_manifest, 'rb') as our_f: with open(assert_manifest, 'rb') as assert_f: our_data = our_f.read().translate(None, string.whitespace) assert_data = assert_f.read().translate(None, string.whitespace) if our_data != assert_data: os.unlink(out) def dump(filename): sys.stderr.write('%s\n-----\n' % filename) with open(filename, 'rb') as f: sys.stderr.write(f.read() + '\n-----\n') dump(intermediate_manifest) dump(our_manifest) dump(assert_manifest) sys.stderr.write( 'Linker generated manifest "%s" added to final manifest "%s" ' '(result in "%s"). ' 'Were /MANIFEST switches used in #pragma statements? ' % ( intermediate_manifest, our_manifest, assert_manifest)) return 1 def ExecManifestWrapper(self, arch, *args): """Run manifest tool with environment set. Strip out undesirable warning (some XML blocks are recognized by the OS loader, but not the manifest tool).""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if line and 'manifest authoring warning 81010002' not in line: print line return popen.returncode def ExecManifestToRc(self, arch, *args): """Creates a resource file pointing a SxS assembly manifest. |args| is tuple containing path to resource file, path to manifest file and resource name which can be "1" (for executables) or "2" (for DLLs).""" manifest_path, resource_path, resource_name = args with open(resource_path, 'wb') as output: output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % ( resource_name, os.path.abspath(manifest_path).replace('\\', '/'))) def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl, *flags): """Filter noisy filenames output from MIDL compile step that isn't quietable via command line flags. 
""" args = ['midl', '/nologo'] + list(flags) + [ '/out', outdir, '/tlb', tlb, '/h', h, '/dlldata', dlldata, '/iid', iid, '/proxy', proxy, idl] env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() # Filter junk out of stdout, and write filtered versions. Output we want # to filter is pairs of lines that look like this: # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl # objidl.idl lines = out.splitlines() prefixes = ('Processing ', '64 bit Processing ') processing = set(os.path.basename(x) for x in lines if x.startswith(prefixes)) for line in lines: if not line.startswith(prefixes) and line not in processing: print line return popen.returncode def ExecAsmWrapper(self, arch, *args): """Filter logo banner from invocations of asm.exe.""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Copyright (C) Microsoft Corporation') and not line.startswith('Microsoft (R) Macro Assembler') and not line.startswith(' Assembling: ') and line): print line return popen.returncode def ExecRcWrapper(self, arch, *args): """Filter logo banner from invocations of rc.exe. Older versions of RC don't support the /nologo flag.""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and not line.startswith('Copyright (C) Microsoft Corporation') and line): print line return popen.returncode def ExecActionWrapper(self, arch, rspfile, *dir): """Runs an action command line from a response file using the environment for |arch|. 
If |dir| is supplied, use that as the working directory.""" env = self._GetEnv(arch) # TODO(scottmg): This is a temporary hack to get some specific variables # through to actions that are set after gyp-time. http://crbug.com/333738. for k, v in os.environ.iteritems(): if k not in env: env[k] = v args = open(rspfile).read() dir = dir[0] if dir else None return subprocess.call(args, shell=True, env=env, cwd=dir) def ExecClCompile(self, project_dir, selected_files): """Executed by msvs-ninja projects when the 'ClCompile' target is used to build selected C/C++ files.""" project_dir = os.path.relpath(project_dir, BASE_DIR) selected_files = selected_files.split(';') ninja_targets = [os.path.join(project_dir, filename) + '^^' for filename in selected_files] cmd = ['ninja.exe'] cmd.extend(ninja_targets) return subprocess.call(cmd, shell=True, cwd=BASE_DIR) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
mit
numerigraphe/odoo
addons/mrp/wizard/mrp_price.py
381
2132
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv


class mrp_price(osv.osv_memory):
    """Wizard that asks for a quantity, then launches the Product Cost
    Structure report for the products selected in the client context."""
    _name = 'mrp.product_price'
    _description = 'Product Price'
    _columns = {
        'number': fields.integer('Quantity', required=True, help="Specify quantity of products to produce or buy. Report of Cost structure will be displayed base on this quantity."),
    }
    _defaults = {
        'number': 1,
    }

    def print_report(self, cr, uid, ids, context=None):
        """Build the action that prints the Product Cost Structure report.

        :param cr: database cursor
        :param uid: id of the user currently logged in
        :param ids: ids of the wizard records (only the first one is used)
        :param context: standard dictionary; ``active_ids`` selects products
        :return: an ``ir.actions.report.xml`` action dictionary
        """
        if context is None:
            context = {}
        # Read the quantity entered in the wizard; fall back to an empty
        # form when nothing could be read.
        records = self.read(cr, uid, ids, ['number'])
        form_data = records[0] if records else {}
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'product.price',
            'datas': {
                'ids': context.get('active_ids', []),
                'form': form_data,
            },
        }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
brengarajalu/GrpcAPI
protobuf/protobuf-3.0.0-alpha-2/gtest/scripts/gen_gtest_pred_impl.py
2538
21986
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """gen_gtest_pred_impl.py v0.1 Generates the implementation of Google Test predicate assertions and accompanying tests. Usage: gen_gtest_pred_impl.py MAX_ARITY where MAX_ARITY is a positive integer. The command generates the implementation of up-to MAX_ARITY-ary predicate assertions, and writes it to file gtest_pred_impl.h in the directory where the script is. It also generates the accompanying unit test in file gtest_pred_impl_unittest.cc. 
""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import sys import time # Where this script is. SCRIPT_DIR = os.path.dirname(sys.argv[0]) # Where to store the generated header. HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h') # Where to store the generated unit test. UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc') def HeaderPreamble(n): """Returns the preamble for the header file. Args: n: the maximum arity of the predicate macros to be generated. """ # A map that defines the values used in the preamble template. DEFS = { 'today' : time.strftime('%m/%d/%Y'), 'year' : time.strftime('%Y'), 'command' : '%s %s' % (os.path.basename(sys.argv[0]), n), 'n' : n } return ( """// Copyright 2006, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // This file is AUTOMATICALLY GENERATED on %(today)s by command // '%(command)s'. DO NOT EDIT BY HAND! // // Implements a family of generic predicate assertion macros. #ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ #define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ // Makes sure this header is not included before gtest.h. #ifndef GTEST_INCLUDE_GTEST_GTEST_H_ # error Do not include gtest_pred_impl.h directly. Include gtest.h instead. #endif // GTEST_INCLUDE_GTEST_GTEST_H_ // This header implements a family of generic predicate assertion // macros: // // ASSERT_PRED_FORMAT1(pred_format, v1) // ASSERT_PRED_FORMAT2(pred_format, v1, v2) // ... // // where pred_format is a function or functor that takes n (in the // case of ASSERT_PRED_FORMATn) values and their source expression // text, and returns a testing::AssertionResult. See the definition // of ASSERT_EQ in gtest.h for an example. // // If you don't care about formatting, you can use the more // restrictive version: // // ASSERT_PRED1(pred, v1) // ASSERT_PRED2(pred, v1, v2) // ... // // where pred is an n-ary function or functor that returns bool, // and the values v1, v2, ..., must support the << operator for // streaming to std::ostream. // // We also define the EXPECT_* variations. // // For now we only support predicates whose arity is at most %(n)s. // Please email googletestframework@googlegroups.com if you need // support for higher arities. 
// GTEST_ASSERT_ is the basic statement to which all of the assertions // in this file reduce. Don't use this in your code. #define GTEST_ASSERT_(expression, on_failure) \\ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\ if (const ::testing::AssertionResult gtest_ar = (expression)) \\ ; \\ else \\ on_failure(gtest_ar.failure_message()) """ % DEFS) def Arity(n): """Returns the English name of the given arity.""" if n < 0: return None elif n <= 3: return ['nullary', 'unary', 'binary', 'ternary'][n] else: return '%s-ary' % n def Title(word): """Returns the given word in title case. The difference between this and string's title() method is that Title('4-ary') is '4-ary' while '4-ary'.title() is '4-Ary'.""" return word[0].upper() + word[1:] def OneTo(n): """Returns the list [1, 2, 3, ..., n].""" return range(1, n + 1) def Iter(n, format, sep=''): """Given a positive integer n, a format string that contains 0 or more '%s' format specs, and optionally a separator string, returns the join of n strings, each formatted with the format string on an iterator ranged from 1 to n. Example: Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'. """ # How many '%s' specs are in format? spec_count = len(format.split('%s')) - 1 return sep.join([format % (spec_count * (i,)) for i in OneTo(n)]) def ImplementationForArity(n): """Returns the implementation of n-ary predicate assertions.""" # A map the defines the values used in the implementation template. DEFS = { 'n' : str(n), 'vs' : Iter(n, 'v%s', sep=', '), 'vts' : Iter(n, '#v%s', sep=', '), 'arity' : Arity(n), 'Arity' : Title(Arity(n)) } impl = """ // Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use // this in your code. 
template <typename Pred""" % DEFS impl += Iter(n, """, typename T%s""") impl += """> AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS impl += Iter(n, """, const char* e%s""") impl += """, Pred pred""" impl += Iter(n, """, const T%s& v%s""") impl += """) { if (pred(%(vs)s)) return AssertionSuccess(); """ % DEFS impl += ' return AssertionFailure() << pred_text << "("' impl += Iter(n, """ << e%s""", sep=' << ", "') impl += ' << ") evaluates to false, where"' impl += Iter(n, """ << "\\n" << e%s << " evaluates to " << v%s""") impl += """; } // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s. // Don't use this in your code. #define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\ GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\ on_failure) // Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use // this in your code. #define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\ GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS impl += Iter(n, """, \\ #v%s""") impl += """, \\ pred""" impl += Iter(n, """, \\ v%s""") impl += """), on_failure) // %(Arity)s predicate assertion macros. #define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\ GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_) #define EXPECT_PRED%(n)s(pred, %(vs)s) \\ GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_) #define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\ GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_) #define ASSERT_PRED%(n)s(pred, %(vs)s) \\ GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_) """ % DEFS return impl def HeaderPostamble(): """Returns the postamble for the header file.""" return """ #endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ """ def GenerateFile(path, content): """Given a file path and a content string, overwrites it with the given content.""" print 'Updating file %s . . .' % path f = file(path, 'w+') print >>f, content, f.close() print 'File %s has been updated.' 
% path def GenerateHeader(n): """Given the maximum arity n, updates the header file that implements the predicate assertions.""" GenerateFile(HEADER, HeaderPreamble(n) + ''.join([ImplementationForArity(i) for i in OneTo(n)]) + HeaderPostamble()) def UnitTestPreamble(): """Returns the preamble for the unit test file.""" # A map that defines the values used in the preamble template. DEFS = { 'today' : time.strftime('%m/%d/%Y'), 'year' : time.strftime('%Y'), 'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]), } return ( """// Copyright 2006, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // This file is AUTOMATICALLY GENERATED on %(today)s by command // '%(command)s'. DO NOT EDIT BY HAND! // Regression test for gtest_pred_impl.h // // This file is generated by a script and quite long. If you intend to // learn how Google Test works by reading its unit tests, read // gtest_unittest.cc instead. // // This is intended as a regression test for the Google Test predicate // assertions. We compile it as part of the gtest_unittest target // only to keep the implementation tidy and compact, as it is quite // involved to set up the stage for testing Google Test using Google // Test itself. // // Currently, gtest_unittest takes ~11 seconds to run in the testing // daemon. In the future, if it grows too large and needs much more // time to finish, we should consider separating this file into a // stand-alone regression test. #include <iostream> #include "gtest/gtest.h" #include "gtest/gtest-spi.h" // A user-defined data type. struct Bool { explicit Bool(int val) : value(val != 0) {} bool operator>(int n) const { return value > Bool(n).value; } Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); } bool operator==(const Bool& rhs) const { return value == rhs.value; } bool value; }; // Enables Bool to be used in assertions. std::ostream& operator<<(std::ostream& os, const Bool& x) { return os << (x.value ? 
"true" : "false"); } """ % DEFS) def TestsForArity(n): """Returns the tests for n-ary predicate assertions.""" # A map that defines the values used in the template for the tests. DEFS = { 'n' : n, 'es' : Iter(n, 'e%s', sep=', '), 'vs' : Iter(n, 'v%s', sep=', '), 'vts' : Iter(n, '#v%s', sep=', '), 'tvs' : Iter(n, 'T%s v%s', sep=', '), 'int_vs' : Iter(n, 'int v%s', sep=', '), 'Bool_vs' : Iter(n, 'Bool v%s', sep=', '), 'types' : Iter(n, 'typename T%s', sep=', '), 'v_sum' : Iter(n, 'v%s', sep=' + '), 'arity' : Arity(n), 'Arity' : Title(Arity(n)), } tests = ( """// Sample functions/functors for testing %(arity)s predicate assertions. // A %(arity)s predicate function. template <%(types)s> bool PredFunction%(n)s(%(tvs)s) { return %(v_sum)s > 0; } // The following two functions are needed to circumvent a bug in // gcc 2.95.3, which sometimes has problem with the above template // function. bool PredFunction%(n)sInt(%(int_vs)s) { return %(v_sum)s > 0; } bool PredFunction%(n)sBool(%(Bool_vs)s) { return %(v_sum)s > 0; } """ % DEFS) tests += """ // A %(arity)s predicate functor. struct PredFunctor%(n)s { template <%(types)s> bool operator()(""" % DEFS tests += Iter(n, 'const T%s& v%s', sep=""", """) tests += """) { return %(v_sum)s > 0; } }; """ % DEFS tests += """ // A %(arity)s predicate-formatter function. template <%(types)s> testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS tests += Iter(n, 'const char* e%s', sep=""", """) tests += Iter(n, """, const T%s& v%s""") tests += """) { if (PredFunction%(n)s(%(vs)s)) return testing::AssertionSuccess(); return testing::AssertionFailure() << """ % DEFS tests += Iter(n, 'e%s', sep=' << " + " << ') tests += """ << " is expected to be positive, but evaluates to " << %(v_sum)s << "."; } """ % DEFS tests += """ // A %(arity)s predicate-formatter functor. 
struct PredFormatFunctor%(n)s { template <%(types)s> testing::AssertionResult operator()(""" % DEFS tests += Iter(n, 'const char* e%s', sep=""", """) tests += Iter(n, """, const T%s& v%s""") tests += """) const { return PredFormatFunction%(n)s(%(es)s, %(vs)s); } }; """ % DEFS tests += """ // Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s. class Predicate%(n)sTest : public testing::Test { protected: virtual void SetUp() { expected_to_finish_ = true; finished_ = false;""" % DEFS tests += """ """ + Iter(n, 'n%s_ = ') + """0; } """ tests += """ virtual void TearDown() { // Verifies that each of the predicate's arguments was evaluated // exactly once.""" tests += ''.join([""" EXPECT_EQ(1, n%s_) << "The predicate assertion didn't evaluate argument %s " "exactly once.";""" % (i, i + 1) for i in OneTo(n)]) tests += """ // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { FAIL() << "The predicate assertion unexpactedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; } } // true iff the test function is expected to run to finish. static bool expected_to_finish_; // true iff the test function did run to finish. static bool finished_; """ % DEFS tests += Iter(n, """ static int n%s_;""") tests += """ }; bool Predicate%(n)sTest::expected_to_finish_; bool Predicate%(n)sTest::finished_; """ % DEFS tests += Iter(n, """int Predicate%%(n)sTest::n%s_; """) % DEFS tests += """ typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest; typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest; typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest; typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest; """ % DEFS def GenTest(use_format, use_assert, expect_failure, use_functor, use_user_type): """Returns the test for a predicate assertion macro. Args: use_format: true iff the assertion is a *_PRED_FORMAT*. use_assert: true iff the assertion is a ASSERT_*. 
expect_failure: true iff the assertion is expected to fail. use_functor: true iff the first argument of the assertion is a functor (as opposed to a function) use_user_type: true iff the predicate functor/function takes argument(s) of a user-defined type. Example: GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior of a successful EXPECT_PRED_FORMATn() that takes a functor whose arguments have built-in types.""" if use_assert: assrt = 'ASSERT' # 'assert' is reserved, so we cannot use # that identifier here. else: assrt = 'EXPECT' assertion = assrt + '_PRED' if use_format: pred_format = 'PredFormat' assertion += '_FORMAT' else: pred_format = 'Pred' assertion += '%(n)s' % DEFS if use_functor: pred_format_type = 'functor' pred_format += 'Functor%(n)s()' else: pred_format_type = 'function' pred_format += 'Function%(n)s' if not use_format: if use_user_type: pred_format += 'Bool' else: pred_format += 'Int' test_name = pred_format_type.title() if use_user_type: arg_type = 'user-defined type (Bool)' test_name += 'OnUserType' if expect_failure: arg = 'Bool(n%s_++)' else: arg = 'Bool(++n%s_)' else: arg_type = 'built-in type (int)' test_name += 'OnBuiltInType' if expect_failure: arg = 'n%s_++' else: arg = '++n%s_' if expect_failure: successful_or_failed = 'failed' expected_or_not = 'expected.' test_name += 'Failure' else: successful_or_failed = 'successful' expected_or_not = 'UNEXPECTED!' test_name += 'Success' # A map that defines the values used in the test template. defs = DEFS.copy() defs.update({ 'assert' : assrt, 'assertion' : assertion, 'test_name' : test_name, 'pf_type' : pred_format_type, 'pf' : pred_format, 'arg_type' : arg_type, 'arg' : arg, 'successful' : successful_or_failed, 'expected' : expected_or_not, }) test = """ // Tests a %(successful)s %(assertion)s where the // predicate-formatter is a %(pf_type)s on a %(arg_type)s. 
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs indent = (len(assertion) + 3)*' ' extra_indent = '' if expect_failure: extra_indent = ' ' if use_assert: test += """ expected_to_finish_ = false; EXPECT_FATAL_FAILURE({ // NOLINT""" else: test += """ EXPECT_NONFATAL_FAILURE({ // NOLINT""" test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs test = test % defs test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs) test += ');\n' + extra_indent + ' finished_ = true;\n' if expect_failure: test += ' }, "");\n' test += '}\n' return test # Generates tests for all 2**6 = 64 combinations. tests += ''.join([GenTest(use_format, use_assert, expect_failure, use_functor, use_user_type) for use_format in [0, 1] for use_assert in [0, 1] for expect_failure in [0, 1] for use_functor in [0, 1] for use_user_type in [0, 1] ]) return tests def UnitTestPostamble(): """Returns the postamble for the tests.""" return '' def GenerateUnitTest(n): """Returns the tests for up-to n-ary predicate assertions.""" GenerateFile(UNIT_TEST, UnitTestPreamble() + ''.join([TestsForArity(i) for i in OneTo(n)]) + UnitTestPostamble()) def _Main(): """The entry point of the script. Generates the header file and its unit test.""" if len(sys.argv) != 2: print __doc__ print 'Author: ' + __author__ sys.exit(1) n = int(sys.argv[1]) GenerateHeader(n) GenerateUnitTest(n) if __name__ == '__main__': _Main()
bsd-3-clause
feer56/Kitsune2
kitsune/kpi/surveygizmo_utils.py
13
4648
import json from datetime import timedelta from django.conf import settings import requests SURVEYS = { 'general': { # This is for users browsing the KB and navigation pages. 'email_collection_survey_id': 1002970, 'exit_survey_id': 991425, 'exit_survey_campaign_id': 878533, }, 'questions': { # This is for users that are browsing questions. 'email_collection_survey_id': 1717268, 'exit_survey_id': 1724445, 'exit_survey_campaign_id': 1687339, }, 'askers': { # This is for users that asked a question 2 days ago. 'exit_survey_id': 1817790, 'exit_survey_campaign_id': 1876443, }, 'kb-firefox-android': { # This is for KB users looking at Firefox for Android pages. 'email_collection_survey_id': 1983780, 'exit_survey_id': 1979872, 'exit_survey_campaign_id': 2208951, }, } def get_email_addresses(survey, startdate, enddate): """Get the email addresses collected between startdate and enddate.""" user = settings.SURVEYGIZMO_USER password = settings.SURVEYGIZMO_PASSWORD emails = [] page = 1 more_pages = True survey_id = SURVEYS[survey]['email_collection_survey_id'] while more_pages: response = requests.get( 'https://restapi.surveygizmo.com/v2/survey/{survey}' '/surveyresponse?' 
'filter[field][0]=datesubmitted' '&filter[operator][0]=>=&filter[value][0]={start}+0:0:0' 'filter[field][1]=datesubmitted' '&filter[operator][1]=<&filter[value][1]={end}+0:0:0' '&filter[field][2]=status&filter[operator][2]==' '&filter[value][2]=Complete' '&resultsperpage=500' '&page={page}' '&user:pass={user}:{password}'.format( survey=survey_id, start=startdate, end=enddate, page=page, user=user, password=password), timeout=300) results = json.loads(response.content) total_pages = results['total_pages'] more_pages = page < total_pages emails = emails + [r['[question(13)]'] for r in results['data']] page += 1 return emails def add_email_to_campaign(survey, email): """Add email to the exit survey campaign.""" user = settings.SURVEYGIZMO_USER password = settings.SURVEYGIZMO_PASSWORD survey_id = SURVEYS[survey]['exit_survey_id'] campaign_id = SURVEYS[survey]['exit_survey_campaign_id'] try: requests.put( 'https://restapi.surveygizmo.com/v2/survey/{survey}' '/surveycampaign/{campaign}/contact?' 'semailaddress={email}' '&user:pass={user}:{password}'.format( survey=survey_id, campaign=campaign_id, email=email, user=user, password=password), timeout=30) except requests.exceptions.Timeout: print 'Timedout adding: %s' % email def get_exit_survey_results(survey, date): """Collect and aggregate the exit survey results for the date.""" user = settings.SURVEYGIZMO_USER password = settings.SURVEYGIZMO_PASSWORD answers = [] page = 1 more_pages = True survey_id = SURVEYS[survey]['exit_survey_id'] while more_pages: response = requests.get( 'https://restapi.surveygizmo.com/v2/survey/{survey}' '/surveyresponse?' 
'filter[field][0]=datesubmitted' '&filter[operator][0]=>=&filter[value][0]={start}+0:0:0' '&filter[field][1]=datesubmitted' '&filter[operator][1]=<&filter[value][1]={end}+0:0:0' '&filter[field][2]=status&filter[operator][2]==' '&filter[value][2]=Complete' '&resultsperpage=500' '&page={page}' '&user:pass={user}:{password}'.format( survey=survey_id, start=date, end=date + timedelta(days=1), page=page, user=user, password=password), timeout=300) results = json.loads(response.content) total_pages = results.get('total_pages', 0) more_pages = page < total_pages answers = answers + [r.get('[question(2)]') for r in results.get('data', [])] page += 1 # Aggregate results. summary = { 'yes': 0, 'no': 0, 'dont-know': 0, } for answer in answers: lower_stripped = answer.lower().strip() if lower_stripped in ['no', 'yes']: summary[lower_stripped] += 1 else: summary['dont-know'] += 1 return summary
bsd-3-clause
b-me/django
tests/model_options/test_tablespaces.py
337
5389
from __future__ import unicode_literals

from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature

from .models.tablespaces import (
    Article, ArticleRef, Authors, Reviewers, Scientist, ScientistRef,
)


def sql_for_table(model):
    """Return the CREATE TABLE statement the schema editor emits for model."""
    with connection.schema_editor(collect_sql=True) as editor:
        editor.create_model(model)
    return editor.collected_sql[0]


def sql_for_index(model):
    """Return model's CREATE INDEX statements joined into a single string."""
    return '\n'.join(connection.schema_editor()._model_indexes_sql(model))


# We can't test the DEFAULT_TABLESPACE and DEFAULT_INDEX_TABLESPACE settings
# because they're evaluated when the model class is defined. As a consequence,
# @override_settings doesn't work, and the tests depend on the values these
# settings had when the models module was imported.
class TablespacesTests(TestCase):

    def setUp(self):
        # The unmanaged models need to be removed after the test in order to
        # prevent bad interactions with the flush operation in other tests.
        self._old_models = apps.app_configs['model_options'].models.copy()

        for model in Article, Authors, Reviewers, Scientist:
            model._meta.managed = True

    def tearDown(self):
        for model in Article, Authors, Reviewers, Scientist:
            model._meta.managed = False

        apps.app_configs['model_options'].models = self._old_models
        apps.all_models['model_options'] = self._old_models
        apps.clear_cache()

    def assertNumContains(self, haystack, needle, count):
        """Assert needle occurs exactly count times in haystack."""
        real_count = haystack.count(needle)
        self.assertEqual(real_count, count, "Found %d instances of '%s', "
                "expected %d" % (real_count, needle, count))

    @skipUnlessDBFeature('supports_tablespaces')
    def test_tablespace_for_model(self):
        sql = sql_for_table(Scientist).lower()
        if settings.DEFAULT_INDEX_TABLESPACE:
            # 1 for the table
            self.assertNumContains(sql, 'tbl_tbsp', 1)
            # 1 for the index on the primary key
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
        else:
            # 1 for the table + 1 for the index on the primary key
            self.assertNumContains(sql, 'tbl_tbsp', 2)

    @skipIfDBFeature('supports_tablespaces')
    def test_tablespace_ignored_for_model(self):
        # No tablespace-related SQL
        self.assertEqual(sql_for_table(Scientist),
                         sql_for_table(ScientistRef))

    @skipUnlessDBFeature('supports_tablespaces')
    def test_tablespace_for_indexed_field(self):
        sql = sql_for_table(Article).lower()
        if settings.DEFAULT_INDEX_TABLESPACE:
            # 1 for the table
            self.assertNumContains(sql, 'tbl_tbsp', 1)
            # 1 for the primary key + 1 for the index on code
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
        else:
            # 1 for the table + 1 for the primary key + 1 for the index on code
            self.assertNumContains(sql, 'tbl_tbsp', 3)

        # 1 for the index on reference
        self.assertNumContains(sql, 'idx_tbsp', 1)

    @skipIfDBFeature('supports_tablespaces')
    def test_tablespace_ignored_for_indexed_field(self):
        # No tablespace-related SQL
        self.assertEqual(sql_for_table(Article),
                         sql_for_table(ArticleRef))

    @skipUnlessDBFeature('supports_tablespaces')
    def test_tablespace_for_many_to_many_field(self):
        sql = sql_for_table(Authors).lower()
        # The join table of the ManyToManyField goes to the model's tablespace,
        # and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
        if settings.DEFAULT_INDEX_TABLESPACE:
            # 1 for the table
            self.assertNumContains(sql, 'tbl_tbsp', 1)
            # 1 for the primary key
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
        else:
            # 1 for the table + 1 for the index on the primary key
            self.assertNumContains(sql, 'tbl_tbsp', 2)
        self.assertNumContains(sql, 'idx_tbsp', 0)

        sql = sql_for_index(Authors).lower()
        # The ManyToManyField declares no db_tablespace, its indexes go to
        # the model's tablespace, unless DEFAULT_INDEX_TABLESPACE is set.
        if settings.DEFAULT_INDEX_TABLESPACE:
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
        else:
            self.assertNumContains(sql, 'tbl_tbsp', 2)
        self.assertNumContains(sql, 'idx_tbsp', 0)

        sql = sql_for_table(Reviewers).lower()
        # The join table of the ManyToManyField goes to the model's tablespace,
        # and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
        if settings.DEFAULT_INDEX_TABLESPACE:
            # 1 for the table
            self.assertNumContains(sql, 'tbl_tbsp', 1)
            # 1 for the primary key
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
        else:
            # 1 for the table + 1 for the index on the primary key
            self.assertNumContains(sql, 'tbl_tbsp', 2)
        self.assertNumContains(sql, 'idx_tbsp', 0)

        sql = sql_for_index(Reviewers).lower()
        # The ManyToManyField declares db_tablespace, its indexes go there.
        self.assertNumContains(sql, 'tbl_tbsp', 0)
        self.assertNumContains(sql, 'idx_tbsp', 2)
bsd-3-clause
Pedram26/Humans-vs-Aliens
HumansAliens.app/Contents/Resources/lib/python2.7/pygame/midi.py
23
20840
"""pygame.midi pygame module for interacting with midi input and output. The midi module can send output to midi devices, and get input from midi devices. It can also list midi devices on the system. Including real midi devices, and virtual ones. It uses the portmidi library. Is portable to which ever platforms portmidi supports (currently windows, OSX, and linux). This uses pyportmidi for now, but may use its own bindings at some point in the future. The pyportmidi bindings are included with pygame. New in pygame 1.9.0. """ #TODO: # - finish writing tests. # - likely as interactive tests... so you'd need to plug in a midi device. # - create a background thread version for input threads. # - that can automatically inject input into the event queue # once the input object is running. Like joysticks. import pygame import pygame.locals import atexit # MIDIIN = pygame.locals.USEREVENT + 10 MIDIOUT = pygame.locals.USEREVENT + 11 _init = False _pypm = None __all__ = [ "Input", "MIDIIN", "MIDIOUT", "MidiException", "Output", "get_count", "get_default_input_id", "get_default_output_id", "get_device_info", "init", "midis2events", "quit", "time", ] __theclasses__ = ["Input", "Output"] def init(): """initialize the midi module pygame.midi.init(): return None Call the initialisation function before using the midi module. It is safe to call this more than once. """ global _init, _pypm if not _init: import pygame.pypm _pypm = pygame.pypm _pypm.Initialize() _init = True atexit.register(quit) def quit(): """uninitialize the midi module pygame.midi.quit(): return None Called automatically atexit if you don't call it. It is safe to call this function more than once. """ global _init, _pypm if _init: # TODO: find all Input and Output classes and close them first? _pypm.Terminate() _init = False del _pypm #del pygame._pypm def _check_init(): if not _init: raise RuntimeError("pygame.midi not initialised.") def get_count(): """gets the number of devices. 
pygame.midi.get_count(): return num_devices Device ids range from 0 to get_count() -1 """ _check_init() return _pypm.CountDevices() def get_default_input_id(): """gets default input device number pygame.midi.get_default_input_id(): return default_id Return the default device ID or -1 if there are no devices. The result can be passed to the Input()/Ouput() class. On the PC, the user can specify a default device by setting an environment variable. For example, to use device #1. set PM_RECOMMENDED_INPUT_DEVICE=1 The user should first determine the available device ID by using the supplied application "testin" or "testout". In general, the registry is a better place for this kind of info, and with USB devices that can come and go, using integers is not very reliable for device identification. Under Windows, if PM_RECOMMENDED_OUTPUT_DEVICE (or PM_RECOMMENDED_INPUT_DEVICE) is *NOT* found in the environment, then the default device is obtained by looking for a string in the registry under: HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Input_Device and HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Output_Device for a string. The number of the first device with a substring that matches the string exactly is returned. For example, if the string in the registry is "USB", and device 1 is named "In USB MidiSport 1x1", then that will be the default input because it contains the string "USB". In addition to the name, get_device_info() returns "interf", which is the interface name. (The "interface" is the underlying software system or API used by PortMidi to access devices. Examples are MMSystem, DirectX (not implemented), ALSA, OSS (not implemented), etc.) At present, the only Win32 interface is "MMSystem", the only Linux interface is "ALSA", and the only Max OS X interface is "CoreMIDI". 
To specify both the interface and the device name in the registry, separate the two with a comma and a space, e.g.: MMSystem, In USB MidiSport 1x1 In this case, the string before the comma must be a substring of the "interf" string, and the string after the space must be a substring of the "name" name string in order to match the device. Note: in the current release, the default is simply the first device (the input or output device with the lowest PmDeviceID). """ return _pypm.GetDefaultInputDeviceID() def get_default_output_id(): """gets default output device number pygame.midi.get_default_output_id(): return default_id Return the default device ID or -1 if there are no devices. The result can be passed to the Input()/Ouput() class. On the PC, the user can specify a default device by setting an environment variable. For example, to use device #1. set PM_RECOMMENDED_OUTPUT_DEVICE=1 The user should first determine the available device ID by using the supplied application "testin" or "testout". In general, the registry is a better place for this kind of info, and with USB devices that can come and go, using integers is not very reliable for device identification. Under Windows, if PM_RECOMMENDED_OUTPUT_DEVICE (or PM_RECOMMENDED_INPUT_DEVICE) is *NOT* found in the environment, then the default device is obtained by looking for a string in the registry under: HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Input_Device and HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Output_Device for a string. The number of the first device with a substring that matches the string exactly is returned. For example, if the string in the registry is "USB", and device 1 is named "In USB MidiSport 1x1", then that will be the default input because it contains the string "USB". In addition to the name, get_device_info() returns "interf", which is the interface name. (The "interface" is the underlying software system or API used by PortMidi to access devices. 
Examples are MMSystem, DirectX (not implemented), ALSA, OSS (not implemented), etc.) At present, the only Win32 interface is "MMSystem", the only Linux interface is "ALSA", and the only Max OS X interface is "CoreMIDI". To specify both the interface and the device name in the registry, separate the two with a comma and a space, e.g.: MMSystem, In USB MidiSport 1x1 In this case, the string before the comma must be a substring of the "interf" string, and the string after the space must be a substring of the "name" name string in order to match the device. Note: in the current release, the default is simply the first device (the input or output device with the lowest PmDeviceID). """ _check_init() return _pypm.GetDefaultOutputDeviceID() def get_device_info(an_id): """ returns information about a midi device pygame.midi.get_device_info(an_id): return (interf, name, input, output, opened) interf - a text string describing the device interface, eg 'ALSA'. name - a text string for the name of the device, eg 'Midi Through Port-0' input - 0, or 1 if the device is an input device. output - 0, or 1 if the device is an output device. opened - 0, or 1 if the device is opened. If the id is out of range, the function returns None. """ _check_init() return _pypm.GetDeviceInfo(an_id) class Input(object): """Input is used to get midi input from midi devices. Input(device_id) Input(device_id, buffer_size) buffer_size -the number of input events to be buffered waiting to be read using Input.read() """ def __init__(self, device_id, buffer_size=4096): """ The buffer_size specifies the number of input events to be buffered waiting to be read using Input.read(). """ _check_init() if device_id == -1: raise MidiException("Device id is -1, not a valid output id. 
-1 usually means there were no default Output devices.") try: r = get_device_info(device_id) except TypeError: raise TypeError("an integer is required") except OverflowError: raise OverflowError("long int too large to convert to int") # and now some nasty looking error checking, to provide nice error # messages to the kind, lovely, midi using people of whereever. if r: interf, name, input, output, opened = r if input: try: self._input = _pypm.Input(device_id, buffer_size) except TypeError: raise TypeError("an integer is required") self.device_id = device_id elif output: raise MidiException("Device id given is not a valid input id, it is an output id.") else: raise MidiException("Device id given is not a valid input id.") else: raise MidiException("Device id invalid, out of range.") def _check_open(self): if self._input is None: raise MidiException("midi not open.") def close(self): """ closes a midi stream, flushing any pending buffers. Input.close(): return None PortMidi attempts to close open streams when the application exits -- this is particularly difficult under Windows. """ _check_init() if not (self._input is None): self._input.Close() self._input = None def read(self, num_events): """reads num_events midi events from the buffer. Input.read(num_events): return midi_event_list Reads from the Input buffer and gives back midi events. [[[status,data1,data2,data3],timestamp], [[status,data1,data2,data3],timestamp],...] """ _check_init() self._check_open() return self._input.Read(num_events) def poll(self): """returns true if there's data, or false if not. Input.poll(): return Bool raises a MidiException on error. 
""" _check_init() self._check_open() r = self._input.Poll() if r == _pypm.TRUE: return True elif r == _pypm.FALSE: return False else: err_text = GetErrorText(r) raise MidiException( (r, err_text) ) class Output(object): """Output is used to send midi to an output device Output(device_id) Output(device_id, latency = 0) Output(device_id, buffer_size = 4096) Output(device_id, latency, buffer_size) The buffer_size specifies the number of output events to be buffered waiting for output. (In some cases -- see below -- PortMidi does not buffer output at all and merely passes data to a lower-level API, in which case buffersize is ignored.) latency is the delay in milliseconds applied to timestamps to determine when the output should actually occur. (If latency is < 0, 0 is assumed.) If latency is zero, timestamps are ignored and all output is delivered immediately. If latency is greater than zero, output is delayed until the message timestamp plus the latency. (NOTE: time is measured relative to the time source indicated by time_proc. Timestamps are absolute, not relative delays or offsets.) In some cases, PortMidi can obtain better timing than your application by passing timestamps along to the device driver or hardware. Latency may also help you to synchronize midi data to audio data by matching midi latency to the audio buffer latency. """ def __init__(self, device_id, latency = 0, buffer_size = 4096): """Output(device_id) Output(device_id, latency = 0) Output(device_id, buffer_size = 4096) Output(device_id, latency, buffer_size) The buffer_size specifies the number of output events to be buffered waiting for output. (In some cases -- see below -- PortMidi does not buffer output at all and merely passes data to a lower-level API, in which case buffersize is ignored.) latency is the delay in milliseconds applied to timestamps to determine when the output should actually occur. (If latency is < 0, 0 is assumed.) 
If latency is zero, timestamps are ignored and all output is delivered immediately. If latency is greater than zero, output is delayed until the message timestamp plus the latency. (NOTE: time is measured relative to the time source indicated by time_proc. Timestamps are absolute, not relative delays or offsets.) In some cases, PortMidi can obtain better timing than your application by passing timestamps along to the device driver or hardware. Latency may also help you to synchronize midi data to audio data by matching midi latency to the audio buffer latency. """ _check_init() self._aborted = 0 if device_id == -1: raise MidiException("Device id is -1, not a valid output id. -1 usually means there were no default Output devices.") try: r = get_device_info(device_id) except TypeError: raise TypeError("an integer is required") except OverflowError: raise OverflowError("long int too large to convert to int") # and now some nasty looking error checking, to provide nice error # messages to the kind, lovely, midi using people of whereever. if r: interf, name, input, output, opened = r if output: try: self._output = _pypm.Output(device_id, latency) except TypeError: raise TypeError("an integer is required") self.device_id = device_id elif input: raise MidiException("Device id given is not a valid output id, it is an input id.") else: raise MidiException("Device id given is not a valid output id.") else: raise MidiException("Device id invalid, out of range.") def _check_open(self): if self._output is None: raise MidiException("midi not open.") if self._aborted: raise MidiException("midi aborted.") def close(self): """ closes a midi stream, flushing any pending buffers. Output.close(): return None PortMidi attempts to close open streams when the application exits -- this is particularly difficult under Windows. 
""" _check_init() if not (self._output is None): self._output.Close() self._output = None def abort(self): """terminates outgoing messages immediately Output.abort(): return None The caller should immediately close the output port; this call may result in transmission of a partial midi message. There is no abort for Midi input because the user can simply ignore messages in the buffer and close an input device at any time. """ _check_init() if self._output: self._output.Abort() self._aborted = 1 def write(self, data): """writes a list of midi data to the Output Output.write(data) writes series of MIDI information in the form of a list: write([[[status <,data1><,data2><,data3>],timestamp], [[status <,data1><,data2><,data3>],timestamp],...]) <data> fields are optional example: choose program change 1 at time 20000 and send note 65 with velocity 100 500 ms later. write([[[0xc0,0,0],20000],[[0x90,60,100],20500]]) notes: 1. timestamps will be ignored if latency = 0. 2. To get a note to play immediately, send MIDI info with timestamp read from function Time. 3. understanding optional data fields: write([[[0xc0,0,0],20000]]) is equivalent to write([[[0xc0],20000]]) Can send up to 1024 elements in your data list, otherwise an IndexError exception is raised. """ _check_init() self._check_open() self._output.Write(data) def write_short(self, status, data1 = 0, data2 = 0): """write_short(status <, data1><, data2>) Output.write_short(status) Output.write_short(status, data1 = 0, data2 = 0) output MIDI information of 3 bytes or less. data fields are optional status byte could be: 0xc0 = program change 0x90 = note on etc. data bytes are optional and assumed 0 if omitted example: note 65 on with velocity 100 write_short(0x90,65,100) """ _check_init() self._check_open() self._output.WriteShort(status, data1, data2) def write_sys_ex(self, when, msg): """writes a timestamped system-exclusive midi message. 
Output.write_sys_ex(when, msg) msg - can be a *list* or a *string* when - a timestamp in miliseconds example: (assuming o is an onput MIDI stream) o.write_sys_ex(0,'\\xF0\\x7D\\x10\\x11\\x12\\x13\\xF7') is equivalent to o.write_sys_ex(pygame.midi.time(), [0xF0,0x7D,0x10,0x11,0x12,0x13,0xF7]) """ _check_init() self._check_open() self._output.WriteSysEx(when, msg) def note_on(self, note, velocity=None, channel = 0): """turns a midi note on. Note must be off. Output.note_on(note, velocity=None, channel = 0) Turn a note on in the output stream. The note must already be off for this to work correctly. """ if velocity is None: velocity = 0 if not (0 <= channel <= 15): raise ValueError("Channel not between 0 and 15.") self.write_short(0x90+channel, note, velocity) def note_off(self, note, velocity=None, channel = 0): """turns a midi note off. Note must be on. Output.note_off(note, velocity=None, channel = 0) Turn a note off in the output stream. The note must already be on for this to work correctly. """ if velocity is None: velocity = 0 if not (0 <= channel <= 15): raise ValueError("Channel not between 0 and 15.") self.write_short(0x80 + channel, note, velocity) def set_instrument(self, instrument_id, channel = 0): """select an instrument, with a value between 0 and 127 Output.set_instrument(instrument_id, channel = 0) """ if not (0 <= instrument_id <= 127): raise ValueError("Undefined instrument id: %d" % instrument_id) if not (0 <= channel <= 15): raise ValueError("Channel not between 0 and 15.") self.write_short(0xc0+channel, instrument_id) def time(): """returns the current time in ms of the PortMidi timer pygame.midi.time(): return time The time is reset to 0, when the module is inited. """ return _pypm.Time() def midis2events(midis, device_id): """converts midi events to pygame events pygame.midi.midis2events(midis, device_id): return [Event, ...] Takes a sequence of midi events and returns list of pygame events. 
""" evs = [] for midi in midis: ((status,data1,data2,data3),timestamp) = midi e = pygame.event.Event(MIDIIN, status=status, data1=data1, data2=data2, data3=data3, timestamp=timestamp, vice_id = device_id) evs.append( e ) return evs class MidiException(Exception): """exception that pygame.midi functions and classes can raise MidiException(errno) """ def __init__(self, value): self.parameter = value def __str__(self): return repr(self.parameter)
apache-2.0
pombreda/pyfilesystem
fs/wrapfs/hidedotfilesfs.py
16
3395
""" fs.wrapfs.hidedotfilesfs ======================== An FS wrapper class for hiding dot-files in directory listings. """ from fs.wrapfs import WrapFS from fs.path import * from fnmatch import fnmatch class HideDotFilesFS(WrapFS): """FS wrapper class that hides dot-files in directory listings. The listdir() function takes an extra keyword argument 'hidden' indicating whether hidden dot-files should be included in the output. It is False by default. """ def is_hidden(self, path): """Check whether the given path should be hidden.""" return path and basename(path)[0] == "." def _encode(self, path): return path def _decode(self, path): return path def listdir(self, path="", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False, hidden=False): kwds = dict(wildcard=wildcard, full=full, absolute=absolute, dirs_only=dirs_only, files_only=files_only) entries = self.wrapped_fs.listdir(path,**kwds) if not hidden: entries = [e for e in entries if not self.is_hidden(e)] return entries def ilistdir(self, path="", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False, hidden=False): kwds = dict(wildcard=wildcard, full=full, absolute=absolute, dirs_only=dirs_only, files_only=files_only) for e in self.wrapped_fs.ilistdir(path,**kwds): if hidden or not self.is_hidden(e): yield e def walk(self, path="/", wildcard=None, dir_wildcard=None, search="breadth",hidden=False): if search == "breadth": dirs = [path] while dirs: current_path = dirs.pop() paths = [] for filename in self.listdir(current_path,hidden=hidden): path = pathjoin(current_path, filename) if self.isdir(path): if dir_wildcard is not None: if fnmatch(path, dir_wildcard): dirs.append(path) else: dirs.append(path) else: if wildcard is not None: if fnmatch(path, wildcard): paths.append(filename) else: paths.append(filename) yield (current_path, paths) elif search == "depth": def recurse(recurse_path): for path in self.listdir(recurse_path, wildcard=dir_wildcard, full=True, 
dirs_only=True,hidden=hidden): for p in recurse(path): yield p yield (recurse_path, self.listdir(recurse_path, wildcard=wildcard, files_only=True,hidden=hidden)) for p in recurse(path): yield p else: raise ValueError("Search should be 'breadth' or 'depth'") def isdirempty(self, path): path = normpath(path) iter_dir = iter(self.listdir(path,hidden=True)) try: iter_dir.next() except StopIteration: return True return False
bsd-3-clause
Murloc992/ZombieGameProject
libs/googletest/test/gtest_throw_on_failure_test.py
2917
5766
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests Google Test's throw-on-failure mode with exceptions disabled. This script invokes gtest_throw_on_failure_test_ (a program written with Google Test) with different environments and command line flags. """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils # Constants. # The command line flag for enabling/disabling the throw-on-failure mode. 
THROW_ON_FAILURE = 'gtest_throw_on_failure' # Path to the gtest_throw_on_failure_test_ program, compiled with # exceptions disabled. EXE_PATH = gtest_test_utils.GetTestExecutablePath( 'gtest_throw_on_failure_test_') # Utilities. def SetEnvVar(env_var, value): """Sets an environment variable to a given value; unsets it when the given value is None. """ env_var = env_var.upper() if value is not None: os.environ[env_var] = value elif env_var in os.environ: del os.environ[env_var] def Run(command): """Runs a command; returns True/False if its exit code is/isn't 0.""" print 'Running "%s". . .' % ' '.join(command) p = gtest_test_utils.Subprocess(command) return p.exited and p.exit_code == 0 # The tests. TODO(wan@google.com): refactor the class to share common # logic with code in gtest_break_on_failure_unittest.py. class ThrowOnFailureTest(gtest_test_utils.TestCase): """Tests the throw-on-failure mode.""" def RunAndVerify(self, env_var_value, flag_value, should_fail): """Runs gtest_throw_on_failure_test_ and verifies that it does (or does not) exit with a non-zero code. Args: env_var_value: value of the GTEST_BREAK_ON_FAILURE environment variable; None if the variable should be unset. flag_value: value of the --gtest_break_on_failure flag; None if the flag should not be present. should_fail: True iff the program is expected to fail. """ SetEnvVar(THROW_ON_FAILURE, env_var_value) if env_var_value is None: env_var_value_msg = ' is not set' else: env_var_value_msg = '=' + env_var_value if flag_value is None: flag = '' elif flag_value == '0': flag = '--%s=0' % THROW_ON_FAILURE else: flag = '--%s' % THROW_ON_FAILURE command = [EXE_PATH] if flag: command.append(flag) if should_fail: should_or_not = 'should' else: should_or_not = 'should not' failed = not Run(command) SetEnvVar(THROW_ON_FAILURE, None) msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero ' 'exit code.' 
% (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command), should_or_not)) self.assert_(failed == should_fail, msg) def testDefaultBehavior(self): """Tests the behavior of the default mode.""" self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False) def testThrowOnFailureEnvVar(self): """Tests using the GTEST_THROW_ON_FAILURE environment variable.""" self.RunAndVerify(env_var_value='0', flag_value=None, should_fail=False) self.RunAndVerify(env_var_value='1', flag_value=None, should_fail=True) def testThrowOnFailureFlag(self): """Tests using the --gtest_throw_on_failure flag.""" self.RunAndVerify(env_var_value=None, flag_value='0', should_fail=False) self.RunAndVerify(env_var_value=None, flag_value='1', should_fail=True) def testThrowOnFailureFlagOverridesEnvVar(self): """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE.""" self.RunAndVerify(env_var_value='0', flag_value='0', should_fail=False) self.RunAndVerify(env_var_value='0', flag_value='1', should_fail=True) self.RunAndVerify(env_var_value='1', flag_value='0', should_fail=False) self.RunAndVerify(env_var_value='1', flag_value='1', should_fail=True) if __name__ == '__main__': gtest_test_utils.Main()
gpl-3.0
rdujardin/icforum
icforum/chat/api/serializers.py
1
1459
# Copyright 2016 Infinite Connection # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from rest_framework import serializers from ..models import * from users.api.serializers import UserSerializer class RoomInputSerializer(serializers.ModelSerializer): class Meta: model = Room fields = ('id', 'name', 'members') class RoomOutputSerializer(serializers.ModelSerializer): class Meta: model = Room fields = ('id', 'name', 'members') members = UserSerializer(many=True) class ChatMessageInputSerializer(serializers.ModelSerializer): class Meta: model = ChatMessage fields = ('id', 'room', 'author', 'date', 'content') date = serializers.ReadOnlyField() author = serializers.ReadOnlyField(source='author.pk') class ChatMessageOutputSerializer(serializers.ModelSerializer): class Meta: model = ChatMessage fields = ('id', 'room', 'author', 'date', 'content') date = serializers.ReadOnlyField() author = UserSerializer(read_only=True)
apache-2.0
ujenmr/ansible
lib/ansible/modules/network/nxos/nxos_install_os.py
14
21738
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_install_os extends_documentation_fragment: nxos short_description: Set boot options like boot, kickstart image and issu. description: - Install an operating system by setting the boot options like boot image and kickstart image and optionally select to install using ISSU (In Server Software Upgrade). notes: - Tested against the following platforms and images - N9k 7.0(3)I4(6), 7.0(3)I5(3), 7.0(3)I6(1), 7.0(3)I7(1), 7.0(3)F2(2), 7.0(3)F3(2) - N3k 6.0(2)A8(6), 6.0(2)A8(8), 7.0(3)I6(1), 7.0(3)I7(1) - N7k 7.3(0)D1(1), 8.0(1), 8.1(1), 8.2(1) - This module requires both the ANSIBLE_PERSISTENT_CONNECT_TIMEOUT and ANSIBLE_PERSISTENT_COMMAND_TIMEOUT timers to be set to 600 seconds or higher. The module will exit if the timers are not set properly. - Do not include full file paths, just the name of the file(s) stored on the top level flash directory. - This module attempts to install the software immediately, which may trigger a reboot. - In check mode, the module will indicate if an upgrade is needed and whether or not the upgrade is disruptive or non-disruptive(ISSU). 
author: - Jason Edelman (@jedelman8) - Gabriele Gerbibo (@GGabriele) version_added: 2.2 options: system_image_file: description: - Name of the system (or combined) image file on flash. required: true kickstart_image_file: description: - Name of the kickstart image file on flash. (Not required on all Nexus platforms) issu: version_added: "2.5" description: - Upgrade using In Service Software Upgrade (ISSU). (Supported on N5k, N7k, N9k platforms) - Selecting 'required' or 'yes' means that upgrades will only proceed if the switch is capable of ISSU. - Selecting 'desired' means that upgrades will use ISSU if possible but will fall back to disruptive upgrade if needed. - Selecting 'no' means do not use ISSU. Forced disruptive. choices: ['required','desired', 'yes', 'no'] default: 'no' ''' EXAMPLES = ''' - name: Install OS on N9k check_mode: no nxos_install_os: system_image_file: nxos.7.0.3.I6.1.bin issu: desired - name: Wait for device to come back up with new image wait_for: port: 22 state: started timeout: 500 delay: 60 host: "{{ inventory_hostname }}" - name: Check installed OS for newly installed version nxos_command: commands: ['show version | json'] provider: "{{ connection }}" register: output - assert: that: - output['stdout'][0]['kickstart_ver_str'] == '7.0(3)I6(1)' ''' RETURN = ''' install_state: description: Boot and install information. 
returned: always type: dict sample: { "install_state": [ "Compatibility check is done:", "Module bootable Impact Install-type Reason", "------ -------- -------------- ------------ ------", " 1 yes non-disruptive reset ", "Images will be upgraded according to following table:", "Module Image Running-Version(pri:alt) New-Version Upg-Required", "------ ---------- ---------------------------------------- -------------------- ------------", " 1 nxos 7.0(3)I6(1) 7.0(3)I7(1) yes", " 1 bios v4.4.0(07/12/2017) v4.4.0(07/12/2017) no" ], } ''' import re from time import sleep from ansible.module_utils.network.nxos.nxos import load_config, run_commands from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule # Output options are 'text' or 'json' def execute_show_command(module, command, output='text'): cmds = [{ 'command': command, 'output': output, }] return run_commands(module, cmds) def get_platform(module): """Determine platform type""" data = execute_show_command(module, 'show inventory', 'json') pid = data[0]['TABLE_inv']['ROW_inv'][0]['productid'] if re.search(r'N3K', pid): type = 'N3K' elif re.search(r'N5K', pid): type = 'N5K' elif re.search(r'N6K', pid): type = 'N6K' elif re.search(r'N7K', pid): type = 'N7K' elif re.search(r'N9K', pid): type = 'N9K' else: type = 'unknown' return type def parse_show_install(data): """Helper method to parse the output of the 'show install all impact' or 'install all' commands. Sample Output: Installer will perform impact only check. Please wait. Verifying image bootflash:/nxos.7.0.3.F2.2.bin for boot variable "nxos". [####################] 100% -- SUCCESS Verifying image type. [####################] 100% -- SUCCESS Preparing "bios" version info using image bootflash:/nxos.7.0.3.F2.2.bin. [####################] 100% -- SUCCESS Preparing "nxos" version info using image bootflash:/nxos.7.0.3.F2.2.bin. 
[####################] 100% -- SUCCESS Performing module support checks. [####################] 100% -- SUCCESS Notifying services about system upgrade. [####################] 100% -- SUCCESS Compatibility check is done: Module bootable Impact Install-type Reason ------ -------- -------------- ------------ ------ 8 yes disruptive reset Incompatible image for ISSU 21 yes disruptive reset Incompatible image for ISSU Images will be upgraded according to following table: Module Image Running-Version(pri:alt) New-Version Upg-Required ------ ---------- ---------------------------------------- ------------ 8 lcn9k 7.0(3)F3(2) 7.0(3)F2(2) yes 8 bios v01.17 v01.17 no 21 lcn9k 7.0(3)F3(2) 7.0(3)F2(2) yes 21 bios v01.70 v01.70 no """ if len(data) > 0: data = massage_install_data(data) ud = {'raw': data} ud['processed'] = [] ud['disruptive'] = False ud['upgrade_needed'] = False ud['error'] = False ud['invalid_command'] = False ud['install_in_progress'] = False ud['server_error'] = False ud['upgrade_succeeded'] = False ud['use_impact_data'] = False # Check for server errors if isinstance(data, int): if data == -1: ud['server_error'] = True elif data >= 500: ud['server_error'] = True elif data == -32603: ud['server_error'] = True return ud else: ud['list_data'] = data.split('\n') for x in ud['list_data']: # Check for errors and exit if found. 
if re.search(r'Pre-upgrade check failed', x): ud['error'] = True break if re.search(r'[I|i]nvalid command', x): ud['invalid_command'] = True ud['error'] = True break if re.search(r'No install all data found', x): ud['error'] = True break # Check for potentially transient conditions if re.search(r'Another install procedure may\s*be in progress', x): ud['install_in_progress'] = True break if re.search(r'Backend processing error', x): ud['server_error'] = True break if re.search(r'timed out', x): ud['server_error'] = True break if re.search(r'^(-1|5\d\d)$', x): ud['server_error'] = True break # Check for messages indicating a successful upgrade. if re.search(r'Finishing the upgrade', x): ud['upgrade_succeeded'] = True break if re.search(r'Install has been successful', x): ud['upgrade_succeeded'] = True break if re.search(r'Switching over onto standby', x): ud['upgrade_succeeded'] = True break # We get these messages when the upgrade is non-disruptive and # we loose connection with the switchover but far enough along that # we can be confident the upgrade succeeded. if re.search(r'timeout trying to send command: install', x): ud['upgrade_succeeded'] = True ud['use_impact_data'] = True break if re.search(r'[C|c]onnection failure: timed out', x): ud['upgrade_succeeded'] = True ud['use_impact_data'] = True break # Begin normal parsing. if re.search(r'----|Module|Images will|Compatibility', x): ud['processed'].append(x) continue # Check to see if upgrade will be disruptive or non-disruptive and # build dictionary of individual modules and their status. 
# Sample Line: # # Module bootable Impact Install-type Reason # ------ -------- ---------- ------------ ------ # 8 yes disruptive reset Incompatible image rd = r'(\d+)\s+(\S+)\s+(disruptive|non-disruptive)\s+(\S+)' mo = re.search(rd, x) if mo: ud['processed'].append(x) key = 'm%s' % mo.group(1) field = 'disruptive' if mo.group(3) == 'non-disruptive': ud[key] = {field: False} else: ud[field] = True ud[key] = {field: True} field = 'bootable' if mo.group(2) == 'yes': ud[key].update({field: True}) else: ud[key].update({field: False}) continue # Check to see if switch needs an upgrade and build a dictionary # of individual modules and their individual upgrade status. # Sample Line: # # Module Image Running-Version(pri:alt) New-Version Upg-Required # ------ ----- ---------------------------------------- ------------ # 8 lcn9k 7.0(3)F3(2) 7.0(3)F2(2) yes mo = re.search(r'(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(yes|no)', x) if mo: ud['processed'].append(x) key = 'm%s_%s' % (mo.group(1), mo.group(2)) field = 'upgrade_needed' if mo.group(5) == 'yes': ud[field] = True ud[key] = {field: True} else: ud[key] = {field: False} continue return ud def massage_install_data(data): # Transport cli returns a list containing one result item. # Transport nxapi returns a list containing two items. The second item # contains the data we are interested in. 
default_error_msg = 'No install all data found' if len(data) == 1: result_data = data[0] elif len(data) == 2: result_data = data[1] else: result_data = default_error_msg # Further processing may be needed for result_data if len(data) == 2 and isinstance(data[1], dict): if 'clierror' in data[1].keys(): result_data = data[1]['clierror'] elif 'code' in data[1].keys() and data[1]['code'] == '500': # We encountered a backend processing error for nxapi result_data = data[1]['msg'] else: result_data = default_error_msg return result_data def build_install_cmd_set(issu, image, kick, type, force=True): commands = ['terminal dont-ask'] # Different NX-OS plaforms behave differently for # disruptive and non-disruptive upgrade paths. # # 1) Combined kickstart/system image: # * Use option 'non-disruptive' for issu. # * Omit option non-disruptive' for distruptive upgrades. # 2) Separate kickstart + system images. # * Omit hidden 'force' option for issu. # * Use hidden 'force' option for disruptive upgrades. # * Note: Not supported on all platforms if re.search(r'required|desired|yes', issu): if kick is None: issu_cmd = 'non-disruptive' else: issu_cmd = '' else: if kick is None: issu_cmd = '' else: issu_cmd = 'force' if force else '' if type == 'impact': rootcmd = 'show install all impact' # The force option is not available for the impact command. 
if kick: issu_cmd = '' else: rootcmd = 'install all' if kick is None: commands.append( '%s nxos %s %s' % (rootcmd, image, issu_cmd)) else: commands.append( '%s %s system %s kickstart %s' % (rootcmd, issu_cmd, image, kick)) return commands def parse_show_version(data): version_data = {'raw': data[0].split('\n')} version_data['version'] = '' version_data['error'] = False for x in version_data['raw']: mo = re.search(r'(kickstart|system|NXOS):\s+version\s+(\S+)', x) if mo: version_data['version'] = mo.group(2) continue if version_data['version'] == '': version_data['error'] = True return version_data def check_mode_legacy(module, issu, image, kick=None): """Some platforms/images/transports don't support the 'install all impact' command so we need to use a different method.""" current = execute_show_command(module, 'show version', 'json')[0] # Call parse_show_data on empty string to create the default upgrade # data stucture dictionary data = parse_show_install('') upgrade_msg = 'No upgrade required' # Process System Image data['error'] = False tsver = 'show version image bootflash:%s' % image data['upgrade_cmd'] = [tsver] target_image = parse_show_version(execute_show_command(module, tsver)) if target_image['error']: data['error'] = True data['raw'] = target_image['raw'] if current['kickstart_ver_str'] != target_image['version'] and not data['error']: data['upgrade_needed'] = True data['disruptive'] = True upgrade_msg = 'Switch upgraded: system: %s' % tsver # Process Kickstart Image if kick is not None and not data['error']: tkver = 'show version image bootflash:%s' % kick data['upgrade_cmd'].append(tsver) target_kick = parse_show_version(execute_show_command(module, tkver)) if target_kick['error']: data['error'] = True data['raw'] = target_kick['raw'] if current['kickstart_ver_str'] != target_kick['version'] and not data['error']: data['upgrade_needed'] = True data['disruptive'] = True upgrade_msg = upgrade_msg + ' kickstart: %s' % tkver data['list_data'] = 
data['raw'] data['processed'] = upgrade_msg return data def check_mode_nextgen(module, issu, image, kick=None): """Use the 'install all impact' command for check_mode""" opts = {'ignore_timeout': True} commands = build_install_cmd_set(issu, image, kick, 'impact') data = parse_show_install(load_config(module, commands, True, opts)) # If an error is encountered when issu is 'desired' then try again # but set issu to 'no' if data['error'] and issu == 'desired': issu = 'no' commands = build_install_cmd_set(issu, image, kick, 'impact') # The system may be busy from the previous call to check_mode so loop # until it's done. data = check_install_in_progress(module, commands, opts) if data['server_error']: data['error'] = True data['upgrade_cmd'] = commands return data def check_install_in_progress(module, commands, opts): for attempt in range(20): data = parse_show_install(load_config(module, commands, True, opts)) if data['install_in_progress']: sleep(1) continue break return data def check_mode(module, issu, image, kick=None): """Check switch upgrade impact using 'show install all impact' command""" data = check_mode_nextgen(module, issu, image, kick) if data['server_error']: # We encountered an unrecoverable error in the attempt to get upgrade # impact data from the 'show install all impact' command. # Fallback to legacy method. data = check_mode_legacy(module, issu, image, kick) if data['invalid_command']: # If we are upgrading from a device running a separate kickstart and # system image the impact command will fail. # Fallback to legacy method. data = check_mode_legacy(module, issu, image, kick) return data def do_install_all(module, issu, image, kick=None): """Perform the switch upgrade using the 'install all' command""" impact_data = check_mode(module, issu, image, kick) if module.check_mode: # Check mode set in the playbook so just return the impact data. 
msg = '*** SWITCH WAS NOT UPGRADED: IMPACT DATA ONLY ***' impact_data['processed'].append(msg) return impact_data if impact_data['error']: # Check mode discovered an error so return with this info. return impact_data elif not impact_data['upgrade_needed']: # The switch is already upgraded. Nothing more to do. return impact_data else: # If we get here, check_mode returned no errors and the switch # needs to be upgraded. if impact_data['disruptive']: # Check mode indicated that ISSU is not possible so issue the # upgrade command without the non-disruptive flag unless the # playbook specified issu: yes/required. if issu == 'yes': msg = 'ISSU/ISSD requested but impact data indicates ISSU/ISSD is not possible' module.fail_json(msg=msg, raw_data=impact_data['list_data']) else: issu = 'no' commands = build_install_cmd_set(issu, image, kick, 'install') opts = {'ignore_timeout': True} # The system may be busy from the call to check_mode so loop until # it's done. upgrade = check_install_in_progress(module, commands, opts) if upgrade['invalid_command'] and 'force' in commands[1]: # Not all platforms support the 'force' keyword. Check for this # condition and re-try without the 'force' keyword if needed. commands = build_install_cmd_set(issu, image, kick, 'install', False) upgrade = check_install_in_progress(module, commands, opts) upgrade['upgrade_cmd'] = commands # Special case: If we encounter a server error at this stage # it means the command was sent and the upgrade was started but # we will need to use the impact data instead of the current install # data. 
if upgrade['server_error']: upgrade['upgrade_succeeded'] = True upgrade['use_impact_data'] = True if upgrade['use_impact_data']: if upgrade['upgrade_succeeded']: upgrade = impact_data upgrade['upgrade_succeeded'] = True else: upgrade = impact_data upgrade['upgrade_succeeded'] = False if not upgrade['upgrade_succeeded']: upgrade['error'] = True return upgrade def main(): argument_spec = dict( system_image_file=dict(required=True), kickstart_image_file=dict(required=False), issu=dict(choices=['required', 'desired', 'no', 'yes'], default='no'), ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) # Get system_image_file(sif), kickstart_image_file(kif) and # issu settings from module params. sif = module.params['system_image_file'] kif = module.params['kickstart_image_file'] issu = module.params['issu'] if re.search(r'(yes|required)', issu): issu = 'yes' if kif == 'null' or kif == '': kif = None install_result = do_install_all(module, issu, sif, kick=kif) if install_result['error']: cmd = install_result['upgrade_cmd'] msg = 'Failed to upgrade device using command: %s' % cmd module.fail_json(msg=msg, raw_data=install_result['list_data']) state = install_result['processed'] changed = install_result['upgrade_needed'] module.exit_json(changed=changed, install_state=state, warnings=warnings) if __name__ == '__main__': main()
gpl-3.0
wilsonkichoi/zipline
zipline/finance/trading.py
3
18887
# # Copyright 2015 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bisect import logbook import datetime import pandas as pd import numpy as np from six import string_types from sqlalchemy import create_engine from zipline.assets import AssetDBWriter, AssetFinder from zipline.data.loader import load_market_data from zipline.utils import tradingcalendar from zipline.errors import ( NoFurtherDataError ) from zipline.utils.memoize import remember_last, lazyval log = logbook.Logger('Trading') class TradingEnvironment(object): """ The financial simulations in zipline depend on information about the benchmark index and the risk free rates of return. The benchmark index defines the benchmark returns used in the calculation of performance metrics such as alpha/beta. Many components, including risk, performance, transforms, and batch_transforms, need access to a calendar of trading days and market hours. The TradingEnvironment maintains two time keeping facilities: - a DatetimeIndex of trading days for calendar calculations - a timezone name, which should be local to the exchange hosting the benchmark index. All dates are normalized to UTC for serialization and storage, and the timezone is used to ensure proper rollover through daylight savings and so on. User code will not normally need to use TradingEnvironment directly. 
If you are extending zipline's core financial components and need to use the environment, you must import the module and build a new TradingEnvironment object, then pass that TradingEnvironment as the 'env' arg to your TradingAlgorithm. Parameters ---------- load : callable, optional The function that returns benchmark returns and treasury curves. The treasury curves are expected to be a DataFrame with an index of dates and columns of the curve names, e.g. '10year', '1month', etc. bm_symbol : str, optional The benchmark symbol exchange_tz : tz-coercable, optional The timezone of the exchange. min_date : datetime, optional The oldest date that we know about in this environment. max_date : datetime, optional The most recent date that we know about in this environment. env_trading_calendar : pd.DatetimeIndex, optional The calendar of datetimes that define our market hours. asset_db_path : str or sa.engine.Engine, optional The path to the assets db or sqlalchemy Engine object to use to construct an AssetFinder. 
""" # Token used as a substitute for pickling objects that contain a # reference to a TradingEnvironment PERSISTENT_TOKEN = "<TradingEnvironment>" def __init__(self, load=None, bm_symbol='^GSPC', exchange_tz="US/Eastern", min_date=None, max_date=None, env_trading_calendar=tradingcalendar, asset_db_path=':memory:'): self.trading_day = env_trading_calendar.trading_day.copy() # `tc_td` is short for "trading calendar trading days" tc_td = env_trading_calendar.trading_days self.trading_days = tc_td[tc_td.slice_indexer(min_date, max_date)] self.first_trading_day = self.trading_days[0] self.last_trading_day = self.trading_days[-1] self.early_closes = env_trading_calendar.get_early_closes( self.first_trading_day, self.last_trading_day) self.open_and_closes = env_trading_calendar.open_and_closes.loc[ self.trading_days] self.bm_symbol = bm_symbol if not load: load = load_market_data self.benchmark_returns, self.treasury_curves = \ load(self.trading_day, self.trading_days, self.bm_symbol) if max_date: tr_c = self.treasury_curves # Mask the treasury curves down to the current date. # In the case of live trading, the last date in the treasury # curves would be the day before the date considered to be # 'today'. self.treasury_curves = tr_c[tr_c.index <= max_date] self.exchange_tz = exchange_tz if isinstance(asset_db_path, string_types): asset_db_path = 'sqlite:///%s' % asset_db_path self.engine = engine = create_engine(asset_db_path) else: self.engine = engine = asset_db_path if engine is not None: AssetDBWriter(engine).init_db() self.asset_finder = AssetFinder(engine) else: self.asset_finder = None @lazyval def market_minutes(self): return self.minutes_for_days_in_range(self.first_trading_day, self.last_trading_day) def write_data(self, **kwargs): """Write data into the asset_db. 
Parameters ---------- **kwargs Forwarded to AssetDBWriter.write """ AssetDBWriter(self.engine).write(**kwargs) def normalize_date(self, test_date): test_date = pd.Timestamp(test_date, tz='UTC') return pd.tseries.tools.normalize_date(test_date) def utc_dt_in_exchange(self, dt): return pd.Timestamp(dt).tz_convert(self.exchange_tz) def exchange_dt_in_utc(self, dt): return pd.Timestamp(dt, tz=self.exchange_tz).tz_convert('UTC') def is_market_hours(self, test_date): if not self.is_trading_day(test_date): return False mkt_open, mkt_close = self.get_open_and_close(test_date) return test_date >= mkt_open and test_date <= mkt_close def is_trading_day(self, test_date): dt = self.normalize_date(test_date) return (dt in self.trading_days) def next_trading_day(self, test_date): dt = self.normalize_date(test_date) delta = datetime.timedelta(days=1) while dt <= self.last_trading_day: dt += delta if dt in self.trading_days: return dt return None def previous_trading_day(self, test_date): dt = self.normalize_date(test_date) delta = datetime.timedelta(days=-1) while self.first_trading_day < dt: dt += delta if dt in self.trading_days: return dt return None def add_trading_days(self, n, date): """ Adds n trading days to date. If this would fall outside of the trading calendar, a NoFurtherDataError is raised. :Arguments: n : int The number of days to add to date, this can be positive or negative. date : datetime The date to add to. :Returns: new_date : datetime n trading days added to date. 
""" if n == 1: return self.next_trading_day(date) if n == -1: return self.previous_trading_day(date) idx = self.get_index(date) + n if idx < 0 or idx >= len(self.trading_days): raise NoFurtherDataError( msg='Cannot add %d days to %s' % (n, date) ) return self.trading_days[idx] def days_in_range(self, start, end): start_date = self.normalize_date(start) end_date = self.normalize_date(end) mask = ((self.trading_days >= start_date) & (self.trading_days <= end_date)) return self.trading_days[mask] def opens_in_range(self, start, end): return self.open_and_closes.market_open.loc[start:end] def closes_in_range(self, start, end): return self.open_and_closes.market_close.loc[start:end] def minutes_for_days_in_range(self, start, end): """ Get all market minutes for the days between start and end, inclusive. """ start_date = self.normalize_date(start) end_date = self.normalize_date(end) o_and_c = self.open_and_closes[ self.open_and_closes.index.slice_indexer(start_date, end_date)] opens = o_and_c.market_open closes = o_and_c.market_close one_min = pd.Timedelta(1, unit='m') all_minutes = [] for i in range(0, len(o_and_c.index)): market_open = opens[i] market_close = closes[i] day_minutes = np.arange(market_open, market_close + one_min, dtype='datetime64[m]') all_minutes.append(day_minutes) # Concatenate all minutes and truncate minutes before start/after end. return pd.DatetimeIndex( np.concatenate(all_minutes), copy=False, tz='UTC', ) def next_open_and_close(self, start_date): """ Given the start_date, returns the next open and close of the market. """ next_open = self.next_trading_day(start_date) if next_open is None: raise NoFurtherDataError( msg=("Attempt to backtest beyond available history. " "Last known date: %s" % self.last_trading_day) ) return self.get_open_and_close(next_open) def previous_open_and_close(self, start_date): """ Given the start_date, returns the previous open and close of the market. 
""" previous = self.previous_trading_day(start_date) if previous is None: raise NoFurtherDataError( msg=("Attempt to backtest beyond available history. " "First known date: %s" % self.first_trading_day) ) return self.get_open_and_close(previous) def next_market_minute(self, start): """ Get the next market minute after @start. This is either the immediate next minute, the open of the same day if @start is before the market open on a trading day, or the open of the next market day after @start. """ if self.is_trading_day(start): market_open, market_close = self.get_open_and_close(start) # If start before market open on a trading day, return market open. if start < market_open: return market_open # If start is during trading hours, then get the next minute. elif start < market_close: return start + datetime.timedelta(minutes=1) # If start is not in a trading day, or is after the market close # then return the open of the *next* trading day. return self.next_open_and_close(start)[0] @remember_last def previous_market_minute(self, start): """ Get the next market minute before @start. This is either the immediate previous minute, the close of the same day if @start is after the close on a trading day, or the close of the market day before @start. """ if self.is_trading_day(start): market_open, market_close = self.get_open_and_close(start) # If start after the market close, return market close. if start > market_close: return market_close # If start is during trading hours, then get previous minute. if start > market_open: return start - datetime.timedelta(minutes=1) # If start is not a trading day, or is before the market open # then return the close of the *previous* trading day. 
return self.previous_open_and_close(start)[1] def get_open_and_close(self, day): index = self.open_and_closes.index.get_loc(day.date()) todays_minutes = self.open_and_closes.iloc[index] return todays_minutes[0], todays_minutes[1] def market_minutes_for_day(self, stamp): market_open, market_close = self.get_open_and_close(stamp) return pd.date_range(market_open, market_close, freq='T') def open_close_window(self, start, count, offset=0, step=1): """ Return a DataFrame containing `count` market opens and closes, beginning with `start` + `offset` days and continuing `step` minutes at a time. """ # TODO: Correctly handle end of data. start_idx = self.get_index(start) + offset stop_idx = start_idx + (count * step) index = np.arange(start_idx, stop_idx, step) return self.open_and_closes.iloc[index] def market_minute_window(self, start, count, step=1): """ Return a DatetimeIndex containing `count` market minutes, starting with `start` and continuing `step` minutes at a time. """ if not self.is_market_hours(start): raise ValueError("market_minute_window starting at " "non-market time {minute}".format(minute=start)) all_minutes = [] current_day_minutes = self.market_minutes_for_day(start) first_minute_idx = current_day_minutes.searchsorted(start) minutes_in_range = current_day_minutes[first_minute_idx::step] # Build up list of lists of days' market minutes until we have count # minutes stored altogether. while True: if len(minutes_in_range) >= count: # Truncate off extra minutes minutes_in_range = minutes_in_range[:count] all_minutes.append(minutes_in_range) count -= len(minutes_in_range) if count <= 0: break if step > 0: start, _ = self.next_open_and_close(start) current_day_minutes = self.market_minutes_for_day(start) else: _, start = self.previous_open_and_close(start) current_day_minutes = self.market_minutes_for_day(start) minutes_in_range = current_day_minutes[::step] # Concatenate all the accumulated minutes. 
return pd.DatetimeIndex( np.concatenate(all_minutes), copy=False, tz='UTC', ) def trading_day_distance(self, first_date, second_date): first_date = self.normalize_date(first_date) second_date = self.normalize_date(second_date) # TODO: May be able to replace the following with searchsorted. # Find leftmost item greater than or equal to day i = bisect.bisect_left(self.trading_days, first_date) if i == len(self.trading_days): # nothing found return None j = bisect.bisect_left(self.trading_days, second_date) if j == len(self.trading_days): return None return j - i def get_index(self, dt): """ Return the index of the given @dt, or the index of the preceding trading day if the given dt is not in the trading calendar. """ ndt = self.normalize_date(dt) if ndt in self.trading_days: return self.trading_days.searchsorted(ndt) else: return self.trading_days.searchsorted(ndt) - 1 class SimulationParameters(object): def __init__(self, period_start, period_end, capital_base=10e3, emission_rate='daily', data_frequency='daily', env=None, arena='backtest'): self.period_start = period_start self.period_end = period_end self.capital_base = capital_base self.emission_rate = emission_rate self.data_frequency = data_frequency # copied to algorithm's environment for runtime access self.arena = arena if env is not None: self.update_internal_from_env(env=env) def update_internal_from_env(self, env): assert self.period_start <= self.period_end, \ "Period start falls after period end." assert self.period_start <= env.last_trading_day, \ "Period start falls after the last known trading day." assert self.period_end >= env.first_trading_day, \ "Period end falls before the first known trading day." self.first_open = self._calculate_first_open(env) self.last_close = self._calculate_last_close(env) start_index = env.get_index(self.first_open) end_index = env.get_index(self.last_close) # take an inclusive slice of the environment's # trading_days. 
self.trading_days = env.trading_days[start_index:end_index + 1] def _calculate_first_open(self, env): """ Finds the first trading day on or after self.period_start. """ first_open = self.period_start one_day = datetime.timedelta(days=1) while not env.is_trading_day(first_open): first_open = first_open + one_day mkt_open, _ = env.get_open_and_close(first_open) return mkt_open def _calculate_last_close(self, env): """ Finds the last trading day on or before self.period_end """ last_close = self.period_end one_day = datetime.timedelta(days=1) while not env.is_trading_day(last_close): last_close = last_close - one_day _, mkt_close = env.get_open_and_close(last_close) return mkt_close @property def days_in_period(self): """return the number of trading days within the period [start, end)""" return len(self.trading_days) def __repr__(self): return """ {class_name}( period_start={period_start}, period_end={period_end}, capital_base={capital_base}, data_frequency={data_frequency}, emission_rate={emission_rate}, first_open={first_open}, last_close={last_close})\ """.format(class_name=self.__class__.__name__, period_start=self.period_start, period_end=self.period_end, capital_base=self.capital_base, data_frequency=self.data_frequency, emission_rate=self.emission_rate, first_open=self.first_open, last_close=self.last_close) def noop_load(*args, **kwargs): """ A method that can be substituted in as the load method in a TradingEnvironment to prevent it from loading benchmarks. Accepts any arguments, but returns only a tuple of Nones regardless of input. """ return None, None
apache-2.0
HolgerPeters/setuptools_scm
setuptools_scm/utils.py
6
2043
""" utils """ from __future__ import print_function, unicode_literals import sys import shlex import subprocess import os import io import platform DEBUG = bool(os.environ.get("SETUPTOOLS_SCM_DEBUG")) def trace(*k): if DEBUG: print(*k) sys.stdout.flush() def ensure_stripped_str(str_or_bytes): if isinstance(str_or_bytes, str): return str_or_bytes.strip() else: return str_or_bytes.decode('utf-8', 'surogate_escape').strip() def _always_strings(env_dict): """ On Windows and Python 2, environment dictionaries must be strings and not unicode. """ is_windows = platform.system == 'Windows' PY2 = sys.version_info < (3,) if is_windows or PY2: env_dict.update( (key, str(value)) for (key, value) in env_dict.items() ) return env_dict def do_ex(cmd, cwd='.'): trace('cmd', repr(cmd)) p = subprocess.Popen( shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=_always_strings(dict( os.environ, # disable hgrc processing other than .hg/hgrc HGRCPATH='', # try to disable i18n LC_ALL='C', LANGUAGE='', HGPLAIN='1', )) ) out, err = p.communicate() if out: trace('out', repr(out)) if err: trace('err', repr(err)) if p.returncode: trace('ret', p.returncode) return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode def do(cmd, cwd='.'): out, err, ret = do_ex(cmd, cwd) if ret: trace('ret', ret) print(err) return out def data_from_mime(path): with io.open(path, encoding='utf-8') as fp: content = fp.read() trace('content', repr(content)) # the complex conditions come from reading pseudo-mime-messages data = dict( x.split(': ', 1) for x in content.splitlines() if ': ' in x) trace('data', data) return data
mit
dimara/ganeti
qa/qa_iptables.py
9
3651
#!/usr/bin/python -u # # Copyright (C) 2013 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Manipulates nodes using `iptables` to simulate non-standard network conditions. """ import uuid from qa_utils import AssertCommand # String used as a comment for produced `iptables` results IPTABLES_COMMENT_MARKER = "ganeti_qa_script" class RulesContext(object): def __init__(self): self._nodes = set() def __enter__(self): self.marker = IPTABLES_COMMENT_MARKER + "_" + str(uuid.uuid4()) return Rules(self) def __exit__(self, ext_type, exc_val, exc_tb): CleanRules(self._nodes, self.marker) def AddNode(self, node): self._nodes.add(node) class Rules(object): """Allows to introduce iptable rules and dispose them at the end of a block. 
Don't instantiate this class directly. Use `with RulesContext() as r` instead. """ def __init__(self, ctx=None): self._ctx = ctx if self._ctx is not None: self.marker = self._ctx.marker else: self.marker = IPTABLES_COMMENT_MARKER def AddNode(self, node): if self._ctx is not None: self._ctx.AddNode(node) def AppendRule(self, node, chain, rule, table="filter"): """Appends an `iptables` rule to a given node """ AssertCommand(["iptables", "-t", table, "-A", chain] + rule + ["-m", "comment", "--comment", self.marker], node=node) self.AddNode(node) def RedirectPort(self, node, host, port, new_port): """Adds a rule to a master node that makes a destination host+port visible under a different port number. """ self.AppendRule(node, "OUTPUT", ["--protocol", "tcp", "--destination", host, "--dport", str(port), "--jump", "DNAT", "--to-destination", ":" + str(new_port)], table="nat") GLOBAL_RULES = Rules() def CleanRules(nodes, marker=IPTABLES_COMMENT_MARKER): """Removes all QA `iptables` rules matching a given marker from a given node. If no marker is given, the global default is used, which clean all custom markers. """ if not hasattr(nodes, '__iter__'): nodes = [nodes] for node in nodes: AssertCommand(("iptables-save | grep -v '%s' | iptables-restore" % (marker, )), node=node)
bsd-2-clause
BlogomaticProject/Blogomatic
opt/blog-o-matic/usr/lib/python/Bio/SeqRecord.py
2
39372
# Copyright 2000-2002 Andrew Dalke. # Copyright 2002-2004 Brad Chapman. # Copyright 2006-2009 by Peter Cock. # All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Represent a Sequence Record, a sequence with annotation.""" __docformat__ = "epytext en" #Simple markup to show doctests nicely # NEEDS TO BE SYNCH WITH THE REST OF BIOPYTHON AND BIOPERL # In particular, the SeqRecord and BioSQL.BioSeq.DBSeqRecord classes # need to be in sync (this is the BioSQL "Database SeqRecord", see # also BioSQL.BioSeq.DBSeq which is the "Database Seq" class) class _RestrictedDict(dict): """Dict which only allows sequences of given length as values (PRIVATE). This simple subclass of the Python dictionary is used in the SeqRecord object for holding per-letter-annotations. This class is intended to prevent simple errors by only allowing python sequences (e.g. lists, strings and tuples) to be stored, and only if their length matches that expected (the length of the SeqRecord's seq object). It cannot however prevent the entries being edited in situ (for example appending entries to a list). """ def __init__(self, length): """Create an EMPTY restricted dictionary.""" dict.__init__(self) self._length = int(length) def __setitem__(self, key, value): if not hasattr(value,"__len__") or not hasattr(value,"__getitem__") \ or len(value) != self._length: raise TypeError("We only allow python sequences (lists, tuples or " "strings) of length %i." % self._length) dict.__setitem__(self, key, value) def update(self, new_dict): #Force this to go via our strict __setitem__ method for (key, value) in new_dict.iteritems(): self[key] = value class SeqRecord(object): """A SeqRecord object holds a sequence and information about it. 
Main attributes: - id - Identifier such as a locus tag (string) - seq - The sequence itself (Seq object or similar) Additional attributes: - name - Sequence name, e.g. gene name (string) - description - Additional text (string) - dbxrefs - List of database cross references (list of strings) - features - Any (sub)features defined (list of SeqFeature objects) - annotations - Further information about the whole sequence (dictionary) Most entries are strings, or lists of strings. - letter_annotations - Per letter/symbol annotation (restricted dictionary). This holds Python sequences (lists, strings or tuples) whose length matches that of the sequence. A typical use would be to hold a list of integers representing sequencing quality scores, or a string representing the secondary structure. You will typically use Bio.SeqIO to read in sequences from files as SeqRecord objects. However, you may want to create your own SeqRecord objects directly (see the __init__ method for further details): >>> from Bio.Seq import Seq >>> from Bio.SeqRecord import SeqRecord >>> from Bio.Alphabet import IUPAC >>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF", ... IUPAC.protein), ... id="YP_025292.1", name="HokC", ... description="toxic membrane protein") >>> print record ID: YP_025292.1 Name: HokC Description: toxic membrane protein Number of features: 0 Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein()) If you want to save SeqRecord objects to a sequence file, use Bio.SeqIO for this. 
For the special case where you want the SeqRecord turned into a string in a particular file format there is a format method which uses Bio.SeqIO internally: >>> print record.format("fasta") >YP_025292.1 toxic membrane protein MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF <BLANKLINE> You can also do things like slicing a SeqRecord, checking its length, etc >>> len(record) 44 >>> edited = record[:10] + record[11:] >>> print edited.seq MKQHKAMIVAIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF >>> print record.seq MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF """ def __init__(self, seq, id = "<unknown id>", name = "<unknown name>", description = "<unknown description>", dbxrefs = None, features = None, annotations = None, letter_annotations = None): """Create a SeqRecord. Arguments: - seq - Sequence, required (Seq, MutableSeq or UnknownSeq) - id - Sequence identifier, recommended (string) - name - Sequence name, optional (string) - description - Sequence description, optional (string) - dbxrefs - Database cross references, optional (list of strings) - features - Any (sub)features, optional (list of SeqFeature objects) - annotations - Dictionary of annotations for the whole sequence - letter_annotations - Dictionary of per-letter-annotations, values should be strings, list or tuples of the same length as the full sequence. You will typically use Bio.SeqIO to read in sequences from files as SeqRecord objects. However, you may want to create your own SeqRecord objects directly. Note that while an id is optional, we strongly recommend you supply a unique id string for each record. This is especially important if you wish to write your sequences to a file. If you don't have the actual sequence, but you do know its length, then using the UnknownSeq object from Bio.Seq is appropriate. You can create a 'blank' SeqRecord object, and then populate the attributes later. """ if id is not None and not isinstance(id, basestring): #Lots of existing code uses id=None... this may be a bad idea. 
raise TypeError("id argument should be a string") if not isinstance(name, basestring): raise TypeError("name argument should be a string") if not isinstance(description, basestring): raise TypeError("description argument should be a string") self._seq = seq self.id = id self.name = name self.description = description # database cross references (for the whole sequence) if dbxrefs is None: dbxrefs = [] elif not isinstance(dbxrefs, list): raise TypeError("dbxrefs argument should be a list (of strings)") self.dbxrefs = dbxrefs # annotations about the whole sequence if annotations is None: annotations = {} elif not isinstance(annotations, dict): raise TypeError("annotations argument should be a dict") self.annotations = annotations if letter_annotations is None: # annotations about each letter in the sequence if seq is None: #Should we allow this and use a normal unrestricted dict? self._per_letter_annotations = _RestrictedDict(length=0) else: try: self._per_letter_annotations = \ _RestrictedDict(length=len(seq)) except: raise TypeError("seq argument should be a Seq object or similar") else: #This will be handled via the property set function, which will #turn this into a _RestrictedDict and thus ensure all the values #in the dict are the right length self.letter_annotations = letter_annotations # annotations about parts of the sequence if features is None: features = [] elif not isinstance(features, list): raise TypeError("features argument should be a list (of SeqFeature objects)") self.features = features #TODO - Just make this a read only property? def _set_per_letter_annotations(self, value): if not isinstance(value, dict): raise TypeError("The per-letter-annotations should be a " "(restricted) dictionary.") #Turn this into a restricted-dictionary (and check the entries) try: self._per_letter_annotations = _RestrictedDict(length=len(self.seq)) except AttributeError: #e.g. 
seq is None self._per_letter_annotations = _RestrictedDict(length=0) self._per_letter_annotations.update(value) letter_annotations = property( \ fget=lambda self : self._per_letter_annotations, fset=_set_per_letter_annotations, doc="""Dictionary of per-letter-annotation for the sequence. For example, this can hold quality scores used in FASTQ or QUAL files. Consider this example using Bio.SeqIO to read in an example Solexa variant FASTQ file as a SeqRecord: >>> from Bio import SeqIO >>> handle = open("Quality/solexa_faked.fastq", "rU") >>> record = SeqIO.read(handle, "fastq-solexa") >>> handle.close() >>> print record.id, record.seq slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN >>> print record.letter_annotations.keys() ['solexa_quality'] >>> print record.letter_annotations["solexa_quality"] [40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5] The letter_annotations get sliced automatically if you slice the parent SeqRecord, for example taking the last ten bases: >>> sub_record = record[-10:] >>> print sub_record.id, sub_record.seq slxa_0001_1_0001_01 ACGTNNNNNN >>> print sub_record.letter_annotations["solexa_quality"] [4, 3, 2, 1, 0, -1, -2, -3, -4, -5] Any python sequence (i.e. list, tuple or string) can be recorded in the SeqRecord's letter_annotations dictionary as long as the length matches that of the SeqRecord's sequence. e.g. 
>>> len(sub_record.letter_annotations) 1 >>> sub_record.letter_annotations["dummy"] = "abcdefghij" >>> len(sub_record.letter_annotations) 2 You can delete entries from the letter_annotations dictionary as usual: >>> del sub_record.letter_annotations["solexa_quality"] >>> sub_record.letter_annotations {'dummy': 'abcdefghij'} You can completely clear the dictionary easily as follows: >>> sub_record.letter_annotations = {} >>> sub_record.letter_annotations {} """) def _set_seq(self, value): #TODO - Add a deprecation warning that the seq should be write only? if self._per_letter_annotations: #TODO - Make this a warning? Silently empty the dictionary? raise ValueError("You must empty the letter annotations first!") self._seq = value try: self._per_letter_annotations = _RestrictedDict(length=len(self.seq)) except AttributeError: #e.g. seq is None self._per_letter_annotations = _RestrictedDict(length=0) seq = property(fget=lambda self : self._seq, fset=_set_seq, doc="The sequence itself, as a Seq or MutableSeq object.") def __getitem__(self, index): """Returns a sub-sequence or an individual letter. Slicing, e.g. my_record[5:10], returns a new SeqRecord for that sub-sequence with approriate annotation preserved. The name, id and description are kept. Any per-letter-annotations are sliced to match the requested sub-sequence. Unless a stride is used, all those features which fall fully within the subsequence are included (with their locations adjusted accordingly). However, the annotations dictionary and the dbxrefs list are not used for the new SeqRecord, as in general they may not apply to the subsequence. If you want to preserve them, you must explictly copy them to the new SeqRecord yourself. Using an integer index, e.g. my_record[5] is shorthand for extracting that letter from the sequence, my_record.seq[5]. For example, consider this short protein and its secondary structure as encoded by the PDB (e.g. 
H for alpha helices), plus a simple feature for its histidine self phosphorylation site: >>> from Bio.Seq import Seq >>> from Bio.SeqRecord import SeqRecord >>> from Bio.SeqFeature import SeqFeature, FeatureLocation >>> from Bio.Alphabet import IUPAC >>> rec = SeqRecord(Seq("MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLAT" ... "EMMSEQDGYLAESINKDIEECNAIIEQFIDYLR", ... IUPAC.protein), ... id="1JOY", name="EnvZ", ... description="Homodimeric domain of EnvZ from E. coli") >>> rec.letter_annotations["secondary_structure"] = " S SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT " >>> rec.features.append(SeqFeature(FeatureLocation(20,21), ... type = "Site")) Now let's have a quick look at the full record, >>> print rec ID: 1JOY Name: EnvZ Description: Homodimeric domain of EnvZ from E. coli Number of features: 1 Per letter annotation for: secondary_structure Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR', IUPACProtein()) >>> print rec.letter_annotations["secondary_structure"] S SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT >>> print rec.features[0].location [20:21] Now let's take a sub sequence, here chosen as the first (fractured) alpha helix which includes the histidine phosphorylation site: >>> sub = rec[11:41] >>> print sub ID: 1JOY Name: EnvZ Description: Homodimeric domain of EnvZ from E. coli Number of features: 1 Per letter annotation for: secondary_structure Seq('RTLLMAGVSHDLRTPLTRIRLATEMMSEQD', IUPACProtein()) >>> print sub.letter_annotations["secondary_structure"] HHHHHTTTHHHHHHHHHHHHHHHHHHHHHH >>> print sub.features[0].location [9:10] You can also of course omit the start or end values, for example to get the first ten letters only: >>> print rec[:10] ID: 1JOY Name: EnvZ Description: Homodimeric domain of EnvZ from E. 
coli Number of features: 0 Per letter annotation for: secondary_structure Seq('MAAGVKQLAD', IUPACProtein()) Or for the last ten letters: >>> print rec[-10:] ID: 1JOY Name: EnvZ Description: Homodimeric domain of EnvZ from E. coli Number of features: 0 Per letter annotation for: secondary_structure Seq('IIEQFIDYLR', IUPACProtein()) If you omit both, then you get a copy of the original record (although lacking the annotations and dbxrefs): >>> print rec[:] ID: 1JOY Name: EnvZ Description: Homodimeric domain of EnvZ from E. coli Number of features: 1 Per letter annotation for: secondary_structure Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR', IUPACProtein()) Finally, indexing with a simple integer is shorthand for pulling out that letter from the sequence directly: >>> rec[5] 'K' >>> rec.seq[5] 'K' """ if isinstance(index, int): #NOTE - The sequence level annotation like the id, name, etc #do not really apply to a single character. However, should #we try and expose any per-letter-annotation here? If so how? return self.seq[index] elif isinstance(index, slice): if self.seq is None: raise ValueError("If the sequence is None, we cannot slice it.") parent_length = len(self) answer = self.__class__(self.seq[index], id=self.id, name=self.name, description=self.description) #TODO - The desription may no longer apply. #It would be safer to change it to something #generic like "edited" or the default value. #Don't copy the annotation dict and dbxefs list, #they may not apply to a subsequence. #answer.annotations = dict(self.annotations.iteritems()) #answer.dbxrefs = self.dbxrefs[:] #TODO - Review this in light of adding SeqRecord objects? #TODO - Cope with strides by generating ambiguous locations? 
if index.step is None or index.step == 1: #Select relevant features, add them with shifted locations if index.start is None: start = 0 else: start = index.start if index.stop is None: stop = -1 else: stop = index.stop if (start < 0 or stop < 0) and parent_length == 0: raise ValueError, \ "Cannot support negative indices without the sequence length" if start < 0: start = parent_length + start if stop < 0: stop = parent_length + stop + 1 #assert str(self.seq)[index] == str(self.seq)[start:stop] for f in self.features: if f.ref or f.ref_db: #TODO - Implement this (with lots of tests)? import warnings warnings.warn("When slicing SeqRecord objects, any " "SeqFeature referencing other sequences (e.g. " "from segmented GenBank records) is ignored.") continue if start <= f.location.nofuzzy_start \ and f.location.nofuzzy_end <= stop: answer.features.append(f._shift(-start)) #Slice all the values to match the sliced sequence #(this should also work with strides, even negative strides): for key, value in self.letter_annotations.iteritems(): answer._per_letter_annotations[key] = value[index] return answer raise ValueError, "Invalid index" def __iter__(self): """Iterate over the letters in the sequence. For example, using Bio.SeqIO to read in a protein FASTA file: >>> from Bio import SeqIO >>> record = SeqIO.read(open("Fasta/loveliesbleeding.pro"),"fasta") >>> for amino in record: ... print amino ... if amino == "L" : break X A G L >>> print record.seq[3] L This is just a shortcut for iterating over the sequence directly: >>> for amino in record.seq: ... print amino ... if amino == "L" : break X A G L >>> print record.seq[3] L Note that this does not facilitate iteration together with any per-letter-annotation. However, you can achieve that using the python zip function on the record (or its sequence) and the relevant per-letter-annotation: >>> from Bio import SeqIO >>> rec = SeqIO.read(open("Quality/solexa_faked.fastq", "rU"), ... 
"fastq-solexa") >>> print rec.id, rec.seq slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN >>> print rec.letter_annotations.keys() ['solexa_quality'] >>> for nuc, qual in zip(rec,rec.letter_annotations["solexa_quality"]): ... if qual > 35: ... print nuc, qual A 40 C 39 G 38 T 37 A 36 You may agree that using zip(rec.seq, ...) is more explicit than using zip(rec, ...) as shown above. """ return iter(self.seq) def __contains__(self, char): """Implements the 'in' keyword, searches the sequence. e.g. >>> from Bio import SeqIO >>> record = SeqIO.read(open("Fasta/sweetpea.nu"), "fasta") >>> "GAATTC" in record False >>> "AAA" in record True This essentially acts as a proxy for using "in" on the sequence: >>> "GAATTC" in record.seq False >>> "AAA" in record.seq True Note that you can also use Seq objects as the query, >>> from Bio.Seq import Seq >>> from Bio.Alphabet import generic_dna >>> Seq("AAA") in record True >>> Seq("AAA", generic_dna) in record True See also the Seq object's __contains__ method. """ return char in self.seq def __str__(self): """A human readable summary of the record and its annotation (string). The python built in function str works by calling the object's ___str__ method. e.g. >>> from Bio.Seq import Seq >>> from Bio.SeqRecord import SeqRecord >>> from Bio.Alphabet import IUPAC >>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF", ... IUPAC.protein), ... id="YP_025292.1", name="HokC", ... 
description="toxic membrane protein, small") >>> print str(record) ID: YP_025292.1 Name: HokC Description: toxic membrane protein, small Number of features: 0 Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein()) In this example you don't actually need to call str explicity, as the print command does this automatically: >>> print record ID: YP_025292.1 Name: HokC Description: toxic membrane protein, small Number of features: 0 Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein()) Note that long sequences are shown truncated. """ lines = [] if self.id : lines.append("ID: %s" % self.id) if self.name : lines.append("Name: %s" % self.name) if self.description : lines.append("Description: %s" % self.description) if self.dbxrefs : lines.append("Database cross-references: " \ + ", ".join(self.dbxrefs)) lines.append("Number of features: %i" % len(self.features)) for a in self.annotations: lines.append("/%s=%s" % (a, str(self.annotations[a]))) if self.letter_annotations: lines.append("Per letter annotation for: " \ + ", ".join(self.letter_annotations.keys())) #Don't want to include the entire sequence, #and showing the alphabet is useful: lines.append(repr(self.seq)) return "\n".join(lines) def __repr__(self): """A concise summary of the record for debugging (string). The python built in function repr works by calling the object's ___repr__ method. e.g. >>> from Bio.Seq import Seq >>> from Bio.SeqRecord import SeqRecord >>> from Bio.Alphabet import generic_protein >>> rec = SeqRecord(Seq("MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKAT" ... +"GEMKEQTEWHRVVLFGKLAEVASEYLRKGSQVYIEGQLRTRKWTDQ" ... +"SGQDRYTTEVVVNVGGTMQMLGGRQGGGAPAGGNIGGGQPQGGWGQ" ... +"PQQPQGGNQFSGGAQSRPQQSAPAAPSNEPPMDFDDDIPF", ... generic_protein), ... id="NP_418483.1", name="b4059", ... description="ssDNA-binding protein", ... 
dbxrefs=["ASAP:13298", "GI:16131885", "GeneID:948570"]) >>> print repr(rec) SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF', ProteinAlphabet()), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570']) At the python prompt you can also use this shorthand: >>> rec SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF', ProteinAlphabet()), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570']) Note that long sequences are shown truncated. Also note that any annotations, letter_annotations and features are not shown (as they would lead to a very long string). """ return self.__class__.__name__ \ + "(seq=%s, id=%s, name=%s, description=%s, dbxrefs=%s)" \ % tuple(map(repr, (self.seq, self.id, self.name, self.description, self.dbxrefs))) def format(self, format): r"""Returns the record as a string in the specified file format. The format should be a lower case string supported as an output format by Bio.SeqIO, which is used to turn the SeqRecord into a string. e.g. >>> from Bio.Seq import Seq >>> from Bio.SeqRecord import SeqRecord >>> from Bio.Alphabet import IUPAC >>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF", ... IUPAC.protein), ... id="YP_025292.1", name="HokC", ... description="toxic membrane protein") >>> record.format("fasta") '>YP_025292.1 toxic membrane protein\nMKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF\n' >>> print record.format("fasta") >YP_025292.1 toxic membrane protein MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF <BLANKLINE> The python print command automatically appends a new line, meaning in this example a blank line is shown. If you look at the string representation you can see there is a trailing new line (shown as slash n) which is important when writing to a file or if concatenating mutliple sequence strings together. 
Note that this method will NOT work on every possible file format supported by Bio.SeqIO (e.g. some are for multiple sequences only). """ #See also the __format__ added for Python 2.6 / 3.0, PEP 3101 #See also the Bio.Align.Generic.Alignment class and its format() return self.__format__(format) def __format__(self, format_spec): """Returns the record as a string in the specified file format. This method supports the python format() function added in Python 2.6/3.0. The format_spec should be a lower case string supported by Bio.SeqIO as an output file format. See also the SeqRecord's format() method. """ if not format_spec: #Follow python convention and default to using __str__ return str(self) from Bio import SeqIO if format_spec in SeqIO._BinaryFormats: #Return bytes on Python 3 try: #This is in Python 2.6+, but we need it on Python 3 from io import BytesIO handle = BytesIO() except ImportError: #Must be on Python 2.5 or older from StringIO import StringIO handle = StringIO() else: from StringIO import StringIO handle = StringIO() SeqIO.write(self, handle, format_spec) return handle.getvalue() def __len__(self): """Returns the length of the sequence. For example, using Bio.SeqIO to read in a FASTA nucleotide file: >>> from Bio import SeqIO >>> record = SeqIO.read(open("Fasta/sweetpea.nu"),"fasta") >>> len(record) 309 >>> len(record.seq) 309 """ return len(self.seq) def __nonzero__(self): """Returns True regardless of the length of the sequence. This behaviour is for backwards compatibility, since until the __len__ method was added, a SeqRecord always evaluated as True. Note that in comparison, a Seq object will evaluate to False if it has a zero length sequence. WARNING: The SeqRecord may in future evaluate to False when its sequence is of zero length (in order to better match the Seq object behaviour)! """ return True def __add__(self, other): """Add another sequence or string to this sequence. 
The other sequence can be a SeqRecord object, a Seq object (or similar, e.g. a MutableSeq) or a plain Python string. If you add a plain string or a Seq (like) object, the new SeqRecord will simply have this appended to the existing data. However, any per letter annotation will be lost: >>> from Bio import SeqIO >>> handle = open("Quality/solexa_faked.fastq", "rU") >>> record = SeqIO.read(handle, "fastq-solexa") >>> handle.close() >>> print record.id, record.seq slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN >>> print record.letter_annotations.keys() ['solexa_quality'] >>> new = record + "ACT" >>> print new.id, new.seq slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNNACT >>> print new.letter_annotations.keys() [] The new record will attempt to combine the annotation, but for any ambiguities (e.g. different names) it defaults to omitting that annotation. >>> from Bio import SeqIO >>> handle = open("GenBank/pBAD30.gb") >>> plasmid = SeqIO.read(handle, "gb") >>> handle.close() >>> print plasmid.id, len(plasmid) pBAD30 4923 Now let's cut the plasmid into two pieces, and join them back up the other way round (i.e. 
shift the starting point on this plasmid, have a look at the annotated features in the original file to see why this particular split point might make sense): >>> left = plasmid[:3765] >>> right = plasmid[3765:] >>> new = right + left >>> print new.id, len(new) pBAD30 4923 >>> str(new.seq) == str(right.seq + left.seq) True >>> len(new.features) == len(left.features) + len(right.features) True When we add the left and right SeqRecord objects, their annotation is all consistent, so it is all conserved in the new SeqRecord: >>> new.id == left.id == right.id == plasmid.id True >>> new.name == left.name == right.name == plasmid.name True >>> new.description == plasmid.description True >>> new.annotations == left.annotations == right.annotations True >>> new.letter_annotations == plasmid.letter_annotations True >>> new.dbxrefs == left.dbxrefs == right.dbxrefs True However, we should point out that when we sliced the SeqRecord, any annotations dictionary or dbxrefs list entries were lost. You can explicitly copy them like this: >>> new.annotations = plasmid.annotations.copy() >>> new.dbxrefs = plasmid.dbxrefs[:] """ if not isinstance(other, SeqRecord): #Assume it is a string or a Seq. #Note can't transfer any per-letter-annotations return SeqRecord(self.seq + other, id = self.id, name = self.name, description = self.description, features = self.features[:], annotations = self.annotations.copy(), dbxrefs = self.dbxrefs[:]) #Adding two SeqRecord objects... must merge annotation. 
answer = SeqRecord(self.seq + other.seq, features = self.features[:], dbxrefs = self.dbxrefs[:]) #Will take all the features and all the db cross refs, l = len(self) for f in other.features: answer.features.append(f._shift(l)) del l for ref in other.dbxrefs: if ref not in answer.dbxrefs: answer.dbxrefs.append(ref) #Take common id/name/description/annotation if self.id == other.id: answer.id = self.id if self.name == other.name: answer.name = self.name if self.description == other.description: answer.description = self.description for k,v in self.annotations.iteritems(): if k in other.annotations and other.annotations[k] == v: answer.annotations[k] = v #Can append matching per-letter-annotation for k,v in self.letter_annotations.iteritems(): if k in other.letter_annotations: answer.letter_annotations[k] = v + other.letter_annotations[k] return answer def __radd__(self, other): """Add another sequence or string to this sequence (from the left). This method handles adding a Seq object (or similar, e.g. MutableSeq) or a plain Python string (on the left) to a SeqRecord (on the right). See the __add__ method for more details, but for example: >>> from Bio import SeqIO >>> handle = open("Quality/solexa_faked.fastq", "rU") >>> record = SeqIO.read(handle, "fastq-solexa") >>> handle.close() >>> print record.id, record.seq slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN >>> print record.letter_annotations.keys() ['solexa_quality'] >>> new = "ACT" + record >>> print new.id, new.seq slxa_0001_1_0001_01 ACTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN >>> print new.letter_annotations.keys() [] """ if isinstance(other, SeqRecord): raise RuntimeError("This should have happened via the __add__ of " "the other SeqRecord being added!") #Assume it is a string or a Seq. 
#Note can't transfer any per-letter-annotations offset = len(other) return SeqRecord(other + self.seq, id = self.id, name = self.name, description = self.description, features = [f._shift(offset) for f in self.features], annotations = self.annotations.copy(), dbxrefs = self.dbxrefs[:]) def upper(self): """Returns a copy of the record with an upper case sequence. All the annotation is preserved unchanged. e.g. >>> from Bio.Alphabet import generic_dna >>> from Bio.Seq import Seq >>> from Bio.SeqRecord import SeqRecord >>> record = SeqRecord(Seq("acgtACGT", generic_dna), id="Test", ... description = "Made up for this example") >>> record.letter_annotations["phred_quality"] = [1,2,3,4,5,6,7,8] >>> print record.upper().format("fastq") @Test Made up for this example ACGTACGT + "#$%&'() <BLANKLINE> Naturally, there is a matching lower method: >>> print record.lower().format("fastq") @Test Made up for this example acgtacgt + "#$%&'() <BLANKLINE> """ return SeqRecord(self.seq.upper(), id = self.id, name = self.name, description = self.description, dbxrefs = self.dbxrefs[:], features = self.features[:], annotations = self.annotations.copy(), letter_annotations=self.letter_annotations.copy()) def lower(self): """Returns a copy of the record with a lower case sequence. All the annotation is preserved unchanged. e.g. 
>>> from Bio import SeqIO >>> record = SeqIO.read("Fasta/aster.pro", "fasta") >>> print record.format("fasta") >gi|3298468|dbj|BAA31520.1| SAMIPF GGHVNPAVTFGAFVGGNITLLRGIVYIIAQLLGSTVACLLLKFVTNDMAVGVFSLSAGVG VTNALVFEIVMTFGLVYTVYATAIDPKKGSLGTIAPIAIGFIVGANI <BLANKLINE> >>> print record.lower().format("fasta") >gi|3298468|dbj|BAA31520.1| SAMIPF gghvnpavtfgafvggnitllrgivyiiaqllgstvaclllkfvtndmavgvfslsagvg vtnalvfeivmtfglvytvyataidpkkgslgtiapiaigfivgani <BLANKLINE> To take a more annotation rich example, >>> from Bio import SeqIO >>> old = SeqIO.read("EMBL/TRBG361.embl", "embl") >>> len(old.features) 3 >>> new = old.lower() >>> len(old.features) == len(new.features) True >>> old.annotations["organism"] == new.annotations["organism"] True >>> old.dbxrefs == new.dbxrefs True """ return SeqRecord(self.seq.lower(), id = self.id, name = self.name, description = self.description, dbxrefs = self.dbxrefs[:], features = self.features[:], annotations = self.annotations.copy(), letter_annotations=self.letter_annotations.copy()) def _test(): """Run the Bio.SeqRecord module's doctests (PRIVATE). This will try and locate the unit tests directory, and run the doctests from there in order that the relative paths used in the examples work. """ import doctest import os if os.path.isdir(os.path.join("..","Tests")): print "Runing doctests..." cur_dir = os.path.abspath(os.curdir) os.chdir(os.path.join("..","Tests")) doctest.testmod() os.chdir(cur_dir) del cur_dir print "Done" elif os.path.isdir(os.path.join("Tests")) : print "Runing doctests..." cur_dir = os.path.abspath(os.curdir) os.chdir(os.path.join("Tests")) doctest.testmod() os.chdir(cur_dir) del cur_dir print "Done" if __name__ == "__main__": _test()
gpl-2.0
Muges/audiotsm
audiotsm/utils/windows.py
1
1667
# -*- coding: utf-8 -*-
"""
The :mod:`audiotsm.utils.windows` module contains window functions used for
digital signal processing.
"""

import numpy as np


def apply(buffer, window):
    """Multiply each channel of ``buffer`` in place by ``window``.

    :param buffer: a matrix of shape (``m``, ``n``), with ``m`` the number of
        channels and ``n`` the length of the buffer.
    :type buffer: :class:`numpy.ndarray`
    :param window: a :class:`numpy.ndarray` of shape (``n``,), or ``None``, in
        which case the buffer is left untouched.
    """
    if window is not None:
        for channel_samples in buffer:
            # In-place multiplication so the caller's buffer is modified.
            channel_samples *= window


def hanning(length):
    """Return a periodic Hanning window.

    Contrary to :func:`numpy.hanning`, which returns the symmetric Hanning
    window, :func:`hanning` returns a periodic Hanning window, which is
    better for spectral analysis.

    :param length: the number of points of the Hanning window
    :type length: :class:`int`
    :return: the window as a :class:`numpy.ndarray` of shape (``length``,).
    """
    if length > 0:
        samples = np.arange(length)
        # Periodic variant: the denominator is ``length`` rather than the
        # ``length - 1`` used by the symmetric window.
        return (1 - np.cos(2 * np.pi * samples / length)) / 2
    # Non-positive lengths yield an empty window.
    return np.zeros(0)


def product(window1, window2):
    """Return the pointwise product of two windows.

    :param window1: a :class:`numpy.ndarray` of shape (``n``,) or ``None``.
    :param window2: a :class:`numpy.ndarray` of shape (``n``,) or ``None``.
    :returns: the product of the two windows.  ``None`` acts as the identity:
        if one of the windows is ``None`` the other is returned, and if both
        are ``None``, ``None`` is returned.
    """
    if window1 is not None and window2 is not None:
        return window1 * window2
    return window2 if window1 is None else window1
mit
xiangel/hue
desktop/core/src/desktop/lib/conf.py
11
24263
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The application configuration framework. The user of the framework uses * Config - These correspond to an individual key-value pair in the conf file. For example, [ app_name ] my_property = "foo" is represented by this Config variable: MY_PROPERTY = Config(key='my_property', default='my_default', type=str, help='blah') val = MY_PROPERTY.get() * ConfigSection - A ConfigSection corresponds to a section in the conf file. For example, in [ app_name ] [[ section_a ]] a_property = "bar" both "app_name" and "section_a" are ConfigSection's. A section in which all children are known beforehand can be represented by: SECTION_A = ConfigSection( key='section_a', help='blah', members=dict(a_prop=Config(key='a_property', required=True))) a_val = SECTION_A.a_prop.get() * UnspecifiedConfigSection - An UnspecifiedConfigSection corresponds to a section that has the same type of children. But its exact members are not know beforehand. 
For example, [[ filesystems ]] [[[ cluster_1 ]]] namenode_host = localhost # User may define more: # [[[ cluster_2 ]]] # namenode_host = 10.0.0.1 would be represented by: FS = UnspecifiedConfigSection( key='filesystems', each=ConfigSection(members=dict( nn_host=Config(key='namenode_host', required=True)) all_clusters = FS.keys() for x in all_clusters: val = FS['x'].nn_host.get() You _MUST_ define all Config, ConfigSection and UnspecifiedConfigSection objects in your application's conf.py. During startup, Desktop binds configuration files to your config variables. """ # The Config object unfortunately has a kwarg called "type", and everybody is # using it. So instead of breaking compatibility, we make a "pytype" alias. pytype = type from django.utils.encoding import smart_str from desktop.lib.paths import get_desktop_root, get_build_dir import configobj import json import logging import os import textwrap import re import sys # Magical object for use as a "symbol" _ANONYMOUS = ("_ANONYMOUS") # Supported thrift transports SUPPORTED_THRIFT_TRANSPORTS = ('buffered', 'framed') # a BoundContainer(BoundConfig) object which has all of the application's configs as members GLOBAL_CONFIG = None LOG = logging.getLogger(__name__) __all__ = ["UnspecifiedConfigSection", "ConfigSection", "Config", "load_confs", "coerce_bool", "coerce_csv", "coerce_json_dict"] class BoundConfig(object): def __init__(self, config, bind_to, grab_key=_ANONYMOUS, prefix=''): """ A Config object that has been bound to specific data. @param config The config that is bound - must handle get_value @param bind_to The data it is bound to - must support subscripting @param grab_key The key in bind_to in which to expect this configuration @param prefix The prefix in the config tree leading to this configuration """ self.config = config self.bind_to = bind_to self.grab_key = grab_key # The prefix of a config is the path leading to the config, including section and subsections # along the way, e.g. 
# NOTE(review): this chunk begins mid-way through BoundConfig.__init__ -- the
# class header and the start of the constructor are outside this view.  The
# file was flattened during extraction, so intra-string whitespace (help-text
# padding etc.) may not be byte-exact; verify against the upstream file.
# This is Python 2 code (`print >>`, `iteritems`, `long`, `basestring`).
#
#     hadoop.filesystems.cluster_1.
#
# The prefix is recorded in BoundConfig only, because section names can change
# dynamically for UnspecifiedConfigSection's, which have children with
# _ANONYMOUS keys. If our config is _ANONYMOUS, this `prefix' includes the
# prefix plus the actual key name.
    self.prefix = prefix

  def get_fully_qualifying_key(self):
    """Returns the full key name, in the form of section[.subsection[...]].key"""
    res = self.prefix
    if self.config.key is not _ANONYMOUS:
      # py2 pre-ternary idiom: insert a '.' separator only when the prefix
      # is non-empty.
      res += self.prefix and '.' + self.config.key or self.config.key
    return res

  def _get_data_and_presence(self):
    """
    Returns a tuple (data, present).

    'present' is whether the data was found in self.bind_to
    'data' is the data itself, or None whenever present is False
    """
    if self.grab_key is not _ANONYMOUS:
      present = self.grab_key in self.bind_to
      data = self.bind_to.get(self.grab_key)
    else:
      # Anonymous configs are bound to the whole container, so they are
      # always considered present.
      present = True
      data = self.bind_to
    return data, present

  def get(self):
    """Get the data, or its default value."""
    data, present = self._get_data_and_presence()
    return self.config.get_value(data, present=present,
                                 prefix=self.prefix, coerce_type=True)

  def get_raw(self):
    """Get raw config value. This maybe a non-string or non-iterable object."""
    data, present = self._get_data_and_presence()
    return self.config.get_value(data, present=present,
                                 prefix=self.prefix, coerce_type=False)

  def set_for_testing(self, data=None, present=True):
    """
    This temporarily sets this configuration's value to data (or, if
    present=False, to the default value). This returns a lambda which
    should be executed when the testing phase is done.

    Note that self is a new object at every access, but self.bind_to is
    shared, so we can modify that.
    """
    def set_data_presence(data, presence):
      # Assign first, then delete when simulating absence; the assignment
      # guarantees the key exists so the delete cannot raise KeyError.
      self.bind_to[self.grab_key] = data
      if not presence:
        del self.bind_to[self.grab_key]

    assert self.grab_key is not _ANONYMOUS  # TODO(todd) really?
    old_data = self.bind_to.get(self.grab_key)
    old_presence = self.grab_key in self.bind_to
    set_data_presence(data, present)
    # Restorer: call this to undo the override.
    return (lambda: set_data_presence(old_data, old_presence))

  def validate(self):
    self.config.validate(self.bind_to)

  def print_help(self, *args, **kwargs):
    self.config.print_help(*args, **kwargs)

  def __repr__(self):
    return repr("%s(config=%s, bind_to=%s, grab_key=%s)" %
                (str(self.__class__), repr(self.config),
                 repr(self.bind_to), repr(self.grab_key)))


class Config(object):
  def __init__(self, key=_ANONYMOUS, default=None, dynamic_default=None,
               required=False, help=None, type=str, private=False):
    """
    Initialize a new Configurable variable.

    @param key the configuration key (eg "filebrowser.foo")
    @param default the default value
    @param dynamic_default a lambda to use to calculate the default
    @param required whether this must be set
    @param help some text to print out for help
    @param type a callable that coerces a string into the expected type.
      str is the default. Should raise an exception in the case that
      it cannot be coerced.
    @param private if True, does not emit help text
    """
    if not callable(type):
      raise ValueError("%s: The type argument '%s()' is not callable" % (key, type))

    if default is not None and dynamic_default is not None:
      raise ValueError("Cannot specify both dynamic_default and default for key %s" % key)

    # Dynamic defaults surface their __doc__ as user-facing help, so one is
    # mandatory unless the config is private.
    if dynamic_default is not None and not dynamic_default.__doc__ and not private:
      raise ValueError("Dynamic default '%s' must have __doc__ defined!" % (key,))

    # NOTE(review): `pytype` is presumably the builtin `type` aliased before
    # this chunk (the `type` parameter shadows the builtin) -- confirm
    # against the full file.  `long` makes this Python 2 only.
    if pytype(default) in (int, long, float, complex, bool) and \
        not isinstance(type(default), pytype(default)):
      raise ValueError("%s: '%s' does not match that of the default value %r (%s)"
                       % (key, type, default, pytype(default)))

    if type == bool:
      LOG.warn("%s is of type bool. Resetting it as type 'coerce_bool'."
               " Please fix it permanently" % (key,))
      type = coerce_bool

    self.key = key
    self.default_value = default
    self.dynamic_default = dynamic_default
    self.required = required
    self.help = help
    self.type = type
    self.private = private

    # It makes no sense to be required if you have a default,
    # since you'll never throw the "not set" error.
    assert not (self.required and self.default), \
        "Config cannot be required if it has a default."

  def bind(self, conf, prefix):
    """Rather than doing the lookup now and assigning self.value or
    something, this binding creates a new object. This is because, for a
    given Config object, it might need to be bound to different parts of
    a configuration tree.

    For example, if a "host" Config object is under an
    UnspecifiedConfigSection it will be end up applying to each
    subsection. We therefore bind it multiple times, once to each
    subsection.
    """
    return BoundConfig(config=self, bind_to=conf, grab_key=self.key, prefix=prefix)

  def get_value(self, val, present, prefix=None, coerce_type=True):
    """
    Return the value for this configuration variable from the
    currently loaded configuration.

    @throws KeyError if it is required but not set.
    @throws ValueError if it does not validate correctly.
    """
    if self.required and not present:
      raise KeyError("Configuration key %s not in configuration!" % self.key)

    if present:
      raw_val = val
    else:
      raw_val = self.default

    if coerce_type:
      return self._coerce_type(raw_val, prefix)
    else:
      return raw_val

  def validate(self, source):
    """
    Raise an exception if this configuration value is missing but
    required, or of the incorrect type.
    """
    # Getting the value will raise an exception if it's in bad form.
    val = source.get(self.key, None)
    # NOTE(review): an explicit None value is treated as "absent" here.
    present = val is not None
    _ = self.get_value(val, present)

  def _coerce_type(self, raw, _):
    """
    Coerces the value in 'raw' to the correct type, based on self.type
    """
    if raw is None:
      return raw
    return self.type(raw)

  def print_help(self, out=sys.stdout, indent=0):
    """
    Print out a help string for this configuration object
    to the specified output stream.

    @param indent the number of spaces to indent all text by
    """
    if self.private:
      return

    indent_str = indent * " "
    if self.required:
      req_kw = "required"
    else:
      req_kw = "optional"

    print >>out, indent_str + "Key: %s (%s)" % (self.get_presentable_key(), req_kw)
    if self.default_value:
      # `default` (the property) recomputes dynamic defaults on access.
      print >>out, indent_str + " Default: %s" % repr(self.default)
    elif self.dynamic_default:
      print >>out, indent_str + " Dynamic default: %s" % self.dynamic_default.__doc__.strip()
    print >>out, self.get_presentable_help_text(indent=indent)
    print >>out

  def get_presentable_help_text(self, indent=0):
    indent_str = " " * indent
    help = self.help or "[no help text provided]"
    help = textwrap.fill(help,
                         initial_indent=(indent_str + " "),
                         subsequent_indent=(indent_str + " "))
    return help

  def get_presentable_key(self):
    if self.key is _ANONYMOUS:
      return "<user specified name>"  # TODO(todd) add "metavar" like optparse
    else:
      return self.key

  @property
  def default(self):
    # Dynamic defaults are recomputed on every access.
    if self.dynamic_default is not None:
      return self.dynamic_default()
    return self.default_value


class BoundContainer(BoundConfig):
  """Binds a ConfigSection to actual data."""
  def __contains__(self, item):
    return self.get().__contains__(item)

  def __iter__(self):
    return self.get().__iter__()

  def __len__(self):
    return len(self.get())

  def get_data_dict(self):
    data, present = self._get_data_and_presence()
    if present:
      return data
    else:
      assert self.grab_key is not _ANONYMOUS
      # Mutates bind_to: materializes an empty dict so later writes stick.
      return self.bind_to.setdefault(self.grab_key, {})

  def keys(self):
    return self.get_data_dict().keys()


class BoundContainerWithGetAttr(BoundContainer):
  """
  A configuration bound to a data container where we
  expect the user to use getattr syntax (container.FOO)
  to access the members.

  This is used by ConfigSection
  """
  def __getattr__(self, attr):
    return self.config.get_member(self.get_data_dict(), attr, self.prefix)


class BoundContainerWithGetItem(BoundContainer):
  """
  A configuration bound to a data container where we
  expect the user to use [...] syntax to access the members.

  This is used for UnspecifiedConfigSection
  """
  def __getitem__(self, attr):
    # Real instance attributes win over configuration lookups.
    if attr in self.__dict__:
      return self.__dict__[attr]
    return self.config.get_member(self.get_data_dict(), attr, self.prefix)


class ConfigSection(Config):
  """
  A section of configuration variables whose names are known a priori.
  For example, this can be used to group configuration for a cluster.
  """
  def __init__(self, key=_ANONYMOUS, members=None, **kwargs):
    """Initializes a ConfigSection

    @param members a dictionary whose keys are the attributes
      by which the members are accessed. For example:
      members=dict(FOO=Config(...))
      means that you will access this configuration
      as section.FOO.get()
    """
    super(ConfigSection, self).__init__(key, default={}, **kwargs)
    self.members = members or {}
    for member in members.itervalues():
      assert member.key is not _ANONYMOUS

  def update_members(self, new_members, overwrite=True):
    """
    Add the new_members to this ConfigSection.

    @param new_members A dictionary of {key=Config(...), key2=Config(...)}.
    @param overwrite Whether to overwrite the current member on key conflict.
    """
    for member in new_members.itervalues():
      assert member.key is not _ANONYMOUS
    if not overwrite:
      # Work on a copy so the caller's dict is not mutated.
      new_members = new_members.copy()
      for k in self.members.iterkeys():
        if new_members.has_key(k):
          del new_members[k]
    self.members.update(new_members)

  def bind(self, config, prefix):
    return BoundContainerWithGetAttr(self, bind_to=config, grab_key=self.key, prefix=prefix)

  def _coerce_type(self, raw, prefix=''):
    """
    Materialize this section as a dictionary.

    The keys are those specified in the members dict, and the values
    are bound configuration parameters.
    """
    return dict([(key, self.get_member(raw, key, prefix))
                 for key in self.members.iterkeys()])

  def get_member(self, data, attr, prefix):
    if self.key is not _ANONYMOUS:
      prefix += prefix and '.' + self.key or self.key
    return self.members[attr].bind(data, prefix)

  def print_help(self, out=sys.stdout, indent=0, skip_header=False):
    if self.private:
      return
    if not skip_header:
      print >>out, (" " * indent) + "[%s]" % self.get_presentable_key()
      print >>out, self.get_presentable_help_text(indent=indent)
      print >>out
      new_indent = indent + 2
    else:
      new_indent = indent
    # We sort the configuration for canonicalization.
    for programmer_key, config in sorted(self.members.iteritems(), key=lambda x: x[1].key):
      config.print_help(out=out, indent=new_indent)


class UnspecifiedConfigSection(Config):
  """
  A special Config that maps a section name to a list of anonymous
  subsections. The subsections are anonymous in the sense that their
  names are unknown beforehand, but all have the same structure.

  For example, this can be used for a [clusters] section which
  expects some number of [[cluster]] sections underneath it.

  This class is NOT a ConfigSection, although it supports get_member().
  The key difference is that its get_member() returns a BoundConfig with:
  (1) an anonymous ConfigSection, (2) an anonymous grab_key, and (3) a
  `prefix' containing the prefix plus the actual key name.
  """
  def __init__(self, key=_ANONYMOUS, each=None, **kwargs):
    super(UnspecifiedConfigSection, self).__init__(key, default={}, **kwargs)
    assert each.key is _ANONYMOUS
    self.each = each  # `each' is a ConfigSection

  def bind(self, config, prefix):
    return BoundContainerWithGetItem(self, bind_to=config, grab_key=self.key, prefix=prefix)

  def _coerce_type(self, raw, prefix=''):
    """
    Materialize this section as a dictionary.

    The keys are the keys specified by the user in the config file.
    """
    return dict([(key, self.get_member(raw, key, prefix))
                 for key in raw.iterkeys()])

  def get_member(self, data, attr, prefix=''):
    tail = self.key + '.' + attr
    child_prefix = prefix
    child_prefix += prefix and '.' + tail or tail
    return self.each.bind(data[attr], child_prefix)

  def print_help(self, out=sys.stdout, indent=0):
    indent_str = " " * indent
    print >>out, indent_str + "[%s]" % self.get_presentable_key()
    print >>out, self.get_presentable_help_text(indent=indent)
    print >>out
    print >>out, indent_str + " Consists of some number of sections like:"
    self.each.print_help(out=out, indent=indent + 2)


def _configs_from_dir(conf_dir):
  """
  Generator to load configurations from a directory. This will
  only load files that end in .ini
  """
  for filename in sorted(os.listdir(conf_dir)):
    if filename.startswith(".") or not filename.endswith('.ini'):
      continue
    LOG.debug("Loading configuration from: %s" % filename)
    try:
      conf = configobj.ConfigObj(os.path.join(conf_dir, filename))
    except configobj.ConfigObjError, ex:
      LOG.error("Error in configuration file '%s': %s" % (os.path.join(conf_dir, filename), ex))
      raise
    # Make desktop_root/build_dir available for interpolation in every file.
    conf['DEFAULT'] = dict(desktop_root=get_desktop_root(), build_dir=get_build_dir())
    yield conf


def load_confs(conf_source=None):
  """Loads and merges all of the configurations passed in,
  returning a ConfigObj for the result.

  @param conf_source if not specified, reads conf/ from
                     desktop/conf/. Otherwise should be a generator
                     of ConfigObjs
  """
  if conf_source is None:
    conf_source = _configs_from_dir(get_desktop_root("conf"))

  # Later confs win on conflict (ConfigObj.merge semantics).
  conf = configobj.ConfigObj()
  for in_conf in conf_source:
    conf.merge(in_conf)
  return conf


def _bind_module_members(module, data, section):
  """
  Bind all Config instances found inside the given module
  to the given data.

  Returns the dict of unbound configs.
  """
  members = {}
  for key, val in module.__dict__.iteritems():
    if not isinstance(val, Config):
      continue
    members[key] = val
    # Replace the module-level Config with its bound counterpart.
    module.__dict__[key] = val.bind(data, prefix=section)
  return members


def bind_module_config(mod, conf_data, config_key):
  """Binds the configuration for the module to the given data.
  conf_data is a dict-like structure in which the configuration data
  has been loaded.  The configuration for this module should be inside
  a section which is named as follows:
    - if the name of the module is foo.conf, it should be a section [foo]
    - if the name of the module is bar, it should be a section [bar]
    - if the module has a CONFIGURATION_SECTION attribute, that attribute
      should be a string, and determines the section name.

  config_key is the key that should map to the configuration. It's used
  to allow renaming of configurations.

  For example, for the module "hello.world.conf",
  type(conf_data['hello.world']) should be dict-like and contain the
  configuration for the hello.world module.

  Note that this mutates the contents of the module - any Config
  instances will be changed into BoundConfig instances such that you
  can call .get() on them.
  """
  if hasattr(mod, "CONFIGURATION_SECTION"):
    section = mod.CONFIGURATION_SECTION
  elif mod.__name__.endswith(".conf"):
    section = mod.__name__[:-len(".conf")]
  else:
    section = mod.__name__

  if config_key is None:
    bind_data = conf_data.get(section, {})
  else:
    # A config_key override renames the section as well.
    section = config_key
    bind_data = conf_data.get(config_key, {})

  members = _bind_module_members(mod, bind_data, section)
  return ConfigSection(section, members=members, help=mod.__doc__)


def initialize(modules, config_dir):
  """
  Set up the GLOBAL_CONFIG variable by loading all configuration
  variables from the given module list. Repeated initialization
  updates GLOBAL_CONFIG with the configuration from the new module
  list.
  """
  global GLOBAL_CONFIG
  # Import confs
  conf_data = load_confs(_configs_from_dir(config_dir))
  sections = {}
  for module in modules:
    section = bind_module_config(module['module'], conf_data, module['config_key'])
    sections[section.key] = section

  GLOBAL_HELP = "(root of all configuration)"
  if GLOBAL_CONFIG is None:
    GLOBAL_CONFIG = ConfigSection(members=sections, help=GLOBAL_HELP).bind(conf_data, prefix='')
  else:
    # Merge the new sections on top of the existing tree, keeping any
    # previously registered members that the new module list does not define.
    new_config = ConfigSection(members=sections, help=GLOBAL_HELP)
    new_config.update_members(GLOBAL_CONFIG.config.members, overwrite=False)
    conf_data.merge(GLOBAL_CONFIG.bind_to)
    GLOBAL_CONFIG = new_config.bind(conf_data, prefix='')
  return


def is_anonymous(key):
  return key == _ANONYMOUS


def coerce_str_lowercase(value):
  return smart_str(value).lower()


def coerce_bool(value):
  if isinstance(value, bool):
    return value

  if isinstance(value, basestring):
    upper = value.upper()
  else:
    upper = value

  # NOTE: the empty string and None coerce to False rather than erroring.
  if upper in ("FALSE", "0", "NO", "OFF", "NAY", "", None):
    return False
  if upper in ("TRUE", "1", "YES", "ON", "YEA"):
    return True
  raise Exception("Could not coerce %r to boolean value" % (value,))


def coerce_string(value):
  if type(value) == list:
    return ','.join(value)
  else:
    return value


def coerce_csv(value):
  if isinstance(value, str):
    return value.split(',')
  elif isinstance(value, list):
    return value
  raise Exception("Could not coerce %r to csv array." % value)


def coerce_json_dict(value):
  if isinstance(value, basestring):
    return json.loads(value)
  elif isinstance(value, dict):
    return value
  raise Exception("Could not coerce %r to json dictionary." % value)


def list_of_compiled_res(skip_empty=False):
  # Returns a coercer: comma-separated string (or list) of patterns ->
  # list of compiled regular expressions.
  def fn(list_of_strings):
    if isinstance(list_of_strings, basestring):
      list_of_strings = list_of_strings.split(',')
    list_of_strings = filter(lambda string: string if skip_empty else True, list_of_strings)
    return list(re.compile(x) for x in list_of_strings)
  return fn


def validate_path(confvar, is_dir=None, fs=os.path, message='Path does not exist on the filesystem.'):
  """
  Validate that the value of confvar is an existent path.

  @param confvar  The configuration variable.
  @param is_dir  True/False would verify that the path is/isn't a directory.
                 None to disable check.
  @return [(confvar, error_msg)] or []
  """
  path = confvar.get()
  if path is None or not fs.exists(path):
    return [(confvar, message)]
  if is_dir is not None:
    if is_dir:
      if not fs.isdir(path):
        return [(confvar, 'Not a directory.')]
    elif not fs.isfile(path):
      return [(confvar, 'Not a file.')]
  return [ ]


def validate_port(confvar):
  """
  Validate that the value of confvar is between [0, 65535].
  Returns [(confvar, error_msg)] or []
  """
  port_val = confvar.get()
  error_res = [(confvar, 'Port should be an integer between 0 and 65535 (inclusive).')]
  try:
    port = int(port_val)
    if port < 0 or port > 65535:
      return error_res
  except ValueError:
    return error_res
  return [ ]


def validate_thrift_transport(confvar):
  """
  Validate that the provided thrift transport is supported.
  Returns [(confvar, error_msg)] or []
  """
  transport = confvar.get()
  error_res = [(confvar, 'Thrift transport %s not supported. Please choose a supported transport: %s' % (transport, ', '.join(SUPPORTED_THRIFT_TRANSPORTS)))]
  if transport not in SUPPORTED_THRIFT_TRANSPORTS:
    return error_res
  return []
apache-2.0
rgscherf/gainful2
parsing/parsinglib/org_handler.py
1
10424
# NOTE(review): the original formatting was flattened during extraction; the
# code below is re-indented but token-identical.  Depends on third-party
# packages (requests, bs4, dateutil) and project-local helpers, so it cannot
# run in isolation.
# import shutil
# import PyPDF2
# import json
# import os
import requests
import re
import dateutil.parser as d
from datetime import date, timedelta
from bs4 import BeautifulSoup
from itertools import dropwhile
from .jobcontainer import JobContainer
from .org_urls import urls
from .utils_icims import get_icims_jobs
from .utils_brainhunter import parse_brainhunter_job_table, parse_brainhunter_detail_page, brainhunter_extract_salary, brainhunter_detail_page_exception


class Organization():
    # Base scraper: fetches the org's job-listing page at construction time
    # (network I/O in __init__) and parses it into self.soup.
    def __init__(self, org_name):
        request_url = urls[org_name]
        r = requests.get(request_url)
        self.soup = BeautifulSoup(r.text, "html5lib")

    # NOTE(review): base signature takes `soup`, but every subclass overrides
    # it as parse(self) -- the extra parameter looks vestigial; confirm.
    def parse(self, soup):
        raise NotImplementedError


class PeelRegion(Organization):
    def __init__(self):
        Organization.__init__(self, "peel_region")

    def parse(self):
        # iCIMS-hosted board; shared helper does the scraping + saving.
        get_icims_jobs("GTA - Peel", "Peel Region", self.soup)


class Mississauga(Organization):
    def __init__(self):
        Organization.__init__(self, "mississauga")

    def parse(self):
        get_icims_jobs("GTA - Peel", "Mississauga", self.soup)


class Brampton(Organization):
    # brampton works, but does not capture closing dates for seasonal vacancies
    # (these dates are written inline on the page)
    def __init__(self):
        Organization.__init__(self, "brampton")

    def parse(self):
        get_icims_jobs("GTA - Peel", "Brampton", self.soup)


class YorkRegion(Organization):
    def __init__(self):
        Organization.__init__(self, "york_region")

    def make_url(self, cs):
        """ there's a bunch of junk in these job urls
        including an expiring token. Luckily, we can chop
        out the token and the server will insert one for us on request.
        (saving the chopped URL ensures we'll properly detect uniques
        in the DB)
        """
        tempurl = cs[1].a["href"]
        tempurl = tempurl.split("clid=")[1].split("&BRID=")[0]
        url = "http://clients.njoyn.com/cl2/xweb/Xweb.asp?clid={}".format(
            tempurl)
        return url

    def parse_detail_page(self, job):
        # Strips all occurrences of `char` from `string` (used for "70,000").
        def stringops_removechar(char, string):
            if char in string:
                return "".join(filter(lambda a: a != char, string))
            else:
                return string

        req = requests.get(job.url_detail)
        soup = BeautifulSoup(req.text, "html5lib")
        try:
            info_table = soup.find("table").find_all("tr")
        except AttributeError:
            # No table on the page -- leave the job's fields as-is.
            return

        # search header table for basic info
        for r in info_table:
            cells = r.find_all("td")
            field_name = cells[0].text
            try:
                val = cells[1].text.strip()
            except IndexError:
                continue
            if "Department" in field_name:
                if " Department" in val:
                    val = val.split(" Department")[0]
                job.division = val
            elif "Date Posted" in field_name:
                job.date_posted = d.parse(val)
            elif "Date Closing" in field_name:
                job.date_closing = d.parse(val)
        if not job.date_closing:
            # Fallback when the page omits a closing date.
            job.date_closing = date.today() + timedelta(weeks=3)

        # now searching the body text for salary information
        # I think we only incur regex expense when we compile
        # so iteration is not too too bad.
        body = soup.find_all("p")
        # NOTE(review): the '.' in the group is unescaped, so it matches any
        # separator character; group(0)[1:] drops only the leading '$'.
        ex = re.compile(r"\$[0-9]*(.|,)[0-9]*")
        for p in body:
            text = p.text
            result = ex.search(text)
            if result:
                result = result.group(0)[1:]
                result = stringops_removechar(",", result)
                job.salary_amount = float(result)
            else:
                pass

    def parse(self):
        t = self.soup.find(id="searchtable").find_all("tr")[1:]
        for r in t:
            job = JobContainer()
            cols = r.find_all("td")
            job.url_detail = self.make_url(cols)
            # Skip jobs already present in the DB.
            if not job.is_unique():
                continue
            job.region = "GTA - York"
            job.organization = "York Region"
            job.title = cols[1].text
            self.parse_detail_page(job)
            job.save()


class Toronto(Organization):
    def __init__(self):
        Organization.__init__(self, "toronto")

    def parse(self):
        # Maps JobContainer attributes -> field labels on the detail page.
        detail_dict = {"division": "Division",
                       "date_posted": "Posting Date",
                       "date_closing": "Closing Date",
                       "salary_amount": "Salary/Rate"
                       }
        for j in parse_brainhunter_job_table(self.soup):
            j.region = "GTA - Toronto"
            j.organization = "Toronto"
            try:
                parse_brainhunter_detail_page(detail_dict, j)
            except KeyError as e:
                brainhunter_detail_page_exception(j, e)


class Markham(Organization):
    def __init__(self):
        Organization.__init__(self, "markham")

    def parse(self):
        detail_dict = {"division": "Department",
                       "date_posted": "Posting Date",
                       "date_closing": "Expiry Date",
                       "salary_amount": "Salary/Rate"
                       }
        for j in parse_brainhunter_job_table(self.soup):
            j.region = "GTA - York"
            j.organization = "Markham"
            parse_brainhunter_detail_page(detail_dict, j)


class Halton(Organization):
    def __init__(self):
        Organization.__init__(self, "halton")

    def parse(self):
        rows = self.soup.find(class_="List").find_all("tr")[2:]
        for r in rows:
            job = JobContainer()
            cols = r.find_all("td")
            try:
                job.url_detail = "http://webaps.halton.ca/about/jobs/" + \
                    cols[0].a["href"]
            except TypeError:
                # NOTE(review): a row without a link aborts the whole loop
                # (return, not continue) -- confirm that is intended.
                return
            if not job.is_unique():
                continue
            job.organization = "Halton Region"
            job.region = "GTA - Halton"
            job.title = cols[0].text.strip()
            job.division = cols[1].text.strip()
            self.parse_detail_page(job)
            job.save()

    def parse_detail_page(self, job):
        r = requests.get(job.url_detail)
        soup = BeautifulSoup(r.text, "html5lib")
        rows = soup.find_all("tr")
        for r in rows:
            cols = r.find_all("td")
            # Drop leading empty cells before reading field/value pairs.
            cols = list(
                dropwhile(lambda x: x.text.strip().lower() == "", cols))
            if len(cols) < 2:
                continue
            field = cols[0].text.strip().lower()
            val = cols[1].text.strip()
            if field == "salary range:":
                job.salary_amount = brainhunter_extract_salary(val)
            elif field == "posted:":
                job.date_posted = d.parse(val).date()
            elif "posting ex" in field:
                job.date_closing = d.parse(val).date()


class Burlington(Organization):
    def __init__(self):
        Organization.__init__(self, "burlington")

    def parse(self):
        rows = self.soup.find(class_="TPListTbl").find_all("tr")[1:]
        for r in rows:
            cols = r.find_all("td")
            job = JobContainer()
            job.url_detail = "http://careers2.hiredesk.net" + cols[0].a["href"]
            if not job.is_unique():
                continue
            job.title = cols[0].text.strip()
            job.region = "GTA - Halton"
            job.organization = "Burlington"
            # Listing page carries no posting date; use today.
            job.date_posted = date.today()
            job.date_closing = d.parse(cols[4].text.strip()).date()
            self.parse_detail_page(job)
            job.save()

    def parse_detail_page(self, job):
        """ get job's department and salary """
        r = requests.get(job.url_detail)
        soup = BeautifulSoup(r.text, "html5lib")
        rows = soup.find(class_="FormContent").find_all("tr")
        for r in rows:
            cols = r.find_all("td")
            if len(cols) < 2:
                continue
            field = cols[0].text.strip().lower()
            val = cols[1].text.strip()
            if field == "department":
                job.division = val
            elif field == "salary range" or field == "hourly rate":
                job.salary_amount = brainhunter_extract_salary(val)
        # NOTE(review): `== None` should be `is None` (PEP 8).
        if job.division == None:
            job.division = ""


class Oakville(Organization):
    def __init__(self):
        Organization.__init__(self, "oakville")

    def parse(self):
        # Drops the session token embedded in the URL so DB uniqueness
        # checks see a stable URL.
        def trim_url(url):
            if "jsessionid" in url:
                spl = url.split(";jsessionid=")
                fst = spl[0]
                snd = "".join(spl).split("?org=")[1]
                return fst + "?org=" + snd
            else:
                raise IndexError("malformed Oakville URL")

        rows = self.soup.find(id="cws-search-results").find_all("tr")[1:]
        for r in rows:
            job = JobContainer()
            cols = r.find_all("td")
            job.url_detail = trim_url(cols[0].a["href"])
            if not job.is_unique():
                continue
            job.region = "GTA - Halton"
            job.organization = "Oakville"
            job.title = cols[0].text.strip()
            job.date_posted = d.parse(
                cols[2].text.strip(), dayfirst=True).date()
            job.date_closing = d.parse(
                cols[3].text.strip(), dayfirst=True).date()
            self.parse_detail_page(job)
            job.save()

    def parse_detail_page(self, job):
        r = requests.get(job.url_detail)
        soup = BeautifulSoup(r.text, "html.parser")  # html5lib failed here..
        rows = soup.find(id="taleoContent").table.find_all("tr")[4:]
        for r in rows:
            cols = r.find_all("td")
            if cols[0].text.strip().lower() == "department:":
                job.division = cols[1].text.strip()
            elif cols[0].text.strip().lower() == "location:":
                sal = cols[4].text.strip()
                try:
                    job.salary_amount = brainhunter_extract_salary(sal)
                except IndexError:
                    job.salary_amount = 0


# the main parse util calls find_jobs to kick off web scraping.
# make sure current_orgs is always up to date.
# NOTE(review): constructing these objects performs an HTTP request per org
# at module import time.
current_orgs = [Oakville(),
                Burlington(),
                Halton(),
                Markham(),
                YorkRegion(),
                Brampton(),
                PeelRegion(),
                Mississauga(),
                Toronto()
                ]


def find_jobs():
    # Entry point: run every registered organization's scraper.
    for o in current_orgs:
        o.parse()
mit
openhatch/new-mini-tasks
vendor/packages/Django/django/utils/importlib.py
124
1228
# Taken from Python 2.7 with permission from/by the original author. import sys def _resolve_name(name, package, level): """Return the absolute name of the module to be imported.""" if not hasattr(package, 'rindex'): raise ValueError("'package' not set to a string") dot = len(package) for x in range(level, 1, -1): try: dot = package.rindex('.', 0, dot) except ValueError: raise ValueError("attempted relative import beyond top-level " "package") return "%s.%s" % (package[:dot], name) def import_module(name, package=None): """Import a module. The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import. """ if name.startswith('.'): if not package: raise TypeError("relative imports require the 'package' argument") level = 0 for character in name: if character != '.': break level += 1 name = _resolve_name(name[level:], package, level) __import__(name) return sys.modules[name]
apache-2.0
bear/gae-deploy-test
lib/flask/templating.py
783
4707
# -*- coding: utf-8 -*-
"""
    flask.templating
    ~~~~~~~~~~~~~~~~

    Implements the bridge to Jinja2.

    :copyright: (c) 2011 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
     TemplateNotFound

from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems


def _default_template_ctx_processor():
    """Default template context processor.  Injects `request`,
    `session` and `g`.
    """
    reqctx = _request_ctx_stack.top
    appctx = _app_ctx_stack.top
    rv = {}
    # Only inject what the active contexts actually provide; outside a
    # request only `g` is available.
    if appctx is not None:
        rv['g'] = appctx.g
    if reqctx is not None:
        rv['request'] = reqctx.request
        rv['session'] = reqctx.session
    return rv


class Environment(BaseEnvironment):
    """Works like a regular Jinja2 environment but has some additional
    knowledge of how Flask's blueprint works so that it can prepend the
    name of the blueprint to referenced templates if necessary.
    """

    def __init__(self, app, **options):
        # Fall back to the app-wide dispatching loader unless the caller
        # supplied an explicit one.
        if 'loader' not in options:
            options['loader'] = app.create_global_jinja_loader()
        BaseEnvironment.__init__(self, **options)
        self.app = app


class DispatchingJinjaLoader(BaseLoader):
    """A loader that looks for templates in the application and all
    the blueprint folders.
    """

    def __init__(self, app):
        self.app = app

    def get_source(self, environment, template):
        # Try each candidate loader in order; first hit wins (EAFP).
        for loader, local_name in self._iter_loaders(template):
            try:
                return loader.get_source(environment, local_name)
            except TemplateNotFound:
                pass

        raise TemplateNotFound(template)

    def _iter_loaders(self, template):
        # Yield order: app loader first, then a matching old-style module
        # blueprint (path prefix stripped), then all non-module blueprints.
        loader = self.app.jinja_loader
        if loader is not None:
            yield loader, template

        # old style module based loaders in case we are dealing with a
        # blueprint that is an old style module
        try:
            module, local_name = posixpath.normpath(template).split('/', 1)
            blueprint = self.app.blueprints[module]
            if blueprint_is_module(blueprint):
                loader = blueprint.jinja_loader
                if loader is not None:
                    yield loader, local_name
        except (ValueError, KeyError):
            # No '/' in the name, or no blueprint of that name.
            pass

        for blueprint in itervalues(self.app.blueprints):
            if blueprint_is_module(blueprint):
                continue
            loader = blueprint.jinja_loader
            if loader is not None:
                yield loader, template

    def list_templates(self):
        result = set()
        loader = self.app.jinja_loader
        if loader is not None:
            result.update(loader.list_templates())

        for name, blueprint in iteritems(self.app.blueprints):
            loader = blueprint.jinja_loader
            if loader is not None:
                for template in loader.list_templates():
                    prefix = ''
                    # Module-style blueprints expose templates under their
                    # name as a path prefix.
                    if blueprint_is_module(blueprint):
                        prefix = name + '/'
                    result.add(prefix + template)
        return list(result)


def _render(template, context, app):
    """Renders the template and fires the signal"""
    rv = template.render(context)
    template_rendered.send(app, template=template, context=context)
    return rv


def render_template(template_name_or_list, **context):
    """Renders a template from the template folder with the given
    context.

    :param template_name_or_list: the name of the template to be
                                  rendered, or an iterable with template names
                                  the first one existing will be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    # Lets the app (and its context processors) add/override variables.
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
                   context, ctx.app)


def render_template_string(source, **context):
    """Renders a template from the given template source string
    with the given context.

    :param source: the sourcecode of the template to be
                   rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.from_string(source),
                   context, ctx.app)
apache-2.0
rupertsmall/neon
neon/visualizations/data.py
12
3652
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# NOTE(review): this module is Python 2 only (`iteritems`, and the integer
# divisions flagged below rely on py2 floor-division semantics).
import h5py
import numpy as np


def create_minibatch_x(minibatches, minibatch_markers, epoch_axis):
    """
    Helper function to build x axis for data captured per minibatch

    Arguments:
        minibatches (int): how many total minibatches
        minibatch_markers (int array): cumulative number of minibatches complete
                                       at a given epoch
        epoch_axis (bool): whether to render epoch or minibatch as the integer
                           step in the x axis
    """
    if epoch_axis:
        x = np.zeros((minibatches,))
        last_e = 0
        for e_idx, e in enumerate(minibatch_markers):
            e_minibatches = e - last_e
            # Spread this epoch's minibatches evenly over [e_idx, e_idx + 1).
            x[last_e:e] = e_idx + (np.arange(float(e_minibatches))/e_minibatches)
            last_e = e
    else:
        x = np.arange(minibatches)

    return x


def create_epoch_x(points, epoch_freq, minibatch_markers, epoch_axis):
    """
    Helper function to build x axis for points captured per epoch

    Arguments:
        points (int): how many data points need a corresponding x axis points
        epoch_freq (int): are points once an epoch or once every n epochs?
        minibatch_markers (int array): cumulative number of minibatches complete
                                       at a given epoch
        epoch_axis (bool): whether to render epoch or minibatch as the integer
                           step in the x axis
    """
    if epoch_axis:
        x = np.zeros((points,))
        last_e = 0
        for e_idx, e in enumerate(minibatch_markers):
            e_minibatches = e - last_e
            if (e_idx + 1) % epoch_freq == 0:
                # NOTE(review): both divisions below are integer (floor)
                # division under py2 when the markers are ints --
                # `e_idx/epoch_freq` is the array index, and
                # `(e_minibatches - 1) / e_minibatches` then evaluates to 0
                # for e_minibatches > 1, so the fractional offset is lost.
                # Possibly intended as a float division; confirm upstream.
                x[e_idx/epoch_freq] = e_idx + (e_minibatches - 1) / e_minibatches
            last_e = e
    else:
        # Take every epoch_freq-th marker; -1 converts a cumulative count
        # to the index of the epoch's last minibatch.
        x = minibatch_markers[(epoch_freq-1)::epoch_freq] - 1

    return x


def h5_cost_data(filename, epoch_axis=True):
    """
    Read cost data from hdf5 file. Generate x axis data for each cost line.

    Returns:
        list of tuples of (name, x data, y data)
    """
    ret = list()
    with h5py.File(filename, "r") as f:
        config, cost, time_markers = [f[x] for x in
                                      ['config', 'cost', 'time_markers']]
        total_epochs = config.attrs['total_epochs']
        total_minibatches = config.attrs['total_minibatches']
        minibatch_markers = time_markers['minibatch']

        for name, ydata in cost.iteritems():
            y = ydata[...]
            # Each cost series declares how it was sampled via HDF5 attrs.
            if ydata.attrs['time_markers'] == 'epoch_freq':
                y_epoch_freq = ydata.attrs['epoch_freq']
                assert len(y) == total_epochs / y_epoch_freq
                x = create_epoch_x(len(y), y_epoch_freq, minibatch_markers, epoch_axis)
            elif ydata.attrs['time_markers'] == 'minibatch':
                assert len(y) == total_minibatches
                x = create_minibatch_x(total_minibatches, minibatch_markers, epoch_axis)
            else:
                raise TypeError('Unsupported data format for h5_cost_data')

            ret.append((name, x, y))

    return ret
apache-2.0
kalvdans/scipy
scipy/sparse/linalg/isolve/setup.py
108
1408
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import

from os.path import join


def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for scipy.sparse.linalg.isolve.

    Compiles the templated-Fortran reverse-communication iterative solvers
    into the `_iterative` extension, linked against LAPACK/BLAS.
    """
    from numpy.distutils.system_info import get_info, NotFoundError
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils import get_g77_abi_wrappers

    config = Configuration('isolve', parent_package, top_path)

    lapack_opt = get_info('lapack_opt')
    if not lapack_opt:
        raise NotFoundError('no lapack/blas resources found')

    # iterative methods (commented entries are not built)
    method_sources = [
        'BiCGREVCOM.f.src',
        'BiCGSTABREVCOM.f.src',
        'CGREVCOM.f.src',
        'CGSREVCOM.f.src',
        # 'ChebyREVCOM.f.src',
        'GMRESREVCOM.f.src',
        # 'JacobiREVCOM.f.src',
        'QMRREVCOM.f.src',
        # 'SORREVCOM.f.src'
    ]
    util_sources = ['STOPTEST2.f.src', 'getbreak.f.src']

    # Utilities first, then the solvers, then the f2py signature file.
    ext_sources = [join('iterative', fname)
                   for fname in util_sources + method_sources + ['_iterative.pyf.src']]
    ext_sources.extend(get_g77_abi_wrappers(lapack_opt))

    config.add_extension('_iterative',
                         sources=ext_sources,
                         extra_info=lapack_opt)

    config.add_data_dir('tests')

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
bsd-3-clause
ItsCalebJones/SpaceLaunchNow-Server
api/forms/admin_forms.py
1
2709
"""Admin forms for the API models.

Each form swaps the default widget of its long free-text field(s) for a
Textarea and exposes every model field via ``fields = '__all__'``.

Fix: the original module defined ``MissionForm`` twice with identical
bodies; the duplicate (dead code — the second definition simply rebound
the name) has been removed.
"""
from django import forms
from api.models import *


class LaunchForm(forms.ModelForm):
    # Optional: launches without holds/failures leave these blank.
    holdreason = forms.CharField(widget=forms.Textarea, required=False)
    failreason = forms.CharField(widget=forms.Textarea, required=False)

    class Meta:
        model = Launch
        fields = '__all__'


class LandingForm(forms.ModelForm):
    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Landing
        fields = '__all__'


class LauncherForm(forms.ModelForm):
    details = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Launcher
        fields = '__all__'


class PayloadForm(forms.ModelForm):
    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Payload
        fields = '__all__'


class MissionForm(forms.ModelForm):
    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Mission
        fields = '__all__'


class EventsForm(forms.ModelForm):
    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Events
        fields = '__all__'


class LauncherConfigForm(forms.ModelForm):
    librarian_notes = forms.CharField(widget=forms.Textarea, required=False)
    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = LauncherConfig
        fields = '__all__'


class OrbiterForm(forms.ModelForm):
    # NOTE: named OrbiterForm for historical reasons but backed by the
    # SpacecraftConfiguration model.
    history = forms.CharField(widget=forms.Textarea)
    details = forms.CharField(widget=forms.Textarea)
    capability = forms.CharField(widget=forms.Textarea)
    flight_life = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = SpacecraftConfiguration
        fields = '__all__'


class AgencyForm(forms.ModelForm):
    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Agency
        fields = '__all__'


class AstronautForm(forms.ModelForm):
    bio = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Astronaut
        fields = '__all__'


class SpacecraftForm(forms.ModelForm):
    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Spacecraft
        fields = '__all__'


class SpacecraftFlightForm(forms.ModelForm):
    destination = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = SpacecraftFlight
        fields = '__all__'


class SpaceStationForm(forms.ModelForm):
    description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = SpaceStation
        fields = '__all__'
apache-2.0
dreamsxin/kbengine
kbe/res/scripts/common/Lib/test/test_pdb.py
60
33443
# A test suite for pdb; not very comprehensive at the moment. import doctest import pdb import sys import types import unittest import subprocess import textwrap from test import support # This little helper class is essential for testing pdb under doctest. from test.test_doctest import _FakeInput class PdbTestInput(object): """Context manager that makes testing Pdb in doctests easier.""" def __init__(self, input): self.input = input def __enter__(self): self.real_stdin = sys.stdin sys.stdin = _FakeInput(self.input) self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None def __exit__(self, *exc): sys.stdin = self.real_stdin if self.orig_trace: sys.settrace(self.orig_trace) def test_pdb_displayhook(): """This tests the custom displayhook for pdb. >>> def test_function(foo, bar): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... pass >>> with PdbTestInput([ ... 'foo', ... 'bar', ... 'for i in range(5): print(i)', ... 'continue', ... ]): ... test_function(1, None) > <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function() -> pass (Pdb) foo 1 (Pdb) bar (Pdb) for i in range(5): print(i) 0 1 2 3 4 (Pdb) continue """ def test_pdb_basic_commands(): """Test the basic commands of pdb. >>> def test_function_2(foo, bar='default'): ... print(foo) ... for i in range(5): ... print(i) ... print(bar) ... for i in range(10): ... never_executed ... print('after for') ... print('...') ... return foo.upper() >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... ret = test_function_2('baz') ... print(ret) >>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE ... 'step', # entering the function call ... 'args', # display function args ... 'list', # list function source ... 'bt', # display backtrace ... 'up', # step up to test_function() ... 'down', # step down to test_function_2() again ... 'next', # stepping to print(foo) ... 'next', # stepping to the for loop ... 'step', # stepping into the for loop ... 
'until', # continuing until out of the for loop ... 'next', # executing the print(bar) ... 'jump 8', # jump over second for loop ... 'return', # return out of function ... 'retval', # display return value ... 'continue', ... ]): ... test_function() > <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function() -> ret = test_function_2('baz') (Pdb) step --Call-- > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2() -> def test_function_2(foo, bar='default'): (Pdb) args foo = 'baz' bar = 'default' (Pdb) list 1 -> def test_function_2(foo, bar='default'): 2 print(foo) 3 for i in range(5): 4 print(i) 5 print(bar) 6 for i in range(10): 7 never_executed 8 print('after for') 9 print('...') 10 return foo.upper() [EOF] (Pdb) bt ... <doctest test.test_pdb.test_pdb_basic_commands[2]>(18)<module>() -> test_function() <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function() -> ret = test_function_2('baz') > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2() -> def test_function_2(foo, bar='default'): (Pdb) up > <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function() -> ret = test_function_2('baz') (Pdb) down > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2() -> def test_function_2(foo, bar='default'): (Pdb) next > <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2() -> print(foo) (Pdb) next baz > <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2() -> for i in range(5): (Pdb) step > <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2() -> print(i) (Pdb) until 0 1 2 3 4 > <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2() -> print(bar) (Pdb) next default > <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2() -> for i in range(10): (Pdb) jump 8 > <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2() -> print('after for') (Pdb) return after for ... 
--Return-- > <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ' -> return foo.upper() (Pdb) retval 'BAZ' (Pdb) continue BAZ """ def test_pdb_breakpoint_commands(): """Test basic commands related to breakpoints. >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... print(1) ... print(2) ... print(3) ... print(4) First, need to clear bdb state that might be left over from previous tests. Otherwise, the new breakpoints might get assigned different numbers. >>> from bdb import Breakpoint >>> Breakpoint.next = 1 >>> Breakpoint.bplist = {} >>> Breakpoint.bpbynumber = [None] Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because the breakpoint list outputs a tab for the "stop only" and "ignore next" lines, which we don't want to put in here. >>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE ... 'break 3', ... 'disable 1', ... 'ignore 1 10', ... 'condition 1 1 < 2', ... 'break 4', ... 'break 4', ... 'break', ... 'clear 3', ... 'break', ... 'condition 1', ... 'enable 1', ... 'clear 1', ... 'commands 2', ... 'p "42"', ... 'print("42", 7*6)', # Issue 18764 (not about breakpoints) ... 'end', ... 'continue', # will stop at breakpoint 2 (line 4) ... 'clear', # clear all! ... 'y', ... 'tbreak 5', ... 'continue', # will stop at temporary breakpoint ... 'break', # make sure breakpoint is gone ... 'continue', ... ]): ... test_function() > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function() -> print(1) (Pdb) break 3 Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3 (Pdb) disable 1 Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3 (Pdb) ignore 1 10 Will ignore next 10 crossings of breakpoint 1. (Pdb) condition 1 1 < 2 New condition set for breakpoint 1. 
(Pdb) break 4 Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4 (Pdb) break 4 Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4 (Pdb) break Num Type Disp Enb Where 1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3 stop only if 1 < 2 ignore next 10 hits 2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4 3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4 (Pdb) clear 3 Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4 (Pdb) break Num Type Disp Enb Where 1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3 stop only if 1 < 2 ignore next 10 hits 2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4 (Pdb) condition 1 Breakpoint 1 is now unconditional. (Pdb) enable 1 Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3 (Pdb) clear 1 Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3 (Pdb) commands 2 (com) p "42" (com) print("42", 7*6) (com) end (Pdb) continue 1 '42' 42 42 > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function() -> print(2) (Pdb) clear Clear all breaks? y Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4 (Pdb) tbreak 5 Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5 (Pdb) continue 2 Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5 > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function() -> print(3) (Pdb) break (Pdb) continue 3 4 """ def do_nothing(): pass def do_something(): print(42) def test_list_commands(): """Test the list and source commands of pdb. >>> def test_function_2(foo): ... import test.test_pdb ... test.test_pdb.do_nothing() ... 'some...' ... 'more...' ... 'code...' ... 'to...' ... 'make...' ... 'a...' ... 'long...' ... 
'listing...' ... 'useful...' ... '...' ... '...' ... return foo >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... ret = test_function_2('baz') >>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE ... 'list', # list first function ... 'step', # step into second function ... 'list', # list second function ... 'list', # continue listing to EOF ... 'list 1,3', # list specific lines ... 'list x', # invalid argument ... 'next', # step to import ... 'next', # step over import ... 'step', # step into do_nothing ... 'longlist', # list all lines ... 'source do_something', # list all lines of function ... 'source fooxxx', # something that doesn't exit ... 'continue', ... ]): ... test_function() > <doctest test.test_pdb.test_list_commands[1]>(3)test_function() -> ret = test_function_2('baz') (Pdb) list 1 def test_function(): 2 import pdb; pdb.Pdb(nosigint=True).set_trace() 3 -> ret = test_function_2('baz') [EOF] (Pdb) step --Call-- > <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2() -> def test_function_2(foo): (Pdb) list 1 -> def test_function_2(foo): 2 import test.test_pdb 3 test.test_pdb.do_nothing() 4 'some...' 5 'more...' 6 'code...' 7 'to...' 8 'make...' 9 'a...' 10 'long...' 11 'listing...' (Pdb) list 12 'useful...' 13 '...' 14 '...' 15 return foo [EOF] (Pdb) list 1,3 1 -> def test_function_2(foo): 2 import test.test_pdb 3 test.test_pdb.do_nothing() (Pdb) list x *** ... (Pdb) next > <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2() -> import test.test_pdb (Pdb) next > <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2() -> test.test_pdb.do_nothing() (Pdb) step --Call-- > ...test_pdb.py(...)do_nothing() -> def do_nothing(): (Pdb) longlist ... -> def do_nothing(): ... pass (Pdb) source do_something ... def do_something(): ... print(42) (Pdb) source fooxxx *** ... (Pdb) continue """ def test_post_mortem(): """Test post mortem traceback debugging. >>> def test_function_2(): ... 
try: ... 1/0 ... finally: ... print('Exception!') >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... test_function_2() ... print('Not reached.') >>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE ... 'next', # step over exception-raising call ... 'bt', # get a backtrace ... 'list', # list code of test_function() ... 'down', # step into test_function_2() ... 'list', # list code of test_function_2() ... 'continue', ... ]): ... try: ... test_function() ... except ZeroDivisionError: ... print('Correctly reraised.') > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function() -> test_function_2() (Pdb) next Exception! ZeroDivisionError: division by zero > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function() -> test_function_2() (Pdb) bt ... <doctest test.test_pdb.test_post_mortem[2]>(10)<module>() -> test_function() > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function() -> test_function_2() <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2() -> 1/0 (Pdb) list 1 def test_function(): 2 import pdb; pdb.Pdb(nosigint=True).set_trace() 3 -> test_function_2() 4 print('Not reached.') [EOF] (Pdb) down > <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2() -> 1/0 (Pdb) list 1 def test_function_2(): 2 try: 3 >> 1/0 4 finally: 5 -> print('Exception!') [EOF] (Pdb) continue Correctly reraised. """ def test_pdb_skip_modules(): """This illustrates the simple case of module skipping. >>> def skip_module(): ... import string ... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True).set_trace() ... string.capwords('FOO') >>> with PdbTestInput([ ... 'step', ... 'continue', ... ]): ... 
skip_module() > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module() -> string.capwords('FOO') (Pdb) step --Return-- > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None -> string.capwords('FOO') (Pdb) continue """ # Module for testing skipping of module that makes a callback mod = types.ModuleType('module_to_skip') exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__) def test_pdb_skip_modules_with_callback(): """This illustrates skipping of modules that call into other code. >>> def skip_module(): ... def callback(): ... return None ... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True).set_trace() ... mod.foo_pony(callback) >>> with PdbTestInput([ ... 'step', ... 'step', ... 'step', ... 'step', ... 'step', ... 'continue', ... ]): ... skip_module() ... pass # provides something to "step" to > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module() -> mod.foo_pony(callback) (Pdb) step --Call-- > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback() -> def callback(): (Pdb) step > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback() -> return None (Pdb) step --Return-- > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None -> return None (Pdb) step --Return-- > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None -> mod.foo_pony(callback) (Pdb) step > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>() -> pass # provides something to "step" to (Pdb) continue """ def test_pdb_continue_in_bottomframe(): """Test that "continue" and "next" work properly in bottom frame (issue #5294). >>> def test_function(): ... import pdb, sys; inst = pdb.Pdb(nosigint=True) ... inst.set_trace() ... inst.botframe = sys._getframe() # hackery to get the right botframe ... print(1) ... print(2) ... print(3) ... print(4) >>> with PdbTestInput([ # doctest: +ELLIPSIS ... 
'next', ... 'break 7', ... 'continue', ... 'next', ... 'continue', ... 'continue', ... ]): ... test_function() > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function() -> inst.botframe = sys._getframe() # hackery to get the right botframe (Pdb) next > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function() -> print(1) (Pdb) break 7 Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7 (Pdb) continue 1 2 > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function() -> print(3) (Pdb) next 3 > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function() -> print(4) (Pdb) continue 4 """ def pdb_invoke(method, arg): """Run pdb.method(arg).""" import pdb getattr(pdb.Pdb(nosigint=True), method)(arg) def test_pdb_run_with_incorrect_argument(): """Testing run and runeval with incorrect first argument. >>> pti = PdbTestInput(['continue',]) >>> with pti: ... pdb_invoke('run', lambda x: x) Traceback (most recent call last): TypeError: exec() arg 1 must be a string, bytes or code object >>> with pti: ... pdb_invoke('runeval', lambda x: x) Traceback (most recent call last): TypeError: eval() arg 1 must be a string, bytes or code object """ def test_pdb_run_with_code_object(): """Testing run and runeval with code object as a first argument. >>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS ... pdb_invoke('run', compile('x=1', '<string>', 'exec')) > <string>(1)<module>()... (Pdb) step --Return-- > <string>(1)<module>()->None (Pdb) x 1 (Pdb) continue >>> with PdbTestInput(['x', 'continue']): ... x=0 ... pdb_invoke('runeval', compile('x+1', '<string>', 'eval')) > <string>(1)<module>()->None (Pdb) x 1 (Pdb) continue """ def test_next_until_return_at_return_event(): """Test that pdb stops after a next/until/return issued at a return debug event. >>> def test_function_2(): ... x = 1 ... x = 2 >>> def test_function(): ... 
import pdb; pdb.Pdb(nosigint=True).set_trace() ... test_function_2() ... test_function_2() ... test_function_2() ... end = 1 >>> from bdb import Breakpoint >>> Breakpoint.next = 1 >>> with PdbTestInput(['break test_function_2', ... 'continue', ... 'return', ... 'next', ... 'continue', ... 'return', ... 'until', ... 'continue', ... 'return', ... 'return', ... 'continue']): ... test_function() > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function() -> test_function_2() (Pdb) break test_function_2 Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1 (Pdb) continue > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2() -> x = 1 (Pdb) return --Return-- > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None -> x = 2 (Pdb) next > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function() -> test_function_2() (Pdb) continue > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2() -> x = 1 (Pdb) return --Return-- > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None -> x = 2 (Pdb) until > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function() -> test_function_2() (Pdb) continue > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2() -> x = 1 (Pdb) return --Return-- > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None -> x = 2 (Pdb) return > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function() -> end = 1 (Pdb) continue """ def test_pdb_next_command_for_generator(): """Testing skip unwindng stack on yield for generators for "next" command >>> def test_gen(): ... yield 0 ... return 1 ... yield 2 >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... it = test_gen() ... try: ... 
assert next(it) == 0 ... next(it) ... except StopIteration as ex: ... assert ex.value == 1 ... print("finished") >>> with PdbTestInput(['step', ... 'step', ... 'step', ... 'next', ... 'next', ... 'step', ... 'step', ... 'continue']): ... test_function() > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function() -> it = test_gen() (Pdb) step > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function() -> try: (Pdb) step > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function() -> assert next(it) == 0 (Pdb) step --Call-- > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen() -> def test_gen(): (Pdb) next > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen() -> yield 0 (Pdb) next > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen() -> return 1 (Pdb) step --Return-- > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1 -> return 1 (Pdb) step StopIteration: 1 > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(6)test_function() -> next(it) (Pdb) continue finished """ def test_pdb_return_command_for_generator(): """Testing no unwindng stack on yield for generators for "return" command >>> def test_gen(): ... yield 0 ... return 1 ... yield 2 >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... it = test_gen() ... try: ... assert next(it) == 0 ... next(it) ... except StopIteration as ex: ... assert ex.value == 1 ... print("finished") >>> with PdbTestInput(['step', ... 'step', ... 'step', ... 'return', ... 'step', ... 'step', ... 'continue']): ... 
test_function() > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function() -> it = test_gen() (Pdb) step > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function() -> try: (Pdb) step > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function() -> assert next(it) == 0 (Pdb) step --Call-- > <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen() -> def test_gen(): (Pdb) return StopIteration: 1 > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(6)test_function() -> next(it) (Pdb) step > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function() -> except StopIteration as ex: (Pdb) step > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function() -> assert ex.value == 1 (Pdb) continue finished """ def test_pdb_until_command_for_generator(): """Testing no unwindng stack on yield for generators for "until" command if target breakpoing is not reached >>> def test_gen(): ... yield 0 ... yield 1 ... yield 2 >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... for i in test_gen(): ... print(i) ... print("finished") >>> with PdbTestInput(['step', ... 'until 4', ... 'step', ... 'step', ... 'continue']): ... 
test_function() > <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function() -> for i in test_gen(): (Pdb) step --Call-- > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen() -> def test_gen(): (Pdb) until 4 0 1 > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen() -> yield 2 (Pdb) step --Return-- > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2 -> yield 2 (Pdb) step > <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function() -> print(i) (Pdb) continue 2 finished """ def test_pdb_next_command_in_generator_for_loop(): """The next command on returning from a generator controled by a for loop. >>> def test_gen(): ... yield 0 ... return 1 >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... for i in test_gen(): ... print('value', i) ... x = 123 >>> with PdbTestInput(['break test_gen', ... 'continue', ... 'next', ... 'next', ... 'next', ... 'continue']): ... test_function() > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function() -> for i in test_gen(): (Pdb) break test_gen Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1 (Pdb) continue > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen() -> yield 0 (Pdb) next value 0 > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen() -> return 1 (Pdb) next Internal StopIteration: 1 > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function() -> for i in test_gen(): (Pdb) next > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function() -> x = 123 (Pdb) continue """ def test_pdb_next_command_subiterator(): """The next command in a generator with a subiterator. >>> def test_subgenerator(): ... yield 0 ... return 1 >>> def test_gen(): ... x = yield from test_subgenerator() ... 
return x >>> def test_function(): ... import pdb; pdb.Pdb(nosigint=True).set_trace() ... for i in test_gen(): ... print('value', i) ... x = 123 >>> with PdbTestInput(['step', ... 'step', ... 'next', ... 'next', ... 'next', ... 'continue']): ... test_function() > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function() -> for i in test_gen(): (Pdb) step --Call-- > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen() -> def test_gen(): (Pdb) step > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen() -> x = yield from test_subgenerator() (Pdb) next value 0 > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen() -> return x (Pdb) next Internal StopIteration: 1 > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function() -> for i in test_gen(): (Pdb) next > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function() -> x = 123 (Pdb) continue """ class PdbTestCase(unittest.TestCase): def run_pdb(self, script, commands): """Run 'script' lines with pdb and the pdb 'commands'.""" filename = 'main.py' with open(filename, 'w') as f: f.write(textwrap.dedent(script)) self.addCleanup(support.unlink, filename) cmd = [sys.executable, '-m', 'pdb', filename] stdout = stderr = None with subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, ) as proc: stdout, stderr = proc.communicate(str.encode(commands)) stdout = stdout and bytes.decode(stdout) stderr = stderr and bytes.decode(stderr) return stdout, stderr def _assert_find_function(self, file_content, func_name, expected): file_content = textwrap.dedent(file_content) with open(support.TESTFN, 'w') as f: f.write(file_content) expected = None if not expected else ( expected[0], support.TESTFN, expected[1]) self.assertEqual( expected, pdb.find_function(func_name, support.TESTFN)) def test_find_function_empty_file(self): self._assert_find_function('', 'foo', None) def 
test_find_function_found(self): self._assert_find_function( """\ def foo(): pass def bar(): pass def quux(): pass """, 'bar', ('bar', 4), ) def test_issue7964(self): # open the file as binary so we can force \r\n newline with open(support.TESTFN, 'wb') as f: f.write(b'print("testing my pdb")\r\n') cmd = [sys.executable, '-m', 'pdb', support.TESTFN] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, ) self.addCleanup(proc.stdout.close) stdout, stderr = proc.communicate(b'quit\n') self.assertNotIn(b'SyntaxError', stdout, "Got a syntax error running test script under PDB") def test_issue13183(self): script = """ from bar import bar def foo(): bar() def nope(): pass def foobar(): foo() nope() foobar() """ commands = """ from bar import bar break bar continue step step quit """ bar = """ def bar(): pass """ with open('bar.py', 'w') as f: f.write(textwrap.dedent(bar)) self.addCleanup(support.unlink, 'bar.py') stdout, stderr = self.run_pdb(script, commands) self.assertTrue( any('main.py(5)foo()->None' in l for l in stdout.splitlines()), 'Fail to step into the caller after a return') def test_issue13210(self): # invoking "continue" on a non-main thread triggered an exception # inside signal.signal # raises SkipTest if python was built without threads support.import_module('threading') with open(support.TESTFN, 'wb') as f: f.write(textwrap.dedent(""" import threading import pdb def start_pdb(): pdb.Pdb().set_trace() x = 1 y = 1 t = threading.Thread(target=start_pdb) t.start()""").encode('ascii')) cmd = [sys.executable, '-u', support.TESTFN] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, ) self.addCleanup(proc.stdout.close) stdout, stderr = proc.communicate(b'cont\n') self.assertNotIn('Error', stdout.decode(), "Got an error running test script under PDB") def tearDown(self): support.unlink(support.TESTFN) def load_tests(*args): from test import test_pdb suites = 
[unittest.makeSuite(PdbTestCase), doctest.DocTestSuite(test_pdb)] return unittest.TestSuite(suites) if __name__ == '__main__': unittest.main()
lgpl-3.0
CiscoSystems/jujucharm-n1k
charms/precise/ceph-radosgw/hooks/utils.py
1
4851
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Paul Collins <paul.collins@canonical.com>
#

import os
import subprocess
import socket
import sys
import re


def do_hooks(hooks):
    """Dispatch to the hook function named after the invoking script.

    `hooks` maps hook names (e.g. 'install') to callables; unknown hooks
    are logged rather than raising.
    """
    hook = os.path.basename(sys.argv[0])

    try:
        hook_func = hooks[hook]
    except KeyError:
        juju_log('INFO',
                 "This charm doesn't know how to handle '{}'.".format(hook))
    else:
        hook_func()


def install(*pkgs):
    """Install the given packages non-interactively with apt-get."""
    cmd = [
        'apt-get',
        '-y',
        'install'
        ] + list(pkgs)
    subprocess.check_call(cmd)

TEMPLATES_DIR = 'templates'

# Deliberate import-time side effect: pull in the charm's Python
# dependencies via apt on first import if they are missing.
try:
    import jinja2
except ImportError:
    install('python-jinja2')
    import jinja2

try:
    import dns.resolver
except ImportError:
    install('python-dnspython')
    import dns.resolver


def render_template(template_name, context, template_dir=TEMPLATES_DIR):
    """Render a Jinja2 template from `template_dir` with `context`."""
    templates = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir)
        )
    template = templates.get_template(template_name)
    return template.render(context)

CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""


def configure_source():
    """Configure the apt source described by charm config, then update.

    Supports 'ppa:', 'cloud:<pocket>' and plain 'http:' deb lines; for
    http sources an optional 'key' config value is imported via apt-key.
    """
    source = str(config_get('source'))
    if not source:
        return
    if source.startswith('ppa:'):
        cmd = [
            'add-apt-repository',
            source
            ]
        subprocess.check_call(cmd)
    if source.startswith('cloud:'):
        install('ubuntu-cloud-keyring')
        pocket = source.split(':')[1]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(pocket))
    if source.startswith('http:'):
        with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
            apt.write("deb " + source + "\n")
        key = config_get('key')
        if key:
            # Fix: '--keyserver' and its value must be separate argv
            # elements; the original passed the single string
            # '--keyserver keyserver.ubuntu.com', which gpg receives as
            # one (invalid) option token.
            cmd = [
                'apt-key',
                'adv', '--keyserver', 'keyserver.ubuntu.com',
                '--recv-keys', key
                ]
            subprocess.check_call(cmd)
    cmd = [
        'apt-get',
        'update'
        ]
    subprocess.check_call(cmd)


def enable_pocket(pocket):
    """Uncomment '# deb' lines for `pocket` in /etc/apt/sources.list."""
    apt_sources = "/etc/apt/sources.list"
    with open(apt_sources, "r") as sources:
        lines = sources.readlines()
    with open(apt_sources, "w") as sources:
        for line in lines:
            if pocket in line:
                sources.write(re.sub('^# deb', 'deb', line))
            else:
                sources.write(line)

# Protocols
TCP = 'TCP'
UDP = 'UDP'


def expose(port, protocol='TCP'):
    """Open `port`/`protocol` on this unit via the open-port hook tool."""
    cmd = [
        'open-port',
        '{}/{}'.format(port, protocol)
        ]
    subprocess.check_call(cmd)


def juju_log(severity, message):
    """Log `message` at `severity` through the juju-log hook tool."""
    cmd = [
        'juju-log',
        '--log-level', severity,
        message
        ]
    subprocess.check_call(cmd)


def relation_ids(relation):
    """Return the relation ids for `relation` as a list of strings."""
    cmd = [
        'relation-ids',
        relation
        ]
    return subprocess.check_output(cmd).split()  # IGNORE:E1103


def relation_list(rid):
    """Return the units participating in relation `rid`."""
    cmd = [
        'relation-list',
        '-r', rid,
        ]
    return subprocess.check_output(cmd).split()  # IGNORE:E1103


def relation_get(attribute, unit=None, rid=None):
    """Read `attribute` from a relation; None when unset/empty."""
    cmd = [
        'relation-get',
        ]
    if rid:
        cmd.append('-r')
        cmd.append(rid)
    cmd.append(attribute)
    if unit:
        cmd.append(unit)
    value = str(subprocess.check_output(cmd)).strip()
    if value == "":
        return None
    else:
        return value


def relation_set(**kwargs):
    """Set relation data; the special kwarg 'rid' selects the relation."""
    cmd = [
        'relation-set'
        ]
    args = []
    for k, v in kwargs.items():
        if k == 'rid' and v:
            cmd.append('-r')
            cmd.append(v)
        elif k != 'rid':
            args.append('{}={}'.format(k, v))
    cmd += args
    subprocess.check_call(cmd)


def unit_get(attribute):
    """Read a unit attribute (e.g. 'private-address'); None when empty."""
    cmd = [
        'unit-get',
        attribute
        ]
    value = str(subprocess.check_output(cmd)).strip()
    if value == "":
        return None
    else:
        return value


def config_get(attribute):
    """Read a charm config value; None when unset/empty."""
    cmd = [
        'config-get',
        attribute
        ]
    value = str(subprocess.check_output(cmd)).strip()
    if value == "":
        return None
    else:
        return value


def get_unit_hostname():
    """Return this machine's hostname."""
    return socket.gethostname()


def get_host_ip(hostname=None):
    """Resolve `hostname` (default: this unit's private-address) to IPv4.

    Fix: the original declared ``hostname=unit_get('private-address')``
    in the def line, so the default was computed once at *import* time —
    shelling out to unit-get on every module import and freezing the
    address. It is now resolved lazily per call; callers passing an
    explicit hostname are unaffected.
    """
    if hostname is None:
        hostname = unit_get('private-address')
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(hostname)
        return hostname
    except socket.error:
        # This may throw an NXDOMAIN exception; in which case
        # things are badly broken so just let it kill the hook
        answers = dns.resolver.query(hostname, 'A')
        if answers:
            return answers[0].address
apache-2.0
Intel-tensorflow/tensorflow
tensorflow/python/distribute/input_ops.py
12
4371
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input-pipeline utilities for Distribution strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import traverse
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops

# pylint: disable=protected-access


def auto_shard_dataset(dataset, num_shards, index, num_replicas_in_sync=None):
  """Shard the input pipeline by sharding the underlying list of files.

  Args:
    dataset: A `tf.data.Dataset` instance, typically the result of a bunch of
      dataset transformations.
    num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
      shards operating in parallel. Same usage as in `tf.data.Dataset.shard`.
    index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
      Same usage as in `tf.data.Dataset.shard`.
    num_replicas_in_sync: An integer representing the total number of replicas
      across all workers. This is used in the rewrite when sharding by data.

  Returns:
    A modified `Dataset` obtained by updating the pipeline sharded by the
    files. The input dataset will be returned if we cannot automatically
    determine a good way to shard the input dataset.
  """
  # Rewriting only happens when auto-sharding is not explicitly disabled via
  # the dataset's options; otherwise the dataset is passed through untouched.
  if (dataset.options().experimental_distribute.auto_shard_policy !=
      AutoShardPolicy.OFF):
    if num_replicas_in_sync is None:
      num_replicas_in_sync = 1
    # V1 and V2 datasets need distinct (internal) auto-shard wrappers.
    if isinstance(dataset, dataset_ops.DatasetV1):
      return distribute._AutoShardDatasetV1(dataset, num_shards, index,
                                            num_replicas_in_sync)
    else:
      return distribute._AutoShardDataset(dataset, num_shards, index,
                                          num_replicas_in_sync)
  else:
    return dataset


def _clone_dataset(dataset):
  """Returns a cloned version of `dataset`."""
  # Clone the chain of ops feeding the dataset's variant tensor, then wrap
  # the cloned variant tensor in a new dataset with the same element spec.
  variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(dataset)
  remap_dict = _clone_helper(dataset._variant_tensor.op, variant_tensor_ops)
  new_variant_tensor = remap_dict[dataset._variant_tensor.op].outputs[0]
  return dataset_ops._VariantDataset(new_variant_tensor,
                                     dataset.element_spec)


def _get_op_def(op):
  """Returns the registered `OpDef` for `op` (falls back to the registry)."""
  return op.op_def or op_def_registry.get(op.type)


def _clone_helper(op_to_clone, variant_tensor_ops):
  """Helper method that recursively clones `op_to_clone`.

  Args:
    op_to_clone: The op we want to clone.
    variant_tensor_ops: A list of ops that we have to clone along the way.

  Returns:
    A dictionary mapping old_ops to new_ops created. Includes op_to_clone
    as a key.
  """
  remap_dict = {}
  # First pass: clone (depth-first) every input op that itself belongs to
  # the variant-tensor chain, accumulating the old-op -> new-op mapping.
  for input_tensor in op_to_clone.inputs:
    input_tensor_op = input_tensor.op
    if input_tensor_op in variant_tensor_ops:
      recursive_map = _clone_helper(input_tensor_op, variant_tensor_ops)
      remap_dict.update(recursive_map)
  # Second pass: build the input list for the cloned op, substituting
  # cloned producers where available and reusing original tensors otherwise.
  inputs_list = []
  for input_tensor in op_to_clone.inputs:
    input_tensor_op = input_tensor.op
    if input_tensor_op in remap_dict:
      remapped_input = remap_dict[input_tensor_op].outputs[0]
      inputs_list.append(remapped_input)
    else:
      inputs_list.append(input_tensor_op.outputs[input_tensor.value_index])
  g = ops.get_default_graph()
  # Re-create the op in the default graph with identical type, dtypes,
  # attrs and op_def so the clone is behaviorally equivalent.
  new_op = g.create_op(
      op_to_clone.type,
      inputs_list, [o.dtype for o in op_to_clone.outputs],
      name=op_to_clone.name,
      attrs=op_to_clone.node_def.attr,
      op_def=_get_op_def(op_to_clone))
  remap_dict[op_to_clone] = new_op
  return remap_dict
apache-2.0
kursion/twitcheat
proxy/proxy.py
1
2008
import urllib.request import re import time import random import socket socket.setdefaulttimeout(3) PAGES = 20 def saveProxy(proxy): f = open("proxies.txt", "a") f.write(proxy+"\n") f.close() def testProxy(proxy): try: urllib.request.URLopener( {'http': 'http://'+proxy+"/"}).open("http://www.google.com") except IOError: try: urllib.request.URLopener( {'https': 'https://'+proxy+"/"}).open("http://www.google.com") except IOError: print("Connection error! (Check proxy)") return None else: print("All was fine (HTTPS) ") else: print("All was fine (HTTP)") return proxy def getProxies(page): # http://nntime.com/proxy-updated-01.htm page = str(page).zfill(2) url = "http://nntime.com/proxy-updated-{:}.htm".format(page) print("Getting:", url) queryToken = urllib.request.urlopen(url) response = queryToken.read().decode("latin-1") parseCode = r"((?:[a-z]=[0-9];)+)" matchesCode = re.findall(parseCode, response) print("Code", matchesCode) codes = {} for code in matchesCode[0].split(";")[:-1]: v = code.split("=") codes[v[0]] = v[1] print(codes) parseTable = r"<td>(.*?)</td>" matchesRow = re.findall(parseTable, response) # print(matchesRow) for row in matchesRow: m = re.findall(r'((?:[0-9]{1,3}\.){3}[0-9]{1,3})', row) if len(m) == 0: continue ip = m[0] pEnc = re.findall(r'document\.write\(":"((?:\+[a-z]){0,4})', row) portTmp = [] if len(pEnc) > 0: pDec = pEnc[0].split("+")[1:] for c in pDec: portTmp.append(codes[c]) else: continue port = "".join(portTmp) proxy = ip+":"+port print("Testing", proxy) p = testProxy(proxy) if p: saveProxy(p) for page in range(1, PAGES): getProxies(page)
mit
Raag079/self-driving-car
Term03-PathPlanning-SemanticSegmentation-and-Systems/P3-Capstone-Project/ros/src/styx/server.py
2
1338
#!/usr/bin/env python
# Socket.IO bridge between the driving simulator and the ROS stack:
# forwards simulator events to ROS topics and queues outgoing commands.

import socketio
import eventlet
import eventlet.wsgi
import time
from flask import Flask, render_template

from bridge import Bridge
from conf import conf

sio = socketio.Server()
app = Flask(__name__)
bridge = Bridge(conf)
# Outgoing (topic, data) messages, flushed on the next telemetry tick.
msgs = []


@sio.on('connect')
def connect(sid, environ):
    """Simulator connected: enable drive-by-wire."""
    print("connect ", sid)
    bridge.publish_dbw_status(True)


def send(topic, data):
    """Queue an outgoing message for the simulator.

    Messages are emitted from the telemetry handler rather than here.
    """
    # Removed an unused local ('s = 1') and dead commented-out emit code.
    msgs.append((topic, data))

bridge.register_server(send)


@sio.on('telemetry')
def telemetry(sid, data):
    """Publish odometry, then flush all messages queued so far."""
    bridge.publish_odometry(data)
    for i in range(len(msgs)):
        topic, data = msgs.pop(0)
        sio.emit(topic, data=data, skip_sid=True)


@sio.on('control')
def control(sid, data):
    bridge.publish_controls(data)


@sio.on('obstacle')
def obstacle(sid, data):
    bridge.publish_obstacles(data)


@sio.on('lidar')
def lidar(sid, data):
    # BUG FIX: this handler was also named 'obstacle', shadowing the one
    # above at module level; renamed to match its event.
    bridge.publish_lidar(data)


@sio.on('trafficlights')
def trafficlights(sid, data):
    bridge.publish_traffic(data)


@sio.on('image')
def image(sid, data):
    bridge.publish_camera(data)


if __name__ == '__main__':
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)

    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
mit
gtfierro/backchannel
readtopo.py
1
3711
import yaml import networkx as nx import matplotlib.pyplot as plt from collections import defaultdict import sys class Topo: def __init__(self, yaml_string): self.raw = yaml.load(yaml_string) self.G = nx.Graph() self.hops = defaultdict(list) self.hops_edges = defaultdict(list) if 'root' not in self.raw.iterkeys(): print 'Graph has no root!' sys.exit(1) self.root = str(self.raw.pop('root')) self.G.add_node(self.root) for node in self.raw.iterkeys(): self.G.add_node(str(node)) for node, edges in self.raw.iteritems(): for edge in edges: self.G.add_edge(str(node), str(edge)) edges = list(nx.traversal.bfs_edges(self.G, self.root)) for edge in edges: if edge[0] == self.root: # edge[1] is single-hop self.hops[1].append(edge[1]) self.hops_edges[1].append(edge) continue for i in range(1, len(edges)+1): # worst case scenario if edge[0] in self.hops[i]: self.hops[i+1].append(edge[1]) self.hops_edges[1+1].append(edge) continue print self.hops def draw(self): # node attrs node_size=1600 # 1-hop, 2-hop etc root_color = 'red' node_tiers = ['blue','green','yellow'] node_color='blue' node_alpha=0.3 node_text_size=12 # edge attrs edge_color='black' edge_alpha=0.3 edge_tickness=1 edge_text_pos=0.3 f = plt.figure() graph_pos = nx.shell_layout(self.G) # draw graph nx.draw_networkx_nodes(self.G, graph_pos, nodelist=[self.root], alpha=node_alpha, node_color=root_color) for hop, nodes in self.hops.iteritems(): if len(nodes) == 0: continue print hop nx.draw_networkx_nodes(self.G, graph_pos, nodelist=nodes, alpha=node_alpha, node_color=node_tiers[hop-1]) nx.draw_networkx_edges(self.G,graph_pos, edgelist=self.hops_edges[hop], width=edge_tickness, alpha=edge_alpha, edge_color=edge_color) nx.draw_networkx_labels(self.G, graph_pos,font_size=node_text_size) #print "Drawing..." 
#f.savefig("graph.png") #plt.show() def generate_ignore_block(self, node, ignored): def _ignore_neighbor(neighbor, OID="::212:6d02:0:"): return 'storm.os.ignoreNeighbor("{0}{1}")'.format(OID, neighbor) code = '' if len(ignored) > 0: code += 'if (storm.os.nodeid() == {0}) then\n\t'.format(int(node, 16)) code += '\n\t'.join(map(_ignore_neighbor, ignored)) code += '\nend' return code def to_code(self): edges = list(nx.traversal.bfs_edges(self.G, self.root)) node_set = set(self.G.nodes()) ignoreblocks = [] for node in self.G.nodes(): allowed_neighbors = set(self.G[node].keys()) allowed_neighbors.add(node) # add yourself ignore_these = node_set.difference(allowed_neighbors) ignoreblocks.append(self.generate_ignore_block(node, ignore_these)) framework = """sh = require "stormsh" sh.start() {0} cord.enter_loop() """ code = framework.format('\n'.join(ignoreblocks)) with open('./main.lua', 'w') as f: f.write(code) if __name__ == '__main__': filename = sys.argv[1] print 'Loading topology from {0}'.format(filename) topo = Topo(open(filename).read()) topo.draw() topo.to_code()
apache-2.0
iut-ibk/CityDrain3
data/scripts/testpycd3.py
5
1122
# Smoke-test script for the pycd3 Python bindings (CityDrain3).
# NOTE: Python 2 source; requires the pycd3 extension module.
import pycd3


class TestNode(pycd3.Node):
    # Minimal Node subclass exercising attribute registration and the
    # simulation callbacks.
    def __init__(self):
        pycd3.Node.__init__(self)
        # Assorted typed attributes; presumably picked up as node
        # parameters by addParameters() below -- TODO confirm against
        # the pycd3.Node API.
        self.x = 10
        self.y = 11
        self.s = "asdf"
        self.d = 1.0
        self.n = 10
        self.addParameters()

    def f(self, time, dt):
        # Per-timestep callback; returns the timestep consumed.
        print "f"
        print self.x
        return dt

    def init(self, start, end, dt):
        # Simulation initialisation callback.
        print "init"
        print self.x
        pass


pycd3.init()

# Define a flow layout: one flow unit plus two concentration units.
fd = pycd3.FlowDefinition()
fd['Q'] = pycd3.CalculationUnit.flow
fd['C0'] = pycd3.CalculationUnit.concentration
fd['C1'] = pycd3.CalculationUnit.concentration
pycd3.Flow.define(fd)

# Exercise node registration.
n = TestNode()
pycd3.test_node(n)

# Exercise the Flow API: indexing, named access, mutation and reset.
f = pycd3.Flow()
print "len of of f == %s" % (len(f))
f[0] = 3.142
print "getith %s" % f.getIth(pycd3.CalculationUnit.flow, 0)
print f[0]
print "the flows names are: %s" % pycd3.Flow.names()
print "the value of Q is: %s" % f.getValue('Q')
f.setValue('Q', 42.0)
print "the value of Q is: %s" % f.getValue('Q')
f.clear()
print "the value of Q is: %s" % f.getValue('Q')
for n in pycd3.Flow.names():
    print "Unit of %s is: %s" % (n, f.getUnit(n))
f.dump()
gpl-2.0
garbled1/ansible
lib/ansible/plugins/terminal/vyos.py
191
1700
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import re from ansible.plugins.terminal import TerminalBase from ansible.errors import AnsibleConnectionFailure class TerminalModule(TerminalBase): terminal_stdout_re = [ re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), re.compile(br"\@[\w\-\.]+:\S+?[>#\$] ?$") ] terminal_stderr_re = [ re.compile(br"\n\s*Invalid command:"), re.compile(br"\nCommit failed"), re.compile(br"\n\s+Set failed"), ] terminal_length = os.getenv('ANSIBLE_VYOS_TERMINAL_LENGTH', 10000) def on_open_shell(self): try: for cmd in (b'set terminal length 0', b'set terminal width 512'): self._exec_cli_command(cmd) self._exec_cli_command(b'set terminal length %d' % self.terminal_length) except AnsibleConnectionFailure: raise AnsibleConnectionFailure('unable to set terminal parameters')
gpl-3.0
FrodeSolheim/fs-uae-launcher
fsgamesys/platforms/a2600/messa2600driver.py
1
1033
from fsgamesys.drivers.messdriver import MessDriver


class MessA2600Driver(MessDriver):
    # Atari 2600 driver built on the MESS emulator backend.

    # Input device description shared by both controller ports.
    CONTROLLER = {
        "type": "controller",
        "description": "Controller",
        "mapping_name": "atari2600",
    }

    PORTS = [
        {"description": "1st Controller", "types": [CONTROLLER]},
        {"description": "2nd Controller", "types": [CONTROLLER]},
    ]

    def mess_configure(self):
        # The 2600 only needs its cartridge slot configured.
        self.mess_configure_cartridge()

    def mess_input_mapping(self, port):
        # Map abstract controller events to MESS input identifiers.
        # SELECT/RESET target the console switch bank (tag ":SWB");
        # '#' in the joystick/button entries is presumably substituted
        # with the port number by the MessDriver base class -- TODO confirm.
        return {
            "SELECT": 'tag=":SWB" type="OTHER" mask="2" defvalue="2"',
            "RESET": 'tag=":SWB" type="OTHER" mask="1" defvalue="1"',
            "UP": "P#_JOYSTICK_UP",
            "DOWN": "P#_JOYSTICK_DOWN",
            "LEFT": "P#_JOYSTICK_LEFT",
            "RIGHT": "P#_JOYSTICK_RIGHT",
            "1": "P#_BUTTON1",
            "2": "P#_BUTTON2",
            "3": "P#_BUTTON3",
        }

    def mess_romset(self):
        # Select the PAL or NTSC machine definition; no ROM overrides.
        if self.is_pal():
            return "a2600p", {}
        else:
            return "a2600", {}
gpl-2.0
lehinevych/cfme_tests
cfme/tests/control/test_basic.py
1
17797
# -*- coding: utf-8 -*-
""" Tests checking the basic functionality of the Control/Explorer section.

Whether we can create/update/delete/assign/... these objects. Nothing with deep
meaning. Can be also used as a unit-test for page model coverage.

TODO:
    * Multiple expression types entering. (extend the update tests)
"""
import fauxfactory
import pytest

import cfme.fixtures.pytest_selenium as sel
from cfme.control import explorer
from utils.update import update
from utils.version import current_version
from cfme.web_ui import flash
from cfme.web_ui import expression_editor

pytestmark = [pytest.mark.long_running]

# (expression-editor fill program, expected rendered expression text)
VM_EXPRESSIONS_TO_TEST = [
    (
        "fill_field(VM and Instance : Boot Time, BEFORE, Today)",
        'VM and Instance : Boot Time BEFORE "Today"'
    ),
    (
        "fill_field(VM and Instance : Boot Time, BEFORE, 03/04/2014)",
        'VM and Instance : Boot Time BEFORE "03/04/2014 00:00"'
    ),
    (
        "fill_field(VM and Instance : Custom 6, RUBY, puts 'hello')",
        'VM and Instance : Custom 6 RUBY <RUBY Expression>'
    ),
    (
        "fill_field(VM and Instance : Format, IS NOT NULL)",
        'VM and Instance : Format IS NOT NULL'
    ),
    (
        "fill_count(VM and Instance.Files, =, 150)",
        'COUNT OF VM and Instance.Files = 150'
    ),
    # ("fill_tag(VM and Instance.My Company Tags : Owner, Production Linux Team)",)
    # Needs working input/select mutability
]


@pytest.yield_fixture
def random_alert():
    alert = explorer.Alert(
        fauxfactory.gen_alphanumeric(),
        timeline_event=True,
        driving_event="Hourly Timer")
    alert.create()
    yield alert
    alert.delete()


@pytest.yield_fixture(params=[explorer.VMCompliancePolicy,
                              explorer.HostCompliancePolicy,
                              explorer.HostControlPolicy,
                              explorer.VMControlPolicy],
                      ids=["VMCompliancePolicy",
                           "HostCompliancePolicy",
                           "HostControlPolicy",
                           "VMControlPolicy"])
def random_policy(request):
    policy = request.param(fauxfactory.gen_alphanumeric())
    policy.create()
    yield policy
    policy.delete()


@pytest.yield_fixture(scope="module")
def vm_condition_for_expressions():
    cond = explorer.VMCondition(
        fauxfactory.gen_alphanumeric(),
        expression="fill_field(VM and Instance : CPU Limit, =, 20)",
        scope="fill_count(VM and Instance.Files, >, 150)"
    )
    cond.create()
    yield cond
    cond.delete()


@pytest.yield_fixture
def random_vm_condition():
    cond = explorer.VMCondition(
        fauxfactory.gen_alphanumeric(),
        expression="fill_field(VM and Instance : CPU Limit, =, 20)",
        scope="fill_count(VM and Instance.Files, >, 150)"
    )
    cond.create()
    yield cond
    cond.delete()


@pytest.yield_fixture
def random_host_condition():
    # The "Host" model was renamed "Host / Node" in 5.4.
    if current_version() >= "5.4":
        expression = "fill_count(Host / Node.Files, >, 150)"
    else:
        expression = "fill_count(Host.Files, >, 150)"
    cond = explorer.HostCondition(
        fauxfactory.gen_alphanumeric(),
        expression=expression,
    )
    cond.create()
    yield cond
    cond.delete()


@pytest.yield_fixture
def random_vm_control_policy():
    policy = explorer.VMControlPolicy(fauxfactory.gen_alphanumeric())
    policy.create()
    yield policy
    policy.delete()


@pytest.yield_fixture
def random_host_control_policy():
    policy = explorer.HostControlPolicy(fauxfactory.gen_alphanumeric())
    policy.create()
    yield policy
    policy.delete()


@pytest.yield_fixture
def random_container_image_control_policy():
    policy = explorer.ContainerImageControlPolicy(fauxfactory.gen_alphanumeric())
    policy.create()
    yield policy
    policy.delete()


@pytest.yield_fixture(params=[explorer.ClusterAlertProfile,
                              explorer.DatastoreAlertProfile,
                              explorer.HostAlertProfile,
                              explorer.ProviderAlertProfile,
                              explorer.ServerAlertProfile,
                              explorer.VMInstanceAlertProfile],
                      ids=[explorer.ClusterAlertProfile.TYPE,
                           explorer.DatastoreAlertProfile.TYPE,
                           explorer.HostAlertProfile.TYPE,
                           explorer.ProviderAlertProfile.TYPE,
                           explorer.ServerAlertProfile.TYPE,
                           explorer.VMInstanceAlertProfile.TYPE])
def alert_profile(request):
    alert = explorer.Alert(
        fauxfactory.gen_alphanumeric(),
        based_on=request.param.TYPE,
        timeline_event=True,
        driving_event="Hourly Timer"
    )
    alert.create()
    alert_profile = request.param(fauxfactory.gen_alphanumeric(), [alert])
    yield alert_profile
    alert.delete()


def _crud_check(obj, kind, edit_dest, context_key, soft_assert):
    """Shared CRUD scenario used by the policy and condition tests.

    Creates `obj`, verifies it exists, updates its notes and checks the
    change on the `edit_dest` page (navigated with `context_key`), then
    deletes it and verifies it is gone.  Identical to the per-test code
    it replaces.
    """
    # CR
    obj.create()
    soft_assert(obj.exists, "The {} {} does not exist!".format(
        kind, obj.description
    ))
    # U
    with update(obj):
        obj.notes = "Modified!"
    sel.force_navigate(edit_dest, context={context_key: obj.description})
    soft_assert(sel.text(obj.form.notes).strip() == "Modified!", "Modification failed!")
    # D
    obj.delete()
    soft_assert(not obj.exists, "The {} {} exists!".format(
        kind, obj.description
    ))


@pytest.mark.tier(2)
def test_vm_condition_crud(soft_assert):
    condition = explorer.VMCondition(
        fauxfactory.gen_alphanumeric(),
        expression="fill_field(VM and Instance : CPU Limit, =, 20)",
        scope="fill_count(VM and Instance.Files, >, 150)"
    )
    _crud_check(condition, "condition", "vm_condition_edit",
                "condition_name", soft_assert)


@pytest.mark.tier(2)
def test_host_condition_crud(soft_assert):
    if current_version() >= "5.4":
        expression = "fill_count(Host / Node.Files, >, 150)"
    else:
        expression = "fill_count(Host.Files, >, 150)"
    condition = explorer.HostCondition(
        fauxfactory.gen_alphanumeric(),
        expression=expression
    )
    _crud_check(condition, "condition", "host_condition_edit",
                "condition_name", soft_assert)


@pytest.mark.tier(2)
@pytest.mark.skipif(current_version() < "5.6", reason="requires cfme 5.6 and higher")
def test_container_image_condition_crud(soft_assert):
    expression = "fill_field(Image : Tag, =, {})".format(fauxfactory.gen_alphanumeric())
    condition = explorer.ContainerImageCondition(
        fauxfactory.gen_alphanumeric(),
        expression=expression
    )
    _crud_check(condition, "condition", "container_image_condition_edit",
                "condition_name", soft_assert)


@pytest.mark.tier(2)
def test_action_crud(soft_assert):
    # Actions update their description (not notes), so this test does not
    # use the shared CRUD helper.
    action = explorer.Action(
        fauxfactory.gen_alphanumeric(),
        action_type="Tag",
        action_values={"tag": ("My Company Tags", "Department", "Accounting")}
    )
    # CR
    action.create()
    soft_assert(action.exists, "The action {} does not exist!".format(
        action.description
    ))
    # U
    with update(action):
        action.description = "w00t w00t"
    sel.force_navigate("control_explorer_action_edit",
                       context={"action_name": action.description})
    soft_assert(
        sel.get_attribute(action.form.description, "value").strip() == "w00t w00t",
        "Modification failed!"
    )
    # D
    action.delete()
    soft_assert(not action.exists, "The action {} exists!".format(
        action.description
    ))


@pytest.mark.tier(2)
def test_vm_control_policy_crud(soft_assert):
    _crud_check(explorer.VMControlPolicy(fauxfactory.gen_alphanumeric()),
                "policy", "vm_control_policy_edit", "policy_name", soft_assert)


@pytest.mark.tier(2)
def test_vm_compliance_policy_crud(soft_assert):
    _crud_check(explorer.VMCompliancePolicy(fauxfactory.gen_alphanumeric()),
                "policy", "vm_compliance_policy_edit", "policy_name", soft_assert)


@pytest.mark.tier(2)
def test_host_control_policy_crud(soft_assert):
    _crud_check(explorer.HostControlPolicy(fauxfactory.gen_alphanumeric()),
                "policy", "host_control_policy_edit", "policy_name", soft_assert)


@pytest.mark.tier(2)
def test_host_compliance_policy_crud(soft_assert):
    _crud_check(explorer.HostCompliancePolicy(fauxfactory.gen_alphanumeric()),
                "policy", "host_compliance_policy_edit", "policy_name", soft_assert)


@pytest.mark.tier(2)
@pytest.mark.skipif(current_version() < "5.6", reason="requires cfme 5.6 and higher")
def test_container_image_control_policy_crud(soft_assert):
    _crud_check(explorer.ContainerImageControlPolicy(fauxfactory.gen_alphanumeric()),
                "policy", "container_image_control_policy_edit",
                "policy_name", soft_assert)


@pytest.mark.tier(2)
@pytest.mark.skipif(current_version() < "5.6", reason="requires cfme 5.6 and higher")
def test_container_image_compliance_policy_crud(soft_assert):
    _crud_check(explorer.ContainerImageCompliancePolicy(fauxfactory.gen_alphanumeric()),
                "policy", "container_image_compliance_policy_edit",
                "policy_name", soft_assert)


@pytest.mark.tier(3)
def test_policies_copy(random_policy, soft_assert):
    random_policy_copy = random_policy.copy()
    soft_assert(random_policy_copy.exists, "The {} does not exist!".format(random_policy_copy))
    random_policy_copy.delete()


@pytest.mark.tier(3)
def test_assign_events_to_vm_control_policy(random_vm_control_policy, soft_assert):
    random_vm_control_policy.assign_events("VM Retired", "VM Clone Start")
    soft_assert(random_vm_control_policy.is_event_assigned("VM Retired"))
    soft_assert(random_vm_control_policy.is_event_assigned("VM Clone Start"))


@pytest.mark.tier(3)
def test_assign_events_to_host_control_policy(random_host_control_policy, soft_assert):
    random_host_control_policy.assign_events("Host Auth Error", "Host Compliance Passed")
    soft_assert(random_host_control_policy.is_event_assigned("Host Auth Error"))
    soft_assert(random_host_control_policy.is_event_assigned("Host Compliance Passed"))


@pytest.mark.tier(3)
def test_assign_vm_condition_to_vm_policy(
        random_vm_control_policy, random_vm_condition, soft_assert):
    random_vm_control_policy.assign_conditions(random_vm_condition)
    soft_assert(random_vm_control_policy.is_condition_assigned(random_vm_condition))
    random_vm_control_policy.assign_conditions()  # unassign


@pytest.mark.tier(3)
def test_assign_host_condition_to_host_policy(
        random_host_control_policy, random_host_condition, soft_assert):
    random_host_control_policy.assign_conditions(random_host_condition)
    soft_assert(random_host_control_policy.is_condition_assigned(random_host_condition))
    random_host_control_policy.assign_conditions()  # unassign


@pytest.mark.tier(2)
def test_policy_profile_crud(random_vm_control_policy, random_host_control_policy, soft_assert):
    profile = explorer.PolicyProfile(
        fauxfactory.gen_alphanumeric(),
        policies=[random_vm_control_policy, random_host_control_policy]
    )
    profile.create()
    soft_assert(profile.exists, "Policy profile {} does not exist!".format(profile.description))
    with update(profile):
        profile.notes = "Modified!"
    sel.force_navigate("policy_profile", context={"policy_profile_name": profile.description})
    soft_assert(sel.text(profile.form.notes).strip() == "Modified!")
    profile.delete()
    soft_assert(not profile.exists, "The policy profile {} exists!".format(profile.description))


@pytest.mark.tier(3)
# RUBY expression type is no longer supported.
@pytest.mark.uncollectif(lambda expression: "RUBY" in expression and current_version() >= "5.5")
@pytest.mark.parametrize(("expression", "verify"), VM_EXPRESSIONS_TO_TEST)
def test_modify_vm_condition_expression(
        vm_condition_for_expressions, expression, verify, soft_assert):
    with update(vm_condition_for_expressions):
        vm_condition_for_expressions.expression = expression
    flash.assert_no_errors()
    if verify is not None:
        sel.force_navigate("vm_condition_edit",
                           context={"condition_name": vm_condition_for_expressions.description})
        vm_condition_for_expressions.form.expression.show_func()
        soft_assert(expression_editor.get_expression_as_text() == verify)


@pytest.mark.tier(2)
def test_alert_crud(soft_assert):
    alert = explorer.Alert(
        fauxfactory.gen_alphanumeric(),
        timeline_event=True,
        driving_event="Hourly Timer"
    )
    # CR
    alert.create()
    soft_assert(alert.exists, "The alert {} does not exist!".format(
        alert.description
    ))
    # U
    with update(alert):
        alert.notification_frequency = "2 Hours"
    sel.force_navigate("control_explorer_alert_edit",
                       context={"alert_name": alert.description})
    soft_assert(
        (alert.form.notification_frequency.first_selected_option[0]
         .strip()) == "2 Hours",
        "Modification failed!"
    )
    # D
    alert.delete()
    soft_assert(not alert.exists, "The alert {} exists!".format(
        alert.description
    ))


@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[1303645], automates=[1303645])
def test_control_alert_copy(random_alert, soft_assert):
    alert_copy = random_alert.copy()
    soft_assert(alert_copy.exists, "The alert {} does not exist!".format(
        alert_copy.description
    ))
    alert_copy.delete()
    soft_assert(not alert_copy.exists, "The alert {} exists!".format(
        alert_copy.description
    ))


@pytest.mark.tier(2)
def test_alert_profile_crud(alert_profile, soft_assert):
    alert_profile.create()
    soft_assert(alert_profile.exists, "The alert profile {} does not exist!".format(
        alert_profile.description
    ))
    with update(alert_profile):
        alert_profile.notes = "Modified!"
    sel.force_navigate("{}_alert_profile_edit".format(alert_profile.PREFIX),
                       context={"alert_profile_name": alert_profile.description})
    soft_assert(
        sel.text(
            alert_profile.form.notes) == "Modified!",
        "Modification failed!"
    )
    alert_profile.delete()
    soft_assert(not alert_profile.exists, "The alert profile {} exists!".format(
        alert_profile.description
    ))
gpl-2.0
jgliss/pydoas
docs/conf.py
1
10922
# -*- coding: utf-8 -*-
#
# PyDOAS documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 12 14:39:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
import matplotlib

# This was inserted based on this blog:
# https://github.com/spinus/sphinxcontrib-images/issues/41, after the
# following build error occured: Could not import extension
# sphinxcontrib.images (exception: cannot import name make_admonition),
# apparently due to a compatibility error between an updated version of
# sphinx (1.6) and the extension sphinxcontrib.images
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx.util import compat
compat.make_admonition = BaseAdmonition

# Non-interactive backend so the docs build on headless machines.
matplotlib.use('agg')

# Read the package version from the VERSION file one level up.
# (Removed a redundant f.close(): the with-statement already closes the file.)
with open(os.path.join("..", "VERSION")) as f:
    __version__ = f.readline()

sys.path.insert(0, os.path.abspath('../'))

# BUG FIX: the original list read ['numpy', 'pandas' 'matplotlib'] -- the
# missing commas concatenated the last two strings into the single bogus
# entry 'pandasmatplotlib'.  Each module is now a separate entry.
MOCK_MODULES = [
    'numpy',
    'pandas',
    'matplotlib',
]

#sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = [
#    'sphinx.ext.autodoc',
#    'sphinx.ext.doctest',
#    'sphinx.ext.intersphinx',
#    'sphinx.ext.todo',
#    'sphinx.ext.pngmath',
#    'sphinx.ext.ifconfig',
#    'sphinx.ext.viewcode',
#]

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.graphviz',
    'sphinxcontrib.napoleon',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'pydoas'
copyright = '2016, Jonas Gliss'
author = 'Jonas Gliss'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
print("LIB VERSION %s" % __version__)
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'pydoasdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pydoas.tex', 'pydoas Documentation', 'Jonas Gliss', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ (master_doc, 'pydoas', 'pydoas Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pydoas', 'pydoas Documentation', author, 'pydoas', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None} def skip(app, what, name, obj, skip, options): if name == "__init__": return False return skip def setup(app): app.connect("autodoc-skip-member", skip) autodoc_member_order = 'bysource' images_config = { 'default_image_width' : '300px', 'default_group' : 'default' }
bsd-3-clause
petecummings/django-cms
cms/utils/i18n.py
55
5196
# -*- coding: utf-8 -*-
from contextlib import contextmanager

from django.core.urlresolvers import get_resolver, LocaleRegexURLResolver
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext_lazy as _

from cms.exceptions import LanguageError
from cms.utils.conf import get_cms_setting, get_site_id


@contextmanager
def force_language(new_lang):
    """Temporarily activate *new_lang* for the duration of the with-block.

    BUGFIX: the previous language is now restored in a ``finally`` clause,
    so an exception raised inside the with-block no longer leaves the
    forced language active for the rest of the request/thread.
    """
    old_lang = get_current_language()
    if old_lang != new_lang:
        translation.activate(new_lang)
    try:
        yield
    finally:
        translation.activate(old_lang)


def get_languages(site_id=None):
    """Return (and cache) the list of language dicts configured for a site.

    Each entry is a dict with at least ``code`` and ``name``, merged with
    the ``default`` entry of the CMS_LANGUAGES setting.  The computed list
    is written back into the CMS_LANGUAGES mapping so subsequent calls for
    the same site are a plain dict lookup.
    """
    site_id = get_site_id(site_id)
    result = get_cms_setting('LANGUAGES').get(site_id)
    if not result:
        result = []
        defaults = get_cms_setting('LANGUAGES').get('default', {})
        for code, name in settings.LANGUAGES:
            lang = {'code': code, 'name': _(name)}
            lang.update(defaults)
            result.append(lang)
        get_cms_setting('LANGUAGES')[site_id] = result
    return result


def get_language_code(language_code):
    """
    Returns language code while making sure it's in LANGUAGES.

    Tries an exact match first, then matches on the base language of
    either side (e.g. 'en-us' -> 'en' or 'en' -> 'en-us'); falls back to
    returning the input unchanged.
    """
    if not language_code:
        return None
    languages = get_language_list()
    if language_code in languages:  # direct hit
        return language_code
    for lang in languages:
        if language_code.split('-')[0] == lang:  # base language hit
            return lang
        if lang.split('-')[0] == language_code:  # base language hit
            return lang
    return language_code


def get_current_language():
    """
    Returns the currently active language

    It's a replacement for Django's translation.get_language() to make
    sure the LANGUAGE_CODE will be found in LANGUAGES.
    Overcomes this issue: https://code.djangoproject.com/ticket/9340
    """
    language_code = translation.get_language()
    return get_language_code(language_code)


def get_language_list(site_id=None):
    """
    :return: returns a list of iso2codes for this site
    """
    if settings.USE_I18N:
        return [lang['code'] for lang in get_languages(site_id)]
    return [settings.LANGUAGE_CODE]


def get_language_tuple(site_id=None):
    """
    :return: returns a list of tuples like the old CMS_LANGUAGES or the
             LANGUAGES for this site
    """
    return [(lang['code'], lang['name']) for lang in get_languages(site_id)]


def get_language_dict(site_id=None):
    """
    :return: returns a dict of cms languages (code -> name)
    """
    return dict(get_language_tuple(site_id))


def get_public_languages(site_id=None):
    """
    :return: list of iso2codes of public languages for this site
    """
    return [lang['code'] for lang in get_language_objects(site_id)
            if lang.get('public', True)]


def get_language_object(language_code, site_id=None):
    """
    :param language_code: RFC5646 language code
    :return: the language object filled up by defaults
    :raises LanguageError: if the code matches no configured language
    """
    for language in get_languages(site_id):
        if language['code'] == get_language_code(language_code):
            return language
    raise LanguageError('Language not found: %s' % language_code)


def get_language_objects(site_id=None):
    """
    returns list of all language objects filled up by default values
    """
    return list(get_languages(site_id))


def get_default_language(language_code=None, site_id=None):
    """
    Returns default language depending on settings.LANGUAGE_CODE merged with
    best match from get_cms_setting('LANGUAGES')

    Returns: language_code
    """
    if not language_code:
        language_code = get_language_code(settings.LANGUAGE_CODE)

    languages = get_language_list(site_id)

    # first try if there is an exact language
    if language_code in languages:
        return language_code

    # otherwise split the language code if possible, so iso3
    language_code = language_code.split("-")[0]

    if language_code not in languages:
        return settings.LANGUAGE_CODE

    return language_code


def get_fallback_languages(language, site_id=None):
    """
    returns a list of fallback languages for the given language
    (the first configured language is used when *language* is unknown)
    """
    try:
        language = get_language_object(language, site_id)
    except LanguageError:
        language = get_languages(site_id)[0]
    return language.get('fallbacks', [])


def get_redirect_on_fallback(language, site_id=None):
    """
    returns if you should redirect on language fallback
    :param language:
    :param site_id:
    :return: Boolean
    """
    language = get_language_object(language, site_id)
    return language.get('redirect_on_fallback', True)


def hide_untranslated(language, site_id=None):
    """
    Should untranslated pages in this language be hidden?
    :param language:
    :param site_id:
    :return: A Boolean
    """
    obj = get_language_object(language, site_id)
    return obj.get('hide_untranslated', True)


def is_language_prefix_patterns_used():
    """
    Returns `True` if the `LocaleRegexURLResolver` is used
    at root level of the urlpatterns, else it returns `False`.
    """
    return any(isinstance(url_pattern, LocaleRegexURLResolver)
               for url_pattern in get_resolver(None).url_patterns)
bsd-3-clause
ptisserand/ansible
lib/ansible/utils/module_docs_fragments/aws.py
18
2898
# (c) 2014, Will Thames <will@thames.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):
    """Shared documentation fragment for boto-based AWS modules.

    Modules reference it via ``extends_documentation_fragment: aws`` so the
    common connection/credential options below are documented once.  The
    DOCUMENTATION attribute must stay valid YAML.
    """

    # AWS only documentation fragment
    DOCUMENTATION = """
options:
  ec2_url:
    description:
      - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints).
        Ignored for modules where region is required. Must be specified for all other modules if region is not used.
        If not set then the value of the EC2_URL environment variable, if any, is used.
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY
        environment variable is used.
    aliases: [ 'ec2_secret_key', 'secret_key' ]
  aws_access_key:
    description:
      - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY
        environment variable is used.
    aliases: [ 'ec2_access_key', 'access_key' ]
  security_token:
    description:
      - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN
        environment variable is used.
    aliases: [ 'access_token' ]
    version_added: "1.6"
  validate_certs:
    description:
      - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
    type: bool
    default: 'yes'
    version_added: "1.5"
  profile:
    description:
      - Uses a boto profile. Only works with boto >= 2.24.0.
    version_added: "1.6"
requirements:
  - "python >= 2.6"
  - boto
notes:
  - If parameters are not set within the module, the following
    environment variables can be used in decreasing order of precedence
    C(AWS_URL) or C(EC2_URL),
    C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
    C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
    C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
    C(AWS_REGION) or C(EC2_REGION)
  - Ansible uses the boto configuration file (typically ~/.boto) if no
    credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
  - C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the
    AWS region, when required, but this can also be configured in the boto config file
"""
gpl-3.0
DGrady/pandas
pandas/tests/io/parser/usecols.py
11
18059
# -*- coding: utf-8 -*- """ Tests the usecols functionality during parsing for all of the parsers defined in parsers.py """ import pytest import numpy as np import pandas.util.testing as tm from pandas import DataFrame, Index from pandas._libs.lib import Timestamp from pandas.compat import StringIO class UsecolsTests(object): def test_raise_on_mixed_dtype_usecols(self): # See gh-12678 data = """a,b,c 1000,2000,3000 4000,5000,6000 """ msg = ("'usecols' must either be all strings, all unicode, " "all integers or a callable") usecols = [0, 'b', 2] with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), usecols=usecols) def test_usecols(self): data = """\ a,b,c 1,2,3 4,5,6 7,8,9 10,11,12""" result = self.read_csv(StringIO(data), usecols=(1, 2)) result2 = self.read_csv(StringIO(data), usecols=('b', 'c')) exp = self.read_csv(StringIO(data)) assert len(result.columns) == 2 assert (result['b'] == exp['b']).all() assert (result['c'] == exp['c']).all() tm.assert_frame_equal(result, result2) result = self.read_csv(StringIO(data), usecols=[1, 2], header=0, names=['foo', 'bar']) expected = self.read_csv(StringIO(data), usecols=[1, 2]) expected.columns = ['foo', 'bar'] tm.assert_frame_equal(result, expected) data = """\ 1,2,3 4,5,6 7,8,9 10,11,12""" result = self.read_csv(StringIO(data), names=['b', 'c'], header=None, usecols=[1, 2]) expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'], header=None) expected = expected[['b', 'c']] tm.assert_frame_equal(result, expected) result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'], header=None, usecols=['b', 'c']) tm.assert_frame_equal(result2, result) # see gh-5766 result = self.read_csv(StringIO(data), names=['a', 'b'], header=None, usecols=[0, 1]) expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'], header=None) expected = expected[['a', 'b']] tm.assert_frame_equal(result, expected) # length conflict, passed names and usecols disagree pytest.raises(ValueError, self.read_csv, 
StringIO(data), names=['a', 'b'], usecols=[1], header=None) def test_usecols_index_col_False(self): # see gh-9082 s = "a,b,c,d\n1,2,3,4\n5,6,7,8" s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8," cols = ['a', 'c', 'd'] expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]}) df = self.read_csv(StringIO(s), usecols=cols, index_col=False) tm.assert_frame_equal(expected, df) df = self.read_csv(StringIO(s_malformed), usecols=cols, index_col=False) tm.assert_frame_equal(expected, df) def test_usecols_index_col_conflict(self): # see gh-4201: test that index_col as integer reflects usecols data = 'a,b,c,d\nA,a,1,one\nB,b,2,two' expected = DataFrame({'c': [1, 2]}, index=Index( ['a', 'b'], name='b')) df = self.read_csv(StringIO(data), usecols=['b', 'c'], index_col=0) tm.assert_frame_equal(expected, df) df = self.read_csv(StringIO(data), usecols=['b', 'c'], index_col='b') tm.assert_frame_equal(expected, df) df = self.read_csv(StringIO(data), usecols=[1, 2], index_col='b') tm.assert_frame_equal(expected, df) df = self.read_csv(StringIO(data), usecols=[1, 2], index_col=0) tm.assert_frame_equal(expected, df) expected = DataFrame( {'b': ['a', 'b'], 'c': [1, 2], 'd': ('one', 'two')}) expected = expected.set_index(['b', 'c']) df = self.read_csv(StringIO(data), usecols=['b', 'c', 'd'], index_col=['b', 'c']) tm.assert_frame_equal(expected, df) def test_usecols_implicit_index_col(self): # see gh-2654 data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10' result = self.read_csv(StringIO(data), usecols=['a', 'b']) expected = DataFrame({'a': ['apple', 'orange'], 'b': ['bat', 'cow']}, index=[4, 8]) tm.assert_frame_equal(result, expected) def test_usecols_regex_sep(self): # see gh-2733 data = 'a b c\n4 apple bat 5.7\n8 orange cow 10' df = self.read_csv(StringIO(data), sep=r'\s+', usecols=('a', 'b')) expected = DataFrame({'a': ['apple', 'orange'], 'b': ['bat', 'cow']}, index=[4, 8]) tm.assert_frame_equal(df, expected) def test_usecols_with_whitespace(self): data = 'a b c\n4 apple bat 5.7\n8 
orange cow 10' result = self.read_csv(StringIO(data), delim_whitespace=True, usecols=('a', 'b')) expected = DataFrame({'a': ['apple', 'orange'], 'b': ['bat', 'cow']}, index=[4, 8]) tm.assert_frame_equal(result, expected) def test_usecols_with_integer_like_header(self): data = """2,0,1 1000,2000,3000 4000,5000,6000 """ usecols = [0, 1] # column selection by index expected = DataFrame(data=[[1000, 2000], [4000, 5000]], columns=['2', '0']) df = self.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(df, expected) usecols = ['0', '1'] # column selection by name expected = DataFrame(data=[[2000, 3000], [5000, 6000]], columns=['0', '1']) df = self.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(df, expected) def test_usecols_with_parse_dates(self): # See gh-9755 s = """a,b,c,d,e 0,1,20140101,0900,4 0,1,20140102,1000,4""" parse_dates = [[1, 2]] cols = { 'a': [0, 0], 'c_d': [ Timestamp('2014-01-01 09:00:00'), Timestamp('2014-01-02 10:00:00') ] } expected = DataFrame(cols, columns=['c_d', 'a']) df = self.read_csv(StringIO(s), usecols=[0, 2, 3], parse_dates=parse_dates) tm.assert_frame_equal(df, expected) df = self.read_csv(StringIO(s), usecols=[3, 0, 2], parse_dates=parse_dates) tm.assert_frame_equal(df, expected) # See gh-13604 s = """2008-02-07 09:40,1032.43 2008-02-07 09:50,1042.54 2008-02-07 10:00,1051.65 """ parse_dates = [0] names = ['date', 'values'] usecols = names[:] index = Index([Timestamp('2008-02-07 09:40'), Timestamp('2008-02-07 09:50'), Timestamp('2008-02-07 10:00')], name='date') cols = {'values': [1032.43, 1042.54, 1051.65]} expected = DataFrame(cols, index=index) df = self.read_csv(StringIO(s), parse_dates=parse_dates, index_col=0, usecols=usecols, header=None, names=names) tm.assert_frame_equal(df, expected) # See gh-14792 s = """a,b,c,d,e,f,g,h,i,j 2016/09/21,1,1,2,3,4,5,6,7,8""" parse_dates = [0] usecols = list('abcdefghij') cols = {'a': Timestamp('2016-09-21'), 'b': [1], 'c': [1], 'd': [2], 'e': [3], 'f': [4], 'g': [5], 
'h': [6], 'i': [7], 'j': [8]} expected = DataFrame(cols, columns=usecols) df = self.read_csv(StringIO(s), usecols=usecols, parse_dates=parse_dates) tm.assert_frame_equal(df, expected) s = """a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8""" parse_dates = [[0, 1]] usecols = list('abcdefghij') cols = {'a_b': '2016/09/21 1', 'c': [1], 'd': [2], 'e': [3], 'f': [4], 'g': [5], 'h': [6], 'i': [7], 'j': [8]} expected = DataFrame(cols, columns=['a_b'] + list('cdefghij')) df = self.read_csv(StringIO(s), usecols=usecols, parse_dates=parse_dates) tm.assert_frame_equal(df, expected) def test_usecols_with_parse_dates_and_full_names(self): # See gh-9755 s = """0,1,20140101,0900,4 0,1,20140102,1000,4""" parse_dates = [[1, 2]] names = list('abcde') cols = { 'a': [0, 0], 'c_d': [ Timestamp('2014-01-01 09:00:00'), Timestamp('2014-01-02 10:00:00') ] } expected = DataFrame(cols, columns=['c_d', 'a']) df = self.read_csv(StringIO(s), names=names, usecols=[0, 2, 3], parse_dates=parse_dates) tm.assert_frame_equal(df, expected) df = self.read_csv(StringIO(s), names=names, usecols=[3, 0, 2], parse_dates=parse_dates) tm.assert_frame_equal(df, expected) def test_usecols_with_parse_dates_and_usecol_names(self): # See gh-9755 s = """0,1,20140101,0900,4 0,1,20140102,1000,4""" parse_dates = [[1, 2]] names = list('acd') cols = { 'a': [0, 0], 'c_d': [ Timestamp('2014-01-01 09:00:00'), Timestamp('2014-01-02 10:00:00') ] } expected = DataFrame(cols, columns=['c_d', 'a']) df = self.read_csv(StringIO(s), names=names, usecols=[0, 2, 3], parse_dates=parse_dates) tm.assert_frame_equal(df, expected) df = self.read_csv(StringIO(s), names=names, usecols=[3, 0, 2], parse_dates=parse_dates) tm.assert_frame_equal(df, expected) def test_usecols_with_unicode_strings(self): # see gh-13219 s = '''AAA,BBB,CCC,DDD 0.056674973,8,True,a 2.613230982,2,False,b 3.568935038,7,False,a ''' data = { 'AAA': { 0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002 }, 'BBB': {0: 8, 1: 2, 2: 7} } expected = 
DataFrame(data) df = self.read_csv(StringIO(s), usecols=[u'AAA', u'BBB']) tm.assert_frame_equal(df, expected) def test_usecols_with_single_byte_unicode_strings(self): # see gh-13219 s = '''A,B,C,D 0.056674973,8,True,a 2.613230982,2,False,b 3.568935038,7,False,a ''' data = { 'A': { 0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002 }, 'B': {0: 8, 1: 2, 2: 7} } expected = DataFrame(data) df = self.read_csv(StringIO(s), usecols=[u'A', u'B']) tm.assert_frame_equal(df, expected) def test_usecols_with_mixed_encoding_strings(self): s = '''AAA,BBB,CCC,DDD 0.056674973,8,True,a 2.613230982,2,False,b 3.568935038,7,False,a ''' msg = ("'usecols' must either be all strings, all unicode, " "all integers or a callable") with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(s), usecols=[u'AAA', b'BBB']) with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(s), usecols=[b'AAA', u'BBB']) def test_usecols_with_multibyte_characters(self): s = '''あああ,いい,ううう,ええええ 0.056674973,8,True,a 2.613230982,2,False,b 3.568935038,7,False,a ''' data = { 'あああ': { 0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002 }, 'いい': {0: 8, 1: 2, 2: 7} } expected = DataFrame(data) df = self.read_csv(StringIO(s), usecols=['あああ', 'いい']) tm.assert_frame_equal(df, expected) def test_usecols_with_multibyte_unicode_characters(self): pytest.skip('TODO: see gh-13253') s = '''あああ,いい,ううう,ええええ 0.056674973,8,True,a 2.613230982,2,False,b 3.568935038,7,False,a ''' data = { 'あああ': { 0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002 }, 'いい': {0: 8, 1: 2, 2: 7} } expected = DataFrame(data) df = self.read_csv(StringIO(s), usecols=[u'あああ', u'いい']) tm.assert_frame_equal(df, expected) def test_empty_usecols(self): # should not raise data = 'a,b,c\n1,2,3\n4,5,6' expected = DataFrame() result = self.read_csv(StringIO(data), usecols=set([])) tm.assert_frame_equal(result, expected) def test_np_array_usecols(self): # See gh-12546 data = 'a,b,c\n1,2,3' 
usecols = np.array(['a', 'b']) expected = DataFrame([[1, 2]], columns=usecols) result = self.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(result, expected) def test_callable_usecols(self): # See gh-14154 s = '''AaA,bBb,CCC,ddd 0.056674973,8,True,a 2.613230982,2,False,b 3.568935038,7,False,a ''' data = { 'AaA': { 0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002 }, 'bBb': {0: 8, 1: 2, 2: 7}, 'ddd': {0: 'a', 1: 'b', 2: 'a'} } expected = DataFrame(data) df = self.read_csv(StringIO(s), usecols=lambda x: x.upper() in ['AAA', 'BBB', 'DDD']) tm.assert_frame_equal(df, expected) # Check that a callable returning only False returns # an empty DataFrame expected = DataFrame() df = self.read_csv(StringIO(s), usecols=lambda x: False) tm.assert_frame_equal(df, expected) def test_incomplete_first_row(self): # see gh-6710 data = '1,2\n1,2,3' names = ['a', 'b', 'c'] expected = DataFrame({'a': [1, 1], 'c': [np.nan, 3]}) usecols = ['a', 'c'] df = self.read_csv(StringIO(data), names=names, usecols=usecols) tm.assert_frame_equal(df, expected) usecols = lambda x: x in ['a', 'c'] df = self.read_csv(StringIO(data), names=names, usecols=usecols) tm.assert_frame_equal(df, expected) def test_uneven_length_cols(self): # see gh-8985 usecols = [0, 1, 2] data = '19,29,39\n' * 2 + '10,20,30,40' expected = DataFrame([[19, 29, 39], [19, 29, 39], [10, 20, 30]]) df = self.read_csv(StringIO(data), header=None, usecols=usecols) tm.assert_frame_equal(df, expected) # see gh-9549 usecols = ['A', 'B', 'C'] data = ('A,B,C\n1,2,3\n3,4,5\n1,2,4,5,1,6\n' '1,2,3,,,1,\n1,2,3\n5,6,7') expected = DataFrame({'A': [1, 3, 1, 1, 1, 5], 'B': [2, 4, 2, 2, 2, 6], 'C': [3, 5, 4, 3, 3, 7]}) df = self.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(df, expected) def test_raise_on_usecols_names_mismatch(self): # GH 14671 data = 'a,b,c,d\n1,2,3,4\n5,6,7,8' if self.engine == 'c': msg = 'Usecols do not match names' else: msg = 'is not in list' usecols = ['a', 'b', 'c', 'd'] 
df = self.read_csv(StringIO(data), usecols=usecols) expected = DataFrame({'a': [1, 5], 'b': [2, 6], 'c': [3, 7], 'd': [4, 8]}) tm.assert_frame_equal(df, expected) usecols = ['a', 'b', 'c', 'f'] with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), usecols=usecols) usecols = ['a', 'b', 'f'] with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), usecols=usecols) names = ['A', 'B', 'C', 'D'] df = self.read_csv(StringIO(data), header=0, names=names) expected = DataFrame({'A': [1, 5], 'B': [2, 6], 'C': [3, 7], 'D': [4, 8]}) tm.assert_frame_equal(df, expected) # TODO: https://github.com/pandas-dev/pandas/issues/16469 # usecols = ['A','C'] # df = self.read_csv(StringIO(data), header=0, names=names, # usecols=usecols) # expected = DataFrame({'A': [1,5], 'C': [3,7]}) # tm.assert_frame_equal(df, expected) # # usecols = [0,2] # df = self.read_csv(StringIO(data), header=0, names=names, # usecols=usecols) # expected = DataFrame({'A': [1,5], 'C': [3,7]}) # tm.assert_frame_equal(df, expected) usecols = ['A', 'B', 'C', 'f'] with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), header=0, names=names, usecols=usecols) usecols = ['A', 'B', 'f'] with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), names=names, usecols=usecols)
bsd-3-clause
mandarfsl/lvm-deduplicaton
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
12980
5411
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

try:
    import wx
except ImportError:
    # BUGFIX: the original used the Python-2-only statement form
    # ``raise ImportError, "..."`` which is a SyntaxError under Python 3;
    # the parenthesized form below is valid in both.
    raise ImportError("You need to install the wxpython lib for this script")


class RootFrame(wx.Frame):
    """Main perf-sched window: a scrollable, zoomable timeline of
    per-CPU/task rectangles painted by the sched_tracer callbacks.

    NOTE(review): this file targets Python 2 -- the ``/`` divisions below
    rely on integer floor division for pixel math; do not blindly port to
    Python 3 without switching them to ``//``.
    """

    Y_OFFSET = 100            # top margin above the first rectangle row
    RECT_HEIGHT = 100         # height of one rectangle row
    RECT_SPACE = 50           # vertical gap between rows
    EVENT_MARKING_WIDTH = 5   # height of the event-marker strip on a rect

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale,
                                  self.width_virtual / self.scroll_scale,
                                  self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        """Convert a microsecond duration into pixels at the current zoom."""
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        """Convert a pixel width back into microseconds."""
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        """Return the scroll origin in pixel units."""
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        """Return the horizontal scroll origin in microseconds."""
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw rectangle row *nr* covering [start, end] (in trace time).

        If *top_color* is given, a thin event-marker strip is painted
        above the main rectangle.
        """
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        """Ask the tracer to (re)fill the visible [start, end] zone."""
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        """Repaint the currently visible slice of the timeline."""
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel to a rectangle row index, or -1 if in a gap."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        """Replace the summary text shown below the timeline."""
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        """Forward a click to the tracer as (row, timestamp)."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        """Recompute the virtual canvas width from the trace interval."""
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Re-anchor the scrollbars so the timestamp *x* stays in view
        # after a zoom change.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale,
                                  self.width_virtual / self.scroll_scale,
                                  self.height_virtual / self.scroll_scale,
                                  xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        """Keyboard handling: +/- zoom, arrow keys scroll."""
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
gpl-2.0
Allend575/opencog
opencog/python/pln_old/rules/context_rules_old.py
32
2706
from opencog.atomspace import types, TruthValue import formulas from pln.rule import Rule # Todo: # It may be better to use SubsetLinks instead of ContextLinks, or at # least implicitly convert them. # (Context C x).tv = (Subset C x).tv # (Context C: Subset x y).tv = (Subset (x AND C) (y AND C)) # DeductionRule produces # Context C: Subset x z # using # Context C: Subset x y # Context C: Subset y z # Context C: y # Context C: z # Special case for direct evaluation Rules. # Subset A B requires # Member x A, Member x B # # Context C: Subset A B requires # Member x A # Member x B # Member x C # or something. and then change the math. # Or class ContextualRule(Rule): def __init__(self, chainer, rule): self._chainer = chainer self.name = 'Contextual' + rule.name self.full_name = 'Contextual' + rule.full_name self._outputs = rule._outputs self._inputs = rule._inputs self.formula = rule.formula context = chainer.new_variable() self._outputs = [self.contextlink(context, out) for out in self._outputs] is_evaluation_rule = 'EvaluationRule' in rule.name if is_evaluation_rule: raise "Direct evaluation in a context is not handled yet" else: self._inputs = [self.contextlink(context, input) for input in self._inputs] print self.name print self._outputs print self._inputs def andlink(self, context, expression): return self._chainer.link(types.AndLink, [context, expression]) def contextlink(self, context, expression): return self._chainer.link(types.ContextLink, [context, expression]) def extract_context(self, contextlink): # Todo: The variable 'context' is never used. Is it supposed to # be returned instead of 'contextlink'? 
context = contextlink.out[0] expression = contextlink.out[1] return contextlink, expression class AndToContextRule(Rule): """ (Context C: Subset x y).tv = (Subset (x AND C) (y AND C)) """ def __init__(self, chainer, link_type): A = chainer.new_variable() B = chainer.new_variable() C = chainer.new_variable() link = chainer.link(link_type, [A, B]) contextlink = chainer.link(types.ContextLink, [C, link]) andAC = chainer.link(types.AndLink, [A, C]) andBC = chainer.link(types.AndLink, [B, C]) input = chainer.link(link_type, [andAC, andBC]) Rule.__init__(self, formula=formulas.identityFormula, outputs=[contextlink], inputs=[input])
agpl-3.0
sys-bio/tellurium
tellurium/tests/sedml/test_phrasedml.py
2
30460
""" Testing phrasedml. test_sedml_phrasedml.py : phrasedml based tests. test_sedml_kisao.py : SED-ML kisao support test_sedml_omex.py : SED-ML tests based on Combine Archives test_sedml_sedml.py : sed-ml tests """ from __future__ import absolute_import, print_function, division import os import shutil import tempfile import unittest import pytest import matplotlib import tellurium as te try: import tesedml as libsedml except ImportError: import libsedml import phrasedml from tellurium.sedml.utils import run_case from tellurium import temiriam from tellurium.utils import omex from tellurium.sedml.tesedml import executeSEDML, executeCombineArchive class PhrasedmlTestCase(unittest.TestCase): """ Testing execution and archives based on phrasedml input. """ def setUp(self): # switch the backend of matplotlib, so plots can be tested self.backend = matplotlib.rcParams['backend'] matplotlib.pyplot.switch_backend("Agg") # create a test instance self.antimony = ''' model myModel S1 -> S2; k1*S1; S1 = 10; S2 = 0; k1 = 1; end ''' self.phrasedml = ''' model1 = model "myModel" sim1 = simulate uniform(0, 5, 100) task1 = run sim1 on model1 plot "Figure 1" time vs S1, S2 ''' # self.tep = tephrasedml.experiment(self.antimony, self.phrasedml) self.a1 = """ model m1() J0: S1 -> S2; k1*S1; S1 = 10.0; S2=0.0; k1 = 0.1; end """ self.a2 = """ model m2() v0: X1 -> X2; p1*X1; X1 = 5.0; X2 = 20.0; p1 = 0.2; end """ def tearDown(self): matplotlib.pyplot.switch_backend(self.backend) matplotlib.pyplot.close('all') def test_execute(self): """Test execute.""" inline_omex = '\n'.join([self.antimony, self.phrasedml]) te.executeInlineOmex(inline_omex) def test_exportAsCombine(self): """ Test exportAsCombine. """ inline_omex = '\n'.join([self.antimony, self.phrasedml]) tmpdir = tempfile.mkdtemp() te.exportInlineOmex(inline_omex, os.path.join(tmpdir, 'archive.omex')) shutil.rmtree(tmpdir) def test_1Model1PhrasedML(self): """ Minimal example which should work. 
""" antimony_str = """ model test J0: S1 -> S2; k1*S1; S1 = 10.0; S2=0.0; k1 = 0.1; end """ phrasedml_str = """ model0 = model "test" sim0 = simulate uniform(0, 10, 100) task0 = run sim0 on model0 plot task0.time vs task0.S1 """ inline_omex = '\n'.join([antimony_str, phrasedml_str]) te.executeInlineOmex(inline_omex) def test_1Model2PhrasedML(self): """ Test multiple models and multiple phrasedml files. """ p1 = """ model1 = model "m1" sim1 = simulate uniform(0, 6, 100) task1 = run sim1 on model1 plot task1.time vs task1.S1, task1.S2 """ p2 = """ model1 = model "m1" model2 = model model1 with S1=S2+20 sim1 = simulate uniform(0, 6, 100) task1 = run sim1 on model2 plot task1.time vs task1.S1, task1.S2 """ inline_omex = '\n'.join([self.a1, p1]) te.executeInlineOmex(inline_omex) inline_omex = '\n'.join([self.a1, p2]) te.executeInlineOmex(inline_omex) inline_omex = '\n'.join([self.a1, p1, p2]) te.executeInlineOmex(inline_omex) def test_2Model1PhrasedML(self): """ Test multiple models and multiple phrasedml files. """ p1 = """ model1 = model "m1" model2 = model "m2" model3 = model model1 with S1=S2+20 sim1 = simulate uniform(0, 6, 100) task1 = run sim1 on model1 task2 = run sim1 on model2 plot "Timecourse test1" task1.time vs task1.S1, task1.S2 plot "Timecourse test2" task2.time vs task2.X1, task2.X2 """ inline_omex = '\n'.join([self.a1, self.a2, p1]) te.executeInlineOmex(inline_omex) def test_2Model2PhrasedML(self): """ Test multiple models and multiple phrasedml files. 
""" p1 = """ model1 = model "m1" model2 = model "m2" sim1 = simulate uniform(0, 6, 100) task1 = run sim1 on model1 task2 = run sim1 on model2 plot task1.time vs task1.S1, task1.S2, task2.time vs task2.X1, task2.X2 """ p2 = """ model1 = model "m1" model2 = model "m2" sim1 = simulate uniform(0, 20, 20) task1 = run sim1 on model1 task2 = run sim1 on model2 plot task1.time vs task1.S1, task1.S2, task2.time vs task2.X1, task2.X2 """ inline_omex = '\n'.join([self.a1, self.a2, p1, p2]) te.executeInlineOmex(inline_omex) ############################################ # Real world tests ############################################ def run_example(self, a_str, p_str): # execute tmpdir = tempfile.mkdtemp() try: run_case( call_file=os.path.realpath(__file__), antimony_str=a_str, phrasedml_str=p_str, working_dir=tmpdir ) finally: shutil.rmtree(tmpdir) def test_case_01(self): a_str = """ model case_01 J0: S1 -> S2; k1*S1; S1 = 10.0; S2=0.0; k1 = 0.1; end """ p_str = """ model0 = model "case_01" sim0 = simulate uniform(0, 10, 100) task0 = run sim0 on model0 plot "UniformTimecourse" task0.time vs task0.S1 report task0.time vs task0.S1 """ self.run_example(a_str, p_str) def test_case_02(self): a_str = """ model case_02 J0: S1 -> S2; k1*S1; S1 = 10.0; S2=0.0; k1 = 0.1; end """ p_str = """ model0 = model "case_02" model1 = model model0 with S1=5.0 sim0 = simulate uniform(0, 6, 100) task0 = run sim0 on model1 task1 = repeat task0 for k1 in uniform(0.0, 5.0, 5), reset = true plot "Repeated task with reset" task1.time vs task1.S1, task1.S2 report task1.time vs task1.S1, task1.S2 plot "Repeated task varying k1" task1.k1 vs task1.S1 report task1.k1 vs task1.S1 """ self.run_example(a_str, p_str) def test_case_03(self): a_str = ''' model case_03() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.5; k2=0.4 end ''' p_str = ''' mod1 = model "case_03" mod2 = model mod1 with S2=S1+4 sim1 = simulate uniform(0, 10, 100) task1 = run sim1 on mod1 task2 = run sim1 on mod2 plot "ComputeChanges" 
task1.time vs task1.S1, task1.S2, task2.S1, task2.S2 report task1.time vs task1.S1, task1.S2, task2.S1, task2.S2 ''' self.run_example(a_str, p_str) def test_case_04(self): a_str = ''' model case_04() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.5; k2=0.4 end ''' p_str = ''' mod1 = model "case_04" mod2 = model mod1 with S2=S1+4 mod3 = model mod2 with S1=20.0 sim1 = simulate uniform(0, 10, 100) task1 = run sim1 on mod1 task2 = run sim1 on mod2 task3 = run sim1 on mod3 plot "Example plot" task1.time vs task1.S1, task1.S2, task2.S1, task2.S2, task3.S1, task3.S2 report task1.time vs task1.S1, task1.S2, task2.S1, task2.S2, task3.S1, task3.S2 ''' self.run_example(a_str, p_str) def test_case_05(self): a_str = ''' model case_05() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.5; k2=0.4 end ''' p_str = ''' mod1 = model "case_05" sim1 = simulate uniform(0, 10, 100) task1 = run sim1 on mod1 plot "Example plot" task1.time vs task1.S1, task1.S2, task1.S1/task1.S2 report task1.time vs task1.S1, task1.S2, task1.S1/task1.S2 plot "Normalized plot" task1.S1/max(task1.S1) vs task1.S2/max(task1.S2) report task1.S1/max(task1.S1) vs task1.S2/max(task1.S2) ''' self.run_example(a_str, p_str) def test_case_06(self): a_str = ''' model case_06() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.5; k2=0.4 end ''' p_str = ''' mod1 = model "case_06" sim1 = simulate uniform(0, 10, 100) task1 = run sim1 on mod1 repeat1 = repeat task1 for S1 in [1, 3, 5], S2 in uniform(0, 10, 2), reset=True repeat2 = repeat task1 for S1 in [1, 3, 5], S2 in uniform(0, 10, 2), reset=False plot "Example plot" repeat1.time vs repeat1.S1, repeat1.S2 report repeat1.time vs repeat1.S1, repeat1.S2 plot "Example plot" repeat2.time vs repeat2.S1, repeat2.S2 report repeat2.time vs repeat2.S1, repeat2.S2 ''' self.run_example(a_str, p_str) def test_case_07(self): a_str = ''' model case_07() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.5; k2=0.4 end ''' p_str = ''' mod1 = model "case_07" sim1 = 
simulate uniform(0, 10, 100) task1 = run sim1 on mod1 repeat1 = repeat task1 for S1 in [1, 3, 5], reset=True report task1.time, task1.S1, task1.S2, task1.S1/task1.S2 report repeat1.time, repeat1.S1, repeat1.S2, repeat1.S1/repeat1.S2 ''' self.run_example(a_str, p_str) def test_case_08(self): a_str = ''' model case_08() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.5; k2=0.4 end ''' p_str = ''' mod1 = model "case_08" mod2 = model "case_08" sim1 = simulate uniform(0, 10, 20) sim2 = simulate uniform(0, 3, 10) task1 = run sim1 on mod1 task2 = run sim2 on mod1 repeat1 = repeat [task1, task2] for S2 in uniform(0, 10, 9), mod1.S1 = S2+3, reset=False plot "Repeated Multiple Subtasks" repeat1.mod1.time vs repeat1.mod1.S1, repeat1.mod1.S2 # plot "Repeated Multiple Subtasks" repeat1.mod2.time vs repeat1.mod2.S1, repeat1.mod2.S2 ''' self.run_example(a_str, p_str) def test_case_09(self): a_str = ''' // Created by libAntimony v2.9 model *case_09() // Compartments and Species: compartment compartment_; species MKKK in compartment_, MKKK_P in compartment_, MKK in compartment_; species MKK_P in compartment_, MKK_PP in compartment_, MAPK in compartment_; species MAPK_P in compartment_, MAPK_PP in compartment_; // Reactions: J0: MKKK => MKKK_P; (J0_V1*MKKK)/((1 + (MAPK_PP/J0_Ki)^J0_n)*(J0_K1 + MKKK)); J1: MKKK_P => MKKK; (J1_V2*MKKK_P)/(J1_KK2 + MKKK_P); J2: MKK => MKK_P; (J2_k3*MKKK_P*MKK)/(J2_KK3 + MKK); J3: MKK_P => MKK_PP; (J3_k4*MKKK_P*MKK_P)/(J3_KK4 + MKK_P); J4: MKK_PP => MKK_P; (J4_V5*MKK_PP)/(J4_KK5 + MKK_PP); J5: MKK_P => MKK; (J5_V6*MKK_P)/(J5_KK6 + MKK_P); J6: MAPK => MAPK_P; (J6_k7*MKK_PP*MAPK)/(J6_KK7 + MAPK); J7: MAPK_P => MAPK_PP; (J7_k8*MKK_PP*MAPK_P)/(J7_KK8 + MAPK_P); J8: MAPK_PP => MAPK_P; (J8_V9*MAPK_PP)/(J8_KK9 + MAPK_PP); J9: MAPK_P => MAPK; (J9_V10*MAPK_P)/(J9_KK10 + MAPK_P); // Species initializations: MKKK = 90; MKKK_P = 10; MKK = 280; MKK_P = 10; MKK_PP = 10; MAPK = 280; MAPK_P = 10; MAPK_PP = 10; // Compartment initializations: compartment_ = 1; // 
Variable initializations: J0_V1 = 2.5; J0_Ki = 9; J0_n = 1; J0_K1 = 10; J1_V2 = 0.25; J1_KK2 = 8; J2_k3 = 0.025; J2_KK3 = 15; J3_k4 = 0.025; J3_KK4 = 15; J4_V5 = 0.75; J4_KK5 = 15; J5_V6 = 0.75; J5_KK6 = 15; J6_k7 = 0.025; J6_KK7 = 15; J7_k8 = 0.025; J7_KK8 = 15; J8_V9 = 0.5; J8_KK9 = 15; J9_V10 = 0.5; J9_KK10 = 15; // Other declarations: const compartment_, J0_V1, J0_Ki, J0_n, J0_K1, J1_V2, J1_KK2, J2_k3, J2_KK3; const J3_k4, J3_KK4, J4_V5, J4_KK5, J5_V6, J5_KK6, J6_k7, J6_KK7, J7_k8; const J7_KK8, J8_V9, J8_KK9, J9_V10, J9_KK10; end ''' p_str = ''' mod1 = model "case_09" # sim1 = simulate uniform_stochastic(0, 4000, 1000) sim1 = simulate uniform(0, 4000, 1000) task1 = run sim1 on mod1 repeat1 = repeat task1 for local.x in uniform(0, 10, 10), reset=true plot "MAPK oscillations" repeat1.MAPK vs repeat1.time vs repeat1.MAPK_P, repeat1.MAPK vs repeat1.time vs repeat1.MAPK_PP, repeat1.MAPK vs repeat1.time vs repeat1.MKK report repeat1.MAPK vs repeat1.time vs repeat1.MAPK_P, repeat1.MAPK vs repeat1.time vs repeat1.MAPK_PP, repeat1.MAPK vs repeat1.time vs repeat1.MKK ''' self.run_example(a_str, p_str) def test_case_10(self): a_str = ''' model case_10() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.5; k2=0.4 end ''' p_str = ''' mod1 = model "case_10" mod2 = model "case_10" sim1 = simulate uniform(0, 10, 100) sim2 = simulate uniform(0, 3, 10) task1 = run sim1 on mod1 task2 = run sim2 on mod2 repeat1 = repeat [task1, task2] for local.X in uniform(0, 10, 9), mod1.S1 = X, mod2.S1 = X+3 plot repeat1.mod1.time vs repeat1.mod1.S1, repeat1.mod1.S2, repeat1.mod2.time vs repeat1.mod2.S1, repeat1.mod2.S2 ''' self.run_example(a_str, p_str) def test_case_11(self): a_str = ''' model case_11() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.5; k2=0.4 end ''' p_str = ''' mod1 = model "case_11" sim1 = simulate uniform(0, 10, 100) task1 = run sim1 on mod1 rtask1 = repeat task1 for k1 in uniform(0, 1, 2) rtask2 = repeat rtask1 for k2 in uniform(0, 1, 3) rtask3 = repeat rtask2 
for S1 in [5, 10], reset=true plot "RepeatedTask of RepeatedTask" rtask3.time vs rtask3.S1, rtask3.S2 plot rtask3.k1 vs rtask3.k2 vs rtask3.S1 ''' self.run_example(a_str, p_str) def test_case_12(self): a_str = ''' model case_12() J0: S1 -> S2; k1*S1-k2*S2 S1 = 10.0; S2 = 0.0; k1 = 0.2; k2=0.01 end ''' p_str = ''' mod1 = model "case_12" sim1 = simulate uniform(0, 2, 10, 49) sim2 = simulate uniform(0, 15, 49) task1 = run sim1 on mod1 task2 = run sim2 on mod1 repeat1 = repeat task1 for S1 in uniform(0, 10, 4), S2 = S1+20, reset=true repeat2 = repeat task2 for S1 in uniform(0, 10, 4), S2 = S1+20, reset=true plot "Offset simulation" repeat2.time vs repeat2.S1, repeat2.S2, repeat1.time vs repeat1.S1, repeat1.S2 report repeat2.time vs repeat2.S1, repeat2.S2, repeat1.time vs repeat1.S1, repeat1.S2 ''' self.run_example(a_str, p_str) def test_lorenz(self): a_str = ''' model lorenz x' = sigma*(y - x); y' = x*(rho - z) - y; z' = x*y - beta*z; x = 0.96259; y = 2.07272; z = 18.65888; sigma = 10; rho = 28; beta = 2.67; end ''' p_str = ''' model1 = model "lorenz" sim1 = simulate uniform(0,15,2000) task1 = run sim1 on model1 plot task1.z vs task1.x ''' self.run_example(a_str, p_str) def test_oneStep(self): a_str = ''' // Created by libAntimony v2.9 model *oneStep() // Compartments and Species: compartment compartment_; species S1 in compartment_, S2 in compartment_, $X0 in compartment_, $X1 in compartment_; species $X2 in compartment_; // Reactions: J0: $X0 => S1; J0_v0; J1: S1 => $X1; J1_k3*S1; J2: S1 => S2; (J2_k1*S1 - J2_k_1*S2)*(1 + J2_c*S2^J2_q); J3: S2 => $X2; J3_k2*S2; // Species initializations: S1 = 0; S2 = 1; X0 = 1; X1 = 0; X2 = 0; // Compartment initializations: compartment_ = 1; // Variable initializations: J0_v0 = 8; J1_k3 = 0; J2_k1 = 1; J2_k_1 = 0; J2_c = 1; J2_q = 3; J3_k2 = 5; // Other declarations: const compartment_, J0_v0, J1_k3, J2_k1, J2_k_1, J2_c, J2_q, J3_k2; end ''' p_str = ''' model1 = model "oneStep" stepper = simulate onestep(0.1) task0 = run stepper on 
model1 task1 = repeat task0 for local.x in uniform(0, 10, 100), J0_v0 = piecewise(8, x<4, 0.1, 4<=x<6, 8) plot "One Step Simulation" task1.time vs task1.S1, task1.S2, task1.J0_v0 report task1.time vs task1.S1, task1.S2, task1.J0_v0 ''' self.run_example(a_str, p_str) def test_parameterScan1D(self): a_str = ''' // Created by libAntimony v2.9 model *parameterScan1D() // Compartments and Species: compartment compartment_; species S1 in compartment_, S2 in compartment_, $X0 in compartment_, $X1 in compartment_; species $X2 in compartment_; // Reactions: J0: $X0 => S1; J0_v0; J1: S1 => $X1; J1_k3*S1; J2: S1 => S2; (J2_k1*S1 - J2_k_1*S2)*(1 + J2_c*S2^J2_q); J3: S2 => $X2; J3_k2*S2; // Species initializations: S1 = 0; S2 = 1; X0 = 1; X1 = 0; X2 = 0; // Compartment initializations: compartment_ = 1; // Variable initializations: J0_v0 = 8; J1_k3 = 0; J2_k1 = 1; J2_k_1 = 0; J2_c = 1; J2_q = 3; J3_k2 = 5; // Other declarations: const compartment_, J0_v0, J1_k3, J2_k1, J2_k_1, J2_c, J2_q, J3_k2; end ''' p_str = ''' model1 = model "parameterScan1D" timecourse1 = simulate uniform(0, 20, 1000) task0 = run timecourse1 on model1 task1 = repeat task0 for J0_v0 in [8, 4, 0.4], reset=true plot task1.time vs task1.S1, task1.S2 ''' self.run_example(a_str, p_str) def test_parameterScan2D(self): a_str = ''' // Created by libAntimony v2.9 model *parameterScan2D() // Compartments and Species: compartment compartment_; species MKKK in compartment_, MKKK_P in compartment_, MKK in compartment_; species MKK_P in compartment_, MKK_PP in compartment_, MAPK in compartment_; species MAPK_P in compartment_, MAPK_PP in compartment_; // Reactions: J0: MKKK => MKKK_P; (J0_V1*MKKK)/((1 + (MAPK_PP/J0_Ki)^J0_n)*(J0_K1 + MKKK)); J1: MKKK_P => MKKK; (J1_V2*MKKK_P)/(J1_KK2 + MKKK_P); J2: MKK => MKK_P; (J2_k3*MKKK_P*MKK)/(J2_KK3 + MKK); J3: MKK_P => MKK_PP; (J3_k4*MKKK_P*MKK_P)/(J3_KK4 + MKK_P); J4: MKK_PP => MKK_P; (J4_V5*MKK_PP)/(J4_KK5 + MKK_PP); J5: MKK_P => MKK; (J5_V6*MKK_P)/(J5_KK6 + MKK_P); J6: MAPK => 
MAPK_P; (J6_k7*MKK_PP*MAPK)/(J6_KK7 + MAPK); J7: MAPK_P => MAPK_PP; (J7_k8*MKK_PP*MAPK_P)/(J7_KK8 + MAPK_P); J8: MAPK_PP => MAPK_P; (J8_V9*MAPK_PP)/(J8_KK9 + MAPK_PP); J9: MAPK_P => MAPK; (J9_V10*MAPK_P)/(J9_KK10 + MAPK_P); // Species initializations: MKKK = 90; MKKK_P = 10; MKK = 280; MKK_P = 10; MKK_PP = 10; MAPK = 280; MAPK_P = 10; MAPK_PP = 10; // Compartment initializations: compartment_ = 1; // Variable initializations: J0_V1 = 2.5; J0_Ki = 9; J0_n = 1; J0_K1 = 10; J1_V2 = 0.25; J1_KK2 = 8; J2_k3 = 0.025; J2_KK3 = 15; J3_k4 = 0.025; J3_KK4 = 15; J4_V5 = 0.75; J4_KK5 = 15; J5_V6 = 0.75; J5_KK6 = 15; J6_k7 = 0.025; J6_KK7 = 15; J7_k8 = 0.025; J7_KK8 = 15; J8_V9 = 0.5; J8_KK9 = 15; J9_V10 = 0.5; J9_KK10 = 15; // Other declarations: const compartment_, J0_V1, J0_Ki, J0_n, J0_K1, J1_V2, J1_KK2, J2_k3, J2_KK3; const J3_k4, J3_KK4, J4_V5, J4_KK5, J5_V6, J5_KK6, J6_k7, J6_KK7, J7_k8; const J7_KK8, J8_V9, J8_KK9, J9_V10, J9_KK10; end ''' p_str = ''' model_3 = model "parameterScan2D" sim_repeat = simulate uniform(0,3000,100) task_1 = run sim_repeat on model_3 repeatedtask_1 = repeat task_1 for J1_KK2 in [1, 5, 10, 50, 60, 70, 80, 90, 100], reset=true repeatedtask_2 = repeat repeatedtask_1 for J4_KK5 in uniform(1, 40, 10), reset=true plot repeatedtask_2.J4_KK5 vs repeatedtask_2.J1_KK2 plot repeatedtask_2.time vs repeatedtask_2.MKK, repeatedtask_2.MKK_P ''' self.run_example(a_str, p_str) def test_repeatedStochastic(self): a_str = ''' // Created by libAntimony v2.9 model *repeatedStochastic() // Compartments and Species: compartment compartment_; species MKKK in compartment_, MKKK_P in compartment_, MKK in compartment_; species MKK_P in compartment_, MKK_PP in compartment_, MAPK in compartment_; species MAPK_P in compartment_, MAPK_PP in compartment_; // Reactions: J0: MKKK => MKKK_P; (J0_V1*MKKK)/((1 + (MAPK_PP/J0_Ki)^J0_n)*(J0_K1 + MKKK)); J1: MKKK_P => MKKK; (J1_V2*MKKK_P)/(J1_KK2 + MKKK_P); J2: MKK => MKK_P; (J2_k3*MKKK_P*MKK)/(J2_KK3 + MKK); J3: MKK_P => MKK_PP; 
(J3_k4*MKKK_P*MKK_P)/(J3_KK4 + MKK_P); J4: MKK_PP => MKK_P; (J4_V5*MKK_PP)/(J4_KK5 + MKK_PP); J5: MKK_P => MKK; (J5_V6*MKK_P)/(J5_KK6 + MKK_P); J6: MAPK => MAPK_P; (J6_k7*MKK_PP*MAPK)/(J6_KK7 + MAPK); J7: MAPK_P => MAPK_PP; (J7_k8*MKK_PP*MAPK_P)/(J7_KK8 + MAPK_P); J8: MAPK_PP => MAPK_P; (J8_V9*MAPK_PP)/(J8_KK9 + MAPK_PP); J9: MAPK_P => MAPK; (J9_V10*MAPK_P)/(J9_KK10 + MAPK_P); // Species initializations: MKKK = 90; MKKK_P = 10; MKK = 280; MKK_P = 10; MKK_PP = 10; MAPK = 280; MAPK_P = 10; MAPK_PP = 10; // Compartment initializations: compartment_ = 1; // Variable initializations: J0_V1 = 2.5; J0_Ki = 9; J0_n = 1; J0_K1 = 10; J1_V2 = 0.25; J1_KK2 = 8; J2_k3 = 0.025; J2_KK3 = 15; J3_k4 = 0.025; J3_KK4 = 15; J4_V5 = 0.75; J4_KK5 = 15; J5_V6 = 0.75; J5_KK6 = 15; J6_k7 = 0.025; J6_KK7 = 15; J7_k8 = 0.025; J7_KK8 = 15; J8_V9 = 0.5; J8_KK9 = 15; J9_V10 = 0.5; J9_KK10 = 15; // Other declarations: const compartment_, J0_V1, J0_Ki, J0_n, J0_K1, J1_V2, J1_KK2, J2_k3, J2_KK3; const J3_k4, J3_KK4, J4_V5, J4_KK5, J5_V6, J5_KK6, J6_k7, J6_KK7, J7_k8; const J7_KK8, J8_V9, J8_KK9, J9_V10, J9_KK10; end ''' p_str = ''' model1 = model "repeatedStochastic" timecourse1 = simulate uniform_stochastic(0, 4000, 1000) timecourse1.algorithm.seed = 1003 timecourse2 = simulate uniform_stochastic(0, 4000, 1000) task1 = run timecourse1 on model1 task2 = run timecourse2 on model1 repeat1 = repeat task1 for local.x in uniform(0, 10, 10), reset=true repeat2 = repeat task2 for local.x in uniform(0, 10, 10), reset=true plot "Repeats with SEED" repeat1.time vs repeat1.MAPK, repeat1.MAPK_P, repeat1.MAPK_PP, repeat1.MKK, repeat1.MKK_P, repeat1.MKKK, repeat1.MKKK_P plot "Repeats without SEED" repeat2.time vs repeat2.MAPK, repeat2.MAPK_P, repeat2.MAPK_PP, repeat2.MKK, repeat2.MKK_P, repeat2.MKKK, repeat2.MKKK_P ''' self.run_example(a_str, p_str) def test_repressilator(self): # Get SBML from URN and set for phrasedml urn = "urn:miriam:biomodels.db:BIOMD0000000012" sbml_str = 
temiriam.getSBMLFromBiomodelsURN(urn=urn) return_code = phrasedml.setReferencedSBML(urn, sbml_str) assert return_code # valid SBML # <SBML species> # PX - LacI protein # PY - TetR protein # PZ - cI protein # X - LacI mRNA # Y - TetR mRNA # Z - cI mRNA # <SBML parameters> # ps_a - tps_active: Transcription from free promotor in transcripts per second and promotor # ps_0 - tps_repr: Transcription from fully repressed promotor in transcripts per second and promotor phrasedml_str = """ model1 = model "{}" model2 = model model1 with ps_0=1.3E-5, ps_a=0.013 sim1 = simulate uniform(0, 1000, 1000) task1 = run sim1 on model1 task2 = run sim1 on model2 # A simple timecourse simulation plot "Timecourse of repressilator" task1.time vs task1.PX, task1.PZ, task1.PY # Applying preprocessing plot "Timecourse after pre-processing" task2.time vs task2.PX, task2.PZ, task2.PY # Applying postprocessing plot "Timecourse after post-processing" task1.PX/max(task1.PX) vs task1.PZ/max(task1.PZ), \ task1.PY/max(task1.PY) vs task1.PX/max(task1.PX), \ task1.PZ/max(task1.PZ) vs task1.PY/max(task1.PY) """.format(urn) # convert to sedml print(phrasedml_str) sedml_str = phrasedml.convertString(phrasedml_str) if sedml_str is None: print(phrasedml.getLastError()) raise IOError("sedml could not be generated") # run SEDML directly try: tmp_dir = tempfile.mkdtemp() executeSEDML(sedml_str, workingDir=tmp_dir) finally: shutil.rmtree(tmp_dir) # create combine archive and execute try: tmp_dir = tempfile.mkdtemp() sedml_location = "repressilator_sedml.xml" sedml_path = os.path.join(tmp_dir, sedml_location) omex_path = os.path.join(tmp_dir, "repressilator.omex") with open(sedml_path, "w") as f: f.write(sedml_str) entries = [ omex.Entry(location=sedml_location, formatKey="sedml", master=True) ] omex.combineArchiveFromEntries(omexPath=omex_path, entries=entries, workingDir=tmp_dir) executeCombineArchive(omex_path, workingDir=tmp_dir) finally: shutil.rmtree(tmp_dir) def test_simpletimecourse(self): a_str = ''' 
// Created by libAntimony v2.9 model MAPKcascade() // Compartments and Species: compartment compartment_; species MKKK in compartment_, MKKK_P in compartment_, MKK in compartment_; species MKK_P in compartment_, MKK_PP in compartment_, MAPK in compartment_; species MAPK_P in compartment_, MAPK_PP in compartment_; // Reactions: J0: MKKK => MKKK_P; (J0_V1*MKKK)/((1 + (MAPK_PP/J0_Ki)^J0_n)*(J0_K1 + MKKK)); J1: MKKK_P => MKKK; (J1_V2*MKKK_P)/(J1_KK2 + MKKK_P); J2: MKK => MKK_P; (J2_k3*MKKK_P*MKK)/(J2_KK3 + MKK); J3: MKK_P => MKK_PP; (J3_k4*MKKK_P*MKK_P)/(J3_KK4 + MKK_P); J4: MKK_PP => MKK_P; (J4_V5*MKK_PP)/(J4_KK5 + MKK_PP); J5: MKK_P => MKK; (J5_V6*MKK_P)/(J5_KK6 + MKK_P); J6: MAPK => MAPK_P; (J6_k7*MKK_PP*MAPK)/(J6_KK7 + MAPK); J7: MAPK_P => MAPK_PP; (J7_k8*MKK_PP*MAPK_P)/(J7_KK8 + MAPK_P); J8: MAPK_PP => MAPK_P; (J8_V9*MAPK_PP)/(J8_KK9 + MAPK_PP); J9: MAPK_P => MAPK; (J9_V10*MAPK_P)/(J9_KK10 + MAPK_P); // Species initializations: MKKK = 90; MKKK_P = 10; MKK = 280; MKK_P = 10; MKK_PP = 10; MAPK = 280; MAPK_P = 10; MAPK_PP = 10; // Compartment initializations: compartment_ = 1; // Variable initializations: J0_V1 = 2.5; J0_Ki = 9; J0_n = 1; J0_K1 = 10; J1_V2 = 0.25; J1_KK2 = 8; J2_k3 = 0.025; J2_KK3 = 15; J3_k4 = 0.025; J3_KK4 = 15; J4_V5 = 0.75; J4_KK5 = 15; J5_V6 = 0.75; J5_KK6 = 15; J6_k7 = 0.025; J6_KK7 = 15; J7_k8 = 0.025; J7_KK8 = 15; J8_V9 = 0.5; J8_KK9 = 15; J9_V10 = 0.5; J9_KK10 = 15; // Other declarations: const compartment_, J0_V1, J0_Ki, J0_n, J0_K1, J1_V2, J1_KK2, J2_k3, J2_KK3; const J3_k4, J3_KK4, J4_V5, J4_KK5, J5_V6, J5_KK6, J6_k7, J6_KK7, J7_k8; const J7_KK8, J8_V9, J8_KK9, J9_V10, J9_KK10; end ''' p_str = ''' model1 = model "MAPKcascade" sim1 = simulate uniform(0,4000,1000) task1 = run sim1 on model1 plot task1.time vs task1.MAPK, task1.MAPK_P, task1.MAPK_PP ''' self.run_example(a_str, p_str)
apache-2.0
gangadhar-kadam/verve_erp
erpnext/accounts/page/accounts_browser/accounts_browser.py
6
1476
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe import frappe.defaults from frappe.utils import flt from erpnext.accounts.utils import get_balance_on @frappe.whitelist() def get_companies(): """get a list of companies based on permission""" return [d.name for d in frappe.get_list("Company", fields=["name"], order_by="name")] @frappe.whitelist() def get_children(): args = frappe.local.form_dict ctype, company = args['ctype'], args['comp'] # root if args['parent'] in ("Accounts", "Cost Centers"): acc = frappe.db.sql(""" select name as value, if(group_or_ledger='Group', 1, 0) as expandable from `tab%s` where ifnull(parent_%s,'') = '' and `company` = %s and docstatus<2 order by name""" % (ctype, ctype.lower().replace(' ','_'), '%s'), company, as_dict=1) else: # other acc = frappe.db.sql("""select name as value, if(group_or_ledger='Group', 1, 0) as expandable from `tab%s` where ifnull(parent_%s,'') = %s and docstatus<2 order by name""" % (ctype, ctype.lower().replace(' ','_'), '%s'), args['parent'], as_dict=1) if ctype == 'Account': currency = frappe.db.sql("select default_currency from `tabCompany` where name = %s", company)[0][0] for each in acc: bal = get_balance_on(each.get("value")) each["currency"] = currency each["balance"] = flt(bal) return acc
agpl-3.0
reubano/csvkit
tests/test_table.py
21
7151
#!/usr/bin/env python import datetime import six try: import unittest2 as unittest except ImportError: import unittest from csvkit import table class TestColumn(unittest.TestCase): def setUp(self): self.c = table.Column(0, u'test', [u'test', u'column', None]) self.c2 = table.Column(0, u'test', [0, 1, 42], normal_type=int) self.c3 = table.Column(0, u'test', [datetime.datetime(2007, 1, 1, 12, 13, 14)], normal_type=datetime.datetime) def test_create_column(self): self.assertEqual(type(self.c), table.Column) self.assertEqual(self.c.order, 0) self.assertEqual(self.c.name, u'test') self.assertEqual(self.c.type, six.text_type) self.assertEqual(self.c, [u'test', u'column', None]) def test_slice(self): self.assertEqual(self.c[1:], [u'column', None]) def test_access(self): self.assertEqual(self.c[-1], None) def test_out_of_bounds(self): self.assertEqual(self.c[27], None) def test_has_nulls(self): self.assertEqual(self.c.has_nulls(), True) def test_no_null(self): self.assertEqual(self.c2.has_nulls(), False) def test_max_length(self): self.assertEqual(self.c.max_length(), 6) self.assertEqual(self.c2.max_length(), 0) self.assertEqual(self.c3.max_length(), 0) class TestTable(unittest.TestCase): def test_from_csv(self): with open('examples/testfixed_converted.csv', 'r') as f: t = table.Table.from_csv(f) self.assertEqual(type(t), table.Table) self.assertEqual(type(t[0]), table.Column) self.assertEqual(len(t), 8) self.assertEqual(t[2][0], 40) self.assertEqual(type(t[2][0]), int) self.assertEqual(t[3][0], True) self.assertEqual(type(t[3][0]), bool) def test_extra_header(self): with open('examples/test_extra_header.csv', 'r') as f: t = table.Table.from_csv(f) self.assertEqual(type(t), table.Table) self.assertEqual(type(t[0]), table.Column) self.assertEqual(len(t), 4) self.assertEqual(t[0], [1]) self.assertEqual(t[1], [2]) self.assertEqual(t[2], [3]) self.assertEqual(t[3], [None]) def test_from_csv_no_inference(self): with open('examples/testfixed_converted.csv', 'r') as f: t = 
table.Table.from_csv(f, infer_types=False) self.assertEqual(type(t), table.Table) self.assertEqual(type(t[0]), table.Column) self.assertEqual(len(t), 8) self.assertEqual(t[2][0], '40') self.assertEqual(type(t[2][0]), six.text_type) self.assertEqual(t[3][0], 'True') self.assertEqual(type(t[3][0]), six.text_type) def test_to_csv(self): with open('examples/testfixed_converted.csv', 'r') as f: contents = f.read() f.seek(0) o = six.StringIO() table.Table.from_csv(f).to_csv(o) conversion = o.getvalue() o.close() self.assertEqual(contents, conversion) def test_table_append(self): c = table.Column(0, u'test', [u'test', u'column', u'']) t = table.Table() t.append(c) self.assertEqual(len(t), 1) self.assertEqual(t[0], c) def test_table_append_duplicate_name(self): c = table.Column(0, u'test', [u'test', u'column', u'']) c2 = table.Column(0, u'test', [u'test', u'column', u'']) c3 = table.Column(0, u'test', [u'test', u'column', u'']) t = table.Table() t.append(c) t.append(c2) t.append(c3) self.assertEqual(t[0].name, 'test') self.assertEqual(t[1].name, 'test_2') self.assertEqual(t[2].name, 'test_3') def test_table_insert(self): c = table.Column(0, u'test', [u'test', u'column', u'']) c2 = table.Column(0, u'test', [u'test', u'column', u'']) t = table.Table([c]) t.insert(0, c2) self.assertEqual(len(t), 2) self.assertEqual(t[0], c2) self.assertEqual(t[1], c) self.assertEqual(t[0].order, 0) self.assertEqual(t[1].order, 1) def test_table_extend(self): c = table.Column(0, u'test', [u'test', u'column', u'']) c2 = table.Column(0, u'test', [u'test', u'column', u'']) c3 = table.Column(0, u'test', [u'test', u'column', u'']) t = table.Table([c]) t.extend([c2, c3]) self.assertEqual(len(t), 3) self.assertEqual(t[0], c) self.assertEqual(t[1], c2) self.assertEqual(t[2], c3) self.assertEqual(t[0].order, 0) self.assertEqual(t[1].order, 1) self.assertEqual(t[2].order, 2) def test_table_remove(self): c = table.Column(0, u'test', [u'test', u'column', u'']) c2 = table.Column(0, u'test', [u'test', 
u'column', u'']) c3 = table.Column(0, u'test', [u'test', u'column', u'']) t = table.Table([c, c2, c3]) t.remove(c2) self.assertEqual(len(t), 2) self.assertEqual(t[0], c) self.assertEqual(t[1], c3) self.assertEqual(t[0].order, 0) self.assertEqual(t[1].order, 1) def test_table_sort(self): t = table.Table() self.assertRaises(NotImplementedError, t.sort) def test_table_reverse(self): t = table.Table() self.assertRaises(NotImplementedError, t.reverse) def test_table_count_rows(self): c = table.Column(0, u'test', [u'test', u'column', u'']) c_short = table.Column(0, u'test', [u'test']) c_long = table.Column(0, u'test', [u'', u'', u'', u'']) t = table.Table() self.assertEqual(t.count_rows(), 0) t.append(c) self.assertEqual(t.count_rows(), 3) t.append(c_short) self.assertEqual(t.count_rows(), 3) t.append(c_long) self.assertEqual(t.count_rows(), 4) def test_table_row(self): c = table.Column(0, u'test', [u'test', u'column', u'']) c2 = table.Column(0, u'test', [u'test', u'column', u'']) c3 = table.Column(0, u'test', [u'test', u'column', u'']) t = table.Table([c, c2, c3]) self.assertEqual(t.row(1), [u'column', u'column', u'column']) def test_table_row_out_of_bounds(self): c = table.Column(0, u'test', [u'test', u'column', u'']) c2 = table.Column(0, u'test', [u'test', u'column', u'']) c3 = table.Column(0, u'test', [u'test', u'column', u'']) t = table.Table([c, c2, c3]) self.assertRaises(IndexError, t.row, -1) self.assertRaises(IndexError, t.row, 3) def test_table_uneven_columns(self): c = table.Column(0, u'test', [u'test', u'column', u'']) c_short = table.Column(0, u'test', [u'test']) c_long = table.Column(0, u'test', [u'', u'', u'', u'way out here']) t = table.Table([c, c_short, c_long]) self.assertEqual(t.row(0), [u'test', u'test', None]) self.assertEqual(t.row(1), [u'column', None, None]) self.assertEqual(t.row(2), [None, None, None]) self.assertEqual(t.row(3), [None, None, u'way out here'])
mit
Reivajar/MushroomsProject
lib/flask/__init__.py
345
1672
# -*- coding: utf-8 -*- """ flask ~~~~~ A microframework based on Werkzeug. It's extensively documented and follows best practice patterns. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ __version__ = '0.10' # utilities we import from Werkzeug and Jinja2 that are unused # in the module but are exported as public interface. from werkzeug.exceptions import abort from werkzeug.utils import redirect from jinja2 import Markup, escape from .app import Flask, Request, Response from .config import Config from .helpers import url_for, flash, send_file, send_from_directory, \ get_flashed_messages, get_template_attribute, make_response, safe_join, \ stream_with_context from .globals import current_app, g, request, session, _request_ctx_stack, \ _app_ctx_stack from .ctx import has_request_context, has_app_context, \ after_this_request, copy_current_request_context from .module import Module from .blueprints import Blueprint from .templating import render_template, render_template_string # the signals from .signals import signals_available, template_rendered, request_started, \ request_finished, got_request_exception, request_tearing_down, \ appcontext_tearing_down, appcontext_pushed, \ appcontext_popped, message_flashed # We're not exposing the actual json module but a convenient wrapper around # it. from . import json # This was the only thing that flask used to export at one point and it had # a more generic name. jsonify = json.jsonify # backwards compat, goes away in 1.0 from .sessions import SecureCookieSession as Session json_available = True
apache-2.0
rasata/pypes
ui/pypesvds/plugins/geocodetransformer/geocodetransformer.py
4
5316
import json
import urllib2
import logging
#import traceback

from pypes.component import Component

log = logging.getLogger(__name__)


class GeoCode(Component):
    """Transformer component that geocodes US zipcodes found on a document.

    For every zipcode stored as metadata on the configured address field
    (normally produced by the AddressExtractor), this component queries the
    geonames.org postal-code lookup service and stores the resulting
    cities, states and (lat, lon) coordinates back on the document.
    """
    __metatype__ = 'TRANSFORMER'

    def __init__(self):
        # initialize parent class
        Component.__init__(self)

        # the field that contains the address information
        # most likely from the AddressExtractor
        self.set_parameter('address_field', 'addresses')

        # log successful initialization message
        log.info('Component Initialized: %s' % self.__class__.__name__)

    def _geocode(self, zipcode):
        """Look up one zipcode at geonames.org.

        Returns the first postal-code record as a dict, or None when the
        request fails, times out, or the service returns no data.
        """
        response = None
        try:
            response = urllib2.urlopen(
                'http://ws.geonames.org/postalCodeLookup'
                'JSON?postalcode=%s&country=US&maxRows=1' % zipcode,
                timeout=2)
            # the above service returns JSON
            result = response.read()
        except:
            # broad catch on purpose: any network failure is non-fatal
            log.debug('Error getting GeoCode info for %s' % zipcode)
            return None
        finally:
            # BUG FIX: the original called response.close() unconditionally;
            # when urlopen() itself raised, response was still None and the
            # close() call raised AttributeError inside the finally block.
            if response is not None:
                response.close()

        # payload is mapped to 'postalcodes' and we're only asking for the
        # first result
        try:
            return json.loads(result)['postalcodes'][0]
        except (KeyError, IndexError):
            log.debug('No GeoCode information for %s' % zipcode)
            return None

    def run(self):
        """Component entry point: geocode each incoming doc and forward it."""
        while True:
            # get parameters outside doc loop for better performace
            try:
                addrfield = self.get_parameter('address_field')
                if addrfield is None:
                    # BUG FIX: original used Python-2-only statement syntax
                    # `raise ValueError, '...'`; the call form works on both.
                    raise ValueError('Address field not defined')
            except Exception as e:
                log.error('Component Failed: %s' % self.__class__.__name__)
                log.error('Reason: %s' % str(e))

                # optionally send all docs without processing
                for d in self.receive_all('in'):
                    self.send('out', d)

                self.yield_ctrl()
                continue  # so next time we are called we continue at the top

            # for each document waiting on our input port
            for doc in self.receive_all('in'):
                try:
                    # check that the address field actually exists
                    if not doc.has(addrfield):
                        log.debug('Address field does not exist')
                    # check that the address contains zip codes
                    elif not doc.has_meta('zipcodes', attr=addrfield):
                        log.debug('No zipcodes found in address')
                    else:
                        # gather the geo information for each zipcode
                        zipcodes = doc.get_meta('zipcodes', addrfield, [])
                        cities, states, coordinates = set(), set(), set()
                        for zipcode in zipcodes:
                            data = self._geocode(zipcode)
                            if data is None:
                                continue

                            # try and grab some data from the response
                            city = data.get('placeName', None)
                            if city is not None:
                                cities.add(city)

                            state = data.get('adminCode1', None)
                            if state is not None:
                                states.add(state)

                            # only save coords if we have lat and lon.
                            # BUG FIX: original tested `(lat or lon) is not
                            # None`, which recorded a half-empty pair when
                            # only one of the two values was present.
                            lat = data.get('lat', None)
                            lon = data.get('lng', None)
                            if lat is not None and lon is not None:
                                coordinates.add((lat, lon))

                        # save the GeoCode information to the document
                        if cities:
                            doc.set('cities', [c for c in cities], multi=True)
                        if states:
                            doc.set('states', [s for s in states], multi=True)
                        if coordinates:
                            doc.set('coordinates',
                                    [c for c in coordinates], multi=True)
                except Exception as e:
                    log.error('Component Failed: %s' % self.__class__.__name__)
                    log.error('Reason: %s' % str(e))
                    #log.error(traceback.print_exc())

                # send the document to the next component
                self.send('out', doc)

            # yield the CPU, allowing another component to run
            self.yield_ctrl()
apache-2.0
flyfei/python-for-android
python3-alpha/python3-src/Lib/tkinter/messagebox.py
164
3701
# tk common message boxes
#
# this module provides an interface to the native message boxes
# available in Tk 4.2 and newer.
#
# written by Fredrik Lundh, May 1997
#

#
# options (all have default values):
#
# - default: which button to make default (one of the reply codes)
#
# - icon: which icon to display (see below)
#
# - message: the message to display
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
# - type: dialog type; that is, which buttons to display (see below)
#

from tkinter.commondialog import Dialog

#
# constants

# icons
ERROR = "error"
INFO = "info"
QUESTION = "question"
WARNING = "warning"

# types (which buttons the dialog shows)
ABORTRETRYIGNORE = "abortretryignore"
OK = "ok"
OKCANCEL = "okcancel"
RETRYCANCEL = "retrycancel"
YESNO = "yesno"
YESNOCANCEL = "yesnocancel"

# replies (note: OK doubles as both a type and a reply value)
ABORT = "abort"
RETRY = "retry"
IGNORE = "ignore"
OK = "ok"
CANCEL = "cancel"
YES = "yes"
NO = "no"


#
# message dialog class

class Message(Dialog):
    "A message box"
    # Tcl command used by the underlying commondialog.Dialog machinery.
    command = "tk_messageBox"


#
# convenience stuff

# Rename _icon and _type options to allow overriding them in options
def _show(title=None, message=None, _icon=None, _type=None, **options):
    # explicit keyword options win over the per-function defaults
    if _icon and "icon" not in options:
        options["icon"] = _icon
    if _type and "type" not in options:
        options["type"] = _type
    if title:
        options["title"] = title
    if message:
        options["message"] = message
    res = Message(**options).show()
    # In some Tcl installations, yes/no is converted into a boolean.
    if isinstance(res, bool):
        if res:
            return YES
        return NO
    # In others we get a Tcl_Obj.
    return str(res)


def showinfo(title=None, message=None, **options):
    "Show an info message"
    return _show(title, message, INFO, OK, **options)


def showwarning(title=None, message=None, **options):
    "Show a warning message"
    return _show(title, message, WARNING, OK, **options)


def showerror(title=None, message=None, **options):
    "Show an error message"
    return _show(title, message, ERROR, OK, **options)


def askquestion(title=None, message=None, **options):
    "Ask a question"
    return _show(title, message, QUESTION, YESNO, **options)


def askokcancel(title=None, message=None, **options):
    "Ask if operation should proceed; return true if the answer is ok"
    s = _show(title, message, QUESTION, OKCANCEL, **options)
    return s == OK


def askyesno(title=None, message=None, **options):
    "Ask a question; return true if the answer is yes"
    s = _show(title, message, QUESTION, YESNO, **options)
    return s == YES


def askyesnocancel(title=None, message=None, **options):
    "Ask a question; return true if the answer is yes, None if cancelled."
    s = _show(title, message, QUESTION, YESNOCANCEL, **options)
    # s might be a Tcl index object, so convert it to a string
    s = str(s)
    if s == CANCEL:
        return None
    return s == YES


def askretrycancel(title=None, message=None, **options):
    "Ask if operation should be retried; return true if the answer is yes"
    s = _show(title, message, WARNING, RETRYCANCEL, **options)
    return s == RETRY


# --------------------------------------------------------------------
# test stuff

if __name__ == "__main__":

    print("info", showinfo("Spam", "Egg Information"))
    print("warning", showwarning("Spam", "Egg Warning"))
    print("error", showerror("Spam", "Egg Alert"))
    print("question", askquestion("Spam", "Question?"))
    print("proceed", askokcancel("Spam", "Proceed?"))
    print("yes/no", askyesno("Spam", "Got it?"))
    print("yes/no/cancel", askyesnocancel("Spam", "Want it?"))
    print("try again", askretrycancel("Spam", "Try again?"))
apache-2.0
joemarchese/PolyNanna
participants.py
1
3229
""" How to Use this File. participants is a dictionary where a key is the name of the participant and the value is a set of all the invalid selections for that participant. participants = {'Bob': {'Sue', 'Jim'}, 'Jim': {'Bob', 'Betty'}, } # And so on. history is a dictionary where a key is the name of the participant and the value is a list of tuples that contain a year and that participant's recipient for that year. history = {'Bob': [(2010, 'Betty'), (2011, 'Freddie')], 'Jim': [(2011, 'Sue'] # And so on. } """ participants = {'Adam': {'Adam', 'Jeff', 'Joe', 'David'}, 'Adrienne': {'Adrienne', 'Joe'}, 'Amanda': {'Amanda', 'Stefan' ,'Angela'}, 'Angela': {'Angela', 'Renee', 'Jeff', 'Nanna', 'Stefan', 'Justin', 'Amanda'}, 'David': {'David', 'Jeff', 'Joe', 'Adam', 'Shaina'}, 'Francesca': {'Francesca', 'Renee', 'George'}, 'George': {'George', 'Renee', 'Francesca'}, 'Jeff': {'Jeff', 'Renee', 'Angela', 'Nanna', 'Joe', 'Adam', 'David'}, 'Joe': {'Joe', 'Jeff', 'David', 'Adam', 'Adrienne'}, 'Justin': {'Justin', 'Angela', 'Stefan'}, 'Nanna': {'Nanna', 'Jeff', 'Angela', 'Renee'}, 'Renee': {'Renee', 'Jeff', 'Angela', 'Nanna', 'Francesca', 'George'}, 'Shaina': {'Shaina', 'David'}, 'Stefan': {'Stefan', 'Angela', 'Justin', 'Amanda'}, } history = {'Adam': [(2015, 'Justin'), (2016, 'Amanda'), (2017, 'Angela'), (2018, 'Stefan')], 'Adrienne': [(2016, 'Jeff'), (2017, 'Stefan'), (2018, 'Justin')], 'Amanda': [(2015, 'Adam'), (2016, 'Adrienne'), (2017, 'Jeff'), (2018, 'George')], 'Angela': [(2015, 'Joe'), (2016, 'David'), (2017, 'Francesca'), (2018, 'Adrienne')], 'David': [(2015, 'Stefan'), (2016, 'Francesca'), (2017, 'Renee')], 'Francesca': [(2015, 'Angela'), (2016, 'Joe'), (2017, 'Adam'), (2018, 'Jeff')], 'George': [(2015, 'Jeff'), (2016, 'Angela'), (2017, 'Adrienne'), (2018, 'Joe')], 'Jeff': [(2015, 'Nanna'), (2016, 'Justin'), (2017, 'Shaina'), (2018, 'Amanda')], 'Joe': [(2015, 'Renee'), (2016, 'George'), (2017, 'Justin'), (2018, 'Angela')], 'Justin': [(2015, 'Francesca'), (2016, 
'Adam'), (2017, 'George'), (2018, 'Renee')], 'Nanna': [(2015, 'David')], 'Renee': [(2015, 'Amanda'), (2016, 'Stefan'), (2017, 'David'), (2018, 'Adam')], 'Shaina': [(2017, 'Amanda')], 'Stefan': [(2015, 'George'), (2016, 'Renee'), (2017, 'Joe'), (2018, 'Francesca')], } class Participant: """The class for individual participants that contains their attributes.""" def __init__(self, name, restricted_set=None, giving_to=None): self.name = name self.restricted_set = restricted_set self.restricted_set = participants.get(self.name)|set([y[1] for y in history.get(self.name)]) self.giving_to = giving_to def main(): return sorted([Participant(p) for p in participants.keys()], key=lambda p: len(p.restricted_set), reverse=True) if __name__ == '__main__': main()
mit
jakevdp/networkx
networkx/algorithms/centrality/tests/test_degree_centrality.py
101
3046
""" Unit tests for degree centrality. """ from nose.tools import * import networkx as nx class TestDegreeCentrality: def __init__(self): self.K = nx.krackhardt_kite_graph() self.P3 = nx.path_graph(3) self.K5 = nx.complete_graph(5) F = nx.Graph() # Florentine families F.add_edge('Acciaiuoli','Medici') F.add_edge('Castellani','Peruzzi') F.add_edge('Castellani','Strozzi') F.add_edge('Castellani','Barbadori') F.add_edge('Medici','Barbadori') F.add_edge('Medici','Ridolfi') F.add_edge('Medici','Tornabuoni') F.add_edge('Medici','Albizzi') F.add_edge('Medici','Salviati') F.add_edge('Salviati','Pazzi') F.add_edge('Peruzzi','Strozzi') F.add_edge('Peruzzi','Bischeri') F.add_edge('Strozzi','Ridolfi') F.add_edge('Strozzi','Bischeri') F.add_edge('Ridolfi','Tornabuoni') F.add_edge('Tornabuoni','Guadagni') F.add_edge('Albizzi','Ginori') F.add_edge('Albizzi','Guadagni') F.add_edge('Bischeri','Guadagni') F.add_edge('Guadagni','Lamberteschi') self.F = F G = nx.DiGraph() G.add_edge(0,5) G.add_edge(1,5) G.add_edge(2,5) G.add_edge(3,5) G.add_edge(4,5) G.add_edge(5,6) G.add_edge(5,7) G.add_edge(5,8) self.G = G def test_degree_centrality_1(self): d = nx.degree_centrality(self.K5) exact = dict(zip(range(5), [1]*5)) for n,dc in d.items(): assert_almost_equal(exact[n], dc) def test_degree_centrality_2(self): d = nx.degree_centrality(self.P3) exact = {0:0.5, 1:1, 2:0.5} for n,dc in d.items(): assert_almost_equal(exact[n], dc) def test_degree_centrality_3(self): d = nx.degree_centrality(self.K) exact = {0:.444, 1:.444, 2:.333, 3:.667, 4:.333, 5:.556, 6:.556, 7:.333, 8:.222, 9:.111} for n,dc in d.items(): assert_almost_equal(exact[n], float("%5.3f" % dc)) def test_degree_centrality_4(self): d = nx.degree_centrality(self.F) names = sorted(self.F.nodes()) dcs = [0.071, 0.214, 0.143, 0.214, 0.214, 0.071, 0.286, 0.071, 0.429, 0.071, 0.214, 0.214, 0.143, 0.286, 0.214] exact = dict(zip(names, dcs)) for n,dc in d.items(): assert_almost_equal(exact[n], float("%5.3f" % dc)) def 
test_indegree_centrality(self): d = nx.in_degree_centrality(self.G) exact = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.625, 6: 0.125, 7: 0.125, 8: 0.125} for n,dc in d.items(): assert_almost_equal(exact[n], dc) def test_outdegree_centrality(self): d = nx.out_degree_centrality(self.G) exact = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.375, 6: 0.0, 7: 0.0, 8: 0.0} for n,dc in d.items(): assert_almost_equal(exact[n], dc)
bsd-3-clause
cl4rke/scikit-learn
benchmarks/bench_20newsgroups.py
377
3555
from __future__ import print_function, division
from time import time
import argparse
import numpy as np

from sklearn.dummy import DummyClassifier

from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array

from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB

# Candidate classifiers benchmarked on the vectorized 20newsgroups data;
# keys double as the valid values for the --estimators CLI flag.
ESTIMATORS = {
    "dummy": DummyClassifier(),
    "random_forest": RandomForestClassifier(n_estimators=100,
                                            max_features="sqrt",
                                            min_samples_split=10),
    "extra_trees": ExtraTreesClassifier(n_estimators=100,
                                        max_features="sqrt",
                                        min_samples_split=10),
    "logistic_regression": LogisticRegression(),
    "naive_bayes": MultinomialNB(),
    "adaboost": AdaBoostClassifier(n_estimators=10),
}


###############################################################################
# Data

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--estimators', nargs="+", required=True,
                        choices=ESTIMATORS)
    args = vars(parser.parse_args())

    # Sparse TF-IDF features; train as CSC, test as CSR (format chosen per
    # access pattern during fit vs. predict).
    data_train = fetch_20newsgroups_vectorized(subset="train")
    data_test = fetch_20newsgroups_vectorized(subset="test")
    X_train = check_array(data_train.data, dtype=np.float32,
                          accept_sparse="csc")
    X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
    y_train = data_train.target
    y_test = data_test.target

    print("20 newsgroups")
    print("=============")
    print("X_train.shape = {0}".format(X_train.shape))
    print("X_train.format = {0}".format(X_train.format))
    print("X_train.dtype = {0}".format(X_train.dtype))
    print("X_train density = {0}"
          "".format(X_train.nnz / np.product(X_train.shape)))
    print("y_train {0}".format(y_train.shape))
    print("X_test {0}".format(X_test.shape))
    print("X_test.format = {0}".format(X_test.format))
    print("X_test.dtype = {0}".format(X_test.dtype))
    print("y_test {0}".format(y_test.shape))
    print()

    print("Classifier Training")
    print("===================")
    accuracy, train_time, test_time = {}, {}, {}
    for name in sorted(args["estimators"]):
        clf = ESTIMATORS[name]
        # Fix the seed where the estimator supports it, for reproducibility.
        try:
            clf.set_params(random_state=0)
        except (TypeError, ValueError):
            pass

        print("Training %s ... " % name, end="")
        t0 = time()
        clf.fit(X_train, y_train)
        train_time[name] = time() - t0
        t0 = time()
        y_pred = clf.predict(X_test)
        test_time[name] = time() - t0
        accuracy[name] = accuracy_score(y_test, y_pred)
        print("done")

    print()
    print("Classification performance:")
    print("===========================")
    print()

    # Results table, sorted from least to most accurate.
    print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
                           "Accuracy"))
    print("-" * 44)
    for name in sorted(accuracy, key=accuracy.get):
        print("%s %s %s %s" % (name.ljust(16),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % accuracy[name]).center(10)))

    print()
bsd-3-clause
Archenemy-xiatian/foursquared
util/common.py
262
2820
#!/usr/bin/python

import logging

from xml.dom import minidom
from xml.dom import pulldom

BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]
CLASS_IMPORTS = {
#    'Checkin': DEFAULT_CLASS_IMPORTS + [
#        'import com.joelapenna.foursquare.filters.VenueFilterable'
#    ],
#    'Venue': DEFAULT_CLASS_IMPORTS + [
#        'import com.joelapenna.foursquare.filters.VenueFilterable'
#    ],
#    'Tip': DEFAULT_CLASS_IMPORTS + [
#        'import com.joelapenna.foursquare.filters.VenueFilterable'
#    ],
}

COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]

TYPES = COMPLEX + ['boolean']


def WalkNodesForAttributes(path):
    """Pull-parse the xml file at *path* and collect its attribute elements.

    Given a stanza like::

        <venue>
          <attribute>value</attribute>
        </venue>

    Returns:
      type_name - java-style name derived from the top node. "Venue"
      top_node_name - raw tag name of the top node. "venue"
      attributes - mapping of tag name -> (type, [child])
    """
    event_stream = pulldom.parse(path)

    type_name = None
    top_node_name = None
    attributes = {}
    skip_depth = 0

    for event, node in event_stream:
        # While inside a complex subtree, only track nesting depth so the
        # subtree's own elements are not recorded as attributes.
        if skip_depth > 0:
            if event == pulldom.END_ELEMENT:
                skip_depth -= 1
                logging.warn('(%s) Skip end: %s' % (str(skip_depth), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(skip_depth), node))
                skip_depth += 1
                continue

        # Only element starts carry information we care about.
        if event != pulldom.START_ELEMENT:
            continue

        logging.warn('Parsing: ' + node.tagName)

        # The very first element names the type; derive its java-style name.
        if type_name is None:
            name_parts = node.tagName.split('_')
            type_name = ''.join([part.capitalize() for part in name_parts])
            top_node_name = node.tagName
            logging.warn('Found Top Node Name: ' + top_node_name)
            continue

        attr_type = node.getAttribute('type')
        child = node.getAttribute('child')

        if attr_type in COMPLEX:
            # We don't want to walk complex types; skip their subtree.
            logging.warn('Found Complex: ' + node.tagName)
            skip_depth = 1
        elif attr_type not in TYPES:
            # Anything unrecognized is treated as a plain String.
            logging.warn('Found String: ' + attr_type)
            attr_type = STRING
        else:
            logging.warn('Found Type: ' + attr_type)

        logging.warn('Adding: ' + str((node, attr_type)))
        # First occurrence of a tag wins.
        attributes.setdefault(node.tagName, (attr_type, [child]))

    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
apache-2.0
markrawlingson/SickRage
autoProcessTV/lib/requests/packages/urllib3/util/timeout.py
1004
9544
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time

from ..exceptions import TimeoutStateError

# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()


def current_time():
    """
    Retrieve the current time. This function is mocked out in unit testing.
    """
    return time.time()


class Timeout(object):
    """ Timeout configuration.

    Timeouts can be defined as a default for a pool::

        timeout = Timeout(connect=2.0, read=7.0)
        http = PoolManager(timeout=timeout)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))

    Timeouts can be disabled by setting all the parameters to ``None``::

        no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/, timeout=no_timeout)

    :param total: Combined connect + read budget; when both a per-phase
        timeout and a total are given, the shorter applies. Defaults to None.
    :type total: integer, float, or None

    :param connect: Maximum time to wait for a connection attempt to succeed.
        Omitting it uses the system default; None means wait forever.
    :type connect: integer, float, or None

    :param read: Maximum time to wait between consecutive read operations on
        the response. Omitting it uses the system default; None means wait
        forever.
    :type read: integer, float, or None

    .. note::

        These timeouts only bound socket-level waits; DNS resolution, CPU
        load and server streaming behavior can all make a request take much
        longer in wall-clock time than any value configured here.
    """

    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT

    def __init__(self, total=None, connect=_Default, read=_Default):
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        self._start_connect = None

    def __str__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)

    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid.

        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
            used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If the type is not an integer or a float, or if it
            is a numeric value less than zero.
        """
        if value is _Default:
            return cls.DEFAULT_TIMEOUT

        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value

        # BUG FIX: bool is a subclass of int, so True/False previously passed
        # the float() check below and silently became 1s/0s timeouts.
        if isinstance(value, bool):
            raise ValueError("Timeout cannot be a boolean value. It must "
                             "be an int or float with a numeric value.")

        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))

        try:
            if value < 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than 0." % (name, value))
        except TypeError:  # Python 3: unorderable types raise TypeError
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))

        return value

    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.

        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.

        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)

    def clone(self):
        """ Create a copy of the timeout object

        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.

        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)

    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt

        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect

    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.

        :return: Elapsed time.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect

    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.

        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.

        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect

        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total

        return min(self._connect, self.total)

    @property
    def read_timeout(self):
        """ Get the value for the read timeout.

        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.

        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.

        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (self.total is not None and
                self.total is not self.DEFAULT_TIMEOUT and
                self._read is not None and
                self._read is not self.DEFAULT_TIMEOUT):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
gpl-3.0
premanandchandrasekar/boto
boto/datapipeline/layer1.py
14
28991
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import boto from boto.compat import json from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo from boto.exception import JSONResponseError from boto.datapipeline import exceptions class DataPipelineConnection(AWSQueryConnection): """ This is the AWS Data Pipeline API Reference . This guide provides descriptions and samples of the AWS Data Pipeline API. AWS Data Pipeline is a web service that configures and manages a data-driven workflow called a pipeline. AWS Data Pipeline handles the details of scheduling and ensuring that data dependencies are met so your application can focus on processing the data. The AWS Data Pipeline API implements two main sets of functionality. The first set of actions configure the pipeline in the web service. 
You call these actions to create a pipeline and define data sources, schedules, dependencies, and the transforms to be performed on the data. The second set of actions are used by a task runner application that calls the AWS Data Pipeline API to receive the next task ready for processing. The logic for performing the task, such as querying the data, running data analysis, or converting the data from one format to another, is contained within the task runner. The task runner performs the task assigned to it by the web service, reporting progress to the web service as it does so. When the task is done, the task runner reports the final success or failure of the task to the web service. AWS Data Pipeline provides an open-source implementation of a task runner called AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides logic for common data management scenarios, such as performing database queries and running data analysis using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data Pipeline Task Runner as your task runner, or you can write your own task runner to provide custom data management. The AWS Data Pipeline API uses the Signature Version 4 protocol for signing requests. For more information about how to sign a request with this protocol, see `Signature Version 4 Signing Process`_. In the code examples in this reference, the Signature Version 4 Request parameters are represented as AuthParams. 
""" APIVersion = "2012-10-29" DefaultRegionName = "us-east-1" DefaultRegionEndpoint = "datapipeline.us-east-1.amazonaws.com" ServiceName = "DataPipeline" TargetPrefix = "DataPipeline" ResponseError = JSONResponseError _faults = { "PipelineDeletedException": exceptions.PipelineDeletedException, "InvalidRequestException": exceptions.InvalidRequestException, "TaskNotFoundException": exceptions.TaskNotFoundException, "PipelineNotFoundException": exceptions.PipelineNotFoundException, "InternalServiceError": exceptions.InternalServiceError, } def __init__(self, **kwargs): region = kwargs.get('region') if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) kwargs['host'] = region.endpoint AWSQueryConnection.__init__(self, **kwargs) self.region = region def _required_auth_capability(self): return ['hmac-v4'] def activate_pipeline(self, pipeline_id): """ Validates a pipeline and initiates processing. If the pipeline does not pass validation, activation fails. Call this action to start processing pipeline tasks of a pipeline you've created using the CreatePipeline and PutPipelineDefinition actions. A pipeline cannot be modified after it has been successfully activated. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline to activate. """ params = {'pipelineId': pipeline_id, } return self.make_request(action='ActivatePipeline', body=json.dumps(params)) def create_pipeline(self, name, unique_id, description=None): """ Creates a new empty pipeline. When this action succeeds, you can then use the PutPipelineDefinition action to populate the pipeline. :type name: string :param name: The name of the new pipeline. You can use the same name for multiple pipelines associated with your AWS account, because AWS Data Pipeline assigns each new pipeline a unique pipeline identifier. :type unique_id: string :param unique_id: A unique identifier that you specify. 
This identifier is not the same as the pipeline identifier assigned by AWS Data Pipeline. You are responsible for defining the format and ensuring the uniqueness of this identifier. You use this parameter to ensure idempotency during repeated calls to CreatePipeline. For example, if the first call to CreatePipeline does not return a clear success, you can pass in the same unique identifier and pipeline name combination on a subsequent call to CreatePipeline. CreatePipeline ensures that if a pipeline already exists with the same name and unique identifier, a new pipeline will not be created. Instead, you'll receive the pipeline identifier from the previous attempt. The uniqueness of the name and unique identifier combination is scoped to the AWS account or IAM user credentials. :type description: string :param description: The description of the new pipeline. """ params = {'name': name, 'uniqueId': unique_id, } if description is not None: params['description'] = description return self.make_request(action='CreatePipeline', body=json.dumps(params)) def delete_pipeline(self, pipeline_id): """ Permanently deletes a pipeline, its pipeline definition and its run history. You cannot query or restore a deleted pipeline. AWS Data Pipeline will attempt to cancel instances associated with the pipeline that are currently being processed by task runners. Deleting a pipeline cannot be undone. To temporarily pause a pipeline instead of deleting it, call SetStatus with the status set to Pause on individual components. Components that are paused by SetStatus can be resumed. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline to be deleted. """ params = {'pipelineId': pipeline_id, } return self.make_request(action='DeletePipeline', body=json.dumps(params)) def describe_objects(self, object_ids, pipeline_id, marker=None, evaluate_expressions=None): """ Returns the object definitions for a set of objects associated with the pipeline. 
Object definitions are composed of a set of fields that define the properties of the object. :type pipeline_id: string :param pipeline_id: Identifier of the pipeline that contains the object definitions. :type object_ids: list :param object_ids: Identifiers of the pipeline objects that contain the definitions to be described. You can pass as many as 25 identifiers in a single call to DescribeObjects. :type evaluate_expressions: boolean :param evaluate_expressions: Indicates whether any expressions in the object should be evaluated when the object descriptions are returned. :type marker: string :param marker: The starting point for the results to be returned. The first time you call DescribeObjects, this value should be empty. As long as the action returns `HasMoreResults` as `True`, you can call DescribeObjects again and pass the marker value from the response to retrieve the next set of results. """ params = { 'pipelineId': pipeline_id, 'objectIds': object_ids, } if evaluate_expressions is not None: params['evaluateExpressions'] = evaluate_expressions if marker is not None: params['marker'] = marker return self.make_request(action='DescribeObjects', body=json.dumps(params)) def describe_pipelines(self, pipeline_ids): """ Retrieve metadata about one or more pipelines. The information retrieved includes the name of the pipeline, the pipeline identifier, its current state, and the user account that owns the pipeline. Using account credentials, you can retrieve metadata about pipelines that you or your IAM users have created. If you are using an IAM user account, you can retrieve metadata about only those pipelines you have read permission for. To retrieve the full pipeline definition instead of metadata about the pipeline, call the GetPipelineDefinition action. :type pipeline_ids: list :param pipeline_ids: Identifiers of the pipelines to describe. You can pass as many as 25 identifiers in a single call to DescribePipelines. 
You can obtain pipeline identifiers by calling ListPipelines. """ params = {'pipelineIds': pipeline_ids, } return self.make_request(action='DescribePipelines', body=json.dumps(params)) def evaluate_expression(self, pipeline_id, expression, object_id): """ Evaluates a string in the context of a specified object. A task runner can use this action to evaluate SQL queries stored in Amazon S3. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline. :type object_id: string :param object_id: The identifier of the object. :type expression: string :param expression: The expression to evaluate. """ params = { 'pipelineId': pipeline_id, 'objectId': object_id, 'expression': expression, } return self.make_request(action='EvaluateExpression', body=json.dumps(params)) def get_pipeline_definition(self, pipeline_id, version=None): """ Returns the definition of the specified pipeline. You can call GetPipelineDefinition to retrieve the pipeline definition you provided using PutPipelineDefinition. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline. :type version: string :param version: The version of the pipeline definition to retrieve. This parameter accepts the values `latest` (default) and `active`. Where `latest` indicates the last definition saved to the pipeline and `active` indicates the last definition of the pipeline that was activated. """ params = {'pipelineId': pipeline_id, } if version is not None: params['version'] = version return self.make_request(action='GetPipelineDefinition', body=json.dumps(params)) def list_pipelines(self, marker=None): """ Returns a list of pipeline identifiers for all active pipelines. Identifiers are returned only for pipelines you have permission to access. :type marker: string :param marker: The starting point for the results to be returned. The first time you call ListPipelines, this value should be empty. 
As long as the action returns `HasMoreResults` as `True`, you can call ListPipelines again and pass the marker value from the response to retrieve the next set of results. """ params = {} if marker is not None: params['marker'] = marker return self.make_request(action='ListPipelines', body=json.dumps(params)) def poll_for_task(self, worker_group, hostname=None, instance_identity=None): """ Task runners call this action to receive a task to perform from AWS Data Pipeline. The task runner specifies which tasks it can perform by setting a value for the workerGroup parameter of the PollForTask call. The task returned by PollForTask may come from any of the pipelines that match the workerGroup value passed in by the task runner and that was launched using the IAM user credentials specified by the task runner. If tasks are ready in the work queue, PollForTask returns a response immediately. If no tasks are available in the queue, PollForTask uses long-polling and holds on to a poll connection for up to a 90 seconds during which time the first newly scheduled task is handed to the task runner. To accomodate this, set the socket timeout in your task runner to 90 seconds. The task runner should not call PollForTask again on the same `workerGroup` until it receives a response, and this may take up to 90 seconds. :type worker_group: string :param worker_group: Indicates the type of task the task runner is configured to accept and process. The worker group is set as a field on objects in the pipeline when they are created. You can only specify a single value for `workerGroup` in the call to PollForTask. There are no wildcard values permitted in `workerGroup`, the string must be an exact, case-sensitive, match. :type hostname: string :param hostname: The public DNS name of the calling task runner. :type instance_identity: dict :param instance_identity: Identity information for the Amazon EC2 instance that is hosting the task runner. 
You can get this value by calling the URI, `http://169.254.169.254/latest/meta-data/instance- id`, from the EC2 instance. For more information, go to `Instance Metadata`_ in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves that your task runner is running on an EC2 instance, and ensures the proper AWS Data Pipeline service charges are applied to your pipeline. """ params = {'workerGroup': worker_group, } if hostname is not None: params['hostname'] = hostname if instance_identity is not None: params['instanceIdentity'] = instance_identity return self.make_request(action='PollForTask', body=json.dumps(params)) def put_pipeline_definition(self, pipeline_objects, pipeline_id): """ Adds tasks, schedules, and preconditions that control the behavior of the pipeline. You can use PutPipelineDefinition to populate a new pipeline or to update an existing pipeline that has not yet been activated. PutPipelineDefinition also validates the configuration as it adds it to the pipeline. Changes to the pipeline are saved unless one of the following three validation errors exists in the pipeline. #. An object is missing a name or identifier field. #. A string or reference field is empty. #. The number of objects in the pipeline exceeds the maximum allowed objects. Pipeline object definitions are passed to the PutPipelineDefinition action and returned by the GetPipelineDefinition action. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline to be configured. :type pipeline_objects: list :param pipeline_objects: The objects that define the pipeline. These will overwrite the existing pipeline definition. """ params = { 'pipelineId': pipeline_id, 'pipelineObjects': pipeline_objects, } return self.make_request(action='PutPipelineDefinition', body=json.dumps(params)) def query_objects(self, pipeline_id, sphere, marker=None, query=None, limit=None): """ Queries a pipeline for the names of objects that match a specified set of conditions. 
The objects returned by QueryObjects are paginated and then filtered by the value you set for query. This means the action may return an empty result set with a value set for marker. If `HasMoreResults` is set to `True`, you should continue to call QueryObjects, passing in the returned value for marker, until `HasMoreResults` returns `False`. :type pipeline_id: string :param pipeline_id: Identifier of the pipeline to be queried for object names. :type query: dict :param query: Query that defines the objects to be returned. The Query object can contain a maximum of ten selectors. The conditions in the query are limited to top-level String fields in the object. These filters can be applied to components, instances, and attempts. :type sphere: string :param sphere: Specifies whether the query applies to components or instances. Allowable values: `COMPONENT`, `INSTANCE`, `ATTEMPT`. :type marker: string :param marker: The starting point for the results to be returned. The first time you call QueryObjects, this value should be empty. As long as the action returns `HasMoreResults` as `True`, you can call QueryObjects again and pass the marker value from the response to retrieve the next set of results. :type limit: integer :param limit: Specifies the maximum number of object names that QueryObjects will return in a single call. The default value is 100. """ params = {'pipelineId': pipeline_id, 'sphere': sphere, } if query is not None: params['query'] = query if marker is not None: params['marker'] = marker if limit is not None: params['limit'] = limit return self.make_request(action='QueryObjects', body=json.dumps(params)) def report_task_progress(self, task_id): """ Updates the AWS Data Pipeline service on the progress of the calling task runner. When the task runner is assigned a task, it should call ReportTaskProgress to acknowledge that it has the task within 2 minutes. 
If the web service does not recieve this acknowledgement within the 2 minute window, it will assign the task in a subsequent PollForTask call. After this initial acknowledgement, the task runner only needs to report progress every 15 minutes to maintain its ownership of the task. You can change this reporting time from 15 minutes by specifying a `reportProgressTimeout` field in your pipeline. If a task runner does not report its status after 5 minutes, AWS Data Pipeline will assume that the task runner is unable to process the task and will reassign the task in a subsequent response to PollForTask. task runners should call ReportTaskProgress every 60 seconds. :type task_id: string :param task_id: Identifier of the task assigned to the task runner. This value is provided in the TaskObject that the service returns with the response for the PollForTask action. """ params = {'taskId': task_id, } return self.make_request(action='ReportTaskProgress', body=json.dumps(params)) def report_task_runner_heartbeat(self, taskrunner_id, worker_group=None, hostname=None): """ Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate that they are operational. In the case of AWS Data Pipeline Task Runner launched on a resource managed by AWS Data Pipeline, the web service can use this call to detect when the task runner application has failed and restart a new instance. :type taskrunner_id: string :param taskrunner_id: The identifier of the task runner. This value should be unique across your AWS account. In the case of AWS Data Pipeline Task Runner launched on a resource managed by AWS Data Pipeline, the web service provides a unique identifier when it launches the application. If you have written a custom task runner, you should assign a unique identifier for the task runner. :type worker_group: string :param worker_group: Indicates the type of task the task runner is configured to accept and process. 
The worker group is set as a field on objects in the pipeline when they are created. You can only specify a single value for `workerGroup` in the call to ReportTaskRunnerHeartbeat. There are no wildcard values permitted in `workerGroup`, the string must be an exact, case-sensitive, match. :type hostname: string :param hostname: The public DNS name of the calling task runner. """ params = {'taskrunnerId': taskrunner_id, } if worker_group is not None: params['workerGroup'] = worker_group if hostname is not None: params['hostname'] = hostname return self.make_request(action='ReportTaskRunnerHeartbeat', body=json.dumps(params)) def set_status(self, object_ids, status, pipeline_id): """ Requests that the status of an array of physical or logical pipeline objects be updated in the pipeline. This update may not occur immediately, but is eventually consistent. The status that can be set depends on the type of object. :type pipeline_id: string :param pipeline_id: Identifies the pipeline that contains the objects. :type object_ids: list :param object_ids: Identifies an array of objects. The corresponding objects can be either physical or components, but not a mix of both types. :type status: string :param status: Specifies the status to be set on all the objects in `objectIds`. For components, this can be either `PAUSE` or `RESUME`. For instances, this can be either `CANCEL`, `RERUN`, or `MARK_FINISHED`. """ params = { 'pipelineId': pipeline_id, 'objectIds': object_ids, 'status': status, } return self.make_request(action='SetStatus', body=json.dumps(params)) def set_task_status(self, task_id, task_status, error_id=None, error_message=None, error_stack_trace=None): """ Notifies AWS Data Pipeline that a task is completed and provides information about the final status. The task runner calls this action regardless of whether the task was sucessful. 
The task runner does not need to call SetTaskStatus for tasks that are canceled by the web service during a call to ReportTaskProgress. :type task_id: string :param task_id: Identifies the task assigned to the task runner. This value is set in the TaskObject that is returned by the PollForTask action. :type task_status: string :param task_status: If `FINISHED`, the task successfully completed. If `FAILED` the task ended unsuccessfully. The `FALSE` value is used by preconditions. :type error_id: string :param error_id: If an error occurred during the task, this value specifies an id value that represents the error. This value is set on the physical attempt object. It is used to display error information to the user. It should not start with string "Service_" which is reserved by the system. :type error_message: string :param error_message: If an error occurred during the task, this value specifies a text description of the error. This value is set on the physical attempt object. It is used to display error information to the user. The web service does not parse this value. :type error_stack_trace: string :param error_stack_trace: If an error occurred during the task, this value specifies the stack trace associated with the error. This value is set on the physical attempt object. It is used to display error information to the user. The web service does not parse this value. """ params = {'taskId': task_id, 'taskStatus': task_status, } if error_id is not None: params['errorId'] = error_id if error_message is not None: params['errorMessage'] = error_message if error_stack_trace is not None: params['errorStackTrace'] = error_stack_trace return self.make_request(action='SetTaskStatus', body=json.dumps(params)) def validate_pipeline_definition(self, pipeline_objects, pipeline_id): """ Tests the pipeline definition with a set of validation checks to ensure that it is well formed and can run without error. 
:type pipeline_id: string :param pipeline_id: Identifies the pipeline whose definition is to be validated. :type pipeline_objects: list :param pipeline_objects: A list of objects that define the pipeline changes to validate against the pipeline. """ params = { 'pipelineId': pipeline_id, 'pipelineObjects': pipeline_objects, } return self.make_request(action='ValidatePipelineDefinition', body=json.dumps(params)) def make_request(self, action, body): headers = { 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), 'Host': self.region.endpoint, 'Content-Type': 'application/x-amz-json-1.1', 'Content-Length': str(len(body)), } http_request = self.build_base_http_request( method='POST', path='/', auth_path='/', params={}, headers=headers, data=body) response = self._mexe(http_request, sender=None, override_num_retries=10) response_body = response.read() boto.log.debug(response_body) if response.status == 200: if response_body: return json.loads(response_body) else: json_body = json.loads(response_body) fault_name = json_body.get('__type', None) exception_class = self._faults.get(fault_name, self.ResponseError) raise exception_class(response.status, response.reason, body=json_body)
mit
yueshu/kernel_rk3036
tools/perf/python/twatch.py
625
2726
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
#   twatch - Experimental use of the perf python interface
#   Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
#   This application is free software; you can redistribute it and/or
#   modify it under the terms of the GNU General Public License
#   as published by the Free Software Foundation; version 2.
#
#   This application is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#   General Public License for more details.

import perf


def main(context_switch=0, thread=-1):
    """Open a dummy software event on every CPU and print the
    PERF_RECORD_* lifetime (and optionally context-switch) events as
    they arrive.

    context_switch: pass 1 to also request PERF_RECORD_SWITCH records.
    thread: tid to monitor, or -1 for all threads.
    """
    cpus = perf.cpu_map()
    threads = perf.thread_map(thread)
    # What we want are just the PERF_RECORD_ lifetime events for threads;
    # using the default PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1
    # makes perf reenable irq_vectors:local_timer_entry when disabling
    # nohz, which is not good for use cases where all we want is to see
    # threads come and go.  So use (TYPE_SOFTWARE, COUNT_SW_DUMMY, freq=0)
    # instead.
    evsel = perf.evsel(type=perf.TYPE_SOFTWARE,
                       config=perf.COUNT_SW_DUMMY,
                       task=1, comm=1, mmap=0, freq=0,
                       wakeup_events=1, watermark=1,
                       sample_id_all=1,
                       context_switch=context_switch,
                       sample_type=(perf.SAMPLE_PERIOD |
                                    perf.SAMPLE_TID |
                                    perf.SAMPLE_CPU))
    evsel.open(cpus=cpus, threads=threads)
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    while True:
        evlist.poll(timeout=-1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # BUGFIX: the original used Python 2 `print x,` statements,
            # which are a SyntaxError on Python 3.  print(..., end=' ')
            # reproduces the trailing-comma behavior (header and event on
            # one line, separated by a space).
            print("cpu: %2d, pid: %4d, tid: %4d" %
                  (event.sample_cpu, event.sample_pid, event.sample_tid),
                  end=' ')
            print(event)


if __name__ == '__main__':
    # To test the PERF_RECORD_SWITCH record, pick a pid and call e.g.:
    #
    #     main(context_switch = 1, thread = 31463)
    #
    # Example output:
    #   cpu: 3, pid: 31463, tid: 31593 { type: context_switch, ... }
    #
    # event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT tells whether the
    # record is a switch out of (or in to) the monitored threads.
    main()
gpl-2.0
cloudfoundry/php-buildpack-legacy
builds/runtimes/python-2.7.6/lib/python2.7/distutils/tests/setuptools_build_ext.py
149
11489
from distutils.command.build_ext import build_ext as _du_build_ext
try:
    # Attempt to use Pyrex for building extensions, if available
    from Pyrex.Distutils.build_ext import build_ext as _build_ext
except ImportError:
    _build_ext = _du_build_ext

import os, sys
from distutils.file_util import copy_file
from distutils.tests.setuptools_extension import Library
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
get_config_var("LDSHARED")  # make sure _config_vars is initialized
from distutils.sysconfig import _config_vars
from distutils import log
from distutils.errors import *

# Platform probing: stubs (tiny loader modules for shared libs) are used on
# darwin and on POSIX platforms where the `dl` module provides RTLD_NOW.
have_rtld = False
use_stubs = False
libtype = 'shared'

if sys.platform == "darwin":
    use_stubs = True
elif os.name != 'nt':
    try:
        from dl import RTLD_NOW
        have_rtld = True
        use_stubs = True
    except ImportError:
        pass

def if_dl(s):
    # Return `s` only when the `dl` module is usable; '' otherwise.
    # Used to conditionally include dlopen-flag lines in generated stubs.
    if have_rtld:
        return s
    return ''


class build_ext(_build_ext):
    # Extended build_ext that also knows how to build shared `Library`
    # extensions and the stub loaders that import them.

    def run(self):
        """Build extensions in build directory, then copy if --inplace"""
        old_inplace, self.inplace = self.inplace, 0
        _build_ext.run(self)
        self.inplace = old_inplace
        if old_inplace:
            self.copy_extensions_to_source()

    def copy_extensions_to_source(self):
        # Copy each built extension (and its stub, if any) next to its
        # package's source, for --inplace builds.
        build_py = self.get_finalized_command('build_py')
        for ext in self.extensions:
            fullname = self.get_ext_fullname(ext.name)
            filename = self.get_ext_filename(fullname)
            modpath = fullname.split('.')
            package = '.'.join(modpath[:-1])
            package_dir = build_py.get_package_dir(package)
            dest_filename = os.path.join(package_dir,os.path.basename(filename))
            src_filename = os.path.join(self.build_lib,filename)

            # Always copy, even if source is older than destination, to ensure
            # that the right extensions for the current Python/platform are
            # used.
            copy_file(
                src_filename, dest_filename, verbose=self.verbose,
                dry_run=self.dry_run
            )
            if ext._needs_stub:
                self.write_stub(package_dir or os.curdir, ext, True)

    if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'):
        # Workaround for problems using some Pyrex versions w/SWIG and/or 2.4
        def swig_sources(self, sources, *otherargs):
            # first do any Pyrex processing
            sources = _build_ext.swig_sources(self, sources) or sources
            # Then do any actual SWIG stuff on the remainder
            return _du_build_ext.swig_sources(self, sources, *otherargs)

    def get_ext_filename(self, fullname):
        # Libraries get a platform library filename; extensions that link
        # to an in-package dynamic lib get a 'dl-' prefixed stub target.
        filename = _build_ext.get_ext_filename(self,fullname)
        ext = self.ext_map[fullname]
        if isinstance(ext,Library):
            fn, ext = os.path.splitext(filename)
            return self.shlib_compiler.library_filename(fn,libtype)
        elif use_stubs and ext._links_to_dynamic:
            d,fn = os.path.split(filename)
            return os.path.join(d,'dl-'+fn)
        else:
            return filename

    def initialize_options(self):
        _build_ext.initialize_options(self)
        self.shlib_compiler = None
        self.shlibs = []
        self.ext_map = {}

    def finalize_options(self):
        # Classify extensions, set up the shared-lib compiler if needed,
        # and annotate each extension with the private attributes the
        # other methods rely on (_full_name, _links_to_dynamic, etc.).
        _build_ext.finalize_options(self)
        self.extensions = self.extensions or []
        self.check_extensions_list(self.extensions)
        self.shlibs = [ext for ext in self.extensions
                       if isinstance(ext,Library)]
        if self.shlibs:
            self.setup_shlib_compiler()
        for ext in self.extensions:
            ext._full_name = self.get_ext_fullname(ext.name)
        for ext in self.extensions:
            fullname = ext._full_name
            self.ext_map[fullname] = ext
            ltd = ext._links_to_dynamic = \
                self.shlibs and self.links_to_dynamic(ext) or False
            ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library)
            filename = ext._file_name = self.get_ext_filename(fullname)
            libdir = os.path.dirname(os.path.join(self.build_lib,filename))
            if ltd and libdir not in ext.library_dirs:
                ext.library_dirs.append(libdir)
            if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
                ext.runtime_library_dirs.append(os.curdir)

    def setup_shlib_compiler(self):
        # Build a second compiler, configured for shared libraries, and
        # graft the module-level link_shared_object onto it.
        compiler = self.shlib_compiler = new_compiler(
            compiler=self.compiler, dry_run=self.dry_run, force=self.force
        )
        if sys.platform == "darwin":
            # Temporarily override the config vars so customize_compiler
            # produces dylib-building settings, then restore them.
            tmp = _config_vars.copy()
            try:
                # XXX Help!  I don't have any idea whether these are right...
                _config_vars['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
                _config_vars['CCSHARED'] = " -dynamiclib"
                _config_vars['SO'] = ".dylib"
                customize_compiler(compiler)
            finally:
                _config_vars.clear()
                _config_vars.update(tmp)
        else:
            customize_compiler(compiler)

        if self.include_dirs is not None:
            compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name,value) in self.define:
                compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                compiler.undefine_macro(macro)
        if self.libraries is not None:
            compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            compiler.set_link_objects(self.link_objects)

        # hack so distutils' build_extension() builds a library instead
        compiler.link_shared_object = link_shared_object.__get__(compiler)

    def get_export_symbols(self, ext):
        # A Library's exports come from the extension itself.
        if isinstance(ext,Library):
            return ext.export_symbols
        return _build_ext.get_export_symbols(self,ext)

    def build_extension(self, ext):
        # Swap in the shared-lib compiler for Library instances, then
        # restore the regular compiler afterwards.
        _compiler = self.compiler
        try:
            if isinstance(ext,Library):
                self.compiler = self.shlib_compiler
            _build_ext.build_extension(self,ext)
            if ext._needs_stub:
                self.write_stub(
                    self.get_finalized_command('build_py').build_lib, ext
                )
        finally:
            self.compiler = _compiler

    def links_to_dynamic(self, ext):
        """Return true if 'ext' links to a dynamic lib in the same package"""
        # XXX this should check to ensure the lib is actually being built
        # XXX as dynamic, and not just using a locally-found version or a
        # XXX static-compiled version
        libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
        pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
        for libname in ext.libraries:
            if pkg+libname in libnames:
                return True
        return False

    def get_outputs(self):
        # Base outputs plus the .py/.pyc(/.pyo) stub files we generate.
        outputs = _build_ext.get_outputs(self)
        optimize = self.get_finalized_command('build_py').optimize
        for ext in self.extensions:
            if ext._needs_stub:
                base = os.path.join(self.build_lib, *ext._full_name.split('.'))
                outputs.append(base+'.py')
                outputs.append(base+'.pyc')
                if optimize:
                    outputs.append(base+'.pyo')
        return outputs

    def write_stub(self, output_dir, ext, compile=False):
        # Write a tiny loader module that locates and imp.load_dynamic()s
        # the real extension at import time.  With compile=True the stub
        # is byte-compiled and the .py source removed.
        log.info("writing stub loader for %s to %s",ext._full_name, output_dir)
        stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py'
        if compile and os.path.exists(stub_file):
            raise DistutilsError(stub_file+" already exists! Please delete.")
        if not self.dry_run:
            f = open(stub_file,'w')
            f.write('\n'.join([
                "def __bootstrap__():",
                "   global __bootstrap__, __file__, __loader__",
                "   import sys, os, pkg_resources, imp"+if_dl(", dl"),
                "   __file__ = pkg_resources.resource_filename(__name__,%r)"
                   % os.path.basename(ext._file_name),
                "   del __bootstrap__",
                "   if '__loader__' in globals():",
                "       del __loader__",
                if_dl("   old_flags = sys.getdlopenflags()"),
                "   old_dir = os.getcwd()",
                "   try:",
                "     os.chdir(os.path.dirname(__file__))",
                if_dl("     sys.setdlopenflags(dl.RTLD_NOW)"),
                "     imp.load_dynamic(__name__,__file__)",
                "   finally:",
                if_dl("     sys.setdlopenflags(old_flags)"),
                "     os.chdir(old_dir)",
                "__bootstrap__()",
                "" # terminal \n
            ]))
            f.close()
        if compile:
            from distutils.util import byte_compile
            byte_compile([stub_file], optimize=0,
                         force=True, dry_run=self.dry_run)
            optimize = self.get_finalized_command('install_lib').optimize
            if optimize > 0:
                byte_compile([stub_file], optimize=optimize,
                             force=True, dry_run=self.dry_run)
            if os.path.exists(stub_file) and not self.dry_run:
                os.unlink(stub_file)


if use_stubs or os.name=='nt':
    # Build shared libraries
    #
    def link_shared_object(self, objects, output_libname, output_dir=None,
        libraries=None, library_dirs=None, runtime_library_dirs=None,
        export_symbols=None, debug=0, extra_preargs=None,
        extra_postargs=None, build_temp=None, target_lang=None
    ):
        self.link(
            self.SHARED_LIBRARY, objects, output_libname,
            output_dir, libraries, library_dirs, runtime_library_dirs,
            export_symbols, debug, extra_preargs, extra_postargs,
            build_temp, target_lang
        )
else:
    # Build static libraries everywhere else
    libtype = 'static'

    def link_shared_object(self, objects, output_libname, output_dir=None,
        libraries=None, library_dirs=None, runtime_library_dirs=None,
        export_symbols=None, debug=0, extra_preargs=None,
        extra_postargs=None, build_temp=None, target_lang=None
    ):
        # XXX we need to either disallow these attrs on Library instances,
        #     or warn/abort here if set, or something...
        #libraries=None, library_dirs=None, runtime_library_dirs=None,
        #export_symbols=None, extra_preargs=None, extra_postargs=None,
        #build_temp=None

        assert output_dir is None   # distutils build_ext doesn't pass this
        output_dir,filename = os.path.split(output_libname)
        basename, ext = os.path.splitext(filename)
        if self.library_filename("x").startswith('lib'):
            # strip 'lib' prefix; this is kludgy if some platform uses
            # a different prefix
            basename = basename[3:]

        self.create_static_lib(
            objects, basename, output_dir, debug, target_lang
        )
mit
sanket4373/keystone
keystone/common/manager.py
4
3009
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

from keystone.openstack.common import importutils


def response_truncated(f):
    """Decorator that pushes any configured list limit down to the driver.

    Wraps a Manager list_{entity} method.  If the caller passed a 'hints'
    keyword argument and the backend declares a list limit (via the
    driver's _get_list_limit()), the limit is recorded in the hints so
    the driver call can try to honor it.  A driver that truncates the
    result marks the hints' limit entry accordingly; a driver that
    cannot truncate leaves the entry in place for the caller to handle.

    Calls made without a hints list bypass limiting entirely, which
    permits internal callers to retrieve the complete data set.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        hints = kwargs.get('hints')
        if hints is None:
            # No hints supplied: ignore any configured limits.
            return f(self, *args, **kwargs)
        limit = self.driver._get_list_limit()
        if limit:
            hints.set_limit(limit)
        return f(self, *args, **kwargs)
    return wrapper


class Manager(object):
    """Intermediary layer between the API and a dynamically loaded driver.

    Holds service-level logic that is independent of the HTTP interface
    and transparently proxies everything else to the backend driver named
    at construction time.
    """

    def __init__(self, driver_name):
        # Import and instantiate the configured backend driver.
        self.driver = importutils.import_object(driver_name)

    def __getattr__(self, name):
        """Forward calls to the underlying driver."""
        target = getattr(self.driver, name)

        @functools.wraps(target)
        def _wrapper(*args, **kw):
            return target(*args, **kw)

        # Cache the forwarder on the instance so later lookups skip
        # __getattr__ entirely.
        setattr(self, name, _wrapper)
        return _wrapper
apache-2.0
home-assistant/home-assistant
homeassistant/components/heos/__init__.py
1
12521
"""Denon HEOS Media Player.""" from __future__ import annotations import asyncio from datetime import timedelta import logging from pyheos import Heos, HeosError, const as heos_const import voluptuous as vol from homeassistant.components.media_player.const import DOMAIN as MEDIA_PLAYER_DOMAIN from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import CONF_HOST, EVENT_HOMEASSISTANT_STOP from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import ConfigType from homeassistant.util import Throttle from . import services from .config_flow import format_title from .const import ( COMMAND_RETRY_ATTEMPTS, COMMAND_RETRY_DELAY, DATA_CONTROLLER_MANAGER, DATA_SOURCE_MANAGER, DOMAIN, SIGNAL_HEOS_UPDATED, ) PLATFORMS = [MEDIA_PLAYER_DOMAIN] CONFIG_SCHEMA = vol.Schema( vol.All( cv.deprecated(DOMAIN), {DOMAIN: vol.Schema({vol.Required(CONF_HOST): cv.string})}, ), extra=vol.ALLOW_EXTRA, ) MIN_UPDATE_SOURCES = timedelta(seconds=1) _LOGGER = logging.getLogger(__name__) async def async_setup(hass: HomeAssistant, config: ConfigType): """Set up the HEOS component.""" if DOMAIN not in config: return True host = config[DOMAIN][CONF_HOST] entries = hass.config_entries.async_entries(DOMAIN) if not entries: # Create new entry based on config hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_HOST: host} ) ) else: # Check if host needs to be updated entry = entries[0] if entry.data[CONF_HOST] != host: hass.config_entries.async_update_entry( entry, title=format_title(host), data={**entry.data, CONF_HOST: host} ) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry): """Initialize config entry which represents the HEOS controller.""" # For backwards compat if entry.unique_id is None: hass.config_entries.async_update_entry(entry, 
unique_id=DOMAIN) host = entry.data[CONF_HOST] # Setting all_progress_events=False ensures that we only receive a # media position update upon start of playback or when media changes controller = Heos(host, all_progress_events=False) try: await controller.connect(auto_reconnect=True) # Auto reconnect only operates if initial connection was successful. except HeosError as error: await controller.disconnect() _LOGGER.debug("Unable to connect to controller %s: %s", host, error) raise ConfigEntryNotReady from error # Disconnect when shutting down async def disconnect_controller(event): await controller.disconnect() entry.async_on_unload( hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, disconnect_controller) ) # Get players and sources try: players = await controller.get_players() favorites = {} if controller.is_signed_in: favorites = await controller.get_favorites() else: _LOGGER.warning( "%s is not logged in to a HEOS account and will be unable to retrieve " "HEOS favorites: Use the 'heos.sign_in' service to sign-in to a HEOS account", host, ) inputs = await controller.get_input_sources() except HeosError as error: await controller.disconnect() _LOGGER.debug("Unable to retrieve players and sources: %s", error) raise ConfigEntryNotReady from error controller_manager = ControllerManager(hass, controller) await controller_manager.connect_listeners() source_manager = SourceManager(favorites, inputs) source_manager.connect_update(hass, controller) hass.data[DOMAIN] = { DATA_CONTROLLER_MANAGER: controller_manager, DATA_SOURCE_MANAGER: source_manager, MEDIA_PLAYER_DOMAIN: players, } services.register(hass, controller) hass.config_entries.async_setup_platforms(entry, PLATFORMS) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): """Unload a config entry.""" controller_manager = hass.data[DOMAIN][DATA_CONTROLLER_MANAGER] await controller_manager.disconnect() hass.data.pop(DOMAIN) services.remove(hass) return await 
hass.config_entries.async_unload_platforms(entry, PLATFORMS) class ControllerManager: """Class that manages events of the controller.""" def __init__(self, hass, controller): """Init the controller manager.""" self._hass = hass self._device_registry = None self._entity_registry = None self.controller = controller self._signals = [] async def connect_listeners(self): """Subscribe to events of interest.""" self._device_registry, self._entity_registry = await asyncio.gather( self._hass.helpers.device_registry.async_get_registry(), self._hass.helpers.entity_registry.async_get_registry(), ) # Handle controller events self._signals.append( self.controller.dispatcher.connect( heos_const.SIGNAL_CONTROLLER_EVENT, self._controller_event ) ) # Handle connection-related events self._signals.append( self.controller.dispatcher.connect( heos_const.SIGNAL_HEOS_EVENT, self._heos_event ) ) async def disconnect(self): """Disconnect subscriptions.""" for signal_remove in self._signals: signal_remove() self._signals.clear() self.controller.dispatcher.disconnect_all() await self.controller.disconnect() async def _controller_event(self, event, data): """Handle controller event.""" if event == heos_const.EVENT_PLAYERS_CHANGED: self.update_ids(data[heos_const.DATA_MAPPED_IDS]) # Update players self._hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_HEOS_UPDATED) async def _heos_event(self, event): """Handle connection event.""" if event == heos_const.EVENT_CONNECTED: try: # Retrieve latest players and refresh status data = await self.controller.load_players() self.update_ids(data[heos_const.DATA_MAPPED_IDS]) except HeosError as ex: _LOGGER.error("Unable to refresh players: %s", ex) # Update players self._hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_HEOS_UPDATED) def update_ids(self, mapped_ids: dict[int, int]): """Update the IDs in the device and entity registry.""" # mapped_ids contains the mapped IDs (new:old) for new_id, old_id in mapped_ids.items(): # update device registry 
entry = self._device_registry.async_get_device({(DOMAIN, old_id)}) new_identifiers = {(DOMAIN, new_id)} if entry: self._device_registry.async_update_device( entry.id, new_identifiers=new_identifiers ) _LOGGER.debug( "Updated device %s identifiers to %s", entry.id, new_identifiers ) # update entity registry entity_id = self._entity_registry.async_get_entity_id( MEDIA_PLAYER_DOMAIN, DOMAIN, str(old_id) ) if entity_id: self._entity_registry.async_update_entity( entity_id, new_unique_id=str(new_id) ) _LOGGER.debug("Updated entity %s unique id to %s", entity_id, new_id) class SourceManager: """Class that manages sources for players.""" def __init__( self, favorites, inputs, *, retry_delay: int = COMMAND_RETRY_DELAY, max_retry_attempts: int = COMMAND_RETRY_ATTEMPTS, ): """Init input manager.""" self.retry_delay = retry_delay self.max_retry_attempts = max_retry_attempts self.favorites = favorites self.inputs = inputs self.source_list = self._build_source_list() def _build_source_list(self): """Build a single list of inputs from various types.""" source_list = [] source_list.extend([favorite.name for favorite in self.favorites.values()]) source_list.extend([source.name for source in self.inputs]) return source_list async def play_source(self, source: str, player): """Determine type of source and play it.""" index = next( ( index for index, favorite in self.favorites.items() if favorite.name == source ), None, ) if index is not None: await player.play_favorite(index) return input_source = next( ( input_source for input_source in self.inputs if input_source.name == source ), None, ) if input_source is not None: await player.play_input_source(input_source) return _LOGGER.error("Unknown source: %s", source) def get_current_source(self, now_playing_media): """Determine current source from now playing media.""" # Match input by input_name:media_id if now_playing_media.source_id == heos_const.MUSIC_SOURCE_AUX_INPUT: return next( ( input_source.name for input_source in self.inputs 
if input_source.input_name == now_playing_media.media_id ), None, ) # Try matching favorite by name:station or media_id:album_id return next( ( source.name for source in self.favorites.values() if source.name == now_playing_media.station or source.media_id == now_playing_media.album_id ), None, ) def connect_update(self, hass, controller): """ Connect listener for when sources change and signal player update. EVENT_SOURCES_CHANGED is often raised multiple times in response to a physical event therefore throttle it. Retrieving sources immediately after the event may fail so retry. """ @Throttle(MIN_UPDATE_SOURCES) async def get_sources(): retry_attempts = 0 while True: try: favorites = {} if controller.is_signed_in: favorites = await controller.get_favorites() inputs = await controller.get_input_sources() return favorites, inputs except HeosError as error: if retry_attempts < self.max_retry_attempts: retry_attempts += 1 _LOGGER.debug( "Error retrieving sources and will retry: %s", error ) await asyncio.sleep(self.retry_delay) else: _LOGGER.error("Unable to update sources: %s", error) return async def update_sources(event, data=None): if event in ( heos_const.EVENT_SOURCES_CHANGED, heos_const.EVENT_USER_CHANGED, heos_const.EVENT_CONNECTED, ): sources = await get_sources() # If throttled, it will return None if sources: self.favorites, self.inputs = sources self.source_list = self._build_source_list() _LOGGER.debug("Sources updated due to changed event") # Let players know to update hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_HEOS_UPDATED) controller.dispatcher.connect( heos_const.SIGNAL_CONTROLLER_EVENT, update_sources ) controller.dispatcher.connect(heos_const.SIGNAL_HEOS_EVENT, update_sources)
apache-2.0
grob/FrameworkBenchmarks
toolset/benchmark/test_types/db_type.py
36
1824
from benchmark.test_types.framework_test_type import FrameworkTestType
from benchmark.test_types.verifications import basic_body_verification, verify_headers, verify_randomnumber_object

import json


class DBTestType(FrameworkTestType):
    """Verifier for the single-database-query ('db') test type.

    The framework endpoint is expected to return a JSON object with keys
    'id' and 'randomNumber', both mapping to integers.
    """

    def __init__(self):
        kwargs = {
            'name': 'db',
            'accept_header': self.accept('json'),
            'requires_db': True,
            'args': ['db_url']
        }
        FrameworkTestType.__init__(self, **kwargs)

    def get_url(self):
        # The URL fragment comes from the benchmark config ('db_url' arg).
        return self.db_url

    def verify(self, base_url):
        '''Ensures body is valid JSON with a key 'id' and a key
        'randomNumber', both of which must map to integers

        :param base_url: scheme://host:port prefix to which db_url is
            appended.
        :return: list of (level, message, url) tuples; a single
            ('pass', '', url) entry when everything checks out.
        '''
        url = base_url + self.db_url
        headers, body = self.request_headers_and_body(url)

        response, problems = basic_body_verification(body, url)

        if len(problems) > 0:
            return problems

        # We are allowing the single-object array
        # e.g. [{'id':5, 'randomNumber':10}] for now,
        # but will likely make this fail at some point.
        # Fix: use isinstance instead of type() comparison (PEP 8); this
        # also accepts list/dict subclasses a JSON decoder may return.
        if isinstance(response, list):
            response = response[0]
            problems.append(
                ('warn',
                 'Response is a JSON array. Expected JSON object (e.g. [] vs {})',
                 url))

        # Make sure there was a JSON object inside the array
        if not isinstance(response, dict):
            problems.append(
                ('fail',
                 'Response is not a JSON object or an array of JSON objects',
                 url))
            return problems

        # Verify response content
        problems += verify_randomnumber_object(response, url)
        problems += verify_headers(headers, url, should_be='json')

        if len(problems) == 0:
            return [('pass', '', url)]
        else:
            return problems
bsd-3-clause
woodshop/complex-chainer
chainer/functions/split_axis.py
4
4921
import collections import numpy import six from chainer import cuda from chainer import function from chainer.utils import type_check _args = 'float* y, float* x, int cdimy, int cdimx, int rdim, int coffset' _preamble = ''' #define COPY(statement) \ int l = i / (rdim * cdimy); \ int c = i / rdim % cdimy + coffset; \ int r = i % rdim; \ int idx = r + rdim * (c + cdimx * l); \ statement; ''' class SplitAxis(function.Function): """Function that splits multiple arrays towards the specified axis.""" def __init__(self, indices_or_sections, axis): if not isinstance(indices_or_sections, (int, collections.Iterable)): raise TypeError('indices_or_sections must be integer or 1-D array') self.indices_or_sections = indices_or_sections self.axis = axis def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) type_check.expect(in_types[0].ndim >= self.axis) if isinstance(self.indices_or_sections, collections.Iterable): max_index = type_check.Variable( self.indices_or_sections[-1], 'max_index') type_check.expect(in_types[0].shape[self.axis] > max_index) else: sections = type_check.Variable( self.indices_or_sections, 'sections') type_check.expect(in_types[0].shape[self.axis] % sections == 0) def forward_cpu(self, x): if isinstance(self.indices_or_sections, collections.Iterable): cdimx = x[0].shape[self.axis] ind = list(self.indices_or_sections) ind.append(cdimx) prev_i = 0 for i in ind: cdimy = max(0, min(i, cdimx) - prev_i) if cdimy == 0: raise ValueError('Not support if shape contains 0') prev_i = i return tuple(numpy.split(x[0], self.indices_or_sections, self.axis)) def forward_gpu(self, x): xshape = x[0].shape self.cdimx = xshape[self.axis] self.rdim = numpy.prod(xshape[self.axis + 1:], dtype=int) if isinstance(self.indices_or_sections, collections.Iterable): ind = list(self.indices_or_sections) ind.append(self.cdimx) else: sec = self.indices_or_sections if self.cdimx % sec: raise ValueError( 'array split does not result in an equal division') ind = 
numpy.arange(1, sec + 1) * (self.cdimx // sec) ys = [] kernel = cuda.elementwise( _args, 'COPY(y[i] = x[idx])', 'split_fwd', preamble=_preamble) prev_i = 0 for i in ind: cdimy = max(0, min(i, self.cdimx) - prev_i) s = list(xshape) s[self.axis] = cdimy y = cuda.empty(tuple(s), dtype=x[0].dtype) if cdimy == 0: raise ValueError('Not support if shape contains 0') kernel(y, x[0], cdimy, self.cdimx, self.rdim, prev_i) prev_i = i ys.append(y) return tuple(ys) def backward_cpu(self, x, gys): if any(gy is None for gy in gys): gx = numpy.zeros_like(x[0]) gxs = numpy.split(gx, self.indices_or_sections, self.axis) for gxi, gy in six.moves.zip(gxs, gys): if gy is None: continue gxi[:] = gy return gx, else: return numpy.concatenate(gys, axis=self.axis), def backward_gpu(self, x, gys): gx = cuda.zeros_like(x[0]) coffset = 0 kernel = cuda.elementwise( _args, 'COPY(x[idx] = y[i])', 'split_bwd', preamble=_preamble) for gy in gys: if gy is None: continue cdimy = gy.shape[self.axis] if cdimy != 0: kernel(gy, gx, cdimy, self.cdimx, self.rdim, coffset) coffset += cdimy return gx, def split_axis(x, indices_or_sections, axis): """Splits given variables along an axis. Args: x (tuple of Variables): Variables to be split. indices_or_sections (int or 1-D array): If this argument is an integer, N, the array will be divided into N equal arrays along axis. If it is a 1-D array of sorted integers, it indicates the positions where the array is split. axis (int): Axis that the input array is split along. Returns: ``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects if the number of outputs is more than 1 or :class:`~chainer.Variable` otherwise. .. note:: This function raises ``ValueError`` if at least one of the outputs is splitted to zero-size (i.e. `axis`-th value of its shape is zero). """ return SplitAxis(indices_or_sections, axis)(x)
mit
coreyfarrell/testsuite
tests/rest_api/applications/subscribe-device-state/subscribe_device_state.py
2
1075
""" Copyright (C) 2013, Digium, Inc. Kevin Harwell <kharwell@digium.com> This program is free software, distributed under the terms of the GNU General Public License Version 2. """ URL = 'deviceStates' DEVICE = 'Stasis:Test' INITIAL_STATE = 'NOT_INUSE' CHANGED_STATE = 'INUSE' def on_start(ari, event, test_obj): # add a device state ari.put(URL, DEVICE, deviceState=INITIAL_STATE) # subscribe to device ari.post("applications", "testsuite", "subscription", eventSource="deviceState:%s" % DEVICE) # change the device state ari.put(URL, DEVICE, deviceState=CHANGED_STATE) # unsubscribe from device ari.delete("applications", "testsuite", "subscription", eventSource="deviceState:%s" % DEVICE) # remove device ari.delete(URL, DEVICE) ari.delete('channels', event['channel']['id']) return True def on_state_change(ari, event, test_obj): assert event['device_state']['name'] == DEVICE assert event['device_state']['state'] == CHANGED_STATE test_obj.stop_reactor() return True
gpl-2.0
Microvellum/Fluid-Designer
win64-vc/2.78/python/lib/ctypes/test/test_byteswap.py
32
11411
import sys, unittest, struct, math, ctypes from binascii import hexlify from ctypes import * def bin(s): return hexlify(memoryview(s)).decode().upper() # Each *simple* type that supports different byte orders has an # __ctype_be__ attribute that specifies the same type in BIG ENDIAN # byte order, and a __ctype_le__ attribute that is the same type in # LITTLE ENDIAN byte order. # # For Structures and Unions, these types are created on demand. class Test(unittest.TestCase): @unittest.skip('test disabled') def test_X(self): print(sys.byteorder, file=sys.stderr) for i in range(32): bits = BITS() setattr(bits, "i%s" % i, 1) dump(bits) def test_slots(self): class BigPoint(BigEndianStructure): __slots__ = () _fields_ = [("x", c_int), ("y", c_int)] class LowPoint(LittleEndianStructure): __slots__ = () _fields_ = [("x", c_int), ("y", c_int)] big = BigPoint() little = LowPoint() big.x = 4 big.y = 2 little.x = 2 little.y = 4 with self.assertRaises(AttributeError): big.z = 42 with self.assertRaises(AttributeError): little.z = 24 def test_endian_short(self): if sys.byteorder == "little": self.assertIs(c_short.__ctype_le__, c_short) self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short) else: self.assertIs(c_short.__ctype_be__, c_short) self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short) s = c_short.__ctype_be__(0x1234) self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234") self.assertEqual(bin(s), "1234") self.assertEqual(s.value, 0x1234) s = c_short.__ctype_le__(0x1234) self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412") self.assertEqual(bin(s), "3412") self.assertEqual(s.value, 0x1234) s = c_ushort.__ctype_be__(0x1234) self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234") self.assertEqual(bin(s), "1234") self.assertEqual(s.value, 0x1234) s = c_ushort.__ctype_le__(0x1234) self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412") self.assertEqual(bin(s), "3412") self.assertEqual(s.value, 0x1234) def test_endian_int(self): if sys.byteorder == "little": 
self.assertIs(c_int.__ctype_le__, c_int) self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int) else: self.assertIs(c_int.__ctype_be__, c_int) self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int) s = c_int.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678") self.assertEqual(bin(s), "12345678") self.assertEqual(s.value, 0x12345678) s = c_int.__ctype_le__(0x12345678) self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412") self.assertEqual(bin(s), "78563412") self.assertEqual(s.value, 0x12345678) s = c_uint.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678") self.assertEqual(bin(s), "12345678") self.assertEqual(s.value, 0x12345678) s = c_uint.__ctype_le__(0x12345678) self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412") self.assertEqual(bin(s), "78563412") self.assertEqual(s.value, 0x12345678) def test_endian_longlong(self): if sys.byteorder == "little": self.assertIs(c_longlong.__ctype_le__, c_longlong) self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong) else: self.assertIs(c_longlong.__ctype_be__, c_longlong) self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong) s = c_longlong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF") self.assertEqual(bin(s), "1234567890ABCDEF") self.assertEqual(s.value, 0x1234567890ABCDEF) s = c_longlong.__ctype_le__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412") self.assertEqual(bin(s), "EFCDAB9078563412") self.assertEqual(s.value, 0x1234567890ABCDEF) s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF") self.assertEqual(bin(s), "1234567890ABCDEF") self.assertEqual(s.value, 0x1234567890ABCDEF) s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412") 
self.assertEqual(bin(s), "EFCDAB9078563412") self.assertEqual(s.value, 0x1234567890ABCDEF) def test_endian_float(self): if sys.byteorder == "little": self.assertIs(c_float.__ctype_le__, c_float) self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float) else: self.assertIs(c_float.__ctype_be__, c_float) self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float) s = c_float(math.pi) self.assertEqual(bin(struct.pack("f", math.pi)), bin(s)) # Hm, what's the precision of a float compared to a double? self.assertAlmostEqual(s.value, math.pi, places=6) s = c_float.__ctype_le__(math.pi) self.assertAlmostEqual(s.value, math.pi, places=6) self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s)) s = c_float.__ctype_be__(math.pi) self.assertAlmostEqual(s.value, math.pi, places=6) self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s)) def test_endian_double(self): if sys.byteorder == "little": self.assertIs(c_double.__ctype_le__, c_double) self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double) else: self.assertIs(c_double.__ctype_be__, c_double) self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double) s = c_double(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("d", math.pi)), bin(s)) s = c_double.__ctype_le__(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s)) s = c_double.__ctype_be__(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s)) def test_endian_other(self): self.assertIs(c_byte.__ctype_le__, c_byte) self.assertIs(c_byte.__ctype_be__, c_byte) self.assertIs(c_ubyte.__ctype_le__, c_ubyte) self.assertIs(c_ubyte.__ctype_be__, c_ubyte) self.assertIs(c_char.__ctype_le__, c_char) self.assertIs(c_char.__ctype_be__, c_char) def test_struct_fields_1(self): if sys.byteorder == "little": base = BigEndianStructure else: base = LittleEndianStructure class T(base): pass _fields_ = [("a", c_ubyte), ("b", c_byte), ("c", c_short), ("d", c_ushort), 
("e", c_int), ("f", c_uint), ("g", c_long), ("h", c_ulong), ("i", c_longlong), ("k", c_ulonglong), ("l", c_float), ("m", c_double), ("n", c_char), ("b1", c_byte, 3), ("b2", c_byte, 3), ("b3", c_byte, 2), ("a", c_int * 3 * 3 * 3)] T._fields_ = _fields_ # these fields do not support different byte order: for typ in c_wchar, c_void_p, POINTER(c_int): _fields_.append(("x", typ)) class T(base): pass self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)]) def test_struct_struct(self): # nested structures with different byteorders # create nested structures with given byteorders and set memory to data for nested, data in ( (BigEndianStructure, b'\0\0\0\1\0\0\0\2'), (LittleEndianStructure, b'\1\0\0\0\2\0\0\0'), ): for parent in ( BigEndianStructure, LittleEndianStructure, Structure, ): class NestedStructure(nested): _fields_ = [("x", c_uint32), ("y", c_uint32)] class TestStructure(parent): _fields_ = [("point", NestedStructure)] self.assertEqual(len(data), sizeof(TestStructure)) ptr = POINTER(TestStructure) s = cast(data, ptr)[0] del ctypes._pointer_type_cache[TestStructure] self.assertEqual(s.point.x, 1) self.assertEqual(s.point.y, 2) def test_struct_fields_2(self): # standard packing in struct uses no alignment. # So, we have to align using pad bytes. # # Unaligned accesses will crash Python (on those platforms that # don't allow it, like sparc solaris). 
if sys.byteorder == "little": base = BigEndianStructure fmt = ">bxhid" else: base = LittleEndianStructure fmt = "<bxhid" class S(base): _fields_ = [("b", c_byte), ("h", c_short), ("i", c_int), ("d", c_double)] s1 = S(0x12, 0x1234, 0x12345678, 3.14) s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14) self.assertEqual(bin(s1), bin(s2)) def test_unaligned_nonnative_struct_fields(self): if sys.byteorder == "little": base = BigEndianStructure fmt = ">b h xi xd" else: base = LittleEndianStructure fmt = "<b h xi xd" class S(base): _pack_ = 1 _fields_ = [("b", c_byte), ("h", c_short), ("_1", c_byte), ("i", c_int), ("_2", c_byte), ("d", c_double)] s1 = S() s1.b = 0x12 s1.h = 0x1234 s1.i = 0x12345678 s1.d = 3.14 s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14) self.assertEqual(bin(s1), bin(s2)) def test_unaligned_native_struct_fields(self): if sys.byteorder == "little": fmt = "<b h xi xd" else: base = LittleEndianStructure fmt = ">b h xi xd" class S(Structure): _pack_ = 1 _fields_ = [("b", c_byte), ("h", c_short), ("_1", c_byte), ("i", c_int), ("_2", c_byte), ("d", c_double)] s1 = S() s1.b = 0x12 s1.h = 0x1234 s1.i = 0x12345678 s1.d = 3.14 s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14) self.assertEqual(bin(s1), bin(s2)) if __name__ == "__main__": unittest.main()
gpl-3.0
kun--hust/libcloud_with_cn
libcloud/compute/drivers/digitalocean.py
4
7643
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Digital Ocean Driver """ from libcloud.utils.py3 import httplib from libcloud.common.base import ConnectionUserAndKey, JsonResponse from libcloud.compute.types import Provider, NodeState, InvalidCredsError from libcloud.compute.base import NodeDriver from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation class DigitalOceanResponse(JsonResponse): def parse_error(self): if self.status == httplib.FOUND and '/api/error' in self.body: # Hacky, but DigitalOcean error responses are awful raise InvalidCredsError(self.body) elif self.status == httplib.UNAUTHORIZED: body = self.parse_body() raise InvalidCredsError(body['message']) else: body = self.parse_body() if 'error_message' in body: error = '%s (code: %s)' % (body['error_message'], self.status) else: error = body return error class SSHKey(object): def __init__(self, id, name, pub_key): self.id = id self.name = name self.pub_key = pub_key def __repr__(self): return (('<SSHKey: id=%s, name=%s, pub_key=%s>') % (self.id, self.name, self.pub_key)) class DigitalOceanConnection(ConnectionUserAndKey): """ Connection class for the DigitalOcean driver. 
""" host = 'api.digitalocean.com' responseCls = DigitalOceanResponse def add_default_params(self, params): """ Add parameters that are necessary for every request This method adds ``client_id`` and ``api_key`` to the request. """ params['client_id'] = self.user_id params['api_key'] = self.key return params class DigitalOceanNodeDriver(NodeDriver): """ DigitalOceanNode node driver. """ connectionCls = DigitalOceanConnection type = Provider.DIGITAL_OCEAN name = 'Digital Ocean' website = 'https://www.digitalocean.com' NODE_STATE_MAP = {'new': NodeState.PENDING, 'off': NodeState.REBOOTING, 'active': NodeState.RUNNING} def list_nodes(self): data = self.connection.request('/droplets').object['droplets'] return list(map(self._to_node, data)) def list_locations(self): data = self.connection.request('/regions').object['regions'] return list(map(self._to_location, data)) def list_images(self): data = self.connection.request('/images').object['images'] return list(map(self._to_image, data)) def list_sizes(self): data = self.connection.request('/sizes').object['sizes'] return list(map(self._to_size, data)) def create_node(self, name, size, image, location, ex_ssh_key_ids=None, **kwargs): """ Create a node. :keyword ex_ssh_key_ids: A list of ssh key ids which will be added to the server. (optional) :type ex_ssh_key_ids: ``list`` of ``str`` :return: The newly created node. 
:rtype: :class:`Node` """ params = {'name': name, 'size_id': size.id, 'image_id': image.id, 'region_id': location.id} if ex_ssh_key_ids: params['ssh_key_ids'] = ','.join(ex_ssh_key_ids) data = self.connection.request('/droplets/new', params=params).object return self._to_node(data=data['droplet']) def reboot_node(self, node): res = self.connection.request('/droplets/%s/reboot/' % (node.id)) return res.status == httplib.OK def destroy_node(self, node): params = {'scrub_data': '1'} res = self.connection.request('/droplets/%s/destroy/' % (node.id), params=params) return res.status == httplib.OK def ex_rename_node(self, node, name): params = {'name': name} res = self.connection.request('/droplets/%s/rename/' % (node.id), params=params) return res.status == httplib.OK def ex_list_ssh_keys(self): """ List all the available SSH keys. :return: Available SSH keys. :rtype: ``list`` of :class:`SSHKey` """ data = self.connection.request('/ssh_keys').object['ssh_keys'] return list(map(self._to_ssh_key, data)) def ex_create_ssh_key(self, name, ssh_key_pub): """ Create a new SSH key. :param name: Key name (required) :type name: ``str`` :param name: Valid public key string (required) :type name: ``str`` """ params = {'name': name, 'ssh_pub_key': ssh_key_pub} data = self.connection.request('/ssh_keys/new/', method='GET', params=params).object assert 'ssh_key' in data return self._to_ssh_key(data=data['ssh_key']) def ex_destroy_ssh_key(self, key_id): """ Delete an existing SSH key. 
:param key_id: SSH key id (required) :type key_id: ``str`` """ res = self.connection.request('/ssh_keys/%s/destroy/' % (key_id)) return res.status == httplib.OK def _to_node(self, data): extra_keys = ['backups_active', 'region_id', 'image_id', 'size_id'] if 'status' in data: state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) else: state = NodeState.UNKNOWN if 'ip_address' in data and data['ip_address'] is not None: public_ips = [data['ip_address']] else: public_ips = [] extra = {} for key in extra_keys: if key in data: extra[key] = data[key] node = Node(id=data['id'], name=data['name'], state=state, public_ips=public_ips, private_ips=None, extra=extra, driver=self) return node def _to_image(self, data): extra = {'distribution': data['distribution']} return NodeImage(id=data['id'], name=data['name'], extra=extra, driver=self) def _to_location(self, data): return NodeLocation(id=data['id'], name=data['name'], country=None, driver=self) def _to_size(self, data): ram = data['name'].lower() if 'mb' in ram: ram = int(ram.replace('mb', '')) elif 'gb' in ram: ram = int(ram.replace('gb', '')) * 1024 return NodeSize(id=data['id'], name=data['name'], ram=ram, disk=0, bandwidth=0, price=0, driver=self) def _to_ssh_key(self, data): return SSHKey(id=data['id'], name=data['name'], pub_key=data.get('ssh_pub_key', None))
apache-2.0
dwightgunning/django
tests/known_related_objects/tests.py
363
6425
from __future__ import unicode_literals

from django.test import TestCase

from .models import Organiser, Pool, PoolStyle, Tournament


class ExistingRelatedInstancesTests(TestCase):
    """
    Verify the "known related objects" optimisation: when an instance that
    satisfies a relation is already in hand (via get, select_related,
    prefetch_related, or combined querysets), traversing the relation in
    either direction returns that *identical* cached instance (assertIs)
    instead of issuing another database query.
    """

    @classmethod
    def setUpTestData(cls):
        # Shared fixture, created once per class: two tournaments, a single
        # organiser owning all four pools, and styles only for p2 and p3.
        cls.t1 = Tournament.objects.create(name='Tourney 1')
        cls.t2 = Tournament.objects.create(name='Tourney 2')
        cls.o1 = Organiser.objects.create(name='Organiser 1')
        cls.p1 = Pool.objects.create(name='T1 Pool 1', tournament=cls.t1,
                                     organiser=cls.o1)
        cls.p2 = Pool.objects.create(name='T1 Pool 2', tournament=cls.t1,
                                     organiser=cls.o1)
        cls.p3 = Pool.objects.create(name='T2 Pool 1', tournament=cls.t2,
                                     organiser=cls.o1)
        cls.p4 = Pool.objects.create(name='T2 Pool 2', tournament=cls.t2,
                                     organiser=cls.o1)
        cls.ps1 = PoolStyle.objects.create(name='T1 Pool 2 Style', pool=cls.p2)
        cls.ps2 = PoolStyle.objects.create(name='T2 Pool 1 Style', pool=cls.p3)

    def test_foreign_key(self):
        # One query for the tournament, one for its pools; pool.tournament
        # must come back as the cached instance, not a third query.
        with self.assertNumQueries(2):
            tournament = Tournament.objects.get(pk=self.t1.pk)
            pool = tournament.pool_set.all()[0]
            self.assertIs(tournament, pool.tournament)

    def test_foreign_key_prefetch_related(self):
        with self.assertNumQueries(2):
            tournament = (Tournament.objects
                          .prefetch_related('pool_set').get(pk=self.t1.pk))
            pool = tournament.pool_set.all()[0]
            self.assertIs(tournament, pool.tournament)

    def test_foreign_key_multiple_prefetch(self):
        with self.assertNumQueries(2):
            tournaments = list(Tournament.objects
                               .prefetch_related('pool_set').order_by('pk'))
            pool1 = tournaments[0].pool_set.all()[0]
            self.assertIs(tournaments[0], pool1.tournament)
            pool2 = tournaments[1].pool_set.all()[0]
            self.assertIs(tournaments[1], pool2.tournament)

    def test_queryset_or(self):
        # Combining two related querysets with | keeps both cached parents.
        tournament_1 = self.t1
        tournament_2 = self.t2
        with self.assertNumQueries(1):
            pools = tournament_1.pool_set.all() | tournament_2.pool_set.all()
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, {tournament_1, tournament_2})

    def test_queryset_or_different_cached_items(self):
        tournament = self.t1
        organiser = self.o1
        with self.assertNumQueries(1):
            pools = tournament.pool_set.all() | organiser.pool_set.all()
            first = pools.filter(pk=self.p1.pk)[0]
            self.assertIs(first.tournament, tournament)
            self.assertIs(first.organiser, organiser)

    def test_queryset_or_only_one_with_precache(self):
        tournament_1 = self.t1
        tournament_2 = self.t2
        # 2 queries here as pool 3 has tournament 2, which is not cached
        with self.assertNumQueries(2):
            pools = (tournament_1.pool_set.all() |
                     Pool.objects.filter(pk=self.p3.pk))
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, {tournament_1, tournament_2})
        # and the other direction
        with self.assertNumQueries(2):
            pools = (Pool.objects.filter(pk=self.p3.pk) |
                     tournament_1.pool_set.all())
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, {tournament_1, tournament_2})

    def test_queryset_and(self):
        tournament = self.t1
        organiser = self.o1
        with self.assertNumQueries(1):
            pools = tournament.pool_set.all() & organiser.pool_set.all()
            first = pools.filter(pk=self.p1.pk)[0]
            self.assertIs(first.tournament, tournament)
            self.assertIs(first.organiser, organiser)

    def test_one_to_one(self):
        with self.assertNumQueries(2):
            style = PoolStyle.objects.get(pk=self.ps1.pk)
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_select_related(self):
        with self.assertNumQueries(1):
            style = PoolStyle.objects.select_related('pool').get(pk=self.ps1.pk)
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_multi_select_related(self):
        with self.assertNumQueries(1):
            poolstyles = list(PoolStyle.objects
                              .select_related('pool').order_by('pk'))
            self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle)
            self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle)

    def test_one_to_one_prefetch_related(self):
        with self.assertNumQueries(2):
            style = (PoolStyle.objects
                     .prefetch_related('pool').get(pk=self.ps1.pk))
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_multi_prefetch_related(self):
        with self.assertNumQueries(2):
            poolstyles = list(PoolStyle.objects
                              .prefetch_related('pool').order_by('pk'))
            self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle)
            self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle)

    def test_reverse_one_to_one(self):
        with self.assertNumQueries(2):
            pool = Pool.objects.get(pk=self.p2.pk)
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_select_related(self):
        with self.assertNumQueries(1):
            pool = Pool.objects.select_related('poolstyle').get(pk=self.p2.pk)
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_prefetch_related(self):
        with self.assertNumQueries(2):
            pool = (Pool.objects
                    .prefetch_related('poolstyle').get(pk=self.p2.pk))
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_multi_select_related(self):
        # Only p2 (index 1) and p3 (index 2) have a style in the fixture.
        with self.assertNumQueries(1):
            pools = list(Pool.objects.select_related('poolstyle')
                         .order_by('pk'))
            self.assertIs(pools[1], pools[1].poolstyle.pool)
            self.assertIs(pools[2], pools[2].poolstyle.pool)

    def test_reverse_one_to_one_multi_prefetch_related(self):
        with self.assertNumQueries(2):
            pools = list(Pool.objects.prefetch_related('poolstyle')
                         .order_by('pk'))
            self.assertIs(pools[1], pools[1].poolstyle.pool)
            self.assertIs(pools[2], pools[2].poolstyle.pool)
bsd-3-clause
DatapuntAmsterdam/handelsregister
web/handelsregister/datasets/hr/models.py
1
28302
import re

from django.contrib.gis.db import models
from django.contrib.postgres.fields import JSONField

from datasets.sbicodes.models import SBICodeHierarchy


class Persoon(models.Model):
    """
    Persoon (PRS) — person.

    A Persoon is anyone who can hold rights and obligations.  Persoon is the
    umbrella term (covering NatuurlijkPersoon, NietNatuurlijkPersoon and
    NaamPersoon) used to talk about all of them at once.  Every Persoon in
    the trade register has either an ownership (Eigenaarschap) and/or at
    least one Functievervulling recording the role of the Persoon.

    Persoon subtypes:

    Natuurlijk Persoon (NPS) — a natural person, i.e. a human being.  Every
    NatuurlijkPersoon has either an Eigenaarschap or a Functievervulling in
    which they act in a relevant role such as director, shareholder or
    authorised representative.  Personal data is only authentic when the
    NatuurlijkPersoon is the owner of a sole proprietorship (eenmanszaak),
    or participates as partner, member or shipping-company member in a
    Samenwerkingsverband (partnership).

    Niet-natuurlijk Persoon (NNP) — a Persoon with rights and obligations
    that is not a natural person.  The definition follows the national
    "stelselcatalogus".  EenmanszaakMetMeerdereEigenaren and
    RechtspersoonInOprichting are not registered as Samenwerkingsverband;
    the trade register treats them as non-natural persons.

    NNP subtypes:
    - Buitenlandse Vennootschap (BRV): a company incorporated under foreign
      law.  The register records the foreign registration number, the name
      of the foreign register, and the place and country where that
      register is kept.
    - Binnenlandse Niet-natuurlijk Persoon (BNP): a non-natural person that
      exists under Dutch law — every Dutch legal form except the sole
      proprietorship.
    """
    type_choices = [
        ('natuurlijkPersoon', 'natuurlijkPersoon'),
        ('naamPersoon', 'naamPersoon'),
        ('buitenlandseVennootschap', 'buitenlandseVennootschap'),
        # NOTE(review): stored value and display label differ here, unlike
        # every other entry — confirm this mapping is intentional.
        ('eenmanszaak', 'eenmanszaakMetMeerdereEigenaren'),
        ('rechtspersoon', 'rechtspersoon'),
        ('rechtspersoonInOprichting', 'rechtspersoonInOprichting'),
        ('samenwerkingsverband', 'samenwerkingsverband'),
    ]

    rol_choices = [
        ('EIGENAAR', 'EIGENAAR'),
        ('AANSPRAKELIJKE', 'AANSPRAKELIJKE'),
    ]

    # Primary key taken from the source registration (18-digit number).
    id = models.DecimalField(
        primary_key=True, max_digits=18, decimal_places=0)

    rol = models.CharField(
        max_length=14, blank=True, null=True, choices=rol_choices)

    rechtsvorm = models.CharField(max_length=50, blank=True, null=True)

    uitgebreide_rechtsvorm = models.CharField(
        max_length=240, blank=True, null=True)

    volledige_naam = models.CharField(max_length=240, blank=True, null=True)

    typering = models.CharField(
        max_length=50, blank=True, null=True, choices=type_choices)

    reden_insolvatie = models.CharField(max_length=50, blank=True, null=True)

    # BeperkinginRechtshandeling (BIR) — restriction in legal capacity.

    # Exactly one of the two links below is expected to be set, depending
    # on whether this Persoon is a natural person or not (see help_text).
    natuurlijkpersoon = models.OneToOneField(
        'NatuurlijkPersoon',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
        help_text="niet null bij natuurlijkpersoon",
    )

    niet_natuurlijkpersoon = models.OneToOneField(
        'NietNatuurlijkPersoon',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
        help_text="niet null bij niet-natuurlijkpersoon",
    )

    datum_aanvang = models.DateField(
        max_length=8, blank=True, null=True,
        help_text="De datum van aanvang van de MaatschappelijkeActiviteit",
    )

    datum_einde = models.DateField(
        max_length=8, blank=True, null=True,
        help_text="""
        De datum van beëindiging van de MaatschappelijkeActiviteit""",
    )

    soort = models.CharField(max_length=21, blank=True, null=True)

    # NOTE(review): the help_text below looks copy-pasted from
    # datum_aanvang; this field records the deregistration date —
    # confirm against the source schema and fix the text.
    datumuitschrijving = models.DateField(
        max_length=8, blank=True, null=True,
        help_text="De datum van aanvang van de MaatschappelijkeActiviteit",
    )

    naam = models.CharField(max_length=600, blank=True, null=True)

    # Communication details (phone number plus dialling code).
    nummer = models.CharField(max_length=15, blank=True, null=True)

    toegangscode = models.DecimalField(
        max_digits=4, decimal_places=0, blank=True, null=True)

    faillissement = models.BooleanField()

    status = models.CharField(max_length=21, blank=True, null=True)

    duur = models.CharField(max_length=240, blank=True, null=True)

    def __str__(self):
        """Id plus any known name/legal-form parts, dash-separated."""
        display = "{}".format(self.id)

        if self.volledige_naam:
            display = "{} - {}".format(display, self.volledige_naam)

        if self.rechtsvorm:
            display = "{} - {}".format(display, self.rechtsvorm)

        if self.uitgebreide_rechtsvorm:
            display = "{} - {}".format(display, self.uitgebreide_rechtsvorm)

        return display


class NatuurlijkPersoon(models.Model):
    """
    Natuurlijk Persoon (NPS) — natural person.
    """
    id = models.CharField(primary_key=True, max_length=20)
    voornamen = models.CharField(max_length=240, blank=True, null=True)
    geslachtsnaam = models.CharField(max_length=240, blank=True, null=True)
    geslachtsaanduiding = models.CharField(
        max_length=20, blank=True, null=True)
    huwelijksdatum = models.DateField(
        max_length=8, blank=True, null=True)
    geboortedatum = models.DateField(
        max_length=8, blank=True, null=True)
    geboorteland = models.CharField(max_length=50, blank=True, null=True)
    geboorteplaats = models.CharField(max_length=240, blank=True, null=True)


class NietNatuurlijkPersoon(models.Model):
    """
    Niet Natuurlijk Persoon (NNP) — non-natural person.
    """
    id = models.CharField(primary_key=True, max_length=20)
    # RSIN: legal-entity/partnership identification number.
    rsin = models.CharField(db_index=True, max_length=9, blank=True,
                            null=True)
    verkorte_naam = models.CharField(max_length=60, blank=True, null=True)
    ook_genoemd = models.CharField(max_length=600, blank=True, null=True)


class Functievervulling(models.Model):
    """
    Functievervulling (FVV) — fulfilment of a position.

    A Functievervulling is the fulfilment by a Persoon of a position for
    another Persoon: it links the Persoon acting as functionary to the
    Persoon that owns the Onderneming or MaatschappelijkeActiviteit.
    """
    id = models.CharField(primary_key=True, max_length=20)
    functietitel = models.CharField(max_length=20)

    heeft_aansprakelijke = models.ForeignKey(
        'Persoon', models.DO_NOTHING,
        related_name='heeft_aansprakelijke',
        blank=True, null=True,
        help_text="",
    )

    is_aansprakelijke = models.ForeignKey(
        'Persoon', models.DO_NOTHING,
        related_name='is_aansprakelijke',
        blank=True, null=True,
        help_text="",
    )

    soortbevoegdheid = models.CharField(max_length=20, blank=True, null=True)

    def __str__(self):
        """Liable person's name, position title and kind of authority."""
        naam = ''
        if self.is_aansprakelijke:
            naam = self.is_aansprakelijke.volledige_naam
        return "{} - {} - {}".format(
            naam, self.functietitel, self.soortbevoegdheid)


class Activiteit(models.Model):
    """
    Activiteit (ACT) — activity.

    The entities ActiviteitenCommercieleVestiging,
    ActiviteitenNietCommercieleVestiging and ActiviteitenRechtpersoon are
    derived from this entity.  See also the explanation of "Activiteiten"
    in the official catalogue, paragraph 1.5 of the semantic data model.
    """
    id = models.CharField(
        primary_key=True, max_length=21
    )

    activiteitsomschrijving = models.TextField(
        blank=True, null=True,
        help_text="""
        De omschrijving van de activiteiten die de
        Vestiging of Rechtspersoon uitoefent"""
    )

    # This is actually a foreign key to the CBS_sbicode table.
    # However, not all data is included there, which would lead to
    # foreign-key constraint failures — hence a plain CharField.
    sbi_code = models.CharField(
        db_index=True, max_length=6,
        help_text="De codering van de activiteit conform de SBI2008"
    )

    sbi_omschrijving = models.CharField(
        max_length=300,
        help_text="Omschrijving van de activiteit conform de SBI2008"
    )

    hoofdactiviteit = models.BooleanField(
        help_text="""
        Indicatie die aangeeft welke van de activiteiten de
        hoofdactiviteit is"""
    )

    sbi_code_tree = models.ForeignKey(
        SBICodeHierarchy, models.DO_NOTHING,
        null=True, blank=True
    )


class MaatschappelijkeActiviteit(models.Model):
    """
    Maatschappelijke Activiteit (MAC) — commercial/societal activity.

    A MaatschappelijkeActiviteit is the activity of a NatuurlijkPersoon or
    NietNatuurlijkPersoon: the total of all activities carried out by that
    person.  A MaatschappelijkeActiviteit can also occur as an Onderneming.
    """
    id = models.DecimalField(
        primary_key=True, max_digits=18, decimal_places=0)

    naam = models.CharField(
        max_length=600, blank=True, null=True,
        help_text="""
        De (statutaire) naam of eerste handelsnaam van de inschrijving""",
    )

    kvk_nummer = models.CharField(
        unique=True, max_length=8, blank=True, null=True,
        help_text="""
        Betreft het identificerende gegeven voor de
        MaatschappelijkeActiviteit, het KvK-nummer""",
    )

    datum_aanvang = models.DateField(
        max_length=8, blank=True, null=True,
        help_text="De datum van aanvang van de MaatschappelijkeActiviteit",
    )

    datum_einde = models.DateField(
        max_length=8, blank=True, null=True,
        help_text="""
        De datum van beëindiging van de MaatschappelijkeActiviteit""",
    )

    incidenteel_uitlenen_arbeidskrachten = models.NullBooleanField(
        help_text="""
        Indicatie die aangeeft of de ondernemer tijdelijk arbeidskrachten
        ter beschikking stelt en dit niet onderdeel is van zijn 'reguliere'
        activiteiten.""",
    )

    non_mailing = models.NullBooleanField(
        help_text="""
        Indicator die aangeeft of de inschrijving haar adresgegevens
        beschikbaar stelt voor mailing-doeleinden.""",
    )

    communicatiegegevens = models.ManyToManyField(
        'Communicatiegegevens',
        help_text="Afgeleid van communicatiegegevens van inschrijving",
    )

    activiteiten = models.ManyToManyField(
        'Activiteit',
        help_text="""
        De SBI-activiteiten van de MaatschappelijkeActiviteit is het totaal
        van alle SBI-activiteiten die voorkomen bij de
        MaatschappelijkeActiviteit behorende "
        NietCommercieleVestigingen en bij de Rechtspersoon"""
    )

    postadres = models.ForeignKey(
        'Locatie', models.DO_NOTHING,
        related_name="+",
        blank=True, null=True,
        help_text="postadres",
    )

    bezoekadres = models.ForeignKey(
        'Locatie', models.DO_NOTHING,
        related_name="+",
        blank=True, null=True,
        help_text="bezoekadres",
    )

    eigenaar = models.ForeignKey(
        'Persoon', models.DO_NOTHING,
        related_name="maatschappelijke_activiteit",
        blank=True, null=True,
        help_text="",
    )

    # The owner is not always present in our system; kept as a raw source
    # (MKS) id — possibly to be resolved via the KvK later.
    eigenaar_mks_id = models.DecimalField(
        blank=True, null=True, db_index=True,
        max_digits=18, decimal_places=0)

    onderneming = models.OneToOneField(
        'Onderneming',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
        help_text="",
    )

    hoofdvestiging = models.ForeignKey(
        'Vestiging',
        null=True,
        blank=True,
        on_delete=models.SET_NULL
    )

    @property
    def locatie(self):
        """Visiting address when present, otherwise the postal address."""
        return self.bezoekadres if self.bezoekadres else self.postadres

    def __str__(self):
        return "{}".format(self.naam)


class Onderneming(models.Model):
    """
    Onderneming — enterprise.

    An Onderneming exists when a sufficiently independent organisational
    unit of one or more persons delivers, through sufficient input of
    labour or means, goods or services to third parties with the aim of
    achieving material gain.
    """
    id = models.CharField(primary_key=True, max_length=20)

    totaal_werkzame_personen = models.IntegerField(
        blank=True, null=True
    )
    fulltime_werkzame_personen = models.IntegerField(
        blank=True, null=True
    )
    parttime_werkzame_personen = models.IntegerField(
        blank=True, null=True
    )

    handelsnamen = models.ManyToManyField('Handelsnaam')


class CommercieleVestiging(models.Model):
    """
    A classification of the Vestiging (establishment) of the Onderneming.
    """
    id = models.CharField(
        primary_key=True, max_length=20
    )

    totaal_werkzame_personen = models.IntegerField(
        blank=True, null=True
    )
    fulltime_werkzame_personen = models.IntegerField(
        blank=True, null=True
    )
    parttime_werkzame_personen = models.IntegerField(
        blank=True, null=True
    )

    import_activiteit = models.NullBooleanField()
    export_activiteit = models.NullBooleanField()


class NietCommercieleVestiging(models.Model):
    """
    Non-commercial counterpart of CommercieleVestiging.
    """
    id = models.CharField(
        primary_key=True, max_length=20
    )
    ook_genoemd = models.CharField(max_length=200, null=True, blank=True)
    verkorte_naam = models.CharField(max_length=60, null=True, blank=True)


# Matches a Dutch postcode (e.g. ' 1234AB ') inside a full KvK address
# string; used below to cut the address short for display.
KVK_ADRES = re.compile(r' \d\d\d\d[A-Z][A-Z] ')


class Vestiging(models.Model):
    """
    Vestiging (VES) — establishment.

    A Vestiging is a building, or a complex of buildings, where durable
    exercise of the activities of an Onderneming or Rechtspersoon takes
    place.  The Vestiging is a combination of Activiteiten and Locatie.
    """
    id = models.CharField(primary_key=True, max_length=20)

    maatschappelijke_activiteit = models.ForeignKey(
        'MaatschappelijkeActiviteit',
        related_name='vestigingen',
        db_index=True,
        on_delete=models.DO_NOTHING,
    )

    vestigingsnummer = models.CharField(
        max_length=12, unique=True,
        help_text="Betreft het identificerende gegeven voor de Vestiging"
    )

    hoofdvestiging = models.BooleanField()

    naam = models.CharField(max_length=200, null=True, blank=True)

    datum_aanvang = models.DateField(
        null=True, blank=True,
        help_text="De datum van aanvang van de Vestiging"
    )

    datum_einde = models.DateField(
        null=True, blank=True,
        help_text="De datum van beëindiging van de Vestiging"
    )

    datum_voortzetting = models.DateField(
        null=True, blank=True,
        help_text="De datum van voortzetting van de Vestiging"
    )

    communicatiegegevens = models.ManyToManyField(
        'Communicatiegegevens',
        help_text="Afgeleid van communicatiegegevens van inschrijving",
    )

    postadres = models.ForeignKey(
        'Locatie', models.DO_NOTHING,
        related_name="+",
        blank=True, null=True,
        help_text="postadres",
    )

    bezoekadres = models.ForeignKey(
        'Locatie', models.DO_NOTHING,
        related_name="+",
        blank=True, null=True,
        help_text="bezoekadres",
    )

    commerciele_vestiging = models.OneToOneField(
        'CommercieleVestiging',
        on_delete=models.CASCADE,
        null=True, blank=True)

    niet_commerciele_vestiging = models.OneToOneField(
        'NietCommercieleVestiging',
        on_delete=models.CASCADE,
        null=True, blank=True)

    activiteiten = models.ManyToManyField('Activiteit')

    handelsnamen = models.ManyToManyField('Handelsnaam')

    @property
    def _adres(self):
        """
        Short display address: street + number (+ letter/suffix) from the
        visiting address, or the full postal address marked '(post)'.
        """
        adres = None
        if self.bezoekadres:
            toevoeging = ""
            if self.bezoekadres.huisletter:
                toevoeging = self.bezoekadres.huisletter
            if self.bezoekadres.huisnummertoevoeging:
                toevoeging = "{}-{}".format(
                    toevoeging, self.bezoekadres.huisnummertoevoeging)

            adres = "{} {}{}".format(
                self.bezoekadres.straatnaam,
                self.bezoekadres.huisnummer,
                toevoeging,
            )
        elif self.postadres:
            adres = "{} (post)".format(self.postadres.volledig_adres)
        return adres

    @property
    def locatie(self):
        """Visiting address when present, otherwise the postal address."""
        return self.bezoekadres if self.bezoekadres else self.postadres

    def __str__(self):
        # Display name plus the address up to (but excluding) the
        # postcode, when a location is known.
        kvk_adres_short = None
        handelsnaam = "{}".format(self.naam)
        # adres = self._adres
        if self.locatie:
            kvk_adres = self.locatie.volledig_adres
            kvk_adres_short = KVK_ADRES.split(kvk_adres)[0]
        if kvk_adres_short:
            return "{} - {}".format(handelsnaam, kvk_adres_short)
        return handelsnaam


class Locatie(models.Model):
    """
    Locatie (LOC) — a locatable place on earth.
    """
    id = models.CharField(
        primary_key=True, max_length=18
    )

    volledig_adres = models.CharField(
        max_length=550, blank=True, null=True,
        help_text="Samengesteld adres "
    )

    toevoeging_adres = models.TextField(
        blank=True, null=True,
        help_text="Vrije tekst om een Adres nader aan te kunnen duiden"
    )

    afgeschermd = models.BooleanField(
        help_text="Geeft aan of het adres afgeschermd is of niet"
    )

    postbus_nummer = models.CharField(
        db_index=True, max_length=10, blank=True, null=True,
    )

    # BAG (Dutch building/address registry) identifiers and links.
    bag_numid = models.CharField(
        max_length=16, db_index=True, blank=True, null=True)
    bag_vbid = models.CharField(
        max_length=16, db_index=True, blank=True, null=True)

    bag_nummeraanduiding = models.URLField(
        max_length=200, blank=True, null=True,
        help_text="Link naar de BAG Nummeraanduiding"
    )

    bag_adresseerbaar_object = models.URLField(
        max_length=200, blank=True, null=True,
        help_text="Link naar het BAG Adresseerbaar object"
    )

    straat_huisnummer = models.CharField(max_length=220, blank=True,
                                         null=True)
    postcode_woonplaats = models.CharField(
        max_length=220, blank=True, null=True)
    regio = models.CharField(max_length=170, blank=True, null=True)
    land = models.CharField(max_length=50, blank=True, null=True)

    # RD New (EPSG:28992) point geometry.
    geometrie = models.PointField(srid=28992, blank=True, null=True)

    # Legacy/extra location fields that currently exist in the data.
    straatnaam = models.CharField(
        db_index=True, max_length=100, blank=True, null=True)
    toevoegingadres = models.CharField(max_length=100, blank=True, null=True)
    huisletter = models.CharField(max_length=1, blank=True, null=True)

    huisnummer = models.DecimalField(
        db_index=True,
        max_digits=5, decimal_places=0, blank=True, null=True)

    huisnummertoevoeging = models.CharField(
        max_length=5, blank=True, null=True)

    postcode = models.CharField(
        db_index=True, max_length=6, blank=True, null=True)

    # City/town.
    plaats = models.CharField(
        db_index=True, max_length=100, blank=True, null=True)

    # Auto-fix related fields.
    # Indication if corrected by auto search.
    correctie = models.NullBooleanField()
    correctie_level = models.IntegerField(null=True)

    # Last updated (by search).
    updated_at = models.DateTimeField(auto_now=True, null=True)

    # Query string used to fix the search data.
    query_string = models.CharField(
        db_index=True, max_length=180, blank=True, null=True,
    )

    def __str__(self):
        return "{}".format(self.volledig_adres)


class Handelsnaam(models.Model):
    """
    Handelsnaam (HN) — trade name.

    A Handelsnaam is a name under which an Onderneming or one of its
    Vestigingen trades.  An Onderneming may have multiple trade names; a
    Vestiging has at least one (possibly more) trade names under which it
    operates.  For private-law legal persons the statutory name is always
    also one of the trade names of the associated Onderneming.  The trade
    names of the Onderneming are the union of the trade names of all its
    Vestigingen; duplicates are shown only once.
    """
    id = models.CharField(
        primary_key=True, max_length=20
    )
    handelsnaam = models.CharField(max_length=500, blank=True, null=True)

    def __str__(self):
        return "{}".format(self.handelsnaam)


class Communicatiegegevens(models.Model):
    """
    Communicatiegegevens (COM) — communication details.

    For a Rechtspersoon that has no Onderneming and no Vestiging, or for a
    Vestiging, the trade register records:
    - telephone number
    - fax number
    - e-mail address
    - internet address
    """
    SOORT_COMMUNICATIE_TELEFOON = 'Telefoon'
    SOORT_COMMUNICATIE_FAX = 'Fax'

    SOORT_COMMUNICATIE_CHOICES = (
        (SOORT_COMMUNICATIE_TELEFOON, SOORT_COMMUNICATIE_TELEFOON),
        (SOORT_COMMUNICATIE_FAX, SOORT_COMMUNICATIE_FAX),
    )

    id = models.CharField(
        primary_key=True, max_length=21
    )

    domeinnaam = models.URLField(
        max_length=300, blank=True, null=True,
        help_text="Het internetadres (URL)"
    )
    emailadres = models.EmailField(
        max_length=200, blank=True, null=True,
        help_text="Het e-mailadres waar op de onderneming gemaild kan worden"
    )

    toegangscode = models.CharField(
        max_length=10, blank=True, null=True,
        help_text="""
        De internationale toegangscode van het land waarop het nummer
        (telefoon of fax) betrekking heeft"""
    )

    communicatie_nummer = models.CharField(
        max_length=15, blank=True, null=True,
        help_text="Nummer is het telefoon- of faxnummer zonder opmaak"
    )

    soort_communicatie_nummer = models.CharField(
        max_length=10, blank=True, null=True,
        choices=SOORT_COMMUNICATIE_CHOICES
    )


class RechterlijkeUitspraak(models.Model):
    """
    Abstract class Rechtelijke Uitspraak (UIT) — court ruling.

    A ruling by a judge that affects the registration in the trade
    register.  This is an abstraction so that other classes can actually
    be provided with RechtelijkeUitspraak data.
    """
    pass


class Kapitaal(models.Model):
    """
    Kapitaal (KAP) — capital.

    For a public limited company (NV), a private limited company (BV), a
    European public company (SE) or a European cooperative society (SCE)
    the trade register records: the authorised capital and the amounts of
    issued and paid-up capital, broken down per share class when there are
    several classes of shares.
    """
    pass


class GeoVestigingen(models.Model):
    """
    Geo table of joined tables, denormalised so the map server can serve
    it at full speed.  Rows are MAC and VES registrations located in
    Amsterdam.
    """
    # NOTE: multiple activities per vestigingsnummer are possible, so this
    # column is intentionally not unique.
    vestigingsnummer = models.CharField(
        max_length=12, db_index=True, null=True,
        help_text="Betreft het identificerende gegeven voor de Vestiging"
    )

    kvk_nummer = models.CharField(
        max_length=8, db_index=True, null=True,
        help_text="Betreft het identificerende gegeven voor Maatschappelijke"
    )

    sbi_code = models.CharField(
        null=True, db_index=True, max_length=5,
        help_text="De codering van de activiteit conform de SBI2008"
    )

    activiteitsomschrijving = models.TextField(
        blank=True, null=True,
        help_text="""
        De omschrijving van de activiteiten die de
        Vestiging of Rechtspersoon uitoefent"""
    )

    # Type indication used by the geosearch services.
    subtype = models.CharField(
        db_index=True, max_length=200, null=True, blank=True,
    )

    naam = models.CharField(
        max_length=600, null=True, blank=True,
    )

    uri = models.CharField(
        max_length=200, null=True, blank=True,
    )

    hoofdvestiging = models.BooleanField()

    locatie_type = models.CharField(
        max_length=1, blank=True, null=True,
        choices=[
            ('B', 'Bezoek'),
            ('P', 'Post'),
            ('V', 'Vestiging')])

    geometrie = models.PointField(srid=28992, blank=True, null=True)

    sbi_tree = JSONField(null=True)

    # SBI hierarchy levels, split out for fast filtering.
    sbi_main_category = models.CharField(
        max_length=1, db_index=True, null=True)
    sbi_sub_category = models.CharField(
        max_length=2, db_index=True, null=True)
    sbi_sub_sub_category = models.CharField(
        max_length=3, db_index=True, null=True)

    # qa_tree = JSONField(null=True)

    # SBI QA options.
    q1 = models.TextField(null=True, db_index=True)
    q2 = models.TextField(null=True, db_index=True)
    q3 = models.TextField(null=True, db_index=True)

    postadres = models.ForeignKey(
        'Locatie', models.DO_NOTHING,
        related_name="+",
        blank=True, null=True,
        help_text="postadres")

    bezoekadres = models.ForeignKey(
        'Locatie', models.DO_NOTHING,
        related_name="+",
        blank=True, null=True,
        help_text="bezoekadres")

    bag_vbid = models.CharField(
        max_length=16, blank=True, null=True)

    # Indication if corrected by auto search.
    correctie = models.NullBooleanField()


class DataSelectie(models.Model):
    # Pre-rendered JSON payload per uid; presumably serves the
    # data-selection API — TODO confirm against the consumers.
    uid = models.CharField(
        max_length=21, db_index=True, unique=True,
    )
    bag_numid = models.CharField(
        max_length=16, blank=True, null=True
    )
    api_json = JSONField()


# SQL VIEW ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

class BetrokkenPersonen(models.Model):
    """
    Read-only mapping onto the database view ``hr_betrokken_personen``
    (``managed = False``): persons involved with an establishment.
    """

    class Meta:
        db_table = 'hr_betrokken_personen'
        managed = False

    mac_naam = models.CharField(
        max_length=600,
        help_text='Maatschappelijke activiteit naam')

    kvk_nummer = models.CharField(
        max_length=8, blank=True, null=True,
        help_text="Kvk nummer"
    )

    vestiging = models.ForeignKey(
        DataSelectie, models.DO_NOTHING,
        to_field="id",
        db_column="vestiging_id",
        blank=True, null=True,
        help_text="Vestiging id"
    )

    vestigingsnummer = models.CharField(
        max_length=12, unique=True,
        help_text="Betreft het identificerende gegeven voor de Vestiging"
    )

    persoons_id = models.IntegerField(
        null=True)

    rol = models.CharField(
        max_length=14, blank=True, null=True,
        help_text="Rol"
    )

    naam = models.CharField(
        max_length=600, blank=True, null=True,
        help_text="Persoonsnaam (handelsregister terminologie)"
    )

    rechtsvorm = models.CharField(
        max_length=50, blank=True, null=True,
        help_text="Rechtsvorm"
    )

    functietitel = models.CharField(
        max_length=20, blank=True, null=True,
        help_text="Titel van de functionaris"
    )

    soortbevoegdheid = models.CharField(
        max_length=20, blank=True, null=True,
        help_text="Bevoegdheid van de functionaris"
    )

    bevoegde_naam = models.CharField(
        max_length=240, blank=True, null=True,
        help_text="Bevoegdheid van de functionaris"
    )

    datum_aanvang = models.DateField(
        max_length=8, blank=True, null=True,
        help_text="De datum van aanvang van de MaatschappelijkeActiviteit",
    )

    datum_einde = models.DateField(
        max_length=8, blank=True, null=True,
        help_text="""
        De datum van beëindiging van de MaatschappelijkeActiviteit""",
    )
mpl-2.0
xmission/d-note
venv/lib/python2.7/site-packages/werkzeug/http.py
54
35264
# -*- coding: utf-8 -*-
"""
    werkzeug.http
    ~~~~~~~~~~~~~

    Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
    HTTP data.  Most of the classes and functions provided by this module are
    used by the wrappers, but they are useful on their own, too, especially if
    the response and request objects are not used.

    This covers some of the more HTTP centric features of WSGI, some other
    utilities such as cookie handling are documented in the `werkzeug.utils`
    module.

    :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
# Py2/Py3 compatible imports for date parsing and HTTP list parsing.
try:
    from email.utils import parsedate_tz
except ImportError:  # pragma: no cover
    from email.Utils import parsedate_tz
try:
    from urllib2 import parse_http_list as _parse_list_header
except ImportError:  # pragma: no cover
    from urllib.request import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
from hashlib import md5
import base64

from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
     _cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
     string_types, try_coerce_native, to_bytes, PY2, \
     integer_types

# Cookies are encoded/decoded as latin1 per the de-facto browser behavior.
_cookie_charset = 'latin1'

# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
_accept_re = re.compile(
    r'''(                       # media-range capturing-parenthesis
          [^\s;,]+              # type/subtype
          (?:[ \t]*;[ \t]*      # ";"
            (?:                 # parameter non-capturing-parenthesis
              [^\s;,q][^\s;,]*  # token that doesn't start with "q"
            |                   # or
              q[^\s;,=][^\s;,]* # token that is more than just "q"
            )
          )*                    # zero or more parameters
        )                       # end of media-range
        (?:[ \t]*;[ \t]*q=      # weight is a "q" parameter
           (\d*(?:\.\d+)?)      # qvalue capturing-parentheses
           [^,]*                # "extension" accept params: who cares?
        )?                      # accept params are optional
    ''', re.VERBOSE)

# Characters allowed in an HTTP "token" (RFC 7230); anything outside
# this set forces a value to be sent as a quoted-string.
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                         '^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(
    r';\s*(%s|[^\s;,=]+)\s*(?:=\s*(%s|[^;,]+)?)?\s*' %
    (_quoted_string_re, _quoted_string_re)
)
_option_header_start_mime_type = re.compile(r',\s*([^;,\s]+)([;,]\s*.+)?')

_entity_headers = frozenset([
    'allow', 'content-encoding', 'content-language', 'content-length',
    'content-location', 'content-md5', 'content-range', 'content-type',
    'expires', 'last-modified'
])
_hop_by_hop_headers = frozenset([
    'connection', 'keep-alive', 'proxy-authenticate',
    'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
    'upgrade'
])


#: Mapping of HTTP status codes to their canonical reason phrases.
HTTP_STATUS_CODES = {
    100:    'Continue',
    101:    'Switching Protocols',
    102:    'Processing',
    200:    'OK',
    201:    'Created',
    202:    'Accepted',
    203:    'Non Authoritative Information',
    204:    'No Content',
    205:    'Reset Content',
    206:    'Partial Content',
    207:    'Multi Status',
    226:    'IM Used',              # see RFC 3229
    300:    'Multiple Choices',
    301:    'Moved Permanently',
    302:    'Found',
    303:    'See Other',
    304:    'Not Modified',
    305:    'Use Proxy',
    307:    'Temporary Redirect',
    400:    'Bad Request',
    401:    'Unauthorized',
    402:    'Payment Required',     # unused
    403:    'Forbidden',
    404:    'Not Found',
    405:    'Method Not Allowed',
    406:    'Not Acceptable',
    407:    'Proxy Authentication Required',
    408:    'Request Timeout',
    409:    'Conflict',
    410:    'Gone',
    411:    'Length Required',
    412:    'Precondition Failed',
    413:    'Request Entity Too Large',
    414:    'Request URI Too Long',
    415:    'Unsupported Media Type',
    416:    'Requested Range Not Satisfiable',
    417:    'Expectation Failed',
    418:    'I\'m a teapot',        # see RFC 2324
    422:    'Unprocessable Entity',
    423:    'Locked',
    424:    'Failed Dependency',
    426:    'Upgrade Required',
    428:    'Precondition Required',  # see RFC 6585
    429:    'Too Many Requests',
    431:    'Request Header Fields Too Large',
    449:    'Retry With',           # proprietary MS extension
    500:    'Internal Server Error',
    501:    'Not Implemented',
    502:    'Bad Gateway',
    503:    'Service Unavailable',
    504:    'Gateway Timeout',
    505:    'HTTP Version Not Supported',
    507:    'Insufficient Storage',
    510:    'Not Extended'
}


def wsgi_to_bytes(data):
    """coerce wsgi unicode represented bytes to real ones
    """
    if isinstance(data, bytes):
        return data
    return data.encode('latin1')  # XXX: utf8 fallback?


def bytes_to_wsgi(data):
    assert isinstance(data, bytes), 'data must be bytes'
    # On Python 2 ``bytes is str``, so the data is already WSGI-native;
    # on Python 3 it must be decoded to a latin1 "native" string.
    if isinstance(data, str):
        return data
    else:
        return data.decode('latin1')


def quote_header_value(value, extra_chars='', allow_token=True):
    """Quote a header value if necessary.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
                        unchanged.
    """
    if isinstance(value, bytes):
        value = bytes_to_wsgi(value)
    value = str(value)
    if allow_token:
        token_chars = _token_chars | set(extra_chars)
        if set(value).issubset(token_chars):
            return value
    # Escape backslashes first, then quotes, then wrap as quoted-string.
    return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')


def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    .. versionadded:: 0.5

    :param value: the header value to unquote.
    """
    if value and value[0] == value[-1] == '"':
        # this is not the real unquoting, but fixing this so that the
        # RFC is met will result in bugs with internet explorer and
        # probably some other browsers as well.  IE for example is
        # uploading files with "C:\foo\bar.txt" as filename
        value = value[1:-1]

        # if this is a filename and the starting characters look like
        # a UNC path, then just return the value without quotes.  Using the
        # replace sequence below on a UNC path has the effect of turning
        # the leading double slash into a single slash and then
        # _fix_ie_filename() doesn't work correctly.  See #458.
        if not is_filename or value[:2] != '\\\\':
            return value.replace('\\\\', '\\').replace('\\"', '"')
    return value


def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    segments = []
    if header is not None:
        segments.append(header)
    for key, value in iteritems(options):
        if value is None:
            segments.append(key)
        else:
            segments.append('%s=%s' % (key, quote_header_value(value)))
    return '; '.join(segments)


def dump_header(iterable, allow_token=True):
    """Dump an HTTP header again.  This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`.  This also quotes strings that include an
    equals sign unless you pass it as dict of key, value pairs.

    >>> dump_header({'foo': 'bar baz'})
    'foo="bar baz"'
    >>> dump_header(('foo', 'bar baz'))
    'foo, "bar baz"'

    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
                        See :func:`quote_header_value` for more details.
    """
    if isinstance(iterable, dict):
        items = []
        for key, value in iteritems(iterable):
            if value is None:
                items.append(key)
            else:
                items.append('%s=%s' % (
                    key,
                    quote_header_value(value, allow_token=allow_token)
                ))
    else:
        items = [quote_header_value(x, allow_token=allow_token)
                 for x in iterable]
    return ', '.join(items)


def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list` """ result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_dict_header(value, cls=dict): """Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict (or any other mapping object created from the type with a dict like interface provided by the `cls` arugment): >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. .. versionchanged:: 0.9 Added support for `cls` argument. :param value: a string with a dict header. :param cls: callable to use for storage of parsed results. 
:return: an instance of `cls` """ result = cls() if not isinstance(value, text_type): # XXX: validate value = bytes_to_wsgi(value) for item in _parse_list_header(value): if '=' not in item: result[item] = None continue name, value = item.split('=', 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value return result def parse_options_header(value, multiple=False): """Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('text/html; charset=utf8') ('text/html', {'charset': 'utf8'}) This should not be used to parse ``Cache-Control`` like headers that use a slightly different format. For these headers use the :func:`parse_dict_header` function. .. versionadded:: 0.5 :param value: the header to parse. :param multiple: Whether try to parse and return multiple MIME types :return: (mimetype, options) or (mimetype, options, mimetype, options, …) if multiple=True """ if not value: return '', {} result = [] value = "," + value.replace("\n", ",") while value: match = _option_header_start_mime_type.match(value) if not match: break result.append(match.group(1)) # mimetype options = {} # Parse options rest = match.group(2) while rest: optmatch = _option_header_piece_re.match(rest) if not optmatch: break option, option_value = optmatch.groups() option = unquote_header_value(option) if option_value is not None: option_value = unquote_header_value( option_value, option == 'filename') options[option] = option_value rest = rest[optmatch.end():] result.append(options) if multiple is False: return tuple(result) value = rest return tuple(result) def parse_accept_header(value, cls=None): """Parses an HTTP Accept-* header. This does not implement a complete valid algorithm but one that supports at least value and quality extraction. Returns a new :class:`Accept` object (basically a list of ``(value, quality)`` tuples sorted by the quality with some additional accessor methods). 
The second parameter can be a subclass of :class:`Accept` that is created with the parsed values and returned. :param value: the accept header string to be parsed. :param cls: the wrapper class for the return value (can be :class:`Accept` or a subclass thereof) :return: an instance of `cls`. """ if cls is None: cls = Accept if not value: return cls(None) result = [] for match in _accept_re.finditer(value): quality = match.group(2) if not quality: quality = 1 else: quality = max(min(float(quality), 1), 0) result.append((match.group(1), quality)) return cls(result) def parse_cache_control_header(value, on_update=None, cls=None): """Parse a cache control header. The RFC differs between response and request cache control, this method does not. It's your responsibility to not use the wrong control statements. .. versionadded:: 0.5 The `cls` was added. If not specified an immutable :class:`~werkzeug.datastructures.RequestCacheControl` is returned. :param value: a cache control header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.CacheControl` object is changed. :param cls: the class for the returned object. By default :class:`~werkzeug.datastructures.RequestCacheControl` is used. :return: a `cls` object. """ if cls is None: cls = RequestCacheControl if not value: return cls(None, on_update) return cls(parse_dict_header(value), on_update) def parse_set_header(value, on_update=None): """Parse a set-like header and return a :class:`~werkzeug.datastructures.HeaderSet` object: >>> hs = parse_set_header('token, "quoted value"') The return value is an object that treats the items case-insensitively and keeps the order of the items: >>> 'TOKEN' in hs True >>> hs.index('quoted value') 1 >>> hs HeaderSet(['token', 'quoted value']) To create a header from the :class:`HeaderSet` again, use the :func:`dump_header` function. :param value: a set header to be parsed. 
:param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.HeaderSet` object is changed. :return: a :class:`~werkzeug.datastructures.HeaderSet` """ if not value: return HeaderSet(None, on_update) return HeaderSet(parse_list_header(value), on_update) def parse_authorization_header(value): """Parse an HTTP basic/digest authorization header transmitted by the web browser. The return value is either `None` if the header was invalid or not given, otherwise an :class:`~werkzeug.datastructures.Authorization` object. :param value: the authorization header to parse. :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`. """ if not value: return value = wsgi_to_bytes(value) try: auth_type, auth_info = value.split(None, 1) auth_type = auth_type.lower() except ValueError: return if auth_type == b'basic': try: username, password = base64.b64decode(auth_info).split(b':', 1) except Exception: return return Authorization('basic', {'username': bytes_to_wsgi(username), 'password': bytes_to_wsgi(password)}) elif auth_type == b'digest': auth_map = parse_dict_header(auth_info) for key in 'username', 'realm', 'nonce', 'uri', 'response': if key not in auth_map: return if 'qop' in auth_map: if not auth_map.get('nc') or not auth_map.get('cnonce'): return return Authorization('digest', auth_map) def parse_www_authenticate_header(value, on_update=None): """Parse an HTTP WWW-Authenticate header into a :class:`~werkzeug.datastructures.WWWAuthenticate` object. :param value: a WWW-Authenticate header to parse. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.WWWAuthenticate` object is changed. :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object. 
""" if not value: return WWWAuthenticate(on_update=on_update) try: auth_type, auth_info = value.split(None, 1) auth_type = auth_type.lower() except (ValueError, AttributeError): return WWWAuthenticate(value.strip().lower(), on_update=on_update) return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update) def parse_if_range_header(value): """Parses an if-range header which can be an etag or a date. Returns a :class:`~werkzeug.datastructures.IfRange` object. .. versionadded:: 0.7 """ if not value: return IfRange() date = parse_date(value) if date is not None: return IfRange(date=date) # drop weakness information return IfRange(unquote_etag(value)[0]) def parse_range_header(value, make_inclusive=True): """Parses a range header into a :class:`~werkzeug.datastructures.Range` object. If the header is missing or malformed `None` is returned. `ranges` is a list of ``(start, stop)`` tuples where the ranges are non-inclusive. .. versionadded:: 0.7 """ if not value or '=' not in value: return None ranges = [] last_end = 0 units, rng = value.split('=', 1) units = units.strip().lower() for item in rng.split(','): item = item.strip() if '-' not in item: return None if item.startswith('-'): if last_end < 0: return None begin = int(item) end = None last_end = -1 elif '-' in item: begin, end = item.split('-', 1) begin = int(begin) if begin < last_end or last_end < 0: return None if end: end = int(end) + 1 if begin >= end: return None else: end = None last_end = end ranges.append((begin, end)) return Range(units, ranges) def parse_content_range_header(value, on_update=None): """Parses a range header into a :class:`~werkzeug.datastructures.ContentRange` object or `None` if parsing is not possible. .. versionadded:: 0.7 :param value: a content range header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.ContentRange` object is changed. 
""" if value is None: return None try: units, rangedef = (value or '').strip().split(None, 1) except ValueError: return None if '/' not in rangedef: return None rng, length = rangedef.split('/', 1) if length == '*': length = None elif length.isdigit(): length = int(length) else: return None if rng == '*': return ContentRange(units, None, None, length, on_update=on_update) elif '-' not in rng: return None start, stop = rng.split('-', 1) try: start = int(start) stop = int(stop) + 1 except ValueError: return None if is_byte_range_valid(start, stop, length): return ContentRange(units, start, stop, length, on_update=on_update) def quote_etag(etag, weak=False): """Quote an etag. :param etag: the etag to quote. :param weak: set to `True` to tag it "weak". """ if '"' in etag: raise ValueError('invalid etag') etag = '"%s"' % etag if weak: etag = 'W/' + etag return etag def unquote_etag(etag): """Unquote a single etag: >>> unquote_etag('W/"bar"') ('bar', True) >>> unquote_etag('"bar"') ('bar', False) :param etag: the etag identifier to unquote. :return: a ``(etag, weak)`` tuple. """ if not etag: return None, None etag = etag.strip() weak = False if etag.startswith(('W/', 'w/')): weak = True etag = etag[2:] if etag[:1] == etag[-1:] == '"': etag = etag[1:-1] return etag, weak def parse_etags(value): """Parse an etag header. :param value: the tag header to parse :return: an :class:`~werkzeug.datastructures.ETags` object. """ if not value: return ETags() strong = [] weak = [] end = len(value) pos = 0 while pos < end: match = _etag_re.match(value, pos) if match is None: break is_weak, quoted, raw = match.groups() if raw == '*': return ETags(star_tag=True) elif quoted: raw = quoted if is_weak: weak.append(raw) else: strong.append(raw) pos = match.end() return ETags(strong, weak) def generate_etag(data): """Generate an etag for some data.""" return md5(data).hexdigest() def parse_date(value): """Parse one of the following date formats into a datetime object: .. 
sourcecode:: text Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format If parsing fails the return value is `None`. :param value: a string with a supported date format. :return: a :class:`datetime.datetime` object. """ if value: t = parsedate_tz(value.strip()) if t is not None: try: year = t[0] # unfortunately that function does not tell us if two digit # years were part of the string, or if they were prefixed # with two zeroes. So what we do is to assume that 69-99 # refer to 1900, and everything below to 2000 if year >= 0 and year <= 68: year += 2000 elif year >= 69 and year <= 99: year += 1900 return datetime(*((year,) + t[1:7])) - \ timedelta(seconds=t[-1] or 0) except (ValueError, OverflowError): return None def _dump_date(d, delim): """Used for `http_date` and `cookie_date`.""" if d is None: d = gmtime() elif isinstance(d, datetime): d = d.utctimetuple() elif isinstance(d, (integer_types, float)): d = gmtime(d) return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % ( ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday], d.tm_mday, delim, ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')[d.tm_mon - 1], delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec ) def cookie_date(expires=None): """Formats the time to ensure compatibility with Netscape's cookie standard. Accepts a floating point number expressed in seconds since the epoch in, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``. :param expires: If provided that date is used, otherwise the current. """ return _dump_date(expires, '-') def http_date(timestamp=None): """Formats the time to match the RFC1123 date format. 
Accepts a floating point number expressed in seconds since the epoch in, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``. :param timestamp: If provided that date is used, otherwise the current. """ return _dump_date(timestamp, ' ') def is_resource_modified(environ, etag=None, data=None, last_modified=None): """Convenience method for conditional requests. :param environ: the WSGI environment of the request to be checked. :param etag: the etag for the response for comparison. :param data: or alternatively the data of the response to automatically generate an etag using :func:`generate_etag`. :param last_modified: an optional date of the last modification. :return: `True` if the resource was modified, otherwise `False`. """ if etag is None and data is not None: etag = generate_etag(data) elif data is not None: raise TypeError('both data and etag given') if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'): return False unmodified = False if isinstance(last_modified, string_types): last_modified = parse_date(last_modified) # ensure that microsecond is zero because the HTTP spec does not transmit # that either and we might have some false positives. 
See issue #39 if last_modified is not None: last_modified = last_modified.replace(microsecond=0) modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE')) if modified_since and last_modified and last_modified <= modified_since: unmodified = True if etag: if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH')) if if_none_match: # http://tools.ietf.org/html/rfc7232#section-3.2 # "A recipient MUST use the weak comparison function when comparing # entity-tags for If-None-Match" etag, _ = unquote_etag(etag) unmodified = if_none_match.contains_weak(etag) return not unmodified def remove_entity_headers(headers, allowed=('expires', 'content-location')): """Remove all entity headers from a list or :class:`Headers` object. This operation works in-place. `Expires` and `Content-Location` headers are by default not removed. The reason for this is :rfc:`2616` section 10.3.5 which specifies some entity headers that should be sent. .. versionchanged:: 0.5 added `allowed` parameter. :param headers: a list or :class:`Headers` object. :param allowed: a list of headers that should still be allowed even though they are entity headers. """ allowed = set(x.lower() for x in allowed) headers[:] = [(key, value) for key, value in headers if not is_entity_header(key) or key.lower() in allowed] def remove_hop_by_hop_headers(headers): """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or :class:`Headers` object. This operation works in-place. .. versionadded:: 0.5 :param headers: a list or :class:`Headers` object. """ headers[:] = [(key, value) for key, value in headers if not is_hop_by_hop_header(key)] def is_entity_header(header): """Check if a header is an entity header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise. """ return header.lower() in _entity_headers def is_hop_by_hop_header(header): """Check if a header is an HTTP/1.1 "Hop-by-Hop" header. .. 
versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise. """ return header.lower() in _hop_by_hop_headers def parse_cookie(header, charset='utf-8', errors='replace', cls=None): """Parse a cookie. Either from a string or WSGI environ. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a :exc:`HTTPUnicodeError` is raised. .. versionchanged:: 0.5 This function now returns a :class:`TypeConversionDict` instead of a regular dict. The `cls` parameter was added. :param header: the header to be used to parse the cookie. Alternatively this can be a WSGI environment. :param charset: the charset for the cookie values. :param errors: the error behavior for the charset decoding. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`TypeConversionDict` is used. """ if isinstance(header, dict): header = header.get('HTTP_COOKIE', '') elif header is None: header = '' # If the value is an unicode string it's mangled through latin1. This # is done because on PEP 3333 on Python 3 all headers are assumed latin1 # which however is incorrect for cookies, which are sent in page encoding. 
# As a result we if isinstance(header, text_type): header = header.encode('latin1', 'replace') if cls is None: cls = TypeConversionDict def _parse_pairs(): for key, val in _cookie_parse_impl(header): key = to_unicode(key, charset, errors, allow_none_charset=True) val = to_unicode(val, charset, errors, allow_none_charset=True) yield try_coerce_native(key), val return cls(_parse_pairs()) def dump_cookie(key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False, charset='utf-8', sync_expires=True): """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The parameters are the same as in the cookie Morsel object in the Python standard library but it accepts unicode data, too. On Python 3 the return value of this function will be a unicode string, on Python 2 it will be a native string. In both cases the return value is usually restricted to ascii as the vast majority of values are properly escaped, but that is no guarantee. If a unicode string is returned it's tunneled through latin1 as required by PEP 3333. The return value is not ASCII safe if the key contains unicode characters. This is technically against the specification but happens in the wild. It's strongly recommended to not use non-ASCII values for the keys. :param max_age: should be a number of seconds, or `None` (default) if the cookie should last only as long as the client's browser session. Additionally `timedelta` objects are accepted, too. :param expires: should be a `datetime` object or unix timestamp. :param path: limits the cookie to a given path, per default it will span the whole domain. :param domain: Use this if you want to set a cross-domain cookie. For example, ``domain=".example.com"`` will set a cookie that is readable by the domain ``www.example.com``, ``foo.example.com`` etc. Otherwise, a cookie will only be readable by the domain that set it. 
:param secure: The cookie will only be available via HTTPS :param httponly: disallow JavaScript to access the cookie. This is an extension to the cookie standard and probably not supported by all browsers. :param charset: the encoding for unicode values. :param sync_expires: automatically set expires if max_age is defined but expires not. """ key = to_bytes(key, charset) value = to_bytes(value, charset) if path is not None: path = iri_to_uri(path, charset) domain = _make_cookie_domain(domain) if isinstance(max_age, timedelta): max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds if expires is not None: if not isinstance(expires, string_types): expires = cookie_date(expires) elif max_age is not None and sync_expires: expires = to_bytes(cookie_date(time() + max_age)) buf = [key + b'=' + _cookie_quote(value)] # XXX: In theory all of these parameters that are not marked with `None` # should be quoted. Because stdlib did not quote it before I did not # want to introduce quoting there now. for k, v, q in ((b'Domain', domain, True), (b'Expires', expires, False,), (b'Max-Age', max_age, False), (b'Secure', secure, None), (b'HttpOnly', httponly, None), (b'Path', path, False)): if q is None: if v: buf.append(k) continue if v is None: continue tmp = bytearray(k) if not isinstance(v, (bytes, bytearray)): v = to_bytes(text_type(v), charset) if q: v = _cookie_quote(v) tmp += b'=' + v buf.append(bytes(tmp)) # The return value will be an incorrectly encoded latin1 header on # Python 3 for consistency with the headers object and a bytestring # on Python 2 because that's how the API makes more sense. rv = b'; '.join(buf) if not PY2: rv = rv.decode('latin1') return rv def is_byte_range_valid(start, stop, length): """Checks if a given byte content range is valid for the given length. .. 
versionadded:: 0.7 """ if (start is None) != (stop is None): return False elif start is None: return length is None or length >= 0 elif length is None: return 0 <= start < stop elif start >= stop: return False return 0 <= start < length # circular dependency fun from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \ WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \ RequestCacheControl # DEPRECATED # backwards compatible imports from werkzeug.datastructures import ( # noqa MIMEAccept, CharsetAccept, LanguageAccept, Headers ) from werkzeug.urls import iri_to_uri
agpl-3.0
ravibhure/ansible
lib/ansible/modules/cloud/rackspace/rax_mon_entity.py
45
5780
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: rax_mon_entity short_description: Create or delete a Rackspace Cloud Monitoring entity description: - Create or delete a Rackspace Cloud Monitoring entity, which represents a device to monitor. Entities associate checks and alarms with a target system and provide a convenient, centralized place to store IP addresses. Rackspace monitoring module flow | *rax_mon_entity* -> rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm version_added: "2.0" options: label: description: - Defines a name for this entity. Must be a non-empty string between 1 and 255 characters long. required: true state: description: - Ensure that an entity with this C(name) exists or does not exist. choices: ["present", "absent"] agent_id: description: - Rackspace monitoring agent on the target device to which this entity is bound. Necessary to collect C(agent.) rax_mon_checks against this entity. named_ip_addresses: description: - Hash of IP addresses that may be referenced by name by rax_mon_checks added to this entity. Must be a dictionary of with keys that are names between 1 and 64 characters long, and values that are valid IPv4 or IPv6 addresses. metadata: description: - Hash of arbitrary C(name), C(value) pairs that are passed to associated rax_mon_alarms. Names and values must all be between 1 and 255 characters long. 
author: Ash Wilson extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' - name: Entity example gather_facts: False hosts: local connection: local tasks: - name: Ensure an entity exists rax_mon_entity: credentials: ~/.rax_pub state: present label: my_entity named_ip_addresses: web_box: 192.0.2.4 db_box: 192.0.2.5 meta: hurf: durf register: the_entity ''' try: import pyrax HAS_PYRAX = True except ImportError: HAS_PYRAX = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata): if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') changed = False cm = pyrax.cloud_monitoring if not cm: module.fail_json(msg='Failed to instantiate client. This typically ' 'indicates an invalid region or an incorrectly ' 'capitalized region name.') existing = [] for entity in cm.list_entities(): if label == entity.label: existing.append(entity) entity = None if existing: entity = existing[0] if state == 'present': should_update = False should_delete = False should_create = False if len(existing) > 1: module.fail_json(msg='%s existing entities have the label %s.' % (len(existing), label)) if entity: if named_ip_addresses and named_ip_addresses != entity.ip_addresses: should_delete = should_create = True # Change an existing Entity, unless there's nothing to do. should_update = agent_id and agent_id != entity.agent_id or \ (metadata and metadata != entity.metadata) if should_update and not should_delete: entity.update(agent_id, metadata) changed = True if should_delete: entity.delete() else: should_create = True if should_create: # Create a new Entity. entity = cm.create_entity(label=label, agent=agent_id, ip_addresses=named_ip_addresses, metadata=metadata) changed = True else: # Delete the existing Entities. 
for e in existing: e.delete() changed = True if entity: entity_dict = { "id": entity.id, "name": entity.name, "agent_id": entity.agent_id, } module.exit_json(changed=changed, entity=entity_dict) else: module.exit_json(changed=changed) def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( state=dict(default='present', choices=['present', 'absent']), label=dict(required=True), agent_id=dict(), named_ip_addresses=dict(type='dict', default={}), metadata=dict(type='dict', default={}) ) ) module = AnsibleModule( argument_spec=argument_spec, required_together=rax_required_together() ) if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') state = module.params.get('state') label = module.params.get('label') agent_id = module.params.get('agent_id') named_ip_addresses = module.params.get('named_ip_addresses') metadata = module.params.get('metadata') setup_rax_module(module, pyrax) cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) if __name__ == '__main__': main()
gpl-3.0
eneldoserrata/marcos_openerp
addons/document/report/__init__.py
444
1068
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import document_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
pranner/CMPUT410-Lab6-Django
v1/lib/python2.7/site-packages/django/views/i18n.py
68
11000
import importlib
import json
import os
import gettext as gettext_module

from django import http
from django.apps import apps
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language, LANGUAGE_SESSION_KEY
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six


def set_language(request):
    """
    Redirect to a given url while setting the chosen language in the
    session or cookie. The url and the language code need to be
    specified in the request parameters.

    Since this view changes how the user will see the rest of the site, it must
    only be accessed as a POST request. If called as a GET request, it will
    redirect to the page in the request (the 'next' parameter) without changing
    any state.
    """
    next = request.POST.get('next', request.GET.get('next'))
    # Refuse unsafe redirect targets; fall back to the referer, then to '/'.
    if not is_safe_url(url=next, host=request.get_host()):
        next = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next, host=request.get_host()):
            next = '/'
    response = http.HttpResponseRedirect(next)
    if request.method == 'POST':
        lang_code = request.POST.get('language', None)
        if lang_code and check_for_language(lang_code):
            # Prefer the session when available; otherwise persist in a cookie.
            if hasattr(request, 'session'):
                request.session[LANGUAGE_SESSION_KEY] = lang_code
            else:
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
                                    max_age=settings.LANGUAGE_COOKIE_AGE,
                                    path=settings.LANGUAGE_COOKIE_PATH,
                                    domain=settings.LANGUAGE_COOKIE_DOMAIN)
    return response


def get_formats():
    """
    Returns all formats strings required for i18n to work
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    result = {}
    # Later modules win: iterate format modules in reverse so settings-level
    # values are overridden by locale-specific format modules.
    for module in [settings] + get_format_modules(reverse=True):
        for attr in FORMAT_SETTINGS:
            result[attr] = get_format(attr)
    formats = {}
    # Only pass strings/ints (and lists thereof) to JavaScript.
    for k, v in result.items():
        if isinstance(v, (six.string_types, int)):
            formats[k] = smart_text(v)
        elif isinstance(v, (tuple, list)):
            formats[k] = [smart_text(value) for value in v]
    return formats


js_catalog_template = r"""
{% autoescape off %}
(function (globals) {

  var django = globals.django || (globals.django = {});

  {% if plural %}
  django.pluralidx = function (n) {
    var v={{ plural }};
    if (typeof(v) == 'boolean') {
      return v ? 1 : 0;
    } else {
      return v;
    }
  };
  {% else %}
  django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
  {% endif %}

  {% if catalog_str %}
  /* gettext library */

  django.catalog = {{ catalog_str }};

  django.gettext = function (msgid) {
    var value = django.catalog[msgid];
    if (typeof(value) == 'undefined') {
      return msgid;
    } else {
      return (typeof(value) == 'string') ? value : value[0];
    }
  };

  django.ngettext = function (singular, plural, count) {
    var value = django.catalog[singular];
    if (typeof(value) == 'undefined') {
      return (count == 1) ? singular : plural;
    } else {
      return value[django.pluralidx(count)];
    }
  };

  django.gettext_noop = function (msgid) { return msgid; };

  django.pgettext = function (context, msgid) {
    var value = django.gettext(context + '\x04' + msgid);
    if (value.indexOf('\x04') != -1) {
      value = msgid;
    }
    return value;
  };

  django.npgettext = function (context, singular, plural, count) {
    var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
    if (value.indexOf('\x04') != -1) {
      value = django.ngettext(singular, plural, count);
    }
    return value;
  };
  {% else %}
  /* gettext identity library */

  django.gettext = function (msgid) { return msgid; };
  django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
  django.gettext_noop = function (msgid) { return msgid; };
  django.pgettext = function (context, msgid) { return msgid; };
  django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
  {% endif %}

  django.interpolate = function (fmt, obj, named) {
    if (named) {
      return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
    } else {
      return fmt.replace(/%s/g, function(match){return String(obj.shift())});
    }
  };


  /* formatting library */

  django.formats = {{ formats_str }};

  django.get_format = function (format_type) {
    var value = django.formats[format_type];
    if (typeof(value) == 'undefined') {
      return format_type;
    } else {
      return value;
    }
  };

  /* add to global namespace */
  globals.pluralidx = django.pluralidx;
  globals.gettext = django.gettext;
  globals.ngettext = django.ngettext;
  globals.gettext_noop = django.gettext_noop;
  globals.pgettext = django.pgettext;
  globals.npgettext = django.npgettext;
  globals.interpolate = django.interpolate;
  globals.get_format = django.get_format;

}(this));
{% endautoescape %}
"""


def render_javascript_catalog(catalog=None, plural=None):
    """Render ``js_catalog_template`` with the given catalog/plural data
    and return it as a ``text/javascript`` HTTP response."""
    template = Template(js_catalog_template)
    indent = lambda s: s.replace('\n', '\n  ')
    context = Context({
        'catalog_str': indent(json.dumps(
            catalog, sort_keys=True, indent=2)) if catalog else None,
        'formats_str': indent(json.dumps(
            get_formats(), sort_keys=True, indent=2)),
        'plural': plural,
    })

    return http.HttpResponse(template.render(context), 'text/javascript')


def get_javascript_catalog(locale, domain, packages):
    """Build the (catalog, plural) pair for *locale* by merging the gettext
    catalogs of the given *packages* (plus LOCALE_PATHS), layering:
    'en' defaults -> settings.LANGUAGE_CODE -> the selected locale."""
    default_locale = to_locale(settings.LANGUAGE_CODE)
    app_configs = apps.get_app_configs()
    allowable_packages = set(app_config.name for app_config in app_configs)
    allowable_packages.add('django.conf')
    packages = [p for p in packages if p in allowable_packages]
    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    plural = None
    if '' in t:
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':', 1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
    pdict = {}
    maxcnts = {}
    catalog = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, six.string_types):
            catalog[k] = v
        elif isinstance(k, tuple):
            msgid = k[0]
            cnt = k[1]
            maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
            pdict.setdefault(msgid, {})[cnt] = v
        else:
            raise TypeError(k)
    for k, v in pdict.items():
        # BUG FIX: previously used `maxcnts[msgid]`, where `msgid` leaked from
        # the last iteration of the loop above, so every plural list was sized
        # by an arbitrary msgid's max count. Size each list by its own key.
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
    return catalog, plural


def null_javascript_catalog(request, domain=None, packages=None):
    """
    Returns "identity" versions of the JavaScript i18n functions -- i.e.,
    versions that don't actually do anything.
    """
    return render_javascript_catalog()


def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Returns the selected language catalog as a javascript library.

    Receives the list of packages to check for translations in the
    packages parameter either from an infodict or as a +-delimited
    string from the request. Default is 'django.conf'.

    Additionally you can override the gettext domain for this view,
    but usually you don't want to do that, as JavaScript messages
    go to the djangojs domain. But this might be needed if you
    deliver your JavaScript source from Django templates.
    """
    locale = to_locale(get_language())

    if request.GET and 'language' in request.GET:
        if check_for_language(request.GET['language']):
            locale = to_locale(request.GET['language'])

    if packages is None:
        packages = ['django.conf']
    if isinstance(packages, six.string_types):
        packages = packages.split('+')

    catalog, plural = get_javascript_catalog(locale, domain, packages)
    return render_javascript_catalog(catalog, plural)
apache-2.0
waelrash1/or-tools
examples/python/linear_programming.py
19
4869
# Copyright 2010-2014 Google
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Linear programming examples that show how to use the APIs."""

from google.apputils import app
from ortools.linear_solver import linear_solver2_pb2
from ortools.linear_solver import pywraplp


def RunLinearExampleNaturalLanguageAPI(optimization_problem_type):
  """Example of simple linear program with natural language API.

  Args:
    optimization_problem_type: a pywraplp.Solver problem-type constant
      selecting the underlying LP backend.
  """
  solver = pywraplp.Solver('RunLinearExampleNaturalLanguageAPI',
                           optimization_problem_type)
  infinity = solver.infinity()
  # x1, x2 and x3 are continuous non-negative variables.
  x1 = solver.NumVar(0.0, infinity, 'x1')
  x2 = solver.NumVar(0.0, infinity, 'x2')
  x3 = solver.NumVar(0.0, infinity, 'x3')
  solver.Maximize(10 * x1 + 6 * x2 + 4 * x3)
  c0 = solver.Add(10 * x1 + 4 * x2 + 5 * x3 <= 600, 'ConstraintName0')
  c1 = solver.Add(2 * x1 + 2 * x2 + 6 * x3 <= 300)
  sum_of_vars = sum([x1, x2, x3])
  c2 = solver.Add(sum_of_vars <= 100.0, 'OtherConstraintName')

  SolveAndPrint(solver, [x1, x2, x3], [c0, c1, c2])
  # Print a linear expression's solution value.
  # BUG FIX (portability): `print` statements converted to parenthesized
  # single-expression form throughout this file; identical output on
  # Python 2 and parseable on Python 3.
  print('Sum of vars: %s = %s' % (sum_of_vars, sum_of_vars.solution_value()))


def RunLinearExampleCppStyleAPI(optimization_problem_type):
  """Example of simple linear program with the C++ style API.

  Builds the same model as the natural-language example, but via explicit
  Objective/Constraint coefficient calls.
  """
  solver = pywraplp.Solver('RunLinearExampleCppStyle',
                           optimization_problem_type)
  infinity = solver.infinity()
  # x1, x2 and x3 are continuous non-negative variables.
  x1 = solver.NumVar(0.0, infinity, 'x1')
  x2 = solver.NumVar(0.0, infinity, 'x2')
  x3 = solver.NumVar(0.0, infinity, 'x3')

  # Maximize 10 * x1 + 6 * x2 + 4 * x3.
  objective = solver.Objective()
  objective.SetCoefficient(x1, 10)
  objective.SetCoefficient(x2, 6)
  objective.SetCoefficient(x3, 4)
  objective.SetMaximization()

  # x1 + x2 + x3 <= 100.
  c0 = solver.Constraint(-infinity, 100.0, 'c0')
  c0.SetCoefficient(x1, 1)
  c0.SetCoefficient(x2, 1)
  c0.SetCoefficient(x3, 1)

  # 10 * x1 + 4 * x2 + 5 * x3 <= 600.
  c1 = solver.Constraint(-infinity, 600.0, 'c1')
  c1.SetCoefficient(x1, 10)
  c1.SetCoefficient(x2, 4)
  c1.SetCoefficient(x3, 5)

  # 2 * x1 + 2 * x2 + 6 * x3 <= 300.
  c2 = solver.Constraint(-infinity, 300.0, 'c2')
  c2.SetCoefficient(x1, 2)
  c2.SetCoefficient(x2, 2)
  c2.SetCoefficient(x3, 6)

  SolveAndPrint(solver, [x1, x2, x3], [c0, c1, c2])


def SolveAndPrint(solver, variable_list, constraint_list):
  """Solve the problem and print the solution.

  Args:
    solver: the pywraplp.Solver holding the model to solve.
    variable_list: variables whose values (and reduced costs) to print.
    constraint_list: constraints whose dual values/activities to print.
  """
  print('Number of variables = %d' % solver.NumVariables())
  print('Number of constraints = %d' % solver.NumConstraints())

  result_status = solver.Solve()

  # The problem has an optimal solution.
  assert result_status == pywraplp.Solver.OPTIMAL

  # The solution looks legit (when using solvers others than
  # GLOP_LINEAR_PROGRAMMING, verifying the solution is highly recommended!).
  assert solver.VerifySolution(1e-7, True)

  print('Problem solved in %f milliseconds' % solver.wall_time())

  # The objective value of the solution.
  print('Optimal objective value = %f' % solver.Objective().Value())

  # The value of each variable in the solution.
  for variable in variable_list:
    print('%s = %f' % (variable.name(), variable.solution_value()))

  print('Advanced usage:')
  print('Problem solved in %d iterations' % solver.iterations())
  for variable in variable_list:
    print('%s: reduced cost = %f' % (variable.name(), variable.reduced_cost()))
  for i, constraint in enumerate(constraint_list):
    print('constraint %d: dual value = %f\n'
          '               activity = %f' %
          (i, constraint.dual_value(), constraint.activity()))


def main(unused_argv):
  """Run both example APIs against every supported LP solver backend."""
  all_names_and_problem_types = (
      linear_solver2_pb2.MPModelRequest.SolverType.items())
  for name, problem_type in all_names_and_problem_types:
    # Skip non-LP problem types.
    if not name.endswith('LINEAR_PROGRAMMING'):
      continue
    # Skip problem types that aren't supported by the current binary.
    if not pywraplp.Solver.SupportsProblemType(problem_type):
      continue
    print('\n------ Linear programming example with %s ------' % name)

    print('\n*** Natural language API ***')
    RunLinearExampleNaturalLanguageAPI(problem_type)

    print('\n*** C++ style API ***')
    RunLinearExampleCppStyleAPI(problem_type)


if __name__ == '__main__':
  app.run()
apache-2.0
rrauenza/factory_boy
factory/fuzzy.py
1
9786
# -*- coding: utf-8 -*- # Copyright: See the LICENSE file. """Additional declarations for "fuzzy" attribute definitions.""" from __future__ import unicode_literals import datetime import decimal import string import warnings from . import compat from . import declarations from . import random random_seed_warning = ( "Setting a specific random seed for {} can still have varying results " "unless you also set a specific end date. For details and potential solutions " "see https://github.com/FactoryBoy/factory_boy/issues/331" ) def get_random_state(): warnings.warn( "`factory.fuzzy.get_random_state` is deprecated. " "You should use `factory.random.get_random_state` instead", DeprecationWarning, stacklevel=2 ) return random.get_random_state() def set_random_state(state): warnings.warn( "`factory.fuzzy.set_random_state` is deprecated. " "You should use `factory.random.set_random_state` instead", DeprecationWarning, stacklevel=2 ) return random.set_random_state(state) def reseed_random(seed): warnings.warn( "`factory.fuzzy.set_random_state` is deprecated. " "You should use `factory.random.reseed_random` instead", DeprecationWarning, stacklevel=2 ) random.reseed_random(seed) class BaseFuzzyAttribute(declarations.BaseDeclaration): """Base class for fuzzy attributes. Custom fuzzers should override the `fuzz()` method. """ def fuzz(self): # pragma: no cover raise NotImplementedError() def evaluate(self, instance, step, extra): return self.fuzz() class FuzzyAttribute(BaseFuzzyAttribute): """Similar to LazyAttribute, but yields random values. Attributes: function (callable): function taking no parameters and returning a random value. """ def __init__(self, fuzzer, **kwargs): super(FuzzyAttribute, self).__init__(**kwargs) self.fuzzer = fuzzer def fuzz(self): return self.fuzzer() class FuzzyText(BaseFuzzyAttribute): """Random string with a given prefix. Generates a random string of the given length from chosen chars. 
If a prefix or a suffix are supplied, they will be prepended / appended to the generated string. Args: prefix (text): An optional prefix to prepend to the random string length (int): the length of the random part suffix (text): An optional suffix to append to the random string chars (str list): the chars to choose from Useful for generating unique attributes where the exact value is not important. """ def __init__(self, prefix='', length=12, suffix='', chars=string.ascii_letters, **kwargs): super(FuzzyText, self).__init__(**kwargs) self.prefix = prefix self.suffix = suffix self.length = length self.chars = tuple(chars) # Unroll iterators def fuzz(self): chars = [random.randgen.choice(self.chars) for _i in range(self.length)] return self.prefix + ''.join(chars) + self.suffix class FuzzyChoice(BaseFuzzyAttribute): """Handles fuzzy choice of an attribute. Args: choices (iterable): An iterable yielding options; will only be unrolled on the first call. """ def __init__(self, choices, **kwargs): self.choices = None self.choices_generator = choices super(FuzzyChoice, self).__init__(**kwargs) def fuzz(self): if self.choices is None: self.choices = list(self.choices_generator) return random.randgen.choice(self.choices) class FuzzyInteger(BaseFuzzyAttribute): """Random integer within a given range.""" def __init__(self, low, high=None, step=1, **kwargs): if high is None: high = low low = 0 self.low = low self.high = high self.step = step super(FuzzyInteger, self).__init__(**kwargs) def fuzz(self): return random.randgen.randrange(self.low, self.high + 1, self.step) class FuzzyDecimal(BaseFuzzyAttribute): """Random decimal within a given range.""" def __init__(self, low, high=None, precision=2, **kwargs): if high is None: high = low low = 0.0 self.low = low self.high = high self.precision = precision super(FuzzyDecimal, self).__init__(**kwargs) def fuzz(self): base = decimal.Decimal(str(random.randgen.uniform(self.low, self.high))) return base.quantize(decimal.Decimal(10) ** 
-self.precision) class FuzzyFloat(BaseFuzzyAttribute): """Random float within a given range.""" def __init__(self, low, high=None, **kwargs): if high is None: high = low low = 0 self.low = low self.high = high super(FuzzyFloat, self).__init__(**kwargs) def fuzz(self): return random.randgen.uniform(self.low, self.high) class FuzzyDate(BaseFuzzyAttribute): """Random date within a given date range.""" def __init__(self, start_date, end_date=None, **kwargs): super(FuzzyDate, self).__init__(**kwargs) if end_date is None: if random.randgen.state_set: cls_name = self.__class__.__name__ warnings.warn(random_seed_warning.format(cls_name), stacklevel=2) end_date = datetime.date.today() if start_date > end_date: raise ValueError( "FuzzyDate boundaries should have start <= end; got %r > %r." % (start_date, end_date)) self.start_date = start_date.toordinal() self.end_date = end_date.toordinal() def fuzz(self): return datetime.date.fromordinal(random.randgen.randint(self.start_date, self.end_date)) class BaseFuzzyDateTime(BaseFuzzyAttribute): """Base class for fuzzy datetime-related attributes. Provides fuzz() computation, forcing year/month/day/hour/... 
""" def _check_bounds(self, start_dt, end_dt): if start_dt > end_dt: raise ValueError( """%s boundaries should have start <= end, got %r > %r""" % ( self.__class__.__name__, start_dt, end_dt)) def _now(self): raise NotImplementedError() def __init__(self, start_dt, end_dt=None, force_year=None, force_month=None, force_day=None, force_hour=None, force_minute=None, force_second=None, force_microsecond=None, **kwargs): super(BaseFuzzyDateTime, self).__init__(**kwargs) if end_dt is None: if random.randgen.state_set: cls_name = self.__class__.__name__ warnings.warn(random_seed_warning.format(cls_name), stacklevel=2) end_dt = self._now() self._check_bounds(start_dt, end_dt) self.start_dt = start_dt self.end_dt = end_dt self.force_year = force_year self.force_month = force_month self.force_day = force_day self.force_hour = force_hour self.force_minute = force_minute self.force_second = force_second self.force_microsecond = force_microsecond def fuzz(self): delta = self.end_dt - self.start_dt microseconds = delta.microseconds + 1000000 * (delta.seconds + (delta.days * 86400)) offset = random.randgen.randint(0, microseconds) result = self.start_dt + datetime.timedelta(microseconds=offset) if self.force_year is not None: result = result.replace(year=self.force_year) if self.force_month is not None: result = result.replace(month=self.force_month) if self.force_day is not None: result = result.replace(day=self.force_day) if self.force_hour is not None: result = result.replace(hour=self.force_hour) if self.force_minute is not None: result = result.replace(minute=self.force_minute) if self.force_second is not None: result = result.replace(second=self.force_second) if self.force_microsecond is not None: result = result.replace(microsecond=self.force_microsecond) return result class FuzzyNaiveDateTime(BaseFuzzyDateTime): """Random naive datetime within a given range. If no upper bound is given, will default to datetime.datetime.utcnow(). 
""" def _now(self): return datetime.datetime.now() def _check_bounds(self, start_dt, end_dt): if start_dt.tzinfo is not None: raise ValueError( "FuzzyNaiveDateTime only handles naive datetimes, got start=%r" % start_dt) if end_dt.tzinfo is not None: raise ValueError( "FuzzyNaiveDateTime only handles naive datetimes, got end=%r" % end_dt) super(FuzzyNaiveDateTime, self)._check_bounds(start_dt, end_dt) class FuzzyDateTime(BaseFuzzyDateTime): """Random timezone-aware datetime within a given range. If no upper bound is given, will default to datetime.datetime.now() If no timezone is given, will default to utc. """ def _now(self): return datetime.datetime.now(tz=compat.UTC) def _check_bounds(self, start_dt, end_dt): if start_dt.tzinfo is None: raise ValueError( "FuzzyDateTime requires timezone-aware datetimes, got start=%r" % start_dt) if end_dt.tzinfo is None: raise ValueError( "FuzzyDateTime requires timezone-aware datetimes, got end=%r" % end_dt) super(FuzzyDateTime, self)._check_bounds(start_dt, end_dt)
mit