repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
openweave/happy | bin/happy-node-edit.py | 1 | 1893 | #!/usr/bin/env python3
#
# Copyright (c) 2021 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# A Happy command line utility to modify an existing virtual node.
#
# The command is executed by instantiating and running HappyNodeEdit class.
#
from __future__ import absolute_import
from __future__ import print_function
import getopt
import sys
import happy.HappyNodeEdit
from happy.Utils import *
if __name__ == "__main__":
    options = happy.HappyNodeEdit.option()

    # Short options must mirror the long options handled in the loop below.
    # The original spec "hi:n:qasl" also accepted -a, -s and -l, which were
    # never handled and crashed with an AssertionError; dropping them makes
    # getopt raise a normal GetoptError -> usage message instead.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:n:q",
                                   ["help", "id=", "new=", "quiet"])
    except getopt.GetoptError as err:
        print(happy.HappyNodeEdit.HappyNodeEdit.__doc__)
        print(hred(str(err)))
        sys.exit(hred("%s: Failed to parse arguments." % (__file__)))

    for o, a in opts:
        if o in ("-h", "--help"):
            print(happy.HappyNodeEdit.HappyNodeEdit.__doc__)
            sys.exit(0)
        elif o in ("-q", "--quiet"):
            options["quiet"] = True
        elif o in ("-i", "--id"):
            options["node_id"] = a
        elif o in ("-n", "--new"):
            options["new_node_id"] = a
        else:
            # Unreachable as long as the getopt spec matches the branches above.
            assert False, "unhandled option"

    cmd = happy.HappyNodeEdit.HappyNodeEdit(options)
    cmd.start()
| apache-2.0 |
LEDS/millennium-falcon | millenium_falcon_old/gestao/models.py | 1 | 2775 | from __future__ import unicode_literals
from django.db import models
class Parceiro(models.Model):
    # "Parceiro" (partner) is really the client, under a nicer name.
    nome_parceiro = models.CharField(max_length=255)
    nome_representante = models.CharField(max_length=255)
    telefone = models.CharField(max_length=255)
    email = models.CharField(max_length=255, null=True)
    cnpj = models.CharField(max_length=255, null=True)
    def __str__(self):
        return self.nome_parceiro
class Projeto(models.Model):
    # A project run with one partner and many participating people.
    nome_projeto = models.CharField(max_length=255)
    data_inicio = models.DateField()
    data_fim = models.DateField(blank=True, null=True)  # open-ended while active
    parceiro_envolvido = models.ForeignKey(Parceiro, on_delete=models.CASCADE, null=True)
    pessoas_envolvidas = models.ManyToManyField("Pessoa")
    def __str__(self):
        return self.nome_projeto
class Fomento(models.Model):
    # A funding source, tied to the partner that provides it.
    nome = models.CharField(max_length=255)
    parceiro = models.ForeignKey("Parceiro", on_delete=models.CASCADE)
    def __str__(self):
        return self.nome
class Bolsa(models.Model):
    # A scholarship/grant funded by a Fomento and attached to a Projeto.
    nome_bolsa = models.CharField(max_length=255, blank=True)
    fomento = models.ForeignKey("Fomento", on_delete=models.CASCADE)
    valor_bolsa = models.IntegerField(default=0)  # presumably a monetary amount -- confirm unit/currency
    projeto = models.ForeignKey("Projeto", on_delete=models.CASCADE)
    def __str__(self):
        return self.nome_bolsa
class Alocacao_bolsa(models.Model):
    # Time window during which a Bolsa is allocated to a Pessoa.
    # NOTE(review): these ForeignKeys omit on_delete (legacy Django defaulted
    # to CASCADE; it is mandatory from Django 2.0 on) -- confirm target version.
    bolsa = models.ForeignKey("Bolsa", null=True)
    inicio = models.DateField(null=True)
    fim = models.DateField(null=True)
    pessoa = models.ForeignKey("Pessoa", null=True)
class Pessoa(models.Model):
    # A person in the lab; base class for Aluno/Professor/Servidor
    # (multi-table inheritance).
    nome = models.CharField(max_length=255)
    papel = models.CharField(max_length=255)  # role, free text
    entrada_leds = models.DateField()
    saida_leds = models.DateField(blank=True, null=True)  # null while still a member
    projeto_envolvido = models.ManyToManyField(Projeto)
    email = models.CharField(max_length=255, null=True)
    telefone = models.CharField(max_length=255, null=True)
    # NOTE(review): a direct Bolsa FK here overlaps with Alocacao_bolsa,
    # which also links Pessoa and Bolsa -- confirm which one is authoritative.
    bolsa = models.ForeignKey("Bolsa",on_delete=models.CASCADE, null=True)
    def __str__(self):
        return self.nome
class Aluno(Pessoa):
    # Student; periods are academic terms.
    periodo_atual = models.IntegerField()
    periodo_saida = models.IntegerField(blank=True, null=True)
    data_nascimento = models.DateField(null=True)
    lattes = models.URLField(max_length=255, null=True)  # Lattes CV URL
    cpf = models.CharField(max_length=11,default="")  # Brazilian tax id, digits only
    habilidades = models.ManyToManyField("Habilidade")
class Professor(Pessoa):
    # Faculty member; setor_vinculado is the affiliated department/sector.
    setor_vinculado = models.CharField(max_length=255, null=True)
class Servidor(Pessoa):
    # Staff member; mirrors Professor's sector affiliation field.
    setor_vinculado = models.CharField(max_length=255, null=True)
class Habilidade(models.Model):
    # A unique skill tag attached to students.
    skill = models.CharField(max_length=255, unique=True)
    def __str__(self):
        return self.skill
| apache-2.0 |
rjsproxy/wagtail | wagtail/wagtailusers/views/groups.py | 11 | 4499 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.models import Group
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin.utils import permission_required, any_permission_required
from wagtail.wagtailusers.forms import GroupForm, GroupPagePermissionFormSet
@any_permission_required('auth.add_group', 'auth.change_group', 'auth.delete_group')
@vary_on_headers('X-Requested-With')
def index(request):
    """List groups with optional name search, ordering and pagination.

    AJAX requests receive only the results partial; normal requests get
    the full index page.
    """
    q = None
    p = request.GET.get("p", 1)
    is_searching = False
    if 'q' in request.GET:
        form = SearchForm(request.GET, placeholder=_("Search groups"))
        if form.is_valid():
            q = form.cleaned_data['q']
            is_searching = True
            groups = Group.objects.filter(name__icontains=q)
    else:
        form = SearchForm(placeholder=_("Search groups"))
    # Fall back to the full queryset when no (valid) search was submitted.
    if not is_searching:
        groups = Group.objects
    groups = groups.order_by('name')
    if 'ordering' in request.GET:
        ordering = request.GET['ordering']
        # NOTE(review): 'username' looks copied from the users view; Group
        # has no username field, so that branch never reorders -- confirm.
        if ordering in ['name', 'username']:
            if ordering != 'name':
                groups = groups.order_by(ordering)
    else:
        ordering = 'name'
    # 20 groups per page; invalid/out-of-range page numbers are clamped.
    paginator = Paginator(groups, 20)
    try:
        groups = paginator.page(p)
    except PageNotAnInteger:
        groups = paginator.page(1)
    except EmptyPage:
        groups = paginator.page(paginator.num_pages)
    if request.is_ajax():
        return render(request, "wagtailusers/groups/results.html", {
            'groups': groups,
            'is_searching': is_searching,
            'query_string': q,
            'ordering': ordering,
        })
    else:
        return render(request, "wagtailusers/groups/index.html", {
            'search_form': form,
            'groups': groups,
            'is_searching': is_searching,
            'ordering': ordering,
            'query_string': q,
        })
@permission_required('auth.add_group')
def create(request):
    """Create a group and its page-permission formset.

    GET renders empty forms; a valid POST saves and redirects to the
    group index, an invalid POST re-renders with error messages.
    """
    # Check the request method, not the truthiness of request.POST: a POST
    # whose body happens to be empty is still a POST and must be validated
    # (showing field errors) rather than re-rendered as if it were a GET.
    if request.method == 'POST':
        form = GroupForm(request.POST)
        formset = GroupPagePermissionFormSet(request.POST)
        if form.is_valid() and formset.is_valid():
            group = form.save()
            # Attach the permission rows to the freshly created group
            # before saving them.
            formset.instance = group
            formset.save()
            messages.success(request, _("Group '{0}' created.").format(group), buttons=[
                messages.button(reverse('wagtailusers_groups:edit', args=(group.id,)), _('Edit'))
            ])
            return redirect('wagtailusers_groups:index')
        else:
            messages.error(request, _("The group could not be created due to errors."))
    else:
        form = GroupForm()
        formset = GroupPagePermissionFormSet()
    return render(request, 'wagtailusers/groups/create.html', {
        'form': form,
        'formset': formset,
    })
@permission_required('auth.change_group')
def edit(request, group_id):
    """Edit an existing group and its page-permission formset.

    Raises Http404 when no group with ``group_id`` exists.
    """
    group = get_object_or_404(Group, id=group_id)
    # Check the request method, not the truthiness of request.POST: a POST
    # with an empty body must still run validation instead of being treated
    # as a GET.
    if request.method == 'POST':
        form = GroupForm(request.POST, instance=group)
        formset = GroupPagePermissionFormSet(request.POST, instance=group)
        if form.is_valid() and formset.is_valid():
            group = form.save()
            formset.save()
            messages.success(request, _("Group '{0}' updated.").format(group), buttons=[
                messages.button(reverse('wagtailusers_groups:edit', args=(group.id,)), _('Edit'))
            ])
            return redirect('wagtailusers_groups:index')
        else:
            messages.error(request, _("The group could not be saved due to errors."))
    else:
        form = GroupForm(instance=group)
        formset = GroupPagePermissionFormSet(instance=group)
    return render(request, 'wagtailusers/groups/edit.html', {
        'group': group,
        'form': form,
        'formset': formset,
    })
@permission_required('auth.delete_group')
def delete(request, group_id):
    """Confirm (GET) and perform (POST) deletion of a group."""
    group = get_object_or_404(Group, id=group_id)
    # Deletion is triggered by the POST *method*; testing "if request.POST"
    # would skip deletion for a POST whose body is empty.
    if request.method == 'POST':
        group.delete()
        messages.success(request, _("Group '{0}' deleted.").format(group.name))
        return redirect('wagtailusers_groups:index')
    return render(request, "wagtailusers/groups/confirm_delete.html", {
        'group': group,
    })
| bsd-3-clause |
huanghao/mic | mic/3rdparty/pykickstart/handlers/fc5.py | 10 | 1066 | #
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.version import *
class FC5Handler(BaseHandler):
    # Kickstart command handler bound to the Fedora Core 5 grammar version;
    # all behaviour comes from BaseHandler keyed on this version constant.
    version = FC5
| gpl-2.0 |
HugoMMRabson/fonsa | src/my/installer/copyintomaple/die.py | 1 | 1324 | #!/usr/bin/python3
'''
Created on Aug 18, 2019
@author: johnrabsonjr
'''
# potential colors are WHITE, PERIWINKLE, YELLOW, GREEN, VIOLET, BLUE, RED, BLACK
import os
from time import sleep
from atxraspisettings import *
os.system('systemctl stop atxraspihello') # Stop the shutdownirq.py script daemon
setup_gpio()
# Shutdown sequence: step the status LED BLUE -> YELLOW -> RED -> BLACK while
# repeatedly syncing disks, then hand control to the ATXRaspi power board.
MyLEDController.set_default_led_color(BLUE) # MyLEDController.set_this_group_as_default([(WHITE, 0.1), (PERIWINKLE, 0.1), (VIOLET, 0.2), (YELLOW, 0.2), (RED, 0.2)])
GPIO.output(ATXRASPI_SOFTBTN, 1) # Tell the ATXRaspi board to initiate a shutdown
os.system('sync;sync;sync'); sleep(1)
os.system('sync;sync;sync'); sleep(1)
MyLEDController.set_default_led_color(YELLOW) # MyLEDController.set_this_group_as_default([(VIOLET, 0.2), (YELLOW, 0.2), (RED, 0.2)])
GPIO.output(BOOTOK, 0) # Tell the ATXRaspi board, it's OK to shut down (the board won't shut me down until BOOTOK is LOW)
os.system('sync;sync;sync'); sleep(1)
MyLEDController.set_default_led_color(RED) # MyLEDController.set_this_group_as_default([(YELLOW, 0.2), (RED, 0.2)])
os.system('sync;sync;sync'); sleep(1)
MyLEDController.set_default_led_color(BLACK)
if MAPLEDRIVE:
    os.system('sync;sync;sync')
    os.system('echo u > /proc/sysrq-trigger') # sysrq 'u': emergency remount of all filesystems read-only (not just a sync)
    sleep(2)
# NOTE(review): reboot=True makes this "die" script reboot rather than power
# off -- confirm that is intended.
poweroff_now(reboot=True)
sleep(10)
| gpl-3.0 |
olomix/django-navbar | navbar/utils.py | 3 | 2177 |
def _Qperm(user=None):
    """Build a Q filter selecting the navbar entries visible to *user*.

    Superusers see everything except anonymous-only entries; other users
    see entries matching their user_type class and group membership.
    """
    from django.db.models.query import Q
    exQ = Q()
    # user_type codes appear to be 'A'nonymous-only, 'E'veryone,
    # 'L'ogged-in, 'S'taff -- TODO confirm against the NavBarEntry model.
    if user is None or user.is_anonymous():
        exQ = (Q(user_type__exact = 'A') | Q(user_type__exact = 'E')) & Q(
            groups__isnull=True)
    elif user.is_superuser:
        exQ = ~Q(user_type__exact = 'A')
    elif user.is_staff:
        exQ = (Q(user_type__exact = 'E') | Q(user_type__exact = 'L') |
               Q(user_type__exact = 'S')) & (
               Q(groups__in=user.groups.all()) | Q(groups__isnull=True))
    else:
        exQ = (Q(user_type__exact = 'E') | Q(user_type__exact = 'L')) & (
            Q(groups__in=user.groups.all()) | Q(groups__isnull=True))
    return exQ
def generate_navtree(user=None, maxdepth=-1):
    """Build the navigation tree visible to *user*.

    Returns {'tree': nested entry dicts, 'byurl': (url, entry) pairs
    sorted by URL descending (more specific paths first)}.  A negative
    maxdepth means unlimited depth.

    NOTE(review): maxdepth == 0 returns a bare list, not the usual dict
    shape -- confirm callers never pass 0.
    """
    from models import NavBarEntry  # Python 2 implicit relative import
    if maxdepth == 0: return [] ## silly...
    permQ = _Qperm(user)
    urls = {}
    def navent(ent, invdepth, parent):
        # Dict for one entry; the first entry seen for a URL wins in `urls`.
        current = {'name': ent.name, 'title': ent.title, 'url': ent.url,
                   'selected': False, 'path_type': ent.path_type, 'parent': parent}
        urls.setdefault(ent.url, current)
        current['children'] = navlevel(ent.children, invdepth-1, current)
        return current
    def navlevel(base, invdepth, parent=None):
        # Recursion bottoms out when the remaining depth budget hits zero.
        if invdepth == 0 : return []
        return [ navent(ent, invdepth, parent)
                 for ent in base.filter(permQ).distinct() ]
    tree = navlevel(NavBarEntry.top, maxdepth)
    # Python 2 iteritems(); sort by URL, longest/latest prefixes first.
    urls = sorted(urls.iteritems(), key=lambda x: x[0], reverse=True)
    return {'tree': tree, 'byurl': urls}
def get_navtree(user=None, maxdepth=-1):
    """Cached wrapper around generate_navtree().

    Anonymous visitors share one day-long cache entry; superusers share
    another; every other user gets a private 15-minute entry.
    """
    import models
    cachename = 'site_navtree'
    timeout = 60*60*24
    if user is not None and not user.is_anonymous():
        if user.is_superuser:
            cachename = 'site_navtree_super'
        else:
            cachename = 'site_navtree_' + str(user.id)
        timeout = 60*15
    data = models.cache.get(cachename)
    if data is None:
        data = generate_navtree(user, maxdepth)
        models.cache.set(cachename, data, timeout)
    return data
def get_navbar(user=None):
    """Return the top-level navbar entries that *user* is allowed to see."""
    from models import NavBarEntry
    permission_filter = _Qperm(user)
    return NavBarEntry.top.filter(permission_filter)
| mit |
wooster/django-topsoil | topsoil/middleware.py | 1 | 2874 | from django.conf import settings
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import AnonymousUser
from django.utils.translation import ugettext as _
from oauth.oauth import OAuthError
from oauth_provider.decorators import CheckOAuth
from oauth_provider.utils import initialize_server_request, send_oauth_error
from utils import default_is_request_api, default_is_request_oauth
class ApiCsrfExemptionMiddleware(object):
    """If a request is an API request, this will attempt to exempt it
    from CSRF protection.

    This should only be used with corresponding middleware to disable
    cookie-based authentication on API requests.
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        # Views explicitly marked csrf_exempt need no help from us.
        if getattr(view_func, 'csrf_exempt', False):
            return None
        # Another middleware already settled CSRF for this request.
        if getattr(request, 'csrf_processing_done', False):
            return None
        # API requests: mark CSRF as done so CsrfViewMiddleware skips them.
        if default_is_request_api(request):
            request.csrf_processing_done = True
        return None
    def process_response(self, request, response):
        # NOTE(review): the flag is read from *response* here but set on
        # *request* above -- confirm whether this asymmetry is intentional.
        if getattr(response, 'csrf_processing_done', False):
            return response
        elif default_is_request_api(request):
            response.csrf_processing_done = True
        return response
class LazyAnonUser(object):
    """Descriptor that lazily attaches an AnonymousUser to each request."""
    def __get__(self, request, obj_type=None):
        # Imported here, not at module scope, so the descriptor can be
        # defined before Django settings are configured.
        from django.contrib.auth.models import AnonymousUser
        try:
            return request._cached_user
        except AttributeError:
            request._cached_user = AnonymousUser()
            return request._cached_user
class ApiOauthMiddleware(object):
    """Middleware which attempts to use OAuth authentication on API
    requests and cookie-based authentication on normal requests."""
    auth_middleware = None
    def __init__(self):
        self.auth_middleware = AuthenticationMiddleware()
    def process_request(self, request):
        # Non-API traffic is delegated to Django's normal session auth.
        if not default_is_request_api(request):
            return self.auth_middleware.process_request(request)
        return None
    def process_view(self, request, view_func, view_args, view_kwargs):
        if default_is_request_api(request):
            # NOTE(review): assigning to request.__class__ sets a *class*
            # attribute, shared by every request instance of that class --
            # confirm this is the intended default-user mechanism.
            request.__class__.user = LazyAnonUser()
            resource_name = getattr(request, 'oauth_resource_name', None)
            if CheckOAuth.is_valid_request(request):
                try:
                    consumer, token, parameters = CheckOAuth.validate_token(request)
                except OAuthError, e:  # Python 2 except syntax
                    # Validation failure falls through to anonymous access.
                    return None
                    #!! ??return send_oauth_error(e)
                if resource_name and token.resource.name != resource_name:
                    return send_oauth_error(OAuthError(_('You are not allowed to access this resource.')))
                elif consumer and token:
                    if token.user:
                        # Valid token: bind the authenticated user (again as
                        # a class attribute, see note above).
                        request.__class__.user = token.user
        return None
| bsd-3-clause |
timsnyder/bokeh | examples/reference/models/multi_select_server.py | 1 | 1279 | ## Bokeh server for MultiSelect
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import MultiSelect
from bokeh.plotting import figure
# Demo data: six points, each labelled with the colour used to draw it.
x=[3,4,6,12,10,1]
y=[7,1,3,4,1,6]
label=['Red', 'Orange', 'Red', 'Orange','Red', 'Orange']
df=pd.DataFrame({'x':x,'y':y,'label':label}) #create a dataframe for future use
source = ColumnDataSource(data=dict(x=x, y=y,label=label))
plot_figure = figure(title='Multi-Select',plot_height=450, plot_width=600,
              tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y',color='label', source=source, size=10)
# Both colours selected by default, so the whole dataset is shown initially.
multi_select = MultiSelect(title="Filter Plot by color:", value=["Red", "Orange"],
                           options=[("Red", "Red"), ("Orange", "Orange")])
def multiselect_click(attr,old,new):
    # Bokeh on_change callback: rebuild the data source from the rows whose
    # label is in the current multi-select selection.
    active_mselect=multi_select.value ##Getting multi-select value
    selected_df=df[df['label'].isin(active_mselect)] #filter the dataframe with value in multi-select
    source.data=dict(x=selected_df.x, y=selected_df.y,label=selected_df.label)
multi_select.on_change('value',multiselect_click)
layout=row(multi_select, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Multi-Select Bokeh Server"
wunderlins/learning | python/django/lib/python2.7/site-packages/setuptools/msvc9_support.py | 429 | 2187 | try:
import distutils.msvc9compiler
except ImportError:
pass
unpatched = dict()
def patch_for_specialized_compiler():
    """
    Patch functions in distutils.msvc9compiler to use the standalone compiler
    build for Python (Windows only). Fall back to original behavior when the
    standalone compiler is not available.
    """
    if 'distutils' not in globals():
        # The module isn't available to be patched (the guarded top-of-file
        # import failed, so the name was never bound in this module).
        return
    if unpatched:
        # Already patched
        return
    # Keep the original implementations so the patched functions below can
    # delegate to them when the standalone compiler isn't installed.
    unpatched.update(vars(distutils.msvc9compiler))
    distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
    distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
def find_vcvarsall(version):
    """Locate vcvarsall.bat for the standalone "VC for Python" compiler,
    falling back to the stock distutils lookup when it isn't installed.
    """
    Reg = distutils.msvc9compiler.Reg
    VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
    key = VC_BASE % ('', version)
    try:
        # Per-user installs register the compiler path here
        productdir = Reg.get_value(key, "installdir")
    except KeyError:
        try:
            # All-user installs on a 64-bit system register here
            key = VC_BASE % ('Wow6432Node\\', version)
            productdir = Reg.get_value(key, "installdir")
        except KeyError:
            productdir = None
    if productdir:
        import os
        vcvarsall = os.path.join(productdir, "vcvarsall.bat")
        if os.path.isfile(vcvarsall):
            return vcvarsall
    # Not found via the registry: defer to the saved original implementation.
    return unpatched['find_vcvarsall'](version)
def query_vcvarsall(version, *args, **kwargs):
    """Delegate to the original query_vcvarsall, improving the error message
    when vcvarsall.bat cannot be found (pointing users at the standalone
    compiler download for Python 2.7 builds).
    """
    try:
        return unpatched['query_vcvarsall'](version, *args, **kwargs)
    except distutils.errors.DistutilsPlatformError as exc:
        if exc and "vcvarsall.bat" in exc.args[0]:
            message = 'Microsoft Visual C++ %0.1f is required (%s).' % (version, exc.args[0])
            if int(version) == 9:
                # This redirection link is maintained by Microsoft.
                # Contact vspython@microsoft.com if it needs updating.
                raise distutils.errors.DistutilsPlatformError(
                    message + ' Get it from http://aka.ms/vcpython27'
                )
            raise distutils.errors.DistutilsPlatformError(message)
        # Unrelated platform error: re-raise untouched.
        raise
| gpl-2.0 |
9929105/KEEP | keep_backend/vocab/vocab_to_mongo.py | 2 | 3250 | '''
This is a standalone script that is to be
used to load the vocab into the server's
mongo database. This only needs to be run
once, with the appropriate vocabulary on the
filesystem as a one column flat list text file.
'''
import re
import sys
import pymongo as pm
from optparse import OptionParser
'''
This is a method to check if a string contains only
alphanumeric characters
'''
def re_sanitize(string, search=re.compile(r'[^a-zA-Z0-9-]').search):
    """Return True when *string* contains only letters, digits and dashes."""
    # The pre-compiled search (bound once as a default) finds the first
    # disallowed character; no match means the string is clean.
    return search(string) is None
db = None
client = pm.MongoClient()
filepath = raw_input("\nEnter the filename & path (either "
"exact or from the current directory) "
"to the flat vocab list: ")
try:
db = client['dhlab']
except:
print "\nDHLab database does not exist. Please initialize database first. Exiting now."
exit(1)
'''
This presents a menu, and forces the user to choose one of the options in
the option Dictionary if the vocab collection already exists.
'''
if "vocab" in db.collection_names():
options = ''
optionDict = ['i', 'd', 'e']
while (options.lower() not in optionDict):
options = raw_input("\nVocabulary collection already exists. I can do one of the following:\n"
"(I)\tINSERT this vocab into the current collection.\n"
"(D)\tDROP the collection and start clean. (Irreversable!)\n"
"(E)\tEXIT the program.\n"
"(Type the above lettter corresponding to the chosen option and hit Return)\n")
if options.lower() == 'i':
pass
elif options.lower() == 'd':
yn = raw_input("\nContinuing WILL drop the collection, and reload it based on the provided\n"
"file. Continue? (yes) for yes, or any other key or phrase for no.\n"
"NOTE: This is irreversible!\n")
if yn == 'yes':
db.drop_collection("vocab")
print "\n'vocab' collection dropped successfully."
db.create_collection("vocab")
else:
pass
else:
exit(0)
else:
db.create_collection('vocab') #Create the collection if it doesn't already exist.
print "\nEmpty 'vocab' collection created."
collection = db.vocab
# Get a name for this group.
vocabName = raw_input("Provide a name for this vocabulary (only alphanumeric characters and dashes)\n"
"(Ex: ICD-10, UMLS)\n")
while not re_sanitize(vocabName):
vocabName = raw_input("Invalid vocabulary name!\n"
"Provide a name for this vocabulary (only alphanumeric characters and dashes)\n"
"(Ex: ICD-10, UMLS)\n")
with open(filepath, 'r') as infile:
first = True
shiftMod = 0
for line in infile:
# need to do some checks to see if there are extraneous chars
if first is True:
first = False
lastChar = line[-1]
while not re_sanitize(lastChar):
print "\nCurrent first line: ", line[:len(line) - shiftMod - 1]
yn = raw_input("Last character of the first line is non-alphanumeric. "
"If this is a CSV file, a character might have been added "
"to the end of every line. Would you like to remove the"
" last character from every line? (y/n)\n")
if yn is 'y':
shiftMod += 1
lastChar = line[-(1 + shiftMod)]
else:
break
# Insert the lines into the vocab database!
prepLine = line[:len(line) - shiftMod - 1]
collection.save( { 'group': vocabName, 'term': prepLine } )
| mit |
labase/vitollino | src/_spy/vitollino/__init__.py | 1 | 1232 | #! /usr/bin/env python
# -*- coding: UTF8 -*-
# Este arquivo é parte do programa Vittolino
# Copyright 2011-2017 Carlo Oliveira <carlo@nce.ufrj.br>,
# `Labase <http://labase.selfip.org/>`__; `GPL <http://is.gd/3Udt>`__.
#
# Vittolino é um software livre; você pode redistribuí-lo e/ou
# modificá-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF); na versão 2 da
# Licença.
#
# Este programa é distribuído na esperança de que possa ser útil,
# mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO
# a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, veja em <http://www.gnu.org/licenses/>
from .vitollino import Cena
from .vitollino import Sala
from .vitollino import Salao
from .vitollino import Labirinto
from .vitollino import Elemento
from .vitollino import Popup
from .vitollino import INVENTARIO
from .vitollino import Portal
from .vitollino import Droppable
from .vitollino import Dropper
from .vitollino import Texto
from .vitollino import Jogo, JOGO
__all__ = ["vitollino"]
| gpl-3.0 |
ytaben/cyphesis | rulesets/acorn/world/objects/buildings/FarmHouse1.py | 2 | 2955 | #This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2001 Al Riddoch (See the file COPYING for details).
from cyphesis.Thing import Thing
from atlas import *
from Vector3D import Vector3D
# bbox = 8,8,2.5
# bmedian = 7.5,7.5,2.5
# offset = SW corner = -0.5,-0.5,0
class Farmhouse_deco_1(Thing):
    """A two-room farmhouse with a fenced yard, assembled from wall entities.

    The original implementation repeated the same create stanza thirteen
    times; the geometry now lives in one table and a single loop emits the
    entities in exactly the original order.
    """

    # (south-west corner position, bounding box) of every segment, in the
    # original creation order.  All segments are plain 'wall' entities.
    WALL_SEGMENTS = (
        # South wall
        ((-0.5, -0.5, 0), (8, 0.2, 5)),
        # West wall with door
        ((-0.5, -0.5, 0), (0.2, 2, 5)),
        ((-0.5, 3.5, 0), (0.2, 12, 5)),
        # North wall with door
        ((-0.5, 15.3, 0), (4, 0.2, 5)),
        ((3.5, 15.3, 0), (12, 0.2, 5)),
        # East wall
        ((15.3, 7.5, 0), (0.2, 8, 5)),
        # Interior wall
        ((7.3, -0.5, 0), (0.2, 14, 5)),
        # Interior wall with door
        ((7.3, 7.5, 0), (4.2, 0.2, 5)),
        ((11.5, 7.5, 0), (4, 0.2, 5)),
        # South fences
        ((7.5, -0.5, 0), (2, 0.1, 5)),
        ((11.5, -0.5, 0), (4, 0.1, 5)),
        # East fences
        ((15.4, -0.5, 0), (0.1, 3, 5)),
        ((15.4, 4.5, 0), (0.1, 3, 5)),
    )

    def setup_operation(self, op):
        """Create all wall/fence entities for this building.

        Returns an Oplist of "create" operations, one per segment.
        """
        ret = Oplist()
        for corner, size in self.WALL_SEGMENTS:
            loc = Location(self, Vector3D(corner[0], corner[1], corner[2]))
            loc.bbox = Vector3D(size[0], size[1], size[2])
            ret.append(Operation("create",
                                 Entity(name='wall', parents=['wall'],
                                        location=loc),
                                 to=self))
        return ret
| gpl-2.0 |
mkrupcale/ansible | lib/ansible/modules/web_infrastructure/ejabberd_user.py | 11 | 7573 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ejabberd_user
version_added: "1.5"
author: "Peter Sprygada (@privateip)"
short_description: Manages users for ejabberd servers
requirements:
- ejabberd with mod_admin_extra
description:
- This module provides user management for ejabberd servers
options:
username:
description:
- the name of the user to manage
required: true
host:
description:
- the ejabberd host associated with this username
required: true
password:
description:
- the password to assign to the username
required: false
logging:
description:
- enables or disables the local syslog facility for this module
required: false
default: false
choices: [ 'true', 'false', 'yes', 'no' ]
state:
description:
- describe the desired state of the user to be managed
required: false
default: 'present'
choices: [ 'present', 'absent' ]
notes:
- Password parameter is required for state == present only
- Passwords must be stored in clear text for this release
- The ejabberd configuration file must include mod_admin_extra as a module.
'''
EXAMPLES = '''
Example playbook entries using the ejabberd_user module to manage users state.
- name: create a user if it does not exists
ejabberd_user:
username: test
host: server
password: password
- name: delete a user if it exists
ejabberd_user:
username: test
host: server
state: absent
'''
import syslog
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
class EjabberdUserException(Exception):
    """Base exception for EjabberdUser; raised when required ejabberdctl
    arguments are missing."""
class EjabberdUser(object):
    """ This object represents a user resource for an ejabberd server. The
    object manages user creation and deletion using ejabberdctl. The following
    commands are currently supported:
        * ejabberdctl register
        * ejabberdctl deregister
    """
    def __init__(self, module):
        # Cache the AnsibleModule and the parameters this resource needs.
        self.module = module
        self.logging = module.params.get('logging')
        self.state = module.params.get('state')
        self.host = module.params.get('host')
        self.user = module.params.get('username')
        self.pwd = module.params.get('password')
    @property
    def changed(self):
        """ This method will check the current user and see if the password has
        changed. It will return True if the user does not match the supplied
        credentials and False if it does not
        """
        # NOTE(review): this actually returns ejabberdctl's integer return
        # code (0 == password matches), which is merely *truthy* when a
        # change is needed -- the True/False wording above is loose.
        try:
            options = [self.user, self.host, self.pwd]
            (rc, out, err) = self.run_command('check_password', options)
        except EjabberdUserException:
            e = get_exception()  # captured but unused
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return rc
    @property
    def exists(self):
        """ This method will check to see if the supplied username exists for
        host specified. If the user exists True is returned, otherwise False
        is returned
        """
        try:
            options = [self.user, self.host]
            (rc, out, err) = self.run_command('check_account', options)
        except EjabberdUserException:
            e = get_exception()  # captured but unused
            (rc, out, err) = (1, None, "required attribute(s) missing")
        # check_account exits 0 when the account exists.
        return not bool(int(rc))
    def log(self, entry):
        """ This method will log information to the local syslog facility """
        if self.logging:
            syslog.openlog('ansible-%s' % self.module._name)
            syslog.syslog(syslog.LOG_NOTICE, entry)
    def run_command(self, cmd, options):
        """ This method will run the any command specified and return the
        returns using the Ansible common module
        """
        # Refuse to run when any required option is empty/None.
        if not all(options):
            raise EjabberdUserException
        cmd = 'ejabberdctl %s ' % cmd
        cmd += " ".join(options)
        self.log('command: %s' % cmd)
        # NOTE(review): splitting on whitespace breaks option values that
        # contain spaces (e.g. passwords) -- confirm upstream behaviour.
        return self.module.run_command(cmd.split())
    def update(self):
        """ The update method will update the credentials for the user provided
        """
        try:
            options = [self.user, self.host, self.pwd]
            (rc, out, err) = self.run_command('change_password', options)
        except EjabberdUserException:
            e = get_exception()  # captured but unused
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return (rc, out, err)
    def create(self):
        """ The create method will create a new user on the host with the
        password provided
        """
        try:
            options = [self.user, self.host, self.pwd]
            (rc, out, err) = self.run_command('register', options)
        except EjabberdUserException:
            e = get_exception()  # captured but unused
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return (rc, out, err)
    def delete(self):
        """ The delete method will delete the user from the host
        """
        try:
            options = [self.user, self.host]
            (rc, out, err) = self.run_command('unregister', options)
        except EjabberdUserException:
            e = get_exception()  # captured but unused
            (rc, out, err) = (1, None, "required attribute(s) missing")
        return (rc, out, err)
def main():
    """Ansible module entry point: converge the ejabberd user to the
    requested state (present/absent), honouring check mode."""
    module = AnsibleModule(
        argument_spec = dict(
            host=dict(default=None, type='str'),
            username=dict(default=None, type='str'),
            password=dict(default=None, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            logging=dict(default=False, type='bool')
        ),
        supports_check_mode = True
    )
    obj = EjabberdUser(module)
    # rc stays None when no action was taken; that drives 'changed' below.
    rc = None
    result = dict()
    if obj.state == 'absent':
        if obj.exists:
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.delete()
            if rc != 0:
                module.fail_json(msg=err, rc=rc)
    elif obj.state == 'present':
        if not obj.exists:
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.create()
        elif obj.changed:
            # Account exists but the stored password differs: update it.
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.update()
        if rc is not None and rc != 0:
            module.fail_json(msg=err, rc=rc)
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
erdc-cm/air-water-vv | 2d/numericalTanks/nonlinearWaves/ls_consrv_n.py | 12 | 1901 | from proteus.default_n import *
from proteus import (StepControl,
TimeIntegration,
NonlinearSolvers,
LinearSolvers,
LinearAlgebraTools,
NumericalFlux)
import ls_consrv_p as physics
from proteus import Context
# Proteus numerics options for the level-set mass-conservation correction
# model. Values are pulled from the shared problem Context (ct) so that a
# single driver script configures every sub-model consistently.
ct = Context.get()
domain = ct.domain
nd = ct.domain.nd
mesh = domain.MeshOptions
# time stepping: the correction is solved per step, so no real integrator
runCFL = ct.runCFL
timeIntegrator = TimeIntegration.ForwardIntegrator
timeIntegration = TimeIntegration.NoIntegration
# mesh / parallel-partitioning options, forwarded from the domain mesh
nLevels = ct.nLevels
parallelPartitioningType = mesh.parallelPartitioningType
nLayersOfOverlapForParallel = mesh.nLayersOfOverlapForParallel
restrictFineSolutionToAllMeshes = mesh.restrictFineSolutionToAllMeshes
triangleOptions = mesh.triangleOptions
# quadrature rules and finite-element space (single component, index 0)
elementQuadrature = ct.elementQuadrature
elementBoundaryQuadrature = ct.elementBoundaryQuadrature
femSpaces = {0: ct.basis}
# stabilization / flux options: none needed for this correction equation
subgridError = None
massLumping = False
numericalFluxType = NumericalFlux.DoNothing
conservativeFlux = None
shockCapturing = None
# nonlinear solver: full Newton at every level
fullNewtonFlag = True
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
nonlinearSmoother = None
linearSmoother = None
matrix = LinearAlgebraTools.SparseMatrix
# linear solver selection: PETSc variants by default, LU when SuperLU is
# requested (the second if deliberately overrides the first)
if ct.useOldPETSc:
    multilevelLinearSolver = LinearSolvers.PETSc
    levelLinearSolver = LinearSolvers.PETSc
else:
    multilevelLinearSolver = LinearSolvers.KSP_petsc4py
    levelLinearSolver = LinearSolvers.KSP_petsc4py
if ct.useSuperlu:
    multilevelLinearSolver = LinearSolvers.LU
    levelLinearSolver = LinearSolvers.LU
linear_solver_options_prefix = 'mcorr_'
linearSolverConvergenceTest = 'r-true'
# tolerances: linear tolerance is a fraction of the nonlinear one
tolFac = 0.0
linTolFac = 0.001
l_atol_res = 0.001*ct.mcorr_nl_atol_res
nl_atol_res = ct.mcorr_nl_atol_res
useEisenstatWalker = False  # True
maxNonlinearIts = 50
maxLineSearches = 0
| mit |
bssthu/L5MapEditor | editor/main_window.py | 1 | 18257 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Module : main.py
# Author : bssthu
# Project : L5MapEditor
# Creation date : 2015-09-24
# Description :
#
import math
import os
import sqlite3
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSlot, Qt, QObject
from PyQt5.QtGui import QCursor, QColor
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QGraphicsScene
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QTableWidgetItem
from PyQt5.QtWidgets import QMessageBox
from dao.db_helper import DbHelper
from editor import config_loader
from editor import log
from editor.fsm_mgr import FsmMgr
from editor.map_command import MapCommand
from editor.ui_Form import Ui_MainWindow
class MainWindow(QMainWindow):
    """Main editor window: polygon lists, a graphics view and a command line.

    UI enable/disable logic is driven by a small finite state machine
    (FsmMgr) with the states 'empty', 'normal', 'insert', 'move' and
    'move_point'. All data mutations go through MapCommand so they can be
    undone/redone.
    """
    def __init__(self, parent=None):
        # NOTE(review): 'parent' is accepted but not forwarded to
        # QMainWindow.__init__ -- confirm this is intended.
        super().__init__()
        config_loader.load_all()
        # ui
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.ui.graphics_view.setScene(QGraphicsScene())
        self.view = self.ui.graphics_view
        self.scene = self.ui.graphics_view.scene()
        self.ui.polygon_table_widget.setColumnCount(2)
        self.ui.polygon_table_widget.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.ui.polygon_table_widget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.ui.polygon_table_widget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.ui.second_table_widget.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.ui.second_table_widget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.ui.second_table_widget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.ui.insert_layer_combo_box.addItems(config_loader.get_layer_names())
        self.ui.graphics_view.scale(1, -1)  # invert y so map coordinates grow upwards
        # data
        self.db = DbHelper()
        self.command_handler = MapCommand(self.db)
        self.path = None  # path of the currently opened database file
        # fsm
        self.__init_fsm()
        # other signals/slots
        self.command_handler.gotoPolygon.connect(self.goto_polygon)
        self.ui.polygon_table_widget.itemSelectionChanged.connect(self.polygon_selection_changed)
        self.ui.polygon_table_widget.itemClicked.connect(self.polygon_selection_clicked)
        self.ui.polygon_table_widget.polygonActivated.connect(self.goto_polygon)
        self.ui.second_table_widget.itemSelectionChanged.connect(self.second_selection_changed)
        self.ui.second_table_widget.polygonActivated.connect(self.goto_polygon)
        self.ui.scale_slider.valueChanged.connect(self.scale_slider_changed)
        self.ui.graphics_view.polygonCreated.connect(self.add_polygon)
        self.ui.graphics_view.polygonUpdated.connect(self.update_polygon)
        self.ui.graphics_view.pointsUpdated.connect(self.update_points)
        log.logger.onLog.connect(self.print_to_output)
        # open default database
        self.setAcceptDrops(True)
        self.open('default.sqlite', True)
    def __init_fsm(self):
        """Initialize the UI finite state machine and wire its transitions."""
        self.fsm_mgr = FsmMgr()
        self.fsm_mgr.changeState.connect(self.change_state)
        self.fsm_mgr.get_fsm('insert').enterState.connect(self.ui.graphics_view.begin_insert)
        self.fsm_mgr.get_fsm('insert').exitState.connect(self.ui.graphics_view.end_insert)
        self.fsm_mgr.get_fsm('normal').transferToState.connect(
            lambda name: self.ui.graphics_view.begin_move() if (name == 'move') else None)
        self.fsm_mgr.get_fsm('normal').transferToState.connect(
            lambda name: self.ui.graphics_view.begin_move() if (name == 'move_point') else None)
        self.fsm_mgr.get_fsm('move').transferToState.connect(
            lambda name: self.ui.graphics_view.end_move() if (name == 'normal') else None)
        self.fsm_mgr.get_fsm('move_point').transferToState.connect(
            lambda name: self.ui.graphics_view.end_move() if (name == 'normal') else None)
        self.change_state(self.fsm_mgr.get_current_state())
    # slots
    @pyqtSlot(QObject)
    def change_state(self, new_state):
        """Enable/disable actions and widgets to match the new FSM state."""
        if new_state == self.fsm_mgr.get_fsm('empty'):
            self.ui.save_action.setEnabled(False)
            self.ui.undo_action.setEnabled(False)
            self.ui.redo_action.setEnabled(False)
            self.ui.insert_action.setEnabled(False)
            self.ui.delete_action.setEnabled(False)
            self.ui.move_action.setEnabled(False)
            self.ui.graphics_view.setCursor(QCursor(Qt.ForbiddenCursor))
            self.ui.polygon_table_widget.setEnabled(False)
            self.ui.list2_type_label.setText('')
        # NOTE(review): plain 'if' (not elif) -- deliberate? the 'empty'
        # branch above never matches 'normal', so behavior is the same.
        if new_state == self.fsm_mgr.get_fsm('normal'):
            self.ui.save_action.setEnabled(True)
            self.ui.undo_action.setEnabled(True)
            self.ui.redo_action.setEnabled(True)
            self.ui.insert_action.setEnabled(True)
            self.ui.delete_action.setEnabled(True)
            self.ui.move_action.setEnabled(True)
            self.ui.graphics_view.setCursor(QCursor(Qt.ArrowCursor))
            self.ui.polygon_table_widget.setEnabled(True)
            self.ui.list2_type_label.setText('children')
        elif new_state == self.fsm_mgr.get_fsm('insert'):
            self.ui.undo_action.setEnabled(False)
            self.ui.redo_action.setEnabled(False)
            self.ui.insert_action.setEnabled(True)
            self.ui.delete_action.setEnabled(False)
            self.ui.move_action.setEnabled(False)
            self.ui.graphics_view.setCursor(QCursor(Qt.CrossCursor))
            self.ui.polygon_table_widget.setEnabled(True)
            self.ui.list2_type_label.setText('children')
        elif new_state == self.fsm_mgr.get_fsm('move'):
            self.ui.undo_action.setEnabled(False)
            self.ui.redo_action.setEnabled(False)
            self.ui.insert_action.setEnabled(False)
            self.ui.delete_action.setEnabled(False)
            self.ui.move_action.setEnabled(True)
            self.ui.graphics_view.setCursor(QCursor(Qt.DragMoveCursor))
            self.ui.polygon_table_widget.setEnabled(False)
            self.ui.list2_type_label.setText('points')
        elif new_state == self.fsm_mgr.get_fsm('move_point'):
            self.ui.undo_action.setEnabled(False)
            self.ui.redo_action.setEnabled(False)
            self.ui.insert_action.setEnabled(False)
            self.ui.delete_action.setEnabled(False)
            self.ui.move_action.setEnabled(True)
            self.ui.graphics_view.setCursor(QCursor(Qt.DragMoveCursor))
            self.ui.polygon_table_widget.setEnabled(False)
            self.ui.list2_type_label.setText('points')
    @pyqtSlot()
    def on_open_action_triggered(self):
        """Handle the "Open" action: pick a database file and load it."""
        file_filter = '数据库文档(*.sqlite);;其他文档(*.*)'
        path = QFileDialog.getOpenFileName(self, '载入数据', '.', file_filter)[0]
        if path:
            self.open(path)
    @pyqtSlot()
    def on_save_action_triggered(self):
        """Handle the "Save" action: write back to the opened file."""
        if self.path is not None:
            self.save(self.path)
    @pyqtSlot()
    def on_undo_action_triggered(self):
        """Handle the "Undo" action."""
        try:
            self.command_handler.undo()
            self.update_polygon_list()
        except Exception as e:
            log.error('撤销操作出错: %s' % repr(e))
            return False
        else:
            return True
    @pyqtSlot()
    def on_redo_action_triggered(self):
        """Handle the "Redo" action."""
        try:
            self.command_handler.redo()
            self.update_polygon_list()
        except Exception as e:
            log.error('重做操作出错: %s' % repr(e))
            return False
        else:
            return True
    @pyqtSlot()
    def on_insert_action_triggered(self):
        """Handle the "Insert" toggle; revert the check state if the FSM refuses."""
        if self.ui.insert_action.isChecked():
            if not self.fsm_mgr.change_fsm('normal', 'insert'):
                self.ui.insert_action.setChecked(False)
        else:
            if not self.fsm_mgr.change_fsm('insert', 'normal'):
                self.ui.insert_action.setChecked(True)
    @pyqtSlot()
    def on_delete_action_triggered(self):
        """Handle the "Delete" action: remove the selected polygon."""
        _id = self.selected_id()
        if _id >= 0:
            # remember the row so the selection stays near the deleted item
            row = self.ui.polygon_table_widget.currentRow()
            self.execute('del shape %d' % _id)
            self.ui.polygon_table_widget.setCurrentCell(row, 0)
    @pyqtSlot()
    def on_about_action_triggered(self):
        """Handle the "About" action."""
        info = 'L5MapEditor by bssthu\n\n' \
                'https://github.com/bssthu/L5MapEditor'
        QMessageBox.about(self, '关于', info)
    @pyqtSlot()
    def on_exit_action_triggered(self):
        """Handle the "Exit" action."""
        exit()
    @pyqtSlot()
    def on_move_action_triggered(self):
        """Handle the "Move" toggle; target state depends on the pick-point toggle."""
        state_name = 'move' if not self.ui.move_point_action.isChecked() else 'move_point'
        if self.ui.move_action.isChecked():
            if not self.fsm_mgr.change_fsm('normal', state_name):
                self.ui.move_action.setChecked(False)
        else:
            if not self.fsm_mgr.change_fsm(state_name, 'normal'):
                self.ui.move_action.setChecked(True)
    @pyqtSlot()
    def on_move_point_action_triggered(self):
        """Handle the "Pick point" toggle: switch between move and move_point."""
        self.ui.graphics_view.move_point(self.ui.move_point_action.isChecked())
        if self.ui.move_point_action.isChecked():
            self.fsm_mgr.change_fsm('move', 'move_point')
        else:
            self.fsm_mgr.change_fsm('move_point', 'move')
    @pyqtSlot()
    def on_closed_polygon_action_triggered(self):
        """Handle the "Draw closed polygon" toggle."""
        self.ui.graphics_view.draw_closed_polygon(self.ui.closed_polygon_action.isChecked())
    @pyqtSlot()
    def on_highlight_action_triggered(self):
        """Handle the "Highlight selection" toggle."""
        self.ui.graphics_view.highlight_selection(self.ui.highlight_action.isChecked())
    @pyqtSlot()
    def on_grid_action_triggered(self):
        """Handle the "Show grid" toggle (not implemented yet)."""
        pass
    @pyqtSlot()
    def on_mark_points_action_triggered(self):
        """Handle the "Mark points" toggle."""
        self.ui.graphics_view.mark_points(self.ui.mark_points_action.isChecked())
    @pyqtSlot()
    def on_command_edit_returnPressed(self):
        """Run the command typed into the command line when Enter is pressed."""
        commands = self.ui.command_edit.text().strip()
        if commands != '':
            # execute the command, then clear the input
            self.execute(commands)
            self.ui.command_edit.setText('')
    def lock_ui(self):
        """Lock the toolbar while a long-running operation is in progress."""
        self.ui.tool_bar.setEnabled(False)
    @pyqtSlot()
    def unlock_ui(self):
        """Unlock the toolbar and refresh the scene."""
        self.ui.tool_bar.setEnabled(True)
        self.ui.graphics_view.scene().update()
    @pyqtSlot(list)
    def update_child_list(self, polygon_table):
        """Refresh the children list (second table).

        Args:
            polygon_table: polygon table rows to display
        """
        self.ui.second_table_widget.fill_with_polygons(polygon_table)
    @pyqtSlot(QTableWidgetItem)
    def polygon_selection_clicked(self, item):
        """Treat a click on a polygon row like a selection change."""
        self.polygon_selection_changed()
    @pyqtSlot()
    def polygon_selection_changed(self):
        """A polygon was selected in the polygon list: show it and its children."""
        _id = self.selected_id()
        if _id >= 0:
            # draw polygon
            polygon = self.db.get_polygon_by_id(_id)
            # list children
            child_list = self.db.get_children_table_by_id(_id)
        else:
            # an invalid polygon is selected
            polygon = None
            child_list = {}
        self.ui.graphics_view.set_selected_polygon(polygon)
        self.update_child_list(child_list)
        return
    @pyqtSlot()
    def second_selection_changed(self):
        """Selection changed in the second table (children or points)."""
        if self.ui.move_action.isChecked():
            self.ui.graphics_view.select_point(self.ui.second_table_widget.currentRow())
        if self.ui.second_table_widget.is_polygon:
            _id = self.ui.second_table_widget.get_selected_id()
            polygon = self.db.get_polygon_by_id(_id)
            if polygon is not None:
                self.ui.graphics_view.set_selected_polygon(polygon)
    @pyqtSlot()
    def scale_slider_changed(self):
        """Change the map zoom level (exponential scale, y stays inverted)."""
        scale = math.exp(self.ui.scale_slider.value() / 10)
        self.ui.graphics_view.resetTransform()
        self.ui.graphics_view.scale(scale, -scale)
    @pyqtSlot(list)
    def add_polygon(self, vertices):
        """Insert a polygon built from the given vertices.

        Args:
            vertices (list[list[float]]): polygon vertices, [[x1,y1], [x2,y2], ..., [xn,yn]]
        """
        parent_id = self.selected_id()
        _id = self.command_handler.get_spare_id(parent_id)
        layer = self.ui.insert_layer_combo_box.currentIndex()
        additional = 0
        # layer 0 polygons are top-level and carry no parent id
        if layer == 0:
            commands = ['add shape %d %d %s' % (_id, layer, str(additional))]
        else:
            commands = ['add shape %d %d %s %d' % (_id, layer, str(additional), parent_id)]
        for vertex in vertices:
            commands.append('add pt %d %f %f' % (_id, vertex[0], vertex[1]))
        self.execute(commands)
    @pyqtSlot('PyQt_PyObject')
    def goto_polygon(self, _id):
        """Center the view on the polygon's center.

        Args:
            _id (int): id of the target polygon
        """
        self.ui.graphics_view.center_on_polygon(self.db.get_polygon_by_id(_id))
    @pyqtSlot(list)
    def update_polygon(self, vertices):
        """Rewrite the vertex coordinates of the currently selected polygon.

        Args:
            vertices (list[list[float]]): polygon vertices, [[x1,y1], [x2,y2], ..., [xn,yn]]
        """
        _id = self.selected_id()
        commands = []
        for pt_id in range(0, len(vertices)):
            x = vertices[pt_id][0]
            y = vertices[pt_id][1]
            commands.append('set pt %d %d %f %f' % (_id, pt_id, x, y))
        self.execute(commands)
    @pyqtSlot(list)
    def update_points(self, points):
        """Refresh the points shown in the second table.

        In edit mode the second table lists the coordinates of the current
        shape's points; this keeps it in sync while preserving the selection.

        Args:
            points (list[QPointF]): polygon vertices, [qpoint1, qpoint2, ..., qpointn]
        """
        row = self.ui.second_table_widget.currentRow()
        self.ui.second_table_widget.fill_with_points(points)
        row_count = self.ui.second_table_widget.rowCount()
        if row_count > 0:
            # clamp the previous row index into the new table's range
            row = min(row_count - 1, max(0, row))
            self.ui.second_table_widget.setCurrentCell(row, 0)
    @pyqtSlot(str, QColor)
    def print_to_output(self, msg, color):
        """Print a message to the output pane (and to stdout).

        Args:
            msg (str): text to print
            color (QColor): text color
        """
        print(msg)
        self.ui.output_browser.setTextColor(color)
        self.ui.output_browser.append(msg)
    def execute(self, commands):
        """Execute editor command(s) through the command handler.

        Args:
            commands (str): command(s) to execute
        """
        log.debug(commands)
        try:
            self.command_handler.execute(commands)
            self.update_polygon_list()
        except Exception as e:
            log.error('执行命令出错: %s' % repr(e))
            return False
        else:
            return True
    def update_polygon_list(self):
        """Refresh the polygon list and the scene, keeping the selection if possible."""
        polygon_table = self.db.polygon_table
        _id = self.selected_id()
        self.ui.polygon_table_widget.fill_with_polygons(polygon_table)
        self.ui.graphics_view.set_polygons(polygon_table, len(config_loader.get_layer_names()))
        if len(polygon_table) > 0:
            if not self.select_row_by_id(_id):
                self.ui.polygon_table_widget.setCurrentCell(0, 0)
    def open(self, path, quiet=False):
        """Open a sqlite database file.

        Args:
            path (str): file path
            quiet (bool): do not pop up message boxes on error
        """
        if os.path.exists(path):
            try:
                self.db.load_tables(path)
                self.command_handler.reset_backup_data()
                self.update_polygon_list()
                self.path = path
                self.fsm_mgr.change_fsm('empty', 'normal')
                log.debug('Open "%s".' % path)
            except sqlite3.Error as error:
                log.error('Failed to open "%s".' % path)
                log.error(repr(error))
                if not quiet:
                    self.show_message(repr(error))
        else:
            log.error('File %s not exists.' % path)
            if not quiet:
                self.show_message('File %s not exists.' % path)
    def save(self, path):
        """Save to a sqlite database file.

        Args:
            path (str): file path
        """
        try:
            self.db.write_to_file(path)
            self.command_handler.reset_backup_data()
            log.debug('Save "%s".' % path)
        except sqlite3.Error as error:
            log.error('Failed to save "%s".' % path)
            log.error(repr(error))
            self.show_message(repr(error))
    def selected_id(self):
        """Return the id of the polygon selected in the polygon list."""
        return self.ui.polygon_table_widget.get_selected_id()
    def select_row_by_id(self, polygon_id):
        """Select the row showing the given polygon.

        Args:
            polygon_id (int): polygon id
        """
        return self.ui.polygon_table_widget.select_id(polygon_id)
    def show_message(self, msg, title='L5MapEditor'):
        """Show an information message box."""
        QMessageBox.information(self, title, msg)
    def dragEnterEvent(self, event):
        """Accept drags that carry URLs (files) so they can be dropped."""
        if event.mimeData().hasUrls():
            event.accept()
        else:
            event.ignore()
    def dropEvent(self, event):
        """Open a local file dropped onto the window."""
        url = event.mimeData().urls()[0]
        if url.isLocalFile():
            path = url.toLocalFile()
            if os.path.isfile(path):
                self.open(path)
| lgpl-3.0 |
neechbear/ansible | roles/zabbix-agent/library/zabbix_host2.py | 1 | 18982 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Zabbix host creates/updates/deletes
description:
- When the host does not exists, a new host will be created, added to any host groups and linked to any templates.
- When the host already exists, the host group membership will be updated, along with the template links and interfaces.
- Delete a host from Zabbix if the host exists.
version_added: "1.9"
author: Tony Minfei Ding, Harrison Gu
requirements:
- zabbix-api python module
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
default: null
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
default: null
login_password:
description:
- Zabbix user password.
required: true
default: null
host_name:
description:
- Technical name of the host.
- If the host has already been added, the host name won't be updated.
required: true
host_groups:
description:
- List of host groups to add the host to.
required: false
link_templates:
description:
- List of templates to be linked to the host.
required: false
default: None
status:
description:
- Status and function of the host.
- Possible values are: enabled and disabled
required: false
default: "enabled"
policy:
description:
- Policy for updating pre-existing hosts.
- Possible values are: merge and replace.
- Merge will merge additional host groups and templates not already associated to a host.
- Replace will replace all host groups and templates associated to a host with only the ones specified, potentially removing the host from host groups and templates.
required: false
default: "replace"
state:
description:
- create/update or delete host.
- Possible values are: present and absent. If the host already exists, and the state is "present", just to update the host.
required: false
default: "present"
timeout:
description:
- The timeout of API request(seconds).
default: 10
interfaces:
description:
- List of interfaces to be created for the host (see example).
- Available values are: dns, ip, main, port, type and useip.
- Please review the interface documentation for more information on the supported properties:
- https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface
required: false
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
policy: merge
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
'''
import logging
import copy
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    HAS_ZABBIX_API = True
except ImportError:
    # defer the failure: main() reports a friendly error when zabbix-api is missing
    HAS_ZABBIX_API = False
# Extend the ZabbixAPI
# Since the zabbix-api python module too old (version 1.0, no higher version so far),
# it does not support the 'hostinterface' api calls,
# so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
class ZabbixAPIExtends(ZabbixAPI):
    """ZabbixAPI subclass that adds the 'hostinterface' API namespace.

    The old zabbix-api 1.0 package lacks hostinterface support, so it is
    grafted on here via ZabbixAPISubClass with the 'hostinterface' prefix.
    """
    hostinterface = None
    def __init__(self, server, timeout, **kwargs):
        ZabbixAPI.__init__(self, server, timeout=timeout)
        self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
class Host(object):
    """Helper around the Zabbix API for host CRUD and reconciliation.

    Fatal problems are reported through ``module.fail_json`` (which exits),
    and check mode exits early via ``module.exit_json`` before mutations.
    """
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx
    # exist host
    def is_host_exist(self, host_name):
        """Return truthy if a host with the given technical name exists."""
        result = self._zapi.host.exists({'host': host_name})
        return result
    # check if host group exists
    def check_host_group_exist(self, group_names):
        """Fail the module unless every named host group exists; else return True."""
        for group_name in group_names:
            result = self._zapi.hostgroup.exists({'name': group_name})
            if not result:
                self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
        return True
    def get_template_ids(self, template_list):
        """Resolve template names to template ids; [] for an empty/None input."""
        template_ids = []
        if template_list is None or len(template_list) == 0:
            return template_ids
        for template in template_list:
            # NOTE(review): this rebinds 'template_list' while the for loop
            # still iterates the original list -- it works, but the name
            # shadowing is confusing.
            template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
            if len(template_list) < 1:
                self._module.fail_json(msg="Template not found: %s" % template)
            else:
                template_id = template_list[0]['templateid']
                template_ids.append(template_id)
        return template_ids
    def add_host(self, host_name, group_ids, status, interfaces):
        """Create a host and return its new host id (honors check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status})
            if len(host_list) >= 1:
                return host_list['hostids'][0]
        except Exception, e:
            self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
    def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list):
        """Update groups/status and reconcile interfaces.

        Interfaces are matched by type: a matching type is updated in place,
        an unmatched requested interface is created, and any existing
        interface left unmatched is deleted.
        """
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status})
            # NOTE(review): this is an alias, not a copy -- removals below also
            # mutate the caller's exist_interface_list.
            interface_list_copy = exist_interface_list
            if interfaces:
                for interface in interfaces:
                    flag = False
                    interface_str = interface
                    for exist_interface in exist_interface_list:
                        interface_type = interface['type']
                        exist_interface_type = int(exist_interface['type'])
                        if interface_type == exist_interface_type:
                            # update
                            interface_str['interfaceid'] = exist_interface['interfaceid']
                            self._zapi.hostinterface.update(interface_str)
                            flag = True
                            interface_list_copy.remove(exist_interface)
                            break
                    if not flag:
                        # add
                        interface_str['hostid'] = host_id
                        self._zapi.hostinterface.create(interface_str)
            # remove
            remove_interface_ids = []
            for remove_interface in interface_list_copy:
                interface_id = remove_interface['interfaceid']
                remove_interface_ids.append(interface_id)
            if len(remove_interface_ids) > 0:
                self._zapi.hostinterface.delete(remove_interface_ids)
        except Exception, e:
            self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
    def delete_host(self, host_id, host_name):
        """Delete the host (honors check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.delete({'hostid': host_id})
        except Exception, e:
            self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
    # get host by host name
    def get_host_by_host_name(self, host_name):
        """Return the host object for the given name, or fail the module."""
        host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
        if len(host_list) < 1:
            self._module.fail_json(msg="Host not found: %s" % host_name)
        else:
            return host_list[0]
    # get group ids by group names
    def get_group_ids_by_group_names(self, group_names):
        """Return [{'groupid': id}, ...] for the named groups (validated first)."""
        group_ids = []
        if self.check_host_group_exist(group_names):
            group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
            for group in group_list:
                group_id = group['groupid']
                group_ids.append({'groupid': group_id})
        return group_ids
    # get host templates by host id
    def get_host_templates_by_host_id(self, host_id):
        """Return the template ids currently linked to the host."""
        template_ids = []
        template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
        for template in template_list:
            template_ids.append(template['templateid'])
        return template_ids
    # get host groups by host id
    def get_host_groups_by_host_id(self, host_id):
        """Return the names of the groups the host belongs to."""
        exist_host_groups = []
        host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
        if len(host_groups_list) >= 1:
            for host_groups_name in host_groups_list:
                exist_host_groups.append(host_groups_name['name'])
        return exist_host_groups
    # check the exist_interfaces whether it equals the interfaces or not
    def check_interface_properties(self, exist_interface_list, interfaces):
        """Return True if the requested interfaces differ from the existing ones.

        Compares the set of ports first, then every property of interfaces
        matched by port (string-compared, since the API returns strings).
        """
        interfaces_port_list = []
        if len(interfaces) >= 1:
            for interface in interfaces:
                interfaces_port_list.append(int(interface['port']))
        exist_interface_ports = []
        if len(exist_interface_list) >= 1:
            for exist_interface in exist_interface_list:
                exist_interface_ports.append(int(exist_interface['port']))
        if set(interfaces_port_list) != set(exist_interface_ports):
            return True
        for exist_interface in exist_interface_list:
            exit_interface_port = int(exist_interface['port'])
            for interface in interfaces:
                interface_port = int(interface['port'])
                if interface_port == exit_interface_port:
                    for key in interface.keys():
                        if str(exist_interface[key]) != str(interface[key]):
                            return True
        return False
    # get the status of host by host
    def get_host_status_by_host(self, host):
        """Return the 'status' field of a host object."""
        return host['status']
    # check all the properties before link or clear template
    def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
                             exist_interfaces, host):
        """Return True if groups, status, interfaces or templates need changes."""
        # get the existing host's groups
        exist_host_groups = self.get_host_groups_by_host_id(host_id)
        if set(host_groups) != set(exist_host_groups):
            return True
        # get the existing status
        exist_status = self.get_host_status_by_host(host)
        if int(status) != int(exist_status):
            return True
        # check the exist_interfaces whether it equals the interfaces or not
        if self.check_interface_properties(exist_interfaces, interfaces):
            return True
        # get the existing templates
        exist_template_ids = self.get_host_templates_by_host_id(host_id)
        if set(list(template_ids)) != set(exist_template_ids):
            return True
        return False
    # link or clear template of the host
    def link_or_clear_template(self, host_id, template_id_list):
        """Link the given templates to the host and clear any others."""
        # get host's exist template ids
        exist_template_id_list = self.get_host_templates_by_host_id(host_id)
        exist_template_ids = set(exist_template_id_list)
        template_ids = set(template_id_list)
        template_id_list = list(template_ids)
        # get unlink and clear templates
        templates_clear = exist_template_ids.difference(template_ids)
        templates_clear_list = list(templates_clear)
        request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception, e:
            self._module.fail_json(msg="Failed to link template to host: %s" % e)
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True, default=None, aliases=['url']),
login_user=dict(required=True),
login_password=dict(required=True),
host_name=dict(required=True),
host_groups=dict(required=False),
link_templates=dict(required=False),
status=dict(default="enabled"),
state=dict(default="present"),
policy=dict(default="replace"),
timeout=dict(default=10),
interfaces=dict(required=False)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
host_name = module.params['host_name']
host_groups = module.params['host_groups']
link_templates = module.params['link_templates']
status = module.params['status']
state = module.params['state']
policy = module.params['policy']
timeout = module.params['timeout']
interfaces = module.params['interfaces']
# convert enabled to 0; disabled to 1
status = 1 if status == "disabled" else 0
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout)
zbx.login(login_user, login_password)
except Exception, e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host = Host(module, zbx)
template_ids = []
if link_templates:
template_ids = host.get_template_ids(link_templates)
group_ids = []
if host_groups:
group_ids = host.get_group_ids_by_group_names(host_groups)
ip = ""
if interfaces:
for interface in interfaces:
if interface['type'] == 1:
ip = interface['ip']
# check if host exist
is_host_exist = host.is_host_exist(host_name)
if is_host_exist:
# get host id by host name
zabbix_host_obj = host.get_host_by_host_name(host_name)
host_id = zabbix_host_obj['hostid']
if state == "absent":
# remove host
host.delete_host(host_id, host_name)
module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
else:
if not group_ids:
module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)
# get exist host's interfaces
exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
exist_interfaces_copy = copy.deepcopy(exist_interfaces)
# update host
interfaces_len = len(interfaces) if interfaces else 0
# merge host groups and templates rather than replace
if policy == "merge":
exist_host_groups = host.get_host_groups_by_host_id(host_id)
if exist_host_groups:
host_groups = list( set(host_groups) | set(exist_host_groups) )
if host_groups:
group_ids = host.get_group_ids_by_group_names(host_groups)
exist_template_ids = host.get_host_templates_by_host_id(host_id)
if exist_template_ids:
template_ids = list( set(list(template_ids)) | set(exist_template_ids) )
if len(exist_interfaces) > interfaces_len:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, zabbix_host_obj):
host.link_or_clear_template(host_id, template_ids)
host.update_host(host_name, group_ids, status, host_id,
interfaces, exist_interfaces)
module.exit_json(changed=True,
result="Successfully update host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces_copy, zabbix_host_obj):
host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces)
host.link_or_clear_template(host_id, template_ids)
module.exit_json(changed=True,
result="Successfully update host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
if not group_ids:
module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
if not interfaces or (interfaces and len(interfaces) == 0):
module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
# create host
host_id = host.add_host(host_name, group_ids, status, interfaces)
host.link_or_clear_template(host_id, template_ids)
module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
host_name, ip, link_templates))
# standard (pre-2.1) Ansible module boilerplate: pull in AnsibleModule
# helpers at the bottom of the file, then run the module
from ansible.module_utils.basic import *
main()
| bsd-3-clause |
ajopanoor/hydra | tools/perf/scripts/python/syscall-counts.py | 1996 | 1700 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
# optional single argument restricts counting to one command name
for_comm = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	for_comm = sys.argv[1]
# syscall id -> count; autodict comes from perf's Core helper module
syscalls = autodict()
def trace_begin():
	"""perf-script hook: called once before event processing starts."""
	print "Press control+C to stop and show the summary"
def trace_end():
	"""perf-script hook: called once after the trace ends; print the summary."""
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, id, args):
	"""Handler for raw_syscalls:sys_enter: count one hit for syscall 'id'."""
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# first sighting of this id: autodict yields a non-int node, so
		# += raises TypeError and we seed the counter instead
		syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	"""Fallback handler for per-syscall syscalls:sys_enter_* tracepoints."""
	# NOTE(review): locals() here lacks 'common_callchain', which
	# raw_syscalls__sys_enter requires -- confirm this path is exercised.
	raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
    # Print a two-column table of syscall event counts, hottest first.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),
    # Sort by (count, id) descending so the most frequent syscalls lead.
    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                          reverse = True):
        print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Changaco/oh-mainline | vendor/packages/twisted/twisted/runner/topfiles/setup.py | 48 | 1292 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
try:
    # ConditionalExtension builds a C extension only when a build-time
    # predicate (its `condition` callable) holds.
    from twisted.python.dist import setup, ConditionalExtension as Extension
except ImportError:
    raise SystemExit("twisted.python.dist module not found.  Make sure you "
                     "have installed the Twisted core package before "
                     "attempting to install any other Twisted projects.")

# The portmap extension wraps the Sun RPC portmapper API, so it is only
# compiled when the rpc/rpc.h header is available on the build machine.
extensions = [
    Extension("twisted.runner.portmap",
              ["twisted/runner/portmap.c"],
              condition=lambda builder: builder._check_header("rpc/rpc.h")),
]

if __name__ == '__main__':
    setup(
        twisted_subproject="runner",
        # metadata
        name="Twisted Runner",
        description="Twisted Runner is a process management library and inetd "
                    "replacement.",
        author="Twisted Matrix Laboratories",
        author_email="twisted-python@twistedmatrix.com",
        maintainer="Andrew Bennetts",
        url="http://twistedmatrix.com/trac/wiki/TwistedRunner",
        license="MIT",
        long_description="""\
Twisted Runner contains code useful for persistent process management
with Python and Twisted, and has an almost full replacement for inetd.
""",
        # build stuff
        conditionalExtensions=extensions,
    )
| agpl-3.0 |
j0gurt/ggrc-core | src/ggrc_workflows/service_specs/steps/factories.py | 7 | 2339 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from behave import given
from factory.fuzzy import FuzzyChoice
import tests.ggrc.behave.factories as factories
from ggrc import models
from ggrc_workflows.models import (
Workflow, TaskGroup,
WorkflowPerson,
TaskGroupObject, TaskGroupTask,
Cycle, CycleTaskEntry,
CycleTaskGroup, CycleTaskGroupObject, CycleTaskGroupObjectTask
)
class WorkflowFactory(factories.ModelFactory):
    """Factory for Workflow models; picks a random valid frequency."""
    MODEL = Workflow
    frequency = FuzzyChoice(MODEL.VALID_FREQUENCIES)
class TaskGroupFactory(factories.ModelFactory):
    """Factory for TaskGroup models."""
    MODEL = TaskGroup
class WorkflowPersonFactory(factories.ModelFactory):
    """Factory for WorkflowPerson models; picks a random valid status."""
    MODEL = WorkflowPerson
    status = FuzzyChoice(MODEL.VALID_STATES)
class TaskGroupObjectFactory(factories.ModelFactory):
    """Factory for TaskGroupObject models, linked to a stub System."""
    MODEL = TaskGroupObject
    object = factories.FactoryStubMarker(models.System)
    status = FuzzyChoice(MODEL.VALID_STATES)
class TaskGroupTaskFactory(factories.ModelFactory):
    """Factory for TaskGroupTask models."""
    MODEL = TaskGroupTask
class CycleFactory(factories.ModelFactory):
    """Factory for Cycle models; picks a random valid status."""
    MODEL = Cycle
    status = FuzzyChoice(MODEL.VALID_STATES)
class CycleTaskEntryFactory(factories.ModelFactory):
    """Factory for CycleTaskEntry models."""
    MODEL = CycleTaskEntry
class CycleTaskGroupFactory(factories.ModelFactory):
    """Factory for CycleTaskGroup models; picks a random valid status."""
    MODEL = CycleTaskGroup
    status = FuzzyChoice(MODEL.VALID_STATES)
class CycleTaskGroupObjectFactory(factories.ModelFactory):
    """Factory for CycleTaskGroupObject models, linked to a stub System."""
    MODEL = CycleTaskGroupObject
    status = FuzzyChoice(MODEL.VALID_STATES)
    object = factories.FactoryStubMarker(models.System)
class CycleTaskGroupObjectTaskFactory(factories.ModelFactory):
    """Factory for CycleTaskGroupObjectTask models."""
    MODEL = CycleTaskGroupObjectTask
    status = FuzzyChoice(MODEL.VALID_STATES)
@given('Workflow factories registration')
def workflow_factories_registration(context):
    """Register every workflow-related factory class on the shared
    ``factories`` module so subsequent BDD steps can look them up by name."""
    workflow_factories = {
        'WorkflowFactory': WorkflowFactory,
        'TaskGroupFactory': TaskGroupFactory,
        'WorkflowPersonFactory': WorkflowPersonFactory,
        'TaskGroupObjectFactory': TaskGroupObjectFactory,
        'TaskGroupTaskFactory': TaskGroupTaskFactory,
        'CycleFactory': CycleFactory,
        'CycleTaskEntryFactory': CycleTaskEntryFactory,
        'CycleTaskGroupFactory': CycleTaskGroupFactory,
        'CycleTaskGroupObjectFactory': CycleTaskGroupObjectFactory,
        'CycleTaskGroupObjectTaskFactory': CycleTaskGroupObjectTaskFactory,
    }
    for factory_name, factory_cls in workflow_factories.items():
        setattr(factories, factory_name, factory_cls)
| apache-2.0 |
dmitriy-serdyuk/fuel | fuel/converters/mnist.py | 18 | 6073 | import gzip
import os
import struct
import h5py
import numpy
from fuel.converters.base import fill_hdf5_file, check_exists
MNIST_IMAGE_MAGIC = 2051
MNIST_LABEL_MAGIC = 2049
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
ALL_FILES = [TRAIN_IMAGES, TRAIN_LABELS, TEST_IMAGES, TEST_LABELS]
@check_exists(required_files=ALL_FILES)
def convert_mnist(directory, output_directory, output_filename=None,
                  dtype=None):
    """Converts the MNIST dataset to HDF5.

    Converts the MNIST dataset to an HDF5 dataset compatible with
    :class:`fuel.datasets.MNIST`. The converted dataset is
    saved as 'mnist.hdf5'.

    It assumes the existence of the following files:

    * `train-images-idx3-ubyte.gz`
    * `train-labels-idx1-ubyte.gz`
    * `t10k-images-idx3-ubyte.gz`
    * `t10k-labels-idx1-ubyte.gz`

    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to `None`, in which case a name
        based on `dtype` will be used.
    dtype : str, optional
        Either 'float32', 'float64', or 'bool'. Defaults to `None`,
        in which case images will be returned in their original
        unsigned byte format.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.

    """
    # Derive a dtype-specific default filename so differently converted
    # variants can coexist in the same output directory.
    if not output_filename:
        if dtype:
            output_filename = 'mnist_{}.hdf5'.format(dtype)
        else:
            output_filename = 'mnist.hdf5'
    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode='w')

    # Load the four gzipped IDX files (train/test features and labels).
    train_feat_path = os.path.join(directory, TRAIN_IMAGES)
    train_features = read_mnist_images(train_feat_path, dtype)
    train_lab_path = os.path.join(directory, TRAIN_LABELS)
    train_labels = read_mnist_labels(train_lab_path)
    test_feat_path = os.path.join(directory, TEST_IMAGES)
    test_features = read_mnist_images(test_feat_path, dtype)
    test_lab_path = os.path.join(directory, TEST_LABELS)
    test_labels = read_mnist_labels(test_lab_path)
    data = (('train', 'features', train_features),
            ('train', 'targets', train_labels),
            ('test', 'features', test_features),
            ('test', 'targets', test_labels))
    fill_hdf5_file(h5file, data)
    # Label the HDF5 dimensions so downstream code can address axes by name.
    h5file['features'].dims[0].label = 'batch'
    h5file['features'].dims[1].label = 'channel'
    h5file['features'].dims[2].label = 'height'
    h5file['features'].dims[3].label = 'width'
    h5file['targets'].dims[0].label = 'batch'
    h5file['targets'].dims[1].label = 'index'

    h5file.flush()
    h5file.close()

    return (output_path,)
def fill_subparser(subparser):
    """Sets up a subparser to convert the MNIST dataset files.

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `mnist` command.

    """
    # Optional dtype flag; when omitted, images stay in raw uint8 form.
    subparser.add_argument(
        "--dtype",
        type=str,
        default=None,
        choices=('float32', 'float64', 'bool'),
        help="dtype to save to; by default, images will be " +
             "returned in their original unsigned byte format")
    return convert_mnist
def read_mnist_images(filename, dtype=None):
    """Read MNIST images from the original ubyte file format.

    Parameters
    ----------
    filename : str
        Filename/path from which to read images.
    dtype : 'float32', 'float64', or 'bool'
        If unspecified, images will be returned in their original
        unsigned byte format.

    Returns
    -------
    images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
        An image array, with individual examples indexed along the
        first axis and the image dimensions along the second and
        third axis.

    Notes
    -----
    A Boolean dtype thresholds each pixel at 128 (pixels >= 128 become
    `True`). A float dtype rescales pixel values into [0, 1], so a raw
    value of 255 maps to 1.0.

    """
    with gzip.open(filename, 'rb') as f:
        # IDX header: magic number, image count, then row/column sizes,
        # all big-endian 32-bit integers.
        magic, count, n_rows, n_cols = struct.unpack('>iiii', f.read(16))
        if magic != MNIST_IMAGE_MAGIC:
            raise ValueError("Wrong magic number reading MNIST image file")
        images = numpy.frombuffer(f.read(), dtype='uint8')
    images = images.reshape((count, 1, n_rows, n_cols))
    if not dtype:
        return images
    dtype = numpy.dtype(dtype)
    if dtype.kind == 'b':
        # Booleans: threshold at half the uint8 range.
        images = images >= 128
    elif dtype.kind == 'f':
        # Floats: convert, then rescale into the unit interval.
        images = images.astype(dtype)
        images /= 255.
    else:
        raise ValueError("Unknown dtype to convert MNIST to")
    return images
def read_mnist_labels(filename):
    """Read MNIST labels from the original ubyte file format.

    Parameters
    ----------
    filename : str
        Filename/path from which to read labels.

    Returns
    -------
    labels : :class:`~numpy.ndarray`, shape (nlabels, 1)
        A one-dimensional unsigned byte array containing the
        labels as integers.

    """
    with gzip.open(filename, 'rb') as f:
        # Header: magic number followed by the label count (both
        # big-endian 32-bit integers); the count itself is unused.
        magic, _ = struct.unpack('>ii', f.read(8))
        if magic != MNIST_LABEL_MAGIC:
            raise ValueError("Wrong magic number reading MNIST label file")
        labels = numpy.frombuffer(f.read(), dtype='uint8')
    # Return a column vector, one label per row.
    return labels.reshape(labels.size, 1)
| mit |
farm3r/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/deps.py | 216 | 3993 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import genmsg.msg_loader
import genmsg
# pkg_name - string
# msg_file - string full path
# search_paths - dict of {'pkg':'msg_dir'}
def find_msg_dependencies_with_type(pkg_name, msg_file, search_paths):
    """Return ``[(dependency type name, file path), ...]`` for a .msg file.

    :param pkg_name: name of the package the message belongs to
    :param msg_file: full path to the .msg file
    :param search_paths: dict mapping package names to their msg directories
    :raises genmsg.MsgGenerationException: if the spec's dependencies
        cannot be loaded
    """
    # Read and parse the source msg file
    msg_context = genmsg.msg_loader.MsgContext.create_default()
    full_type_name = genmsg.gentools.compute_full_type_name(pkg_name, os.path.basename(msg_file))
    spec = genmsg.msg_loader.load_msg_from_file(msg_context, msg_file, full_type_name)
    try:
        genmsg.msg_loader.load_depends(msg_context, spec, search_paths)
    except genmsg.InvalidMsgSpec as e:
        raise genmsg.MsgGenerationException("Cannot read .msg for %s: %s"%(full_type_name, str(e)))
    # Collect into a set to de-duplicate transitive dependencies.
    deps = set()
    for dep_type_name in msg_context.get_all_depends(full_type_name):
        deps.add((dep_type_name, msg_context.get_file(dep_type_name)))

    return list(deps)
def find_msg_dependencies(pkg_name, msg_file, search_paths):
    """Return only the file paths of a .msg file's dependencies."""
    typed_deps = find_msg_dependencies_with_type(pkg_name, msg_file,
                                                 search_paths)
    return [dep_path for _, dep_path in typed_deps]
def find_srv_dependencies_with_type(pkg_name, msg_file, search_paths):
    """Return ``[(dependency type name, file path), ...]`` for a .srv file.

    Gathers the dependencies of both the request and the response halves
    of the service specification.

    :param pkg_name: name of the package the service belongs to
    :param msg_file: full path to the .srv file
    :param search_paths: dict mapping package names to their msg directories
    :raises genmsg.MsgGenerationException: if the spec's dependencies
        cannot be loaded
    """
    # Read and parse the source srv file
    msg_context = genmsg.msg_loader.MsgContext.create_default()
    full_type_name = genmsg.gentools.compute_full_type_name(pkg_name, os.path.basename(msg_file))
    spec = genmsg.msg_loader.load_srv_from_file(msg_context, msg_file, full_type_name)
    try:
        genmsg.msg_loader.load_depends(msg_context, spec, search_paths)
    except genmsg.InvalidMsgSpec as e:
        # Bug fix: this loader handles .srv files, but the old error
        # message claimed ".msg" (copy-paste from the msg variant).
        raise genmsg.MsgGenerationException("Cannot read .srv for %s: %s"%(full_type_name, str(e)))

    # A service embeds two message specs (request and response); collect
    # the dependencies of both into one de-duplicated set.
    deps = set()
    for part_full_name in (spec.request.full_name, spec.response.full_name):
        for dep_type_name in msg_context.get_all_depends(part_full_name):
            deps.add((dep_type_name, msg_context.get_file(dep_type_name)))

    return list(deps)
def find_srv_dependencies(pkg_name, msg_file, search_paths):
    """Return only the file paths of a .srv file's dependencies."""
    typed_deps = find_srv_dependencies_with_type(pkg_name, msg_file,
                                                 search_paths)
    return [dep_path for _, dep_path in typed_deps]
#paths = {'std_msgs':'/u/mkjargaard/repositories/mkjargaard/dist-sandbox/std_msgs/msg'}
#file = '/u/mkjargaard/repositories/mkjargaard/dist-sandbox/quux_msgs/msg/QuuxString.msg'
#find_msg_dependencies('quux_msgs', file, paths)
| gpl-3.0 |
alanjw/GreenOpenERP-Win-X86 | python/Lib/bsddb/test/test_fileid.py | 7 | 1891 | """TestCase for reseting File ID.
"""
import os
import shutil
import unittest
from test_all import db, test_support, get_new_environment_path, get_new_database_path
class FileidResetTestCase(unittest.TestCase):
    """Exercise DBEnv.fileid_reset(): after copying a database file,
    resetting the copy's internal file id lets both files be opened in
    the same environment as distinct databases."""

    def setUp(self):
        # Fresh paths for the two database files and the environment home.
        self.db_path_1 = get_new_database_path()
        self.db_path_2 = get_new_database_path()
        self.db_env_path = get_new_environment_path()

    def test_fileid_reset(self):
        # create DB 1
        self.db1 = db.DB()
        self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=(db.DB_CREATE|db.DB_EXCL))
        self.db1.put('spam', 'eggs')
        self.db1.close()

        # Clone the file byte-for-byte, then give the copy different data.
        shutil.copy(self.db_path_1, self.db_path_2)

        self.db2 = db.DB()
        self.db2.open(self.db_path_2, dbtype=db.DB_HASH)
        self.db2.put('spam', 'spam')
        self.db2.close()

        self.db_env = db.DBEnv()
        self.db_env.open(self.db_env_path, db.DB_CREATE|db.DB_INIT_MPOOL)

        # use fileid_reset() here
        self.db_env.fileid_reset(self.db_path_2)

        # Both files must now open inside the same environment and each
        # keep its own contents.
        self.db1 = db.DB(self.db_env)
        self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=db.DB_RDONLY)
        self.assertEqual(self.db1.get('spam'), 'eggs')
        self.db2 = db.DB(self.db_env)
        self.db2.open(self.db_path_2, dbtype=db.DB_HASH, flags=db.DB_RDONLY)
        self.assertEqual(self.db2.get('spam'), 'spam')
        self.db1.close()
        self.db2.close()

        self.db_env.close()

    def tearDown(self):
        # Remove the files and environment directory created in setUp.
        test_support.unlink(self.db_path_1)
        test_support.unlink(self.db_path_2)
        test_support.rmtree(self.db_env_path)
def test_suite():
    """Build the suite; fileid_reset() requires Berkeley DB >= 4.4."""
    tests = unittest.TestSuite()
    fileid_reset_supported = db.version() >= (4, 4)
    if fileid_reset_supported:
        tests.addTest(unittest.makeSuite(FileidResetTestCase))
    return tests
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| agpl-3.0 |
Mafarricos/Mafarricos-modded-xbmc-addons | plugin.video.streamajoker/resources/site-packages/bs4/element.py | 438 | 61538 | import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(unicode):
    """A unicode subclass representing a (possibly namespaced) attribute
    name, e.g. ``xml:lang``.

    The string value is ``prefix:name`` when both parts are given,
    otherwise whichever single part was provided. The original parts are
    kept as attributes for later inspection.
    """

    def __new__(cls, prefix, name, namespace=None):
        if name is None:
            # No local name: the string is just the prefix.
            obj = unicode.__new__(cls, prefix)
        elif prefix is None:
            # Not really namespaced.
            obj = unicode.__new__(cls, name)
        else:
            obj = unicode.__new__(cls, prefix + ":" + name)
        # Stash the components so formatters can recover them.
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
class AttributeValueWithCharsetSubstitution(unicode):
    """A stand-in object for a character encoding specified in HTML.

    Subclasses implement ``encode`` so that, when a document is
    re-encoded, the charset declared in the markup is rewritten to match
    the output encoding.
    """
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'charset' attribute.

    When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
    """

    def __new__(cls, original_value):
        obj = unicode.__new__(cls, original_value)
        # Keep the value as parsed, for re-output in the original encoding.
        obj.original_value = original_value
        return obj

    def encode(self, encoding):
        # The attribute value *is* the charset declaration, so encoding
        # the document simply replaces it with the target encoding name.
        return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'content' attribute.

    When Beautiful Soup parses the markup:
     <meta http-equiv="content-type" content="text/html; charset=utf8">

    The value of the 'content' attribute will be one of these objects.
    """

    # Matches a "charset=..." clause inside the content value, capturing
    # the lead-in (group 1) and the charset itself (group 3).
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def __new__(cls, original_value):
        match = cls.CHARSET_RE.search(original_value)
        if match is None:
            # No substitution necessary.
            return unicode.__new__(unicode, original_value)

        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj

    def encode(self, encoding):
        # Rewrite only the charset clause, leaving the rest of the
        # content value (e.g. "text/html; ") untouched.
        def rewrite(match):
            return match.group(1) + encoding
        return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):

    """Entity substitution rules that are aware of some HTML quirks.

    Specifically, the contents of <script> and <style> tags should not
    undergo entity substitution.

    Incoming NavigableString objects are checked to see if they're the
    direct children of a <script> or <style> tag.
    """

    # Tags whose contents are raw character data, not markup.
    cdata_containing_tags = set(["script", "style"])

    # Tags whose contents are preformatted (whitespace-significant).
    preformatted_tags = set(["pre"])

    @classmethod
    def _substitute_if_appropriate(cls, ns, f):
        # Apply `f` to `ns` unless `ns` is text directly inside a
        # CDATA-containing tag, in which case it is returned untouched.
        if (isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in cls.cdata_containing_tags):
            # Do nothing.
            return ns
        # Substitute.
        return f(ns)

    @classmethod
    def substitute_html(cls, ns):
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_html)

    @classmethod
    def substitute_xml(cls, ns):
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child == self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, basestring):
# Optimization to find all tags with a given name.
result = (element for element in generator
if isinstance(element, Tag)
and element.name == name)
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
    """Force an attribute value into a string representation.

    A multi-valued attribute (such as ``class``) is joined into a
    single space-separated string.
    """
    attr_value = self.get(value, default)
    if isinstance(attr_value, (list, tuple)):
        attr_value = " ".join(attr_value)
    return attr_value

def _tag_name_matches_and(self, function, tag_name):
    """Wrap ``function`` so that it also requires a matching tag name.

    If ``tag_name`` is empty, ``function`` is returned unchanged.
    """
    if not tag_name:
        return function
    def _match(tag):
        return tag.name == tag_name and function(tag)
    return _match
def _attribute_checker(self, operator, attribute, value=''):
    """Create a function that performs a CSS selector operation.

    Takes an operator, attribute and optional value. Returns a
    function that will return True for elements that match that
    combination.

    Supported operators: '=', '~', '^', '$', '*', '|'; any other
    operator (including the empty string) tests only for the
    attribute's presence.
    """
    if operator == '=':
        # string representation of `attribute` is equal to `value`
        return lambda el: el._attr_value_as_string(attribute) == value
    elif operator == '~':
        # space-separated list representation of `attribute`
        # contains `value`
        def _includes_value(element):
            attribute_value = element.get(attribute, [])
            if not isinstance(attribute_value, list):
                attribute_value = attribute_value.split()
            return value in attribute_value
        return _includes_value
    elif operator == '^':
        # string representation of `attribute` starts with `value`
        return lambda el: el._attr_value_as_string(
            attribute, '').startswith(value)
    elif operator == '$':
        # string representation of `attribute` ends with `value`
        return lambda el: el._attr_value_as_string(
            attribute, '').endswith(value)
    elif operator == '*':
        # string representation of `attribute` contains `value`
        return lambda el: value in el._attr_value_as_string(attribute, '')
    elif operator == '|':
        # string representation of `attribute` is either exactly
        # `value` or starts with `value` and then a dash.
        def _is_or_starts_with_dash(element):
            attribute_value = element._attr_value_as_string(attribute, '')
            return (attribute_value == value or attribute_value.startswith(
                value + '-'))
        return _is_or_starts_with_dash
    else:
        # Unknown/empty operator: match on attribute presence alone.
        return lambda el: el.has_attr(attribute)
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
    # BS3 alias for the `next_elements` property.
    return self.next_elements

def nextSiblingGenerator(self):
    # BS3 alias for the `next_siblings` property.
    return self.next_siblings

def previousGenerator(self):
    # BS3 alias for the `previous_elements` property.
    return self.previous_elements

def previousSiblingGenerator(self):
    # BS3 alias for the `previous_siblings` property.
    return self.previous_siblings

def parentGenerator(self):
    # BS3 alias for the `parents` property.
    return self.parents
class NavigableString(unicode, PageElement):
    """A Unicode string that also knows its position in the parse tree.

    NOTE(review): this code targets Python 2 (it subclasses ``unicode``).
    """

    # Markup prepended/appended when the string is rendered; subclasses
    # such as Comment and CData override these to restore their wrappers.
    PREFIX = ''
    SUFFIX = ''

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __copy__(self):
        # Strings are immutable, so a "copy" can be the object itself.
        return self

    def __getnewargs__(self):
        # Pickling support: rebuild from the plain text value.
        return (unicode(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))

    def output_ready(self, formatter="minimal"):
        # Run the formatter (entity substitution etc.), then wrap the
        # result in this class's markup delimiters.
        output = self.format_string(self, formatter)
        return self.PREFIX + output + self.SUFFIX

    @property
    def name(self):
        # Strings have no tag name.
        return None

    @name.setter
    def name(self, name):
        raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
    """A NavigableString not subject to the normal formatting rules.

    The string will be passed into the formatter (to trigger side effects),
    but the return value will be ignored.
    """

    def output_ready(self, formatter="minimal"):
        """CData strings are passed into the formatter.
        But the return value is ignored."""
        # The formatter runs purely for side effects; the text itself is
        # emitted verbatim between PREFIX and SUFFIX.
        self.format_string(self, formatter)
        return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
    # Rendered as a CDATA section.
    PREFIX = u'<![CDATA['
    SUFFIX = u']]>'

class ProcessingInstruction(PreformattedString):
    # Rendered as a processing instruction, e.g. <?target data?>.
    PREFIX = u'<?'
    SUFFIX = u'?>'

class Comment(PreformattedString):
    # Rendered as an HTML/XML comment.
    PREFIX = u'<!--'
    SUFFIX = u'-->'

class Declaration(PreformattedString):
    # Rendered as a markup declaration.
    PREFIX = u'<!'
    SUFFIX = u'!>'

class Doctype(PreformattedString):
    # Rendered as a <!DOCTYPE ...> declaration followed by a newline.

    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Build a Doctype from a document name plus optional public
        and system identifiers."""
        value = name or ''
        if pub_id is not None:
            value += ' PUBLIC "%s"' % pub_id
            # A system id accompanying a public id is appended bare.
            if system_id is not None:
                value += ' "%s"' % system_id
        elif system_id is not None:
            value += ' SYSTEM "%s"' % system_id
        return Doctype(value)

    PREFIX = u'<!DOCTYPE '
    SUFFIX = u'>\n'
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""

    def __init__(self, parser=None, builder=None, name=None, namespace=None,
                 prefix=None, attrs=None, parent=None, previous=None):
        "Basic constructor."

        if parser is None:
            self.parser_class = None
        else:
            # We don't actually store the parser object: that lets extracted
            # chunks be garbage-collected.
            self.parser_class = parser.__class__
        if name is None:
            raise ValueError("No value provided for new tag's name.")
        self.name = name
        self.namespace = namespace
        self.prefix = prefix
        if attrs is None:
            attrs = {}
        elif attrs and builder.cdata_list_attributes:
            # NOTE(review): this branch assumes `builder` is not None
            # whenever a non-empty attrs mapping is passed -- confirm at
            # call sites.
            attrs = builder._replace_cdata_list_attribute_values(
                self.name, attrs)
        else:
            # Copy so the caller's mapping is never mutated.
            attrs = dict(attrs)
        self.attrs = attrs
        self.contents = []
        # Link this tag into the tree relative to its parent/predecessor.
        self.setup(parent, previous)
        self.hidden = False

        # Set up any substitutions, such as the charset in a META tag.
        if builder is not None:
            builder.set_up_substitutions(self)
            self.can_be_empty_element = builder.can_be_empty_element(name)
        else:
            self.can_be_empty_element = False

    parserClass = _alias("parser_class")  # BS3
@property
def is_empty_element(self):
    """Is this tag an empty-element tag? (aka a self-closing tag)

    A tag that has contents is never an empty-element tag.

    A tag that has no contents may or may not be an empty-element
    tag. It depends on the builder used to create the tag. If the
    builder has a designated list of empty-element tags, then only
    a tag whose name shows up in that list is considered an
    empty-element tag.

    If the builder has no designated list of empty-element tags,
    then any tag with no contents is an empty-element tag.
    """
    return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element  # BS3

@property
def string(self):
    """Convenience property to get the single string within this tag.

    :Return: If this tag has a single string child, return value
     is that string. If this tag has no children, or more than one
     child, return value is None. If this tag has one child tag,
     return value is the 'string' attribute of the child tag,
     recursively.
    """
    if len(self.contents) != 1:
        return None
    child = self.contents[0]
    if isinstance(child, NavigableString):
        return child
    # Single child tag: recurse into it.
    return child.string

@string.setter
def string(self, string):
    # Replace everything inside this tag with a single string of the
    # same class as the argument (preserving Comment/CData subtypes).
    self.clear()
    self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
    """Yield all strings of certain classes, possibly stripping them.

    By default, yields only NavigableString and CData objects. So
    no comments, processing instructions, etc.
    """
    for descendant in self.descendants:
        # NOTE: the filter uses an exact type() test, not isinstance(),
        # so subclasses of a listed type are excluded unless they are
        # themselves named in `types`.
        if (
            (types is None and not isinstance(descendant, NavigableString))
            or
            (types is not None and type(descendant) not in types)):
            continue
        if strip:
            descendant = descendant.strip()
            # Drop strings that were pure whitespace.
            if len(descendant) == 0:
                continue
        yield descendant
strings = property(_all_strings)

@property
def stripped_strings(self):
    # Like `strings`, but whitespace-stripped with empties dropped.
    for string in self._all_strings(True):
        yield string

def get_text(self, separator=u"", strip=False,
             types=(NavigableString, CData)):
    """
    Get all child strings, concatenated using the given separator.
    """
    return separator.join([s for s in self._all_strings(
        strip, types=types)])
getText = get_text
text = property(get_text)
def decompose(self):
    """Recursively destroy this element and everything it contains.

    The element is removed from the tree, then every node inside it
    is emptied of attributes and contents so the garbage collector
    can reclaim the whole subtree.
    """
    self.extract()
    node = self
    while node is not None:
        following = node.next_element
        node.__dict__.clear()
        node.contents = []
        node = following

def clear(self, decompose=False):
    """Remove all children.

    With ``decompose=True``, child Tags are destroyed outright instead
    of merely being extracted.
    """
    for child in self.contents[:]:
        if decompose and isinstance(child, Tag):
            child.decompose()
        else:
            child.extract()
def index(self, element):
    """Return the position of ``element`` among this tag's children.

    Comparison is by identity, not value, so equal-but-distinct
    children are never confused with one another.
    """
    for position, child in enumerate(self.contents):
        if child is element:
            return position
    raise ValueError("Tag.index: element not in tag")

def get(self, key, default=None):
    """Return the value of the ``key`` attribute, or ``default`` if
    the tag has no such attribute."""
    return self.attrs.get(key, default)

def has_attr(self, key):
    """True if this tag defines an attribute named ``key``."""
    return key in self.attrs
def __hash__(self):
    # Hash the rendered markup: tags that compare equal render the same
    # markup, so they hash equally. The hash changes if the tag mutates.
    return str(self).__hash__()

def __getitem__(self, key):
    """tag[key] returns the value of the 'key' attribute for the tag,
    and throws an exception if it's not there."""
    return self.attrs[key]

def __iter__(self):
    "Iterating over a tag iterates over its contents."
    return iter(self.contents)

def __len__(self):
    "The length of a tag is the length of its list of contents."
    return len(self.contents)

def __contains__(self, x):
    # Membership tests look at child elements, not attributes.
    return x in self.contents

def __nonzero__(self):
    "A tag is non-None even if it has no contents."
    # Python 2 truthiness hook: without it, an empty tag would be falsy
    # because __len__ returns 0.
    return True

def __setitem__(self, key, value):
    """Setting tag[key] sets the value of the 'key' attribute for the
    tag."""
    self.attrs[key] = value

def __delitem__(self, key):
    "Deleting tag[key] deletes all 'key' attributes for the tag."
    # pop() with a default, so deleting a missing attribute is a no-op.
    self.attrs.pop(key, None)

def __call__(self, *args, **kwargs):
    """Calling a tag like a function is the same as calling its
    find_all() method. Eg. tag('a') returns a list of all the A tags
    found within this tag."""
    return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
    """Undefined attribute access is treated as a tag search:
    soup.title behaves like soup.find("title")."""
    #print "Getattr %s.%s" % (self.__class__, tag)
    if len(tag) > 3 and tag.endswith('Tag'):
        # BS3: soup.aTag -> "soup.find("a")
        tag_name = tag[:-3]
        warnings.warn(
            '.%sTag is deprecated, use .find("%s") instead.' % (
                tag_name, tag_name))
        return self.find(tag_name)
    # We special case contents to avoid recursion.
    elif not tag.startswith("__") and not tag=="contents":
        return self.find(tag)
    raise AttributeError(
        "'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
    """True iff ``other`` is a tag with the same name, the same
    attributes, and (recursively) the same contents as this one."""
    if self is other:
        return True
    if (not hasattr(other, 'name')
            or not hasattr(other, 'attrs')
            or not hasattr(other, 'contents')
            or self.name != other.name
            or self.attrs != other.attrs
            or len(self) != len(other)):
        return False
    # Lengths are equal here, so zip() covers every child pair.
    return all(
        mine == theirs
        for mine, theirs in zip(self.contents, other.contents))

def __ne__(self, other):
    """Inverse of __eq__: True iff this tag is not identical to the
    other tag, as defined there."""
    return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
    """Renders this tag as a string."""
    return self.encode(encoding)

def __unicode__(self):
    # Unicode rendering (Python 2's unicode() hook).
    return self.decode()

def __str__(self):
    # Bytestring rendering in the default output encoding.
    return self.encode()

if PY3K:
    # On Python 3 every textual rendering returns text, never bytes.
    __str__ = __repr__ = __unicode__

def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
           indent_level=None, formatter="minimal",
           errors="xmlcharrefreplace"):
    """Render this tag and its contents as a bytestring.

    Characters that cannot be represented in ``encoding`` are handled
    per ``errors`` (XML character references by default).
    """
    # Turn the data structure into Unicode, then encode the
    # Unicode.
    u = self.decode(indent_level, encoding, formatter)
    return u.encode(encoding, errors)

def _should_pretty_print(self, indent_level):
    """Should this tag be pretty-printed?"""
    # Never pretty-print inside HTML preformatted tags (whitespace is
    # significant there); XML trees have no such tags.
    return (
        indent_level is not None and
        (self.name not in HTMLAwareEntitySubstitution.preformatted_tags
         or self._is_xml))
def decode(self, indent_level=None,
           eventual_encoding=DEFAULT_OUTPUT_ENCODING,
           formatter="minimal"):
    """Returns a Unicode representation of this tag and its contents.

    :param indent_level: pretty-print at this nesting depth; None
        disables pretty-printing.
    :param eventual_encoding: The tag is destined to be
       encoded into this encoding. This method is _not_
       responsible for performing that encoding. This information
       is passed in so that it can be substituted in if the
       document contains a <META> tag that mentions the document's
       encoding.
    :param formatter: formatter name or callable used for entity
        substitution.
    """

    # First off, turn a string formatter into a function. This
    # will stop the lookup from happening over and over again.
    if not callable(formatter):
        formatter = self._formatter_for_name(formatter)

    # Render the attributes, sorted by name for deterministic output.
    attrs = []
    if self.attrs:
        for key, val in sorted(self.attrs.items()):
            if val is None:
                # Valueless attribute, e.g. <input disabled>.
                decoded = key
            else:
                if isinstance(val, list) or isinstance(val, tuple):
                    val = ' '.join(val)
                elif not isinstance(val, basestring):
                    val = unicode(val)
                elif (
                    isinstance(val, AttributeValueWithCharsetSubstitution)
                    and eventual_encoding is not None):
                    # Substitute the destination charset into e.g. a
                    # <meta> charset attribute value.
                    val = val.encode(eventual_encoding)

                text = self.format_string(val, formatter)
                decoded = (
                    unicode(key) + '='
                    + EntitySubstitution.quoted_attribute_value(text))
            attrs.append(decoded)
    close = ''
    closeTag = ''

    prefix = ''
    if self.prefix:
        prefix = self.prefix + ":"

    if self.is_empty_element:
        # Self-closing form: <br/>.
        close = '/'
    else:
        closeTag = '</%s%s>' % (prefix, self.name)

    pretty_print = self._should_pretty_print(indent_level)
    space = ''
    indent_space = ''
    if indent_level is not None:
        indent_space = (' ' * (indent_level - 1))
    if pretty_print:
        space = indent_space
        indent_contents = indent_level + 1
    else:
        indent_contents = None
    contents = self.decode_contents(
        indent_contents, eventual_encoding, formatter)

    if self.hidden:
        # This is the 'document root' object.
        s = contents
    else:
        s = []
        attribute_string = ''
        if attrs:
            attribute_string = ' ' + ' '.join(attrs)
        if indent_level is not None:
            # Even if this particular tag is not pretty-printed,
            # we should indent up to the start of the tag.
            s.append(indent_space)
        s.append('<%s%s%s%s>' % (
                prefix, self.name, attribute_string, close))
        if pretty_print:
            s.append("\n")
        s.append(contents)
        if pretty_print and contents and contents[-1] != "\n":
            s.append("\n")
        if pretty_print and closeTag:
            s.append(space)
        s.append(closeTag)
        if indent_level is not None and closeTag and self.next_sibling:
            # Even if this particular tag is not pretty-printed,
            # we're now done with the tag, and we should add a
            # newline if appropriate.
            s.append("\n")
        s = ''.join(s)
    return s
def prettify(self, encoding=None, formatter="minimal"):
    """Return this tag pretty-printed: Unicode when ``encoding`` is
    None, otherwise a bytestring in that encoding."""
    if encoding is not None:
        return self.encode(encoding, True, formatter=formatter)
    return self.decode(True, formatter=formatter)
def decode_contents(self, indent_level=None,
                    eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                    formatter="minimal"):
    """Renders the contents of this tag as a Unicode string.

    :param eventual_encoding: The tag is destined to be
       encoded into this encoding. This method is _not_
       responsible for performing that encoding. This information
       is passed in so that it can be substituted in if the
       document contains a <META> tag that mentions the document's
       encoding.
    """
    # First off, turn a string formatter into a function. This
    # will stop the lookup from happening over and over again.
    if not callable(formatter):
        formatter = self._formatter_for_name(formatter)

    pretty_print = (indent_level is not None)
    s = []
    for c in self:
        text = None
        if isinstance(c, NavigableString):
            text = c.output_ready(formatter)
        elif isinstance(c, Tag):
            # Child tags render themselves recursively.
            s.append(c.decode(indent_level, eventual_encoding,
                              formatter))
        if text and indent_level and not self.name == 'pre':
            # Whitespace is significant inside <pre>; strip elsewhere.
            text = text.strip()
        if text:
            if pretty_print and not self.name == 'pre':
                s.append(" " * (indent_level - 1))
            s.append(text)
            if pretty_print and not self.name == 'pre':
                s.append("\n")
    return ''.join(s)

def encode_contents(
    self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
    formatter="minimal"):
    """Renders the contents of this tag as a bytestring."""
    contents = self.decode_contents(indent_level, encoding, formatter)
    return contents.encode(encoding)

# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    # BS3 alias mapping the old camelCase arguments onto
    # encode_contents().
    if not prettyPrint:
        indentLevel = None
    return self.encode_contents(
        indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
         **kwargs):
    """Return only the first child of this Tag matching the given
    criteria, or None when nothing matches."""
    matches = self.find_all(name, attrs, recursive, text, 1, **kwargs)
    return matches[0] if matches else None
findChild = find

def find_all(self, name=None, attrs={}, recursive=True, text=None,
             limit=None, **kwargs):
    """Extracts a list of Tag objects that match the given
    criteria. You can specify the name of the Tag and any
    attributes you want the Tag to have.

    The value of a key-value pair in the 'attrs' map can be a
    string, a list of strings, a regular expression object, or a
    callable that takes a string and returns whether or not the
    string matches for some custom definition of 'matches'. The
    same is true of the tag name.
    """
    candidates = self.descendants if recursive else self.children
    return self._find_all(name, attrs, text, limit, candidates, **kwargs)
findAll = find_all       # BS3
findChildren = find_all  # BS2
#Generator methods
@property
def children(self):
    """Iterate over this tag's direct children only."""
    # return iter() to make the purpose of the method clear
    return iter(self.contents)  # XXX This seems to be untested.

@property
def descendants(self):
    """Iterate over every element below this tag, in document order."""
    if not len(self.contents):
        return
    # Follow next_element links until we step past the last descendant.
    stopNode = self._last_descendant().next_element
    current = self.contents[0]
    while current is not stopNode:
        yield current
        current = current.next_element
# CSS selector code

# Combinators that relate two selector tokens: child ('>'), adjacent
# sibling ('+') and general sibling ('~').
_selector_combinators = ['>', '+', '~']
# When True, select() prints a trace of its decisions (Python 2
# print statements).
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
if tag_name:
check = "[any]"
else:
check = tag_name
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
# Old names for backwards compatibility
def childGenerator(self):
    # BS3 alias for the `children` property.
    return self.children

def recursiveChildGenerator(self):
    # BS3 alias for the `descendants` property.
    return self.descendants

def has_key(self, key):
    """This was kind of misleading because has_key() (attributes)
    was different from __in__ (contents). has_key() is gone in
    Python 3, anyway."""
    warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
        key))
    return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        """Normalize and store the matching criteria.

        :param name: tag-name criterion -- a string, regexp, callable,
            boolean, None, or iterable of such values.
        :param attrs: dict of attribute criteria; a non-dict value is
            shorthand for matching the 'class' attribute.
        :param text: criterion applied to string elements.
        :param kwargs: extra attribute criteria; 'class_' is accepted
            as an alias for the reserved word 'class'.
        """
        self.name = self._normalize_search_value(name)
        if not isinstance(attrs, dict):
            # Treat a non-dict value for attrs as a search for the 'class'
            # attribute.
            kwargs['class'] = attrs
            attrs = None

        if 'class_' in kwargs:
            # Treat class_="foo" as a search for the 'class'
            # attribute, overriding any non-dict value for attrs.
            kwargs['class'] = kwargs['class_']
            del kwargs['class_']

        if kwargs:
            if attrs:
                # Copy before merging so the caller's dict is untouched.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        normalized_attrs = {}
        for key, value in attrs.items():
            normalized_attrs[key] = self._normalize_search_value(value)

        self.attrs = normalized_attrs
        self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
    """Convert a search criterion to canonical form.

    Unicode strings, callables, compiled regexps, booleans and None
    pass through unchanged; bytestrings are decoded as UTF-8;
    iterables are normalized element-wise; anything else is coerced
    to a Unicode string.
    """
    # Leave it alone if it's a Unicode string, a callable, a
    # regular expression, a boolean, or None.
    if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
        or isinstance(value, bool) or value is None):
        return value

    # If it's a bytestring, convert it to Unicode, treating it as UTF-8.
    if isinstance(value, bytes):
        return value.decode("utf8")

    # If it's listlike, convert it into a list of strings.
    if hasattr(value, '__iter__'):
        new_value = []
        for v in value:
            if (hasattr(v, '__iter__') and not isinstance(v, bytes)
                and not isinstance(v, unicode)):
                # This is almost certainly the user's mistake. In the
                # interests of avoiding infinite loops, we'll let
                # it through as-is rather than doing a recursive call.
                new_value.append(v)
            else:
                new_value.append(self._normalize_search_value(v))
        return new_value

    # Otherwise, convert it into a Unicode string.
    # The unicode(str()) thing is so this will do the same thing on Python 2
    # and Python 3.
    return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
    """Check a tag against this strainer.

    ``markup_name`` may be a Tag object (whose attributes are then
    read from the tag itself), or a plain name paired with a
    ``markup_attrs`` mapping/sequence. Returns the matched Tag (or
    name) on success, None otherwise.
    """
    found = None
    markup = None
    if isinstance(markup_name, Tag):
        markup = markup_name
        markup_attrs = markup
    # A callable name criterion receives (name, attrs) -- but only when
    # we were given raw data rather than a Tag object.
    call_function_with_tag_data = (
        isinstance(self.name, collections.Callable)
        and not isinstance(markup_name, Tag))

    if ((not self.name)
        or call_function_with_tag_data
        or (markup and self._matches(markup, self.name))
        or (not markup and self._matches(markup_name, self.name))):
        if call_function_with_tag_data:
            match = self.name(markup_name, markup_attrs)
        else:
            match = True
            markup_attr_map = None
            for attr, match_against in list(self.attrs.items()):
                if not markup_attr_map:
                    if hasattr(markup_attrs, 'get'):
                        markup_attr_map = markup_attrs
                    else:
                        # Build a dict lazily, only once an attribute
                        # criterion actually has to be checked.
                        markup_attr_map = {}
                        for k, v in markup_attrs:
                            markup_attr_map[k] = v
                attr_value = markup_attr_map.get(attr)
                if not self._matches(attr_value, match_against):
                    match = False
                    break
        if match:
            if markup:
                found = markup
            else:
                found = markup_name
    # The text criterion is applied last, against the tag's string.
    if found and self.text and not self._matches(found.string, self.text):
        found = None
    return found
searchTag = search_tag

def search(self, markup):
    """Dispatch a match attempt for ``markup``, which may be a list of
    elements, a Tag, or a string. Returns the match or None."""
    # print 'looking for %s in %s' % (self, markup)
    found = None
    # If given a list of items, scan it for a text element that
    # matches.
    if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
        for element in markup:
            if isinstance(element, NavigableString) \
                   and self.search(element):
                found = element
                break
    # If it's a Tag, make sure its name or attributes match.
    # Don't bother with Tags if we're searching for text.
    elif isinstance(markup, Tag):
        if not self.text or self.name or self.attrs:
            found = self.search_tag(markup)
    # If it's text, make sure the text matches.
    elif isinstance(markup, NavigableString) or \
             isinstance(markup, basestring):
        if not self.name and not self.attrs and self._matches(markup, self.text):
            found = markup
    else:
        raise Exception(
            "I don't know how to match against a %s" % markup.__class__)
    return found
def _matches(self, markup, match_against):
    """Test one value against one criterion.

    ``match_against`` may be True (matches any non-None value), a
    callable, a Unicode string (exact match), a regexp (searched), or
    an iterable (membership test). Lists/tuples of markup are matched
    element-wise.
    """
    # print u"Matching %s against %s" % (markup, match_against)
    result = False  # NOTE(review): never read; kept as-is.
    if isinstance(markup, list) or isinstance(markup, tuple):
        # This should only happen when searching a multi-valued attribute
        # like 'class'.
        if (isinstance(match_against, unicode)
            and ' ' in match_against):
            # A bit of a special case. If they try to match "foo
            # bar" on a multivalue attribute's value, only accept
            # the literal value "foo bar"
            #
            # XXX This is going to be pretty slow because we keep
            # splitting match_against. But it shouldn't come up
            # too often.
            return (whitespace_re.split(match_against) == markup)
        else:
            for item in markup:
                if self._matches(item, match_against):
                    return True
            return False

    if match_against is True:
        # True matches any non-None value.
        return markup is not None

    if isinstance(match_against, collections.Callable):
        return match_against(markup)

    # Custom callables take the tag as an argument, but all
    # other ways of matching match the tag name as a string.
    if isinstance(markup, Tag):
        markup = markup.name

    # Ensure that `markup` is either a Unicode string, or None.
    markup = self._normalize_search_value(markup)

    if markup is None:
        # None matches None, False, an empty string, an empty list, and so on.
        return not match_against

    if isinstance(match_against, unicode):
        # Exact string match
        return markup == match_against

    if hasattr(match_against, 'match'):
        # Regexp match
        return match_against.search(markup)

    if hasattr(match_against, '__iter__'):
        # The markup must be an exact match against something
        # in the iterable.
        return markup in match_against
class ResultSet(list):
    """A list of matches that remembers the SoupStrainer which
    produced it."""

    def __init__(self, source, result=()):
        list.__init__(self, result)
        # The SoupStrainer that generated these results.
        self.source = source
| gpl-2.0 |
Leila20/django | django/shortcuts.py | 117 | 5429 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None, content_type=None, status=None, using=None):
    """Returns a HttpResponse whose content is filled with the result of calling
    django.template.loader.render_to_string() with the passed arguments.
    """
    body = loader.render_to_string(template_name, context, using=using)
    return HttpResponse(body, content_type, status)


def render(request, template_name, context=None, content_type=None, status=None, using=None):
    """Returns a HttpResponse whose content is filled with the result of calling
    django.template.loader.render_to_string() with the passed arguments.

    Unlike render_to_response(), the request is handed to the template
    engine, which makes context processors available to the template.
    """
    body = loader.render_to_string(template_name, context, request, using=using)
    return HttpResponse(body, content_type, status)
def redirect(to, *args, **kwargs):
    """Returns an HttpResponseRedirect to the appropriate URL for the arguments
    passed.

    The arguments could be:

        * A model: the model's `get_absolute_url()` function will be called.

        * A view name, possibly with arguments: `urls.reverse()` will be used
          to reverse-resolve the name.

        * A URL, which will be used as-is for the redirect location.

    By default issues a temporary redirect; pass permanent=True to issue a
    permanent redirect.
    """
    # Pop 'permanent' before the remaining kwargs reach resolve_url().
    permanent = kwargs.pop('permanent', False)
    redirect_class = (
        HttpResponsePermanentRedirect if permanent else HttpResponseRedirect)
    return redirect_class(resolve_url(to, *args, **kwargs))
def _get_queryset(klass):
    """Return a QuerySet or a Manager.

    Duck typing in action: any class with a `get()` method (for
    get_object_or_404) or a `filter()` method (for get_list_or_404) might do
    the job.
    """
    # Anything without ._default_manager (a Manager or QuerySet) is
    # passed through untouched.
    if not hasattr(klass, '_default_manager'):
        return klass
    # Model class: start from its default manager's full queryset.
    return klass._default_manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), an MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except AttributeError:
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_object_or_404() must be a Model, Manager, "
"or QuerySet, not '%s'." % klass__name
)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
    """
    Return a non-empty list of objects found via ``filter()``, or raise
    Http404 when nothing matches.

    ``klass`` may be a Model, Manager, or QuerySet object; remaining
    arguments are forwarded to ``filter()``.
    """
    queryset = _get_queryset(klass)
    try:
        matches = list(queryset.filter(*args, **kwargs))
    except AttributeError:
        # ``klass`` had no usable ``filter()`` -- report what was passed.
        offender = klass.__name__ if isinstance(klass, type) else type(klass).__name__
        raise ValueError(
            "First argument to get_list_or_404() must be a Model, Manager, or "
            "QuerySet, not '%s'." % offender
        )
    if not matches:
        raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
    return matches
def resolve_url(to, *args, **kwargs):
    """
    Return a URL appropriate for the arguments passed.

    ``to`` may be a model (its ``get_absolute_url()`` is called), a view
    name possibly with arguments (resolved through ``urls.reverse()``), or
    a URL, which is returned as-is.
    """
    # Model-like objects know their own URL.
    if hasattr(to, 'get_absolute_url'):
        return to.get_absolute_url()

    if isinstance(to, Promise):
        # Expand the lazy instance, as it can cause issues when it is passed
        # further to some Python functions like urlparse.
        to = force_text(to)

    if isinstance(to, six.string_types):
        # Relative URLs pass straight through.
        if to.startswith(('./', '../')):
            return to
        try:
            return reverse(to, args=args, kwargs=kwargs)
        except NoReverseMatch:
            # Callables, and names that don't "feel" like URLs, should
            # surface the reversing error rather than be treated as URLs.
            if callable(to):
                raise
            if '/' not in to and '.' not in to:
                raise

    # Finally, fall back and assume it's a URL.
    return to
| bsd-3-clause |
ybdesire/apk_sdk_analysis | common/Androguard-2.0/tests/test_sign.py | 23 | 2170 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.androgen import AndroguardS
from androguard.core.androgen import AndroguardS
from androguard.core.analysis import analysis
TEST_CASE = "examples/android/TestsAndroguard/bin/classes.dex"
def test(got, expected):
    # Poor-man's assertion helper: prints an " OK "/" X " marker next to the
    # compared values instead of raising, so the script reports every case.
    # NOTE(review): Python 2 print-statement syntax -- this file predates py3.
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Load the test DEX into an Androguard session and attach a uVMAnalysis pass.
a = AndroguardS( TEST_CASE )
x = analysis.uVMAnalysis( a.get_vm() )

# For every method in the DEX, dump each predefined signature flavour
# (grammar variants L0_0..L0_4, the hex form, and the basic-block sequence).
for method in a.get_methods():
    print method.get_class_name(), method.get_name(), method.get_descriptor()
    print "-> : \t", x.get_method_signature(method, predef_sign = analysis.SIGNATURE_L0_0).get_string()
    print "-> : \t", x.get_method_signature(method, predef_sign = analysis.SIGNATURE_L0_1).get_string()
    print "-> : \t", x.get_method_signature(method, predef_sign = analysis.SIGNATURE_L0_2).get_string()
    print "-> : \t", x.get_method_signature(method, predef_sign = analysis.SIGNATURE_L0_3).get_string()
    print "-> : \t", x.get_method_signature(method, predef_sign = analysis.SIGNATURE_L0_4).get_string()
    print "-> : \t", x.get_method_signature(method, predef_sign = analysis.SIGNATURE_HEX).get_string()
    print "-> : \t", x.get_method_signature(method, predef_sign = analysis.SIGNATURE_SEQUENCE_BB).get_list()
    print
| gpl-3.0 |
rreimann/electron | script/upload-node-headers.py | 12 | 2019 | #!/usr/bin/env python
import argparse
import glob
import os
import shutil
import sys
from lib.config import PLATFORM, get_target_arch, s3_config
from lib.util import safe_mkdir, scoped_cwd, s3put
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
def main():
  """Entry point: upload the node/iojs headers for the requested version."""
  args = parse_args()

  # Upload node's headers to S3.
  bucket, access_key, secret_key = s3_config()
  upload_node(bucket, access_key, secret_key, args.version)
def parse_args():
  """Build and evaluate the command-line parser for this script."""
  parser = argparse.ArgumentParser(description='upload sumsha file')
  parser.add_argument('-v', '--version', required=True,
                      help='Specify the version')
  return parser.parse_args()
def upload_node(bucket, access_key, secret_key, version):
  """Upload the node-*/iojs-* header tarballs from DIST_DIR to the
  atom-shell S3 dist location for `version`; on Windows also publish the
  built import library under both node.lib and iojs.lib names.
  """
  with scoped_cwd(DIST_DIR):
    # Header tarballs are expected to already exist in DIST_DIR.
    s3put(bucket, access_key, secret_key, DIST_DIR,
          'atom-shell/dist/{0}'.format(version), glob.glob('node-*.tar.gz'))
    s3put(bucket, access_key, secret_key, DIST_DIR,
          'atom-shell/dist/{0}'.format(version), glob.glob('iojs-*.tar.gz'))

  if PLATFORM == 'win32':
    # Pick per-arch destination paths for the import libraries.
    if get_target_arch() == 'ia32':
      node_lib = os.path.join(DIST_DIR, 'node.lib')
      iojs_lib = os.path.join(DIST_DIR, 'win-x86', 'iojs.lib')
    else:
      node_lib = os.path.join(DIST_DIR, 'x64', 'node.lib')
      iojs_lib = os.path.join(DIST_DIR, 'win-x64', 'iojs.lib')
    safe_mkdir(os.path.dirname(node_lib))
    safe_mkdir(os.path.dirname(iojs_lib))

    # Copy atom.lib to node.lib and iojs.lib.
    # NOTE(review): presumably consumed by native-module build toolchains
    # expecting those exact names -- verify against downstream tooling.
    atom_lib = os.path.join(OUT_DIR, 'node.dll.lib')
    shutil.copy2(atom_lib, node_lib)
    shutil.copy2(atom_lib, iojs_lib)

    # Upload the node.lib.
    s3put(bucket, access_key, secret_key, DIST_DIR,
          'atom-shell/dist/{0}'.format(version), [node_lib])

    # Upload the iojs.lib.
    s3put(bucket, access_key, secret_key, DIST_DIR,
          'atom-shell/dist/{0}'.format(version), [iojs_lib])
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  sys.exit(main())
| mit |
fkorotkov/pants | src/python/pants/reporting/report.py | 17 | 4044 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import threading
import time
class ReportingError(Exception):
  """An error raised by the reporting framework."""
  pass
class EmitterThread(threading.Thread):
  """Daemon thread that flushes the report buffers at a fixed cadence.

  Flushing periodically moves buffered output from memory to the reporters,
  so the output of long-running workunits can be monitored while they are
  still executing.
  """

  # Seconds between successive flushes.
  _FLUSH_PERIOD_SECS = 0.5

  def __init__(self, report, name):
    super(EmitterThread, self).__init__(name=name)
    self._report = report
    self._halt = threading.Event()
    self.daemon = True

  def run(self):
    # NB(Eric Ayers) Waiting on the event with a timeout caused spurious
    # exceptions on shutdown on some platforms
    # (https://github.com/pantsbuild/pants/issues/2750), hence the explicit
    # sleep between flushes.
    while not self._halt.is_set():
      self._report.flush()
      time.sleep(self._FLUSH_PERIOD_SECS)

  def stop(self):
    self._halt.set()
class Report(object):
  """A report of a pants run.

  Tracks in-flight workunits, fans events and log messages out to a set of
  registered reporters, and periodically flushes buffered tool output via a
  background EmitterThread. Public methods are serialized on an internal
  lock to support parallel execution.
  """

  # Log levels.
  FATAL = 0
  ERROR = 1
  WARN = 2
  INFO = 3
  DEBUG = 4

  # Upper-cased level name -> numeric level ('WARNING' is an alias of WARN).
  _log_level_name_map = {
    'FATAL': FATAL, 'ERROR': ERROR, 'WARN': WARN, 'WARNING': WARN, 'INFO': INFO, 'DEBUG': DEBUG
  }

  @staticmethod
  def log_level_from_string(s):
    """Map a case-insensitive level name to its numeric value (default INFO)."""
    s = s.upper()
    return Report._log_level_name_map.get(s, Report.INFO)

  def __init__(self):
    # We periodically emit newly gathered output from tool invocations.
    self._emitter_thread = EmitterThread(report=self, name='output-emitter')
    # Map from workunit id to workunit.
    self._workunits = {}
    # We report to these reporters.
    self._reporters = {}  # name -> Reporter instance.
    # We synchronize on this, to support parallel execution.
    self._lock = threading.Lock()

  def open(self):
    """Open all registered reporters and start the periodic flusher thread."""
    with self._lock:
      for reporter in self._reporters.values():
        reporter.open()
    self._emitter_thread.start()

  # Note that if you add/remove reporters after open() has been called you have
  # to ensure that their state is set up correctly. Best only to do this with
  # stateless reporters, such as ConsoleReporter.

  def add_reporter(self, name, reporter):
    """Register `reporter` under `name`, replacing any existing one."""
    with self._lock:
      self._reporters[name] = reporter

  def remove_reporter(self, name):
    """Unregister and return the reporter registered under `name`."""
    with self._lock:
      ret = self._reporters[name]
      del self._reporters[name]
      return ret

  def start_workunit(self, workunit):
    """Begin tracking `workunit` and notify all reporters of its start."""
    with self._lock:
      self._workunits[workunit.id] = workunit
      for reporter in self._reporters.values():
        reporter.start_workunit(workunit)

  def log(self, workunit, level, *msg_elements):
    """Log a message.

    Each element of msg_elements is either a message string or a (message, detail) pair.
    """
    with self._lock:
      for reporter in self._reporters.values():
        reporter.handle_log(workunit, level, *msg_elements)

  def end_workunit(self, workunit):
    """Flush pending output, notify reporters, and stop tracking `workunit`."""
    with self._lock:
      self._notify()  # Make sure we flush everything reported until now.
      for reporter in self._reporters.values():
        reporter.end_workunit(workunit)
      if workunit.id in self._workunits:
        del self._workunits[workunit.id]

  def flush(self):
    """Push buffered workunit output through to the reporters."""
    with self._lock:
      self._notify()

  def close(self):
    """Stop the flusher thread, emit remaining output, and close reporters."""
    self._emitter_thread.stop()
    with self._lock:
      self._notify()  # One final time.
      for reporter in self._reporters.values():
        reporter.close()

  def _notify(self):
    # Notify for output in all workunits. Note that output may be coming in from workunits other
    # than the current one, if work is happening in parallel.
    # Assumes self._lock is held by the caller.
    for workunit in self._workunits.values():
      for label, output in workunit.outputs().items():
        s = output.read()
        if len(s) > 0:
          for reporter in self._reporters.values():
            reporter.handle_output(workunit, label, s)
| apache-2.0 |
FDio/vpp | test/test_arping.py | 2 | 8702 | from scapy.layers.l2 import ARP
from scapy.layers.inet6 import ICMPv6ND_NS, ICMPv6ND_NA, IPv6
from framework import VppTestCase
""" TestArping is a subclass of VPPTestCase classes.
Basic test for sanity check of arping.
"""
class TestArping(VppTestCase):
    """ Arping Test Case

    Exercises the VPP arping feature over both the CLI and the binary API,
    for IPv4 ARP requests / gratuitous ARP and IPv6 neighbor solicitation /
    advertisement, verifying the packets captured on pg1.

    NOTE(review): the CLI command strings below concatenate the address and
    "pg1" (and "gratuitous" and the address) without a separating space --
    this appears to rely on VPP's unformat tokenizer splitting the fields;
    confirm it is intentional.
    """

    @classmethod
    def setUpClass(cls):
        # Bring up two packet-generator interfaces with IPv4 and IPv6
        # configured; ARP/NDP entries are resolved up front so captures are
        # deterministic.
        super(TestArping, cls).setUpClass()
        try:
            cls.create_pg_interfaces(range(2))
            cls.interfaces = list(cls.pg_interfaces)
            for i in cls.interfaces:
                i.admin_up()
                i.config_ip4()
                i.config_ip6()
                i.disable_ipv6_ra()
                i.resolve_arp()
                i.resolve_ndp()
        except Exception:
            super(TestArping, cls).tearDownClass()
            raise

    @classmethod
    def tearDownClass(cls):
        super(TestArping, cls).tearDownClass()

    def tearDown(self):
        super(TestArping, self).tearDown()

    def show_commands_at_teardown(self):
        # Dumped by the framework on failure to aid debugging.
        self.logger.info(self.vapi.cli("show hardware"))

    def verify_arping_request(self, p, src, dst):
        # Expect an Ethernet/IPv4 ARP request (opcode 1) from src for dst.
        arp = p[ARP]
        self.assertEqual(arp.hwtype, 0x0001)
        self.assertEqual(arp.ptype, 0x0800)
        self.assertEqual(arp.hwlen, 6)
        self.assertEqual(arp.op, 1)
        self.assertEqual(arp.psrc, src)
        self.assertEqual(arp.pdst, dst)

    def verify_arping_ip6_ns(self, p, src, dst):
        # Expect an ICMPv6 Neighbor Solicitation (type 135) targeting dst.
        icmpv6 = p[ICMPv6ND_NS]
        self.assertEqual(icmpv6.type, 135)
        self.assertEqual(icmpv6.tgt, dst)
        ipv6 = p[IPv6]
        self.assertEqual(src, ipv6.src)

    def verify_arping_ip6_na(self, p, src, dst):
        # Expect an ICMPv6 Neighbor Advertisement (type 136) targeting dst.
        icmpv6 = p[ICMPv6ND_NA]
        self.assertEqual(icmpv6.type, 136)
        self.assertEqual(icmpv6.tgt, dst)
        ipv6 = p[IPv6]
        self.assertEqual(src, ipv6.src)

    def test_arping_ip4_arp_request_cli(self):
        """ arping IP4 arp request CLI test """
        try:
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()

            remote_ip4 = self.pg1.remote_ip4
            ping_cmd = "arping " + remote_ip4 + "pg1 repeat 5 interval 0.1"
            ret = self.vapi.cli(ping_cmd)
            self.logger.info(ret)

            ping_cmd = "arping " + remote_ip4 + "pg1"
            ret = self.vapi.cli(ping_cmd)
            self.logger.info(ret)

            # 5 repeats + 1 single-shot = 6 expected requests.
            out = self.pg1.get_capture(6)
            for p in out:
                self.verify_arping_request(p, self.pg1.local_ip4,
                                           self.pg1.remote_ip4)
        finally:
            self.vapi.cli("show error")

    def test_arping_ip4_garp_cli(self):
        """ arping ip4 gratuitous arp CLI test """
        try:
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()

            ping_cmd = ("arping gratuitous" + self.pg1.local_ip4 +
                        "pg1 repeat 5 interval 0.1")
            ret = self.vapi.cli(ping_cmd)
            self.logger.info(ret)

            ping_cmd = "arping gratuitous" + self.pg1.local_ip4 + "pg1"
            ret = self.vapi.cli(ping_cmd)
            self.logger.info(ret)

            # Gratuitous ARP: source and destination are both the local ip4.
            out = self.pg1.get_capture(6)
            for p in out:
                self.verify_arping_request(p, self.pg1.local_ip4,
                                           self.pg1.local_ip4)
        finally:
            self.vapi.cli("show error")

    def test_arping_ip4_arp_request_api(self):
        """ arping ip4 arp request API test """
        try:
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()

            remote_ip4 = self.pg1.remote_ip4
            ret = self.vapi.arping(address=remote_ip4,
                                   sw_if_index=self.pg1.sw_if_index,
                                   is_garp=0, repeat=5, interval=0.1)
            self.logger.info(ret)

            ret = self.vapi.arping(address=remote_ip4,
                                   sw_if_index=self.pg1.sw_if_index,
                                   is_garp=0)
            self.logger.info(ret)

            out = self.pg1.get_capture(6)
            for p in out:
                self.verify_arping_request(p, self.pg1.local_ip4,
                                           self.pg1.remote_ip4)
        finally:
            self.vapi.cli("show error")

    def test_arping_ip4_garp_api(self):
        """ arping ip4 gratuitous arp API test """
        try:
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()

            ret = self.vapi.arping(address=self.pg1.local_ip4,
                                   sw_if_index=self.pg1.sw_if_index,
                                   is_garp=1, repeat=5, interval=0.1)
            self.logger.info(ret)

            ret = self.vapi.arping(address=self.pg1.local_ip4,
                                   sw_if_index=self.pg1.sw_if_index,
                                   is_garp=1)
            self.logger.info(ret)

            out = self.pg1.get_capture(6)
            for p in out:
                self.verify_arping_request(p, self.pg1.local_ip4,
                                           self.pg1.local_ip4)
        finally:
            self.vapi.cli("show error")

    def test_arping_ip6_ns_cli(self):
        """ arping IP6 neighbor solicitation CLI test """
        try:
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()

            remote_ip6 = self.pg1.remote_ip6
            ping_cmd = "arping " + remote_ip6 + "pg1 repeat 5 interval 0.1"
            ret = self.vapi.cli(ping_cmd)
            self.logger.info(ret)

            ping_cmd = "arping " + remote_ip6 + "pg1"
            ret = self.vapi.cli(ping_cmd)
            self.logger.info(ret)

            out = self.pg1.get_capture(6)
            for p in out:
                self.verify_arping_ip6_ns(p, self.pg1.local_ip6,
                                          self.pg1.remote_ip6)
        finally:
            self.vapi.cli("show error")

    def test_arping_ip6_ns_api(self):
        """ arping ip6 neighbor solicitation API test """
        try:
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()

            remote_ip6 = self.pg1.remote_ip6
            ret = self.vapi.arping(address=remote_ip6,
                                   sw_if_index=self.pg1.sw_if_index,
                                   is_garp=0, repeat=5, interval=0.1)
            self.logger.info(ret)

            ret = self.vapi.arping(address=remote_ip6,
                                   sw_if_index=self.pg1.sw_if_index,
                                   is_garp=0)
            self.logger.info(ret)

            out = self.pg1.get_capture(6)
            for p in out:
                self.verify_arping_ip6_ns(p, self.pg1.local_ip6,
                                          self.pg1.remote_ip6)
        finally:
            self.vapi.cli("show error")

    def test_arping_ip6_na_cli(self):
        """ arping ip6 neighbor advertisement CLI test """
        try:
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()

            ping_cmd = ("arping gratuitous" + self.pg1.local_ip6 +
                        "pg1 repeat 5 interval 0.1")
            ret = self.vapi.cli(ping_cmd)
            self.logger.info(ret)

            ping_cmd = "arping gratuitous" + self.pg1.local_ip6 + "pg1"
            ret = self.vapi.cli(ping_cmd)
            self.logger.info(ret)

            out = self.pg1.get_capture(6)
            for p in out:
                self.verify_arping_ip6_na(p, self.pg1.local_ip6,
                                          self.pg1.local_ip6)
        finally:
            self.vapi.cli("show error")

    def test_arping_ip6_na_api(self):
        """ arping ip6 neighbor advertisement API test """
        try:
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()

            ret = self.vapi.arping(address=self.pg1.local_ip6,
                                   sw_if_index=self.pg1.sw_if_index,
                                   is_garp=1, repeat=5, interval=0.1)
            self.logger.info(ret)

            ret = self.vapi.arping(address=self.pg1.local_ip6,
                                   sw_if_index=self.pg1.sw_if_index,
                                   is_garp=1)
            self.logger.info(ret)

            out = self.pg1.get_capture(6)
            for p in out:
                self.verify_arping_ip6_na(p, self.pg1.local_ip6,
                                          self.pg1.local_ip6)
        finally:
            self.vapi.cli("show error")
if __name__ == '__main__':
    # Bug fix: this file imports neither ``unittest`` nor ``VppTestRunner``
    # at the top (only VppTestCase is imported from framework), so running
    # the module directly raised NameError. Import them locally here.
    import unittest
    from framework import VppTestRunner
    unittest.main(testRunner=VppTestRunner)
| apache-2.0 |
OaklandPeters/dist-map-reduce | endgame/indexes/shared.py | 1 | 2001 | from __future__ import absolute_import
import os
import json
import collections
import itertools
__all__ = [
'dirpath_to_confpath',
'confpath_to_dirpath',
'directory_to_config'
]
def dirpath_to_confpath(dirpath):
    """Map a directory path to its sibling JSON config path.

    A single trailing separator is dropped before ``.json`` is appended.
    """
    trimmed = dirpath[:-1] if dirpath[-1] == os.sep else dirpath
    return trimmed + ".json"
def confpath_to_dirpath(confpath):
    """Strip the extension from *confpath*, yielding the directory path."""
    root, _ext = os.path.splitext(confpath)
    return root
def pathsequence(fullpath):
    """Return *fullpath* split into a list of components on ``os.sep``."""
    return fullpath.split(os.sep)
def directory_to_config(dirpath, **keywords):
    """Write a JSON configuration file describing *dirpath*; return its path.

    The config file is placed on level with the directory::

        parent/
            {dirpath}/
            {dirpath}.json

    Every ``*.csv`` file inside the directory is recorded -- as a list of
    path components -- under the ``data`` key; any extra keywords are
    stored in the config as-is.
    """
    if 'data' in keywords:
        raise TypeError("Invalid keyword: 'data'")
    if not os.path.isdir(dirpath):
        raise ValueError("{0} is not an existing directory.".format(dirpath))

    # Sibling JSON path: drop one trailing separator, append ".json".
    trimmed = dirpath[:-1] if dirpath[-1] == os.sep else dirpath
    confpath = trimmed + ".json"

    # Record every CSV file as a sequence of path components.
    keywords['data'] = [
        os.path.join(dirpath, filename).split(os.sep)
        for filename in os.listdir(dirpath)
        if filename.endswith('.csv')
    ]

    with open(confpath, 'w') as config_file:
        json.dump(keywords, config_file)
    return confpath
def is_nonstringsequence(value):
    # True for sequence types other than strings.
    # NOTE(review): relies on Python 2's ``basestring`` and on
    # ``collections.Sequence``; under Python 3 this raises NameError and
    # needs ``collections.abc`` -- verify the target interpreter.
    return isinstance(value, collections.Sequence) and not isinstance(value, basestring)
def flatten(seq_of_seq):
    """Flatten one level of nesting, lazily."""
    return (item for inner in seq_of_seq for item in inner)
def query_to_url(query):
    """Render *query* as a ``find/{ips}/{start}/{end}/`` URL fragment.

    Single quotes are stripped from the stringified IP list, so a Python
    list renders as ``[a, b]`` rather than ``['a', 'b']``.
    """
    cleaned_ips = str(query.ips).replace("'", "")
    return "find/{0}/{1}/{2}/".format(
        cleaned_ips,
        query.timerange.start,
        query.timerange.end,
    )
shapiromatron/tblBuilder | src/private/scripts/reports/epi/ResultTable.py | 1 | 4586 | from textwrap import dedent
from docxUtils.reports import DOCXReport
from docxUtils.tables import TableMaker
class EpiResultTables(DOCXReport):
    """Landscape DOCX report of epidemiological results, one seven-column
    table per organ-site.
    """

    # Column widths (inches): reference / population / exposure category /
    # exposed cases / risk estimate / covariates / comments.
    COLUMN_WIDTHS = [1.0, 1.7, 0.75, 0.75, 1.0, 1.2, 2.6]

    def _build_result(self, res):
        """Render a single result as a headerless TableMaker; its cells are
        merged into the organ-site table by build_res_tbl().

        Fix: removed a dead ``txt = res["wrd_covariatesList"]`` assignment
        (the value was rebuilt directly in the run list below).
        """
        rows = 0
        tbl = TableMaker(
            self.COLUMN_WIDTHS, numHeaders=0, firstRowCaption=False, tblStyle="ntpTbl"
        )

        # The descriptive columns span one physical row per risk estimate.
        rowspan = len(res["riskEstimates"])

        # Column A: reference, study design, location, enrollment dates.
        txt = dedent(
            f"""\
            {res["descriptive"]["reference"]["name"]}
            {res["descriptive"]["studyDesign"]}
            {res["descriptive"]["location"]}
            {res["descriptive"]["enrollmentDates"]}"""
        )
        tbl.new_td_txt(rows, 0, txt, rowspan=rowspan)

        # Column B: population description; case-control studies report
        # case and control sizes separately.
        if res["descriptive"]["isCaseControl"]:
            popD = tbl.new_run(
                dedent(
                    f"""\
                    {res["descriptive"].get("eligibilityCriteria", "")}
                    Cases: {res["descriptive"].get("populationSizeCase", "")}
                    Controls: {res["descriptive"].get("populationSizeControl", "")}"""
                )
            )
        else:
            popD = tbl.new_run(
                dedent(
                    f"""\
                    {res["descriptive"].get("eligibilityCriteria", "")}
                    {res["descriptive"].get("populationSize", "")}"""
                )
            )
        runs = [
            popD,
            tbl.new_run("Exposure assessment method: ", b=True, newline=False),
            tbl.new_run(res["descriptive"]["exposureAssessmentType"], newline=False),
        ]
        if res.get("organSite"):
            runs.insert(0, tbl.new_run(res["organSite"], b=True))
        tbl.new_td_run(rows, 1, runs, rowspan=rowspan)

        # Columns C, D, E: one row per risk estimate.
        for i, est in enumerate(res["riskEstimates"]):
            tbl.new_td_txt(rows + i, 2, est["exposureCategory"])
            tbl.new_td_txt(rows + i, 3, est["numberExposed"])
            tbl.new_td_txt(rows + i, 4, est["riskFormatted"])

        # Column F: covariates, plus the trend test when present.
        runs = [tbl.new_run(res["wrd_covariatesList"])]
        if res["hasTrendTest"]:
            runs.extend(
                [
                    tbl.new_run("Trend-test ", newline=False),
                    tbl.new_run("P", i=True, newline=False),
                    tbl.new_run(f'-value: {res["trendTest"]}', newline=False),
                ]
            )
        tbl.new_td_run(rows, 5, runs, rowspan=rowspan)

        # Column G: notes, strengths, and limitations.
        runs = [
            tbl.new_run(res["descriptive"]["wrd_notes"]),
            tbl.new_run("Strengths:", b=True),
            tbl.new_run(res["descriptive"]["strengths"]),
            tbl.new_run("Limitations:", b=True),
            tbl.new_run(res["descriptive"]["limitations"], newline=False),
        ]
        tbl.new_td_run(rows, 6, runs, rowspan=rowspan)

        return tbl

    def build_res_tbl(self, caption, results):
        """Write one captioned organ-site table (two header rows) and append
        each result's rendered cells to it.
        """
        tbl = TableMaker(self.COLUMN_WIDTHS, numHeaders=2, tblStyle="ntpTbl")

        # write title
        tbl.new_th(0, 0, caption, colspan=7)

        # write header
        tbl.new_th(1, 0, "Reference, study-design, location, and year")
        tbl.new_th(1, 1, "Population description & exposure assessment method")
        tbl.new_th(1, 2, "Exposure category or level")
        tbl.new_th(1, 3, "Exposed cases/deaths")
        tbl.new_th(1, 4, "Risk estimate\n(95% CI)")
        tbl.new_th(1, 5, "Co-variates controlled")
        tbl.new_th(1, 6, "Comments, strengths, and weaknesses")

        docx_tbl = tbl.render(self.doc)

        # write additional rows, splicing each result's cells into the table
        for res in results:
            inner_tbl = self._build_result(res)
            docx_tbl_inner = inner_tbl.render(self.doc)
            docx_tbl._cells.extend(docx_tbl_inner._cells)

    def create_content(self):
        """Compose the document: title, subtitle, then one table per organ-site."""
        doc = self.doc
        d = self.context

        self.setLandscape()

        # title
        txt = (
            f'{d["tables"][0]["volumeNumber"]} {d["tables"][0]["monographAgent"]}:'
            " Results by organ-site"
        )
        p = doc.paragraphs[0]
        p.text = txt
        p.style = "Title"
        doc.add_paragraph(d["tables"][0]["name"])

        # build table for each organ-site
        for organSite in sorted(d["organSites"], key=lambda v: v["organSite"]):
            txt = f'Table X: {organSite["organSite"]}'
            self.build_res_tbl(txt, organSite["results"])
            self.doc.add_page_break()

    def get_template_fn(self):
        """Name of the DOCX template the report is rendered into."""
        return "base.docx"
| mit |
pgmillon/ansible | lib/ansible/modules/cloud/google/gcp_compute_region_disk.py | 2 | 22530 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_region_disk
description:
- Persistent disks are durable storage devices that function similarly to the physical
disks in a desktop or a server. Compute Engine manages the hardware behind these
devices to ensure data redundancy and optimize performance for you. Persistent disks
are available as either standard hard disk drives (HDD) or solid-state drives (SSD).
- Persistent disks are located independently from your virtual machine instances,
so you can detach or move persistent disks to keep your data even after you delete
your instances. Persistent disk performance scales automatically with size, so you
can resize your existing persistent disks or add more persistent disks to an instance
to meet your performance and storage space requirements.
- Add a persistent disk to your instance when you need reliable and affordable storage
with consistent performance characteristics.
short_description: Creates a GCP RegionDisk
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
labels:
description:
- Labels to apply to this disk. A list of key->value pairs.
required: false
licenses:
description:
- Any applicable publicly visible licenses.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
size_gb:
description:
- Size of the persistent disk, specified in GB. You can specify this field when
creating a persistent disk using the sourceImage or sourceSnapshot parameter,
or specify it alone to create an empty persistent disk.
- If you specify this field along with sourceImage or sourceSnapshot, the value
of sizeGb must not be less than the size of the sourceImage or the size of the
snapshot.
required: false
physical_block_size_bytes:
description:
- Physical block size of the persistent disk, in bytes. If not present in a request,
a default value is used. Currently supported sizes are 4096 and 16384, other
sizes may be added in the future.
- If an unsupported value is requested, the error message will list the supported
values for the caller's project.
required: false
replica_zones:
description:
- URLs of the zones where the disk should be replicated to.
required: true
type:
description:
- URL of the disk type resource describing which disk type to use to create the
disk. Provide this when creating the disk.
required: false
region:
description:
- A reference to the region where the disk resides.
required: true
disk_encryption_key:
description:
- Encrypts the disk using a customer-supplied encryption key.
- After you encrypt a disk with a customer-supplied key, you must provide the
same key if you use the disk later (e.g. to create a disk snapshot or an image,
or to attach the disk to a virtual machine).
- Customer-supplied encryption keys do not protect access to metadata of the disk.
- If you do not provide an encryption key when creating the disk, then the disk
will be encrypted using an automatically generated key and you do not need to
provide a key to use the disk later.
required: false
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
source_snapshot:
description:
- The source snapshot used to create this disk. You can provide this as a partial
or full URL to the resource.
- 'This field represents a link to a Snapshot resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_snapshot task and then set this source_snapshot field to "{{
name-of-resource }}"'
required: false
source_snapshot_encryption_key:
description:
- The customer-supplied encryption key of the source snapshot. Required if the
source snapshot is protected by a customer-supplied encryption key.
required: false
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/beta/regionDisks)'
- 'Adding or Resizing Regional Persistent Disks: U(https://cloud.google.com/compute/docs/disks/regional-persistent-disk)'
'''
EXAMPLES = '''
- name: create a region disk
gcp_compute_region_disk:
name: test_object
size_gb: 50
disk_encryption_key:
raw_key: SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=
region: us-central1
replica_zones:
- https://www.googleapis.com/compute/v1/projects/google.com:graphite-playground/zones/us-central1-a
- https://www.googleapis.com/compute/v1/projects/google.com:graphite-playground/zones/us-central1-b
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
labelFingerprint:
description:
- The fingerprint used for optimistic locking of this resource. Used internally
during updates.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
lastAttachTimestamp:
description:
- Last attach timestamp in RFC3339 text format.
returned: success
type: str
lastDetachTimestamp:
description:
- Last dettach timestamp in RFC3339 text format.
returned: success
type: str
labels:
description:
- Labels to apply to this disk. A list of key->value pairs.
returned: success
type: dict
licenses:
description:
- Any applicable publicly visible licenses.
returned: success
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
sizeGb:
description:
- Size of the persistent disk, specified in GB. You can specify this field when
creating a persistent disk using the sourceImage or sourceSnapshot parameter,
or specify it alone to create an empty persistent disk.
- If you specify this field along with sourceImage or sourceSnapshot, the value
of sizeGb must not be less than the size of the sourceImage or the size of the
snapshot.
returned: success
type: int
users:
description:
- 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance
.'
returned: success
type: list
physicalBlockSizeBytes:
description:
- Physical block size of the persistent disk, in bytes. If not present in a request,
a default value is used. Currently supported sizes are 4096 and 16384, other sizes
may be added in the future.
- If an unsupported value is requested, the error message will list the supported
values for the caller's project.
returned: success
type: int
replicaZones:
description:
- URLs of the zones where the disk should be replicated to.
returned: success
type: list
type:
description:
- URL of the disk type resource describing which disk type to use to create the
disk. Provide this when creating the disk.
returned: success
type: str
region:
description:
- A reference to the region where the disk resides.
returned: success
type: str
diskEncryptionKey:
description:
- Encrypts the disk using a customer-supplied encryption key.
- After you encrypt a disk with a customer-supplied key, you must provide the same
key if you use the disk later (e.g. to create a disk snapshot or an image, or
to attach the disk to a virtual machine).
- Customer-supplied encryption keys do not protect access to metadata of the disk.
- If you do not provide an encryption key when creating the disk, then the disk
will be encrypted using an automatically generated key and you do not need to
provide a key to use the disk later.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceSnapshot:
description:
- The source snapshot used to create this disk. You can provide this as a partial
or full URL to the resource.
returned: success
type: dict
sourceSnapshotEncryptionKey:
description:
- The customer-supplied encryption key of the source snapshot. Required if the source
snapshot is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceSnapshotId:
description:
- The unique ID of the snapshot used to create this disk. This value identifies
the exact snapshot that was used to create this persistent disk. For example,
if you created the persistent disk from a snapshot that was later deleted and
recreated under the same name, the source snapshot ID would identify the exact
version of the snapshot that was used.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
    """Ansible entry point: converge a GCE regional disk to the requested state.

    Reads the module arguments, fetches the current remote resource, then
    creates, updates, or deletes the disk so it matches ``state``.  Exits via
    ``module.exit_json`` with the final resource dict plus a ``changed`` flag.
    """
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            description=dict(type='str'),
            labels=dict(type='dict'),
            licenses=dict(type='list', elements='str'),
            name=dict(required=True, type='str'),
            size_gb=dict(type='int'),
            physical_block_size_bytes=dict(type='int'),
            replica_zones=dict(required=True, type='list', elements='str'),
            type=dict(type='str'),
            region=dict(required=True, type='str'),
            disk_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
            source_snapshot=dict(type='dict'),
            source_snapshot_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
        )
    )
    # Default to the full compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
    state = module.params['state']
    kind = 'compute#disk'
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False
    if fetch:
        # Resource exists remotely: update it in place or delete it.
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind, fetch)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        # Resource is absent remotely: create it if requested.
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}
    fetch.update({'changed': changed})
    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the serialized resource to *link* and wait for the async op to finish."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
    """Apply in-place field updates, then re-fetch and return the resource."""
    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
    return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
    """Apply the subset of changes GCE supports without recreating the disk.

    Only labels and size can be changed in place; each goes through its own
    dedicated sub-resource endpoint.
    """
    if response.get('labels') != request.get('labels'):
        label_fingerprint_update(module, request, response)
    if response.get('sizeGb') != request.get('sizeGb'):
        size_gb_update(module, request, response)
def label_fingerprint_update(module, request, response):
    """POST the desired labels to the disk's ``setLabels`` endpoint.

    The current labelFingerprint from the live resource must be echoed back
    for optimistic-concurrency control.
    """
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/disks/{name}/setLabels"]).format(**module.params),
        {u'labelFingerprint': response.get('labelFingerprint'), u'labels': module.params.get('labels')},
    )
def size_gb_update(module, request, response):
    """POST the desired size to the disk's ``resize`` endpoint (grow only, per GCE)."""
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/disks/{name}/resize"]).format(**module.params),
        {u'sizeGb': module.params.get('size_gb')},
    )
def delete(module, link, kind):
    """DELETE the resource at *link* and wait for the async operation to finish."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
    """Serialize the module parameters into a GCE disk insert/update payload.

    Keys use the API's camelCase names; nested encryption keys are serialized
    by their helper classes.  Falsy values are dropped from the payload,
    except an explicit ``False`` which is preserved.
    """
    request = {
        u'kind': 'compute#disk',
        u'diskEncryptionKey': RegionDiskDiskencryptionkey(module.params.get('disk_encryption_key', {}), module).to_request(),
        u'sourceSnapshotEncryptionKey': RegionDiskSourcesnapshotencryptionkey(module.params.get('source_snapshot_encryption_key', {}), module).to_request(),
        u'description': module.params.get('description'),
        u'labels': module.params.get('labels'),
        u'licenses': module.params.get('licenses'),
        u'name': module.params.get('name'),
        u'sizeGb': module.params.get('size_gb'),
        u'physicalBlockSizeBytes': module.params.get('physical_block_size_bytes'),
        u'replicaZones': module.params.get('replica_zones'),
        u'type': region_disk_type_selflink(module.params.get('type'), module.params),
    }
    return_vals = {}
    for k, v in request.items():
        # Keep truthy values and explicit False; drop None/empty placeholders.
        if v or v is False:
            return_vals[k] = v
    return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET *link* and decode it; returns None on 404 when *allow_not_found*."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
    """Return the canonical GCE self-link URL of the region disk for *module*."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{name}"
    return template.format(**module.params)
def collection(module):
    """Return the GCE collection URL under which region disks are created."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks"
    return template.format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an HTTP *response* into a dict, tolerating 404/204 as None.

    Fails the module on invalid JSON or when the decoded body carries a GCE
    error payload under ``error.errors``.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None
    # If no content, return nothing.
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # Older Pythons raise ValueError instead of JSONDecodeError.
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
def is_different(module, response):
    """Return True when the live resource differs from the requested one.

    Compares only the keys the two dicts have in common, so output-only
    response fields and unset request fields never trigger a spurious diff.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v
    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v
    # GcpRequest implements GCP-aware equality (e.g. unordered lists).
    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Strip an API response down to the key set used for request comparison.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Map a GCE disk API response onto the request-style key set."""
    passthrough_keys = (
        u'labelFingerprint',
        u'creationTimestamp',
        u'description',
        u'id',
        u'lastAttachTimestamp',
        u'lastDetachTimestamp',
        u'labels',
        u'licenses',
        u'sizeGb',
        u'users',
        u'physicalBlockSizeBytes',
        u'replicaZones',
        u'type',
    )
    hashed = {key: response.get(key) for key in passthrough_keys}
    # 'name' always reflects the module invocation, not the response payload.
    hashed[u'name'] = module.params.get('name')
    return hashed
def zone_selflink(name, params):
    """Expand a bare zone name into a full GCE zone self-link.

    Already-qualified URLs pass through unchanged; ``None`` yields ``None``.
    """
    if name is None:
        return None
    qualified = r"https://www.googleapis.com/compute/v1/projects/.*/zones/.*"
    if re.match(qualified, name):
        return name
    prefix = "https://www.googleapis.com/compute/v1/projects/{project}/zones/".format(**params)
    return prefix + name
def region_disk_type_selflink(name, params):
    """Expand a bare disk-type name into a full regional diskTypes self-link.

    Already-qualified URLs pass through unchanged; ``None`` yields ``None``.
    """
    if name is None:
        return None
    qualified = r"https://www.googleapis.com/compute/v1/projects/.*/regions/.*/diskTypes/.*"
    if re.match(qualified, name):
        return name
    prefix = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/diskTypes/".format(**params)
    return prefix + name
def async_op_url(module, extra_data=None):
    """Build the regional-operations URL used to poll an async operation.

    Substitutions come from *extra_data* (typically the operation id),
    with the module parameters taking precedence on key collisions.
    """
    template = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
    substitutions = dict(extra_data or {})
    substitutions.update(module.params)
    return template.format(**substitutions)
def wait_for_operation(module, response):
    """Block until the GCE operation in *response* completes, then fetch its target.

    Returns {} when the operation response decodes to nothing.
    """
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    # 'targetLink' points at the disk the operation acted upon.
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#disk')
def wait_for_completion(status, op_result, module):
    """Poll the operation URL once per second until its status is DONE.

    Fails the module as soon as the operation reports errors.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module if *response* carries errors at *err_path*."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)
class RegionDiskDiskencryptionkey(object):
    """Serializer for the disk's ``diskEncryptionKey`` sub-object."""

    def __init__(self, request, module):
        self.module = module
        # Normalise a missing/None payload to an empty dict.
        self.request = request if request else {}

    def to_request(self):
        # Ansible option name (snake_case) -> API field name (camelCase).
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        # Response payloads already use the camelCase field name.
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
class RegionDiskSourcesnapshotencryptionkey(object):
    """Serializer for the disk's ``sourceSnapshotEncryptionKey`` sub-object."""

    def __init__(self, request, module):
        self.module = module
        # Normalise a missing/None payload to an empty dict.
        self.request = request if request else {}

    def to_request(self):
        # Ansible option name (snake_case) -> API field name (camelCase).
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        # Response payloads already use the camelCase field name.
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
# license: gpl-3.0
# source: sdoran35/hate-to-hugs venv/lib/python3.6/site-packages/nltk/tokenize/toktok.py
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Python port of the tok-tok.pl tokenizer.
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Jon Dehdari
# Contributors: Liling Tan, Selcuk Ayguney, ikegami, Martijn Pieters
#
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
"""
The tok-tok tokenizer is a simple, general tokenizer, where the input has one
sentence per line; thus only final period is tokenized.
Tok-tok has been tested on, and gives reasonably good results for English,
Persian, Russian, Czech, French, German, Vietnamese, Tajik, and a few others.
The input should be in UTF-8 encoding.
Reference:
Jon Dehdari. 2014. A Neurophysiologically-Inspired Statistical Language
Model (Doctoral dissertation). Columbus, OH, USA: The Ohio State University.
"""
import re
from six import text_type
from nltk.tokenize.api import TokenizerI
class ToktokTokenizer(TokenizerI):
    """
    This is a Python port of the tok-tok.pl from
    https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl

    >>> toktok = ToktokTokenizer()
    >>> text = u'Is 9.5 or 525,600 my favorite number?'
    >>> print (toktok.tokenize(text, return_str=True))
    Is 9.5 or 525,600 my favorite number ?
    >>> text = u'The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things'
    >>> print (toktok.tokenize(text, return_str=True))
    The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things
    >>> text = u'\xa1This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf'
    >>> expected = u'\xa1 This , is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf'
    >>> assert toktok.tokenize(text, return_str=True) == expected
    >>> toktok.tokenize(text) == [u'\xa1', u'This', u',', u'is', u'a', u'sentence', u'with', u'weird', u'\xbb', u'symbols', u'\u2026', u'appearing', u'everywhere', u'\xbf']
    True
    """
    # Replace non-breaking spaces with normal spaces.
    NON_BREAKING = re.compile(u"\u00A0"), " "
    # Pad some funky punctuation.
    FUNKY_PUNCT_1 = re.compile(u'([،;؛¿!"\])}»›”؟¡%٪°±©®।॥…])'), r" \1 "
    # Pad more funky punctuation.
    FUNKY_PUNCT_2 = re.compile(u'([({\[“‘„‚«‹「『])'), r" \1 "
    # Pad En dash and em dash
    EN_EM_DASHES = re.compile(u'([–—])'), r" \1 "
    # Replace problematic character with numeric character reference.
    AMPERCENT = re.compile('& '), '&amp; '
    TAB = re.compile('\t'), ' &#9; '
    PIPE = re.compile('\|'), ' &#124; '
    # Pad numbers with commas to keep them from further tokenization.
    COMMA_IN_NUM = re.compile(r'(?<!,)([,،])(?![,\d])'), r' \1 '
    # Just pad problematic (often neurotic) hyphen/single quote, etc.
    PROB_SINGLE_QUOTES = re.compile(r"(['’`])"), r' \1 '
    # Group ` ` stupid quotes ' ' into a single token.
    STUPID_QUOTES_1 = re.compile(r" ` ` "), r" `` "
    STUPID_QUOTES_2 = re.compile(r" ' ' "), r" '' "
    # Don't tokenize period unless it ends the line and that it isn't
    # preceded by another period, e.g.
    # "something ..." -> "something ..."
    # "something." -> "something ."
    FINAL_PERIOD_1 = re.compile(r"(?<!\.)\.$"), r" ."
    # Don't tokenize period unless it ends the line eg.
    # " ... stuff." -> "... stuff ."
    FINAL_PERIOD_2 = re.compile(r"""(?<!\.)\.\s*(["'’»›”]) *$"""), r" . \1"
    # Treat continuous commas as fake German,Czech, etc.: „
    MULTI_COMMAS = re.compile(r'(,{2,})'), r' \1 '
    # Treat continuous dashes as fake en-dash, etc.
    MULTI_DASHES = re.compile(r'(-{2,})'), r' \1 '
    # Treat multiple periods as a thing (eg. ellipsis)
    MULTI_DOTS = re.compile(r'(\.{2,})'), r' \1 '
    # This is the \p{Open_Punctuation} from Perl's perluniprops
    # see http://perldoc.perl.org/perluniprops.html
    OPEN_PUNCT = text_type(u'([{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d'
                           u'\u208d\u2329\u2768\u276a\u276c\u276e\u2770\u2772'
                           u'\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983'
                           u'\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993'
                           u'\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26'
                           u'\u2e28\u3008\u300a\u300c\u300e\u3010\u3014\u3016'
                           u'\u3018\u301a\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39'
                           u'\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b'
                           u'\ufe5d\uff08\uff3b\uff5b\uff5f\uff62')
    # This is the \p{Close_Punctuation} from Perl's perluniprops
    CLOSE_PUNCT = text_type(u')]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u232a'
                            u'\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6'
                            u'\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988'
                            u'\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998'
                            u'\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009'
                            u'\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b'
                            u'\u301e\u301f\ufd3f\ufe18\ufe36\ufe38\ufe3a\ufe3c'
                            u'\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e'
                            u'\uff09\uff3d\uff5d\uff60\uff63')
    # This is the \p{Currency_Symbol} from Perl's perluniprops
    # (the original comment repeated "Close_Punctuation" by mistake).
    CURRENCY_SYM = text_type(u'$\xa2\xa3\xa4\xa5\u058f\u060b\u09f2\u09f3\u09fb'
                             u'\u0af1\u0bf9\u0e3f\u17db\u20a0\u20a1\u20a2\u20a3'
                             u'\u20a4\u20a5\u20a6\u20a7\u20a8\u20a9\u20aa\u20ab'
                             u'\u20ac\u20ad\u20ae\u20af\u20b0\u20b1\u20b2\u20b3'
                             u'\u20b4\u20b5\u20b6\u20b7\u20b8\u20b9\u20ba\ua838'
                             u'\ufdfc\ufe69\uff04\uffe0\uffe1\uffe5\uffe6')
    # Pad spaces after opening punctuations.
    OPEN_PUNCT_RE = re.compile(u'([{}])'.format(OPEN_PUNCT)), r'\1 '
    # Pad spaces before closing punctuations.
    CLOSE_PUNCT_RE = re.compile(u'([{}])'.format(CLOSE_PUNCT)), r'\1 '
    # Pad spaces after currency symbols.
    CURRENCY_SYM_RE = re.compile(u'([{}])'.format(CURRENCY_SYM)), r'\1 '
    # Use for tokenizing URL-unfriendly characters: [:/?#]
    URL_FOE_1 = re.compile(r':(?!//)'), r' : '  # in perl s{:(?!//)}{ : }g;
    URL_FOE_2 = re.compile(r'\?(?!\S)'), r' ? '  # in perl s{\?(?!\S)}{ ? }g;
    # in perl: m{://} or m{\S+\.\S+/\S+} or s{/}{ / }g;
    URL_FOE_3 = re.compile(r'(:\/\/)[\S+\.\S+\/\S+][\/]'), ' / '
    URL_FOE_4 = re.compile(r' /'), r' / '  # s{ /}{ / }g;
    # Left/Right strip, i.e. remove heading/trailing spaces.
    # These strip regexes should NOT be used,
    # instead use str.lstrip(), str.rstrip() or str.strip()
    # (They are kept for reference purposes to the original toktok.pl code)
    LSTRIP = re.compile(r'^ +'), ''
    RSTRIP = re.compile(r'\s+$'), '\n'
    # Merge multiple spaces.
    ONE_SPACE = re.compile(r' {2,}'), ' '
    # NOTE(review): FINAL_PERIOD_2 appears twice below, mirroring the original
    # tok-tok.pl ordering; the second pass is effectively idempotent because
    # ONE_SPACE collapses any extra padding afterwards.
    TOKTOK_REGEXES = [NON_BREAKING, FUNKY_PUNCT_1,
                      URL_FOE_1, URL_FOE_2, URL_FOE_3, URL_FOE_4,
                      AMPERCENT, TAB, PIPE,
                      OPEN_PUNCT_RE, CLOSE_PUNCT_RE,
                      MULTI_COMMAS, COMMA_IN_NUM, FINAL_PERIOD_2,
                      PROB_SINGLE_QUOTES, STUPID_QUOTES_1, STUPID_QUOTES_2,
                      CURRENCY_SYM_RE, EN_EM_DASHES, MULTI_DASHES, MULTI_DOTS,
                      FINAL_PERIOD_1, FINAL_PERIOD_2, ONE_SPACE]

    def tokenize(self, text, return_str=False):
        """Apply each (pattern, replacement) pair in order, then split on spaces.

        Returns the padded string itself when *return_str* is true.
        """
        text = text_type(text)  # Converts input string into unicode.
        for regexp, substitution in self.TOKTOK_REGEXES:
            text = regexp.sub(substitution, text)
        # Finally, strips heading and trailing spaces
        # and converts output string into unicode.
        text = text_type(text.strip())
        # Bug fix: the original final line carried stray dataset residue
        # ("| mit |") that made it a syntax error.
        return text if return_str else text.split()
olemis/brython | www/src/Lib/test/test_importlib/extension/test_finder.py | 26 | 1223 | from importlib import machinery
from .. import abc
from . import util
import unittest
class FinderTests(abc.FinderTests):
    """Test the finder for extension modules."""
    def find_module(self, fullname):
        # Build a FileFinder rooted at the test extension directory that only
        # recognises compiled-extension suffixes.
        importer = machinery.FileFinder(util.PATH,
                                        (machinery.ExtensionFileLoader,
                                         machinery.EXTENSION_SUFFIXES))
        return importer.find_module(fullname)
    def test_module(self):
        # A top-level extension module should be discoverable by name.
        self.assertTrue(self.find_module(util.NAME))
    def test_package(self):
        # No extension module as an __init__ available for testing.
        pass
    def test_module_in_package(self):
        # No extension module in a package available for testing.
        pass
    def test_package_in_package(self):
        # No extension module as an __init__ available for testing.
        pass
    def test_package_over_module(self):
        # Extension modules cannot be an __init__ for a package.
        pass
    def test_failure(self):
        # A nonsense name must not resolve to any loader.
        self.assertIsNone(self.find_module('asdfjkl;'))
def test_main():
    """Run the finder test suite under the stdlib test harness."""
    from test.support import run_unittest
    run_unittest(FinderTests)
# Allow running this test file directly.
if __name__ == '__main__':
    test_main()
# license: bsd-3-clause
Limags/MissionPlanner | Lib/site-packages/scipy/linalg/basic.py | 53 | 17691 | #
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'inv', 'det', 'lstsq', 'pinv', 'pinv2']
from numpy import asarray, zeros, sum, conjugate, dot, transpose, \
asarray_chkfinite, single
import numpy
from flinalg import get_flinalg_funcs
from lapack import get_lapack_funcs
from misc import LinAlgError, _datacopied
from scipy.linalg import calc_lwork
from funcinfo import get_func_info
import decomp_svd
# Linear equations
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False,
          debug=False):
    """Solve the equation a x = b for x

    Parameters
    ----------
    a : array, shape (M, M)
    b : array, shape (M,) or (M, N)
    sym_pos : boolean
        Assume a is symmetric and positive definite
    lower : boolean
        Use only data contained in the lower triangle of a, if sym_pos is true.
        Default is to use upper triangle.
    overwrite_a : boolean
        Allow overwriting data in a (may enhance performance)
    overwrite_b : boolean
        Allow overwriting data in b (may enhance performance)

    Returns
    -------
    x : array, shape (M,) or (M, N) depending on b
        Solution to the system a x = b

    Raises LinAlgError if a is singular
    """
    # asarray_chkfinite also rejects NaN/Inf entries up front.
    a1, b1 = map(asarray_chkfinite,(a,b))
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    if a1.shape[0] != b1.shape[0]:
        raise ValueError('incompatible dimensions')
    # Safe to overwrite when the chkfinite conversion already made a copy.
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if debug:
        # NOTE: Python 2 print statements -- this module targets Python 2.
        print 'solve:overwrite_a=',overwrite_a
        print 'solve:overwrite_b=',overwrite_b
    if sym_pos:
        # Cholesky-based LAPACK solver for symmetric positive definite systems.
        posv, = get_lapack_funcs(('posv',), (a1,b1))
        c, x, info = posv(a1, b1, lower=lower,
                          overwrite_a=overwrite_a,
                          overwrite_b=overwrite_b)
    else:
        # General LU-based LAPACK solver.
        gesv, = get_lapack_funcs(('gesv',), (a1,b1))
        lu, piv, x, info = gesv(a1, b1, overwrite_a=overwrite_a,
                                overwrite_b=overwrite_b)
    # LAPACK convention: info == 0 success, > 0 singular, < 0 bad argument.
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError('illegal value in %d-th argument of internal gesv|posv'
                     % -info)
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
overwrite_b=False, debug=False):
"""Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : array, shape (M, M)
b : array, shape (M,) or (M, N)
lower : boolean
Use only data contained in the lower triangle of a.
Default is to use upper triangle.
trans : {0, 1, 2, 'N', 'T', 'C'}
Type of system to solve:
======== =========
trans system
======== =========
0 or 'N' a x = b
1 or 'T' a^T x = b
2 or 'C' a^H x = b
======== =========
unit_diagonal : boolean
If True, diagonal elements of A are assumed to be 1 and
will not be referenced.
overwrite_b : boolean
Allow overwriting data in b (may enhance performance)
Returns
-------
x : array, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If a is singular
"""
a1, b1 = map(asarray_chkfinite,(a,b))
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('incompatible dimensions')
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print 'solve:overwrite_b=',overwrite_b
trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
trtrs, = get_lapack_funcs(('trtrs',), (a1,b1))
x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
trans=trans, unitdiag=unit_diagonal)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix: resolution failed at diagonal %s" % (info-1))
raise ValueError('illegal value in %d-th argument of internal trtrs')
def solve_banded((l, u), ab, b, overwrite_ab=False, overwrite_b=False,
                 debug=False):
    """Solve the equation a x = b for x, assuming a is banded matrix.

    The matrix a is stored in ab using the matrix diagonal orded form::

        ab[u + i - j, j] == a[i,j]

    Example of ab (shape of a is (6,6), u=1, l=2)::

        *    a01  a12  a23  a34  a45
        a00  a11  a22  a33  a44  a55
        a10  a21  a32  a43  a54   *
        a20  a31  a42  a53   *    *

    Parameters
    ----------
    (l, u) : (integer, integer)
        Number of non-zero lower and upper diagonals
    ab : array, shape (l+u+1, M)
        Banded matrix
    b : array, shape (M,) or (M, K)
        Right-hand side
    overwrite_ab : boolean
        Discard data in ab (may enhance performance)
    overwrite_b : boolean
        Discard data in b (may enhance performance)

    Returns
    -------
    x : array, shape (M,) or (M, K)
        The solution to the system a x = b
    """
    # NOTE: the tuple parameter '(l, u)' is Python 2-only syntax (PEP 3113
    # removed it in Python 3).
    a1, b1 = map(asarray_chkfinite, (ab, b))
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    if l + u + 1 != a1.shape[0]:
        raise ValueError("invalid values for the number of lower and upper diagonals:"
                " l+u+1 (%d) does not equal ab.shape[0] (%d)" % (l+u+1, ab.shape[0]))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
    # gbsv requires l extra rows of workspace above the band for the LU fill-in,
    # so copy the band into a taller array (the original ab is never modified).
    a2 = zeros((2*l+u+1, a1.shape[1]), dtype=get_func_info(gbsv).dtype)
    a2[l:,:] = a1
    lu, piv, x, info = gbsv(l, u, a2, b1, overwrite_ab=True,
                            overwrite_b=overwrite_b)
    # LAPACK convention: info == 0 success, > 0 singular, < 0 bad argument.
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False):
    """Solve equation a x = b. a is Hermitian positive-definite banded matrix.

    The matrix a is stored in ab either in lower diagonal or upper
    diagonal ordered form:

        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
        ab[    i - j, j] == a[i,j]        (if lower form; i >= j)

    Example of ab (shape of a is (6,6), u=2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    ab : array, shape (u + 1, M)
        Banded matrix
    b : array, shape (M,) or (M, K)
        Right-hand side
    overwrite_ab : boolean
        Discard data in ab (may enhance performance)
    overwrite_b : boolean
        Discard data in b (may enhance performance)
    lower : boolean
        Is the matrix in the lower form. (Default is upper form)

    Returns
    -------
    x : array, shape (M,) or (M, K)
        The solution to the system a x = b
    """
    ab, b = map(asarray_chkfinite, (ab, b))
    # Validate shapes.
    if ab.shape[-1] != b.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    # Banded Cholesky solver for Hermitian positive-definite systems.
    pbsv, = get_lapack_funcs(('pbsv',), (ab, b))
    c, x, info = pbsv(ab, b, lower=lower, overwrite_ab=overwrite_ab,
                      overwrite_b=overwrite_b)
    # info > 0 means the matrix is not positive definite at that minor.
    if info > 0:
        raise LinAlgError("%d-th leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal pbsv'
                         % -info)
    return x
# matrix inversion
def inv(a, overwrite_a=False):
    """
    Compute the inverse of a matrix.

    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.

    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.

    Raises
    ------
    LinAlgError :
        If `a` is singular.
    ValueError :
        If `a` is not square, or not 2-dimensional.

    Examples
    --------
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> sp.linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    >>> np.dot(a, sp.linalg.inv(a))
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    a1 = asarray_chkfinite(a)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a1, a)
    #XXX: I found no advantage or disadvantage of using finv.
##     finv, = get_flinalg_funcs(('inv',),(a1,))
##     if finv is not None:
##         a_inv,info = finv(a1,overwrite_a=overwrite_a)
##         if info==0:
##             return a_inv
##         if info>0: raise LinAlgError, "singular matrix"
##         if info<0: raise ValueError,\
##            'illegal value in %d-th argument of internal inv.getrf|getri'%(-info)
    # LU-factorize (getrf) then invert from the factorization (getri).
    getrf, getri = get_lapack_funcs(('getrf','getri'), (a1,))
    getrf_info = get_func_info(getrf)
    getri_info = get_func_info(getri)
    #XXX: C ATLAS versions of getrf/i have rowmajor=1, this could be
    #     exploited for further optimization. But it will be probably
    #     a mess. So, a good testing site is required before trying
    #     to do that.
    if (getrf_info.module_name[:7] == 'clapack' !=
        getri_info.module_name[:7]):
        # ATLAS 3.2.1 has getrf but not getri.
        # Factor the transpose in column-major order, then transpose back.
        lu, piv, info = getrf(transpose(a1), rowmajor=0,
                              overwrite_a=overwrite_a)
        lu = transpose(lu)
    else:
        lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info == 0:
        if getri_info.module_name[:7] == 'flapack':
            lwork = calc_lwork.getri(getri_info.prefix, a1.shape[0])
            lwork = lwork[1]
            # XXX: the following line fixes curious SEGFAULT when
            # benchmarking 500x500 matrix inverse. This seems to
            # be a bug in LAPACK ?getri routine because if lwork is
            # minimal (when using lwork[0] instead of lwork[1]) then
            # all tests pass. Further investigation is required if
            # more such SEGFAULTs occur.
            lwork = int(1.01 * lwork)
            inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
        else: # clapack
            inv_a, info = getri(lu, piv, overwrite_lu=1)
    # LAPACK convention: info == 0 success, > 0 singular, < 0 bad argument.
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
### Determinant
def det(a, overwrite_a=False):
    """Compute the determinant of a matrix

    Parameters
    ----------
    a : array, shape (M, M)

    Returns
    -------
    det : float or complex
        Determinant of a

    Notes
    -----
    The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
    """
    a1 = asarray_chkfinite(a)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a1, a)
    # 'det' comes from scipy's Fortran flinalg helpers, not LAPACK directly.
    fdet, = get_flinalg_funcs(('det',), (a1,))
    a_det, info = fdet(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'det.getrf' % -info)
    return a_det
### Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False):
    """
    Compute least-squares solution to equation Ax = b.

    Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.

    Parameters
    ----------
    a : array, shape (M, N)
        Left hand side matrix (2-D array).
    b : array, shape (M,) or (M, K)
        Right hand side matrix or vector (1-D or 2-D array).
    cond : float, optional
        Cutoff for 'small' singular values; used to determine effective
        rank of a. Singular values smaller than
        ``rcond * largest_singular_value`` are considered zero.
    overwrite_a : bool, optional
        Discard data in `a` (may enhance performance). Default is False.
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance). Default is False.

    Returns
    -------
    x : array, shape (N,) or (N, K) depending on shape of b
        Least-squares solution.
    residues : ndarray, shape () or (1,) or (K,)
        Sums of residues, squared 2-norm for each column in ``b - a x``.
        If rank of matrix a is < N or > M this is an empty array.
        If b was 1-D, this is an (1,) shape array, otherwise the shape is (K,).
    rank : int
        Effective rank of matrix `a`.
    s : array, shape (min(M,N),)
        Singular values of `a`. The condition number of a is
        ``abs(s[0]/s[-1])``.

    Raises
    ------
    LinAlgError :
        If computation does not converge.

    See Also
    --------
    optimize.nnls : linear least squares with non-negativity constraint
    """
    a1, b1 = map(asarray_chkfinite, (a, b))
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    m, n = a1.shape
    if len(b1.shape) == 2:
        nrhs = b1.shape[1]
    else:
        nrhs = 1
    if m != b1.shape[0]:
        raise ValueError('incompatible dimensions')
    # SVD-based least-squares LAPACK driver.
    gelss, = get_lapack_funcs(('gelss',), (a1, b1))
    gelss_info = get_func_info(gelss)
    if n > m:
        # need to extend b matrix as it will be filled with
        # a larger solution matrix
        b2 = zeros((n, nrhs), dtype=gelss_info.dtype)
        if len(b1.shape) == 2:
            b2[:m,:] = b1
        else:
            b2[:m,0] = b1
        b1 = b2
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if gelss_info.module_name[:7] == 'flapack':
        # Query the optimal workspace size before calling the driver.
        lwork = calc_lwork.gelss(gelss_info.prefix, m, n, nrhs)[1]
        v, x, s, rank, info = gelss(a1, b1, cond=cond, lwork=lwork,
                                    overwrite_a=overwrite_a,
                                    overwrite_b=overwrite_b)
    else:
        # Only the Fortran (flapack) interface is supported here.
        raise NotImplementedError('calling gelss from %s' % get_func_info(gelss).module_name)
    if info > 0:
        raise LinAlgError("SVD did not converge in Linear Least Squares")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gelss'
                         % -info)
    resids = asarray([], dtype=x.dtype)
    if n < m:
        # Overdetermined full-rank case: the trailing rows of x hold the
        # residual components; sum their squared magnitudes per column.
        x1 = x[:n]
        if rank == n:
            resids = sum(abs(x[n:])**2, axis=0)
        x = x1
    return x, resids, rank, s
def pinv(a, cond=None, rcond=None):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using a least-squares
    solver.

    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be pseudo-inverted
    cond, rcond : float
        Cutoff for 'small' singular values in the least-squares solver.
        Singular values smaller than rcond*largest_singular_value are
        considered zero.

    Returns
    -------
    B : array, shape (N, M)

    Raises LinAlgError if computation does not converge

    Examples
    --------
    >>> from numpy import *
    >>> a = random.randn(9, 6)
    >>> B = linalg.pinv(a)
    >>> allclose(a, dot(a, dot(B, a)))
    True
    >>> allclose(B, dot(B, dot(a, B)))
    True
    """
    a = asarray_chkfinite(a)
    # Solving a X = I column-by-column yields the pseudo-inverse.
    b = numpy.identity(a.shape[0], dtype=a.dtype)
    # 'rcond' is an alias kept for numpy.linalg.pinv compatibility.
    if rcond is not None:
        cond = rcond
    return lstsq(a, b, cond=cond)[0]
def pinv2(a, cond=None, rcond=None):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using its
    singular-value decomposition and including all 'large' singular
    values.

    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be pseudo-inverted
    cond, rcond : float or None
        Cutoff for 'small' singular values.
        Singular values smaller than rcond*largest_singular_value are
        considered zero.
        If None or -1, suitable machine precision is used.

    Returns
    -------
    B : array, shape (N, M)

    Raises LinAlgError if SVD computation does not converge

    Examples
    --------
    >>> from numpy import *
    >>> a = random.randn(9, 6)
    >>> B = linalg.pinv2(a)
    >>> allclose(a, dot(a, dot(B, a)))
    True
    >>> allclose(B, dot(B, dot(a, B)))
    True
    """
    a = asarray_chkfinite(a)
    u, s, vh = decomp_svd.svd(a)
    t = u.dtype.char
    # 'rcond' is an alias kept for numpy.linalg.pinv compatibility.
    if rcond is not None:
        cond = rcond
    if cond in [None,-1]:
        # Pick a default cutoff scaled to the working precision.
        eps = numpy.finfo(float).eps
        feps = numpy.finfo(single).eps
        _array_precision = {'f': 0, 'd': 1, 'F': 0, 'D': 1}
        cond = {0: feps*1e3, 1: eps*1e6}[_array_precision[t]]
    m, n = a.shape
    cutoff = cond*numpy.maximum.reduce(s)
    # Build Sigma^+ by inverting only the singular values above the cutoff.
    psigma = zeros((m, n), t)
    for i in range(len(s)):
        if s[i] > cutoff:
            psigma[i,i] = 1.0/conjugate(s[i])
    #XXX: use lapack/blas routines for dot
    # pinv(A) = V Sigma^+ U^H, expressed via the conjugate transpose below.
    return transpose(conjugate(dot(dot(u,psigma),vh)))
# license: gpl-3.0
# source: activitycentral/ebookreader src/overlaywidget.py
from gi.repository import Gtk
from gi.repository import Gdk
from sugar3.graphics.icon import Icon
class OverlayWidget(Gtk.Window):
    """Borderless dialog window that overlays *widget_to_overlay* centred on screen."""

    def __init__(self, widget_to_overlay):
        Gtk.Window.__init__(self)
        self._box = Gtk.VBox()
        self._widget_to_overlay = widget_to_overlay
        # Undecorated, fixed-size dialog that never steals keyboard focus.
        self.set_decorated(False)
        self.set_resizable(False)
        self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
        self.set_border_width(0)
        self.props.accept_focus = False
        self._box.add(widget_to_overlay)
        self._box.show_all()
        self.add(self._box)
        self._width = 1000
        self._height = 400
        # Bug fix: _reposition() reads self._screen, which was never assigned,
        # raising AttributeError unless a subclass happened to set it in
        # setup().  Default to the current screen; subclasses may override.
        self._screen = Gdk.Screen.get_default()

    def _reposition(self):
        # Centre the window using the fixed overlay dimensions.
        x_position = (self._screen.get_width())/2 - (self._width / 2)
        y_position = (self._screen.get_height())/2 - (self._height / 2)
        self.move(x_position, y_position)

    def show_overlay_widget(self):
        """Run the subclass setup hook, centre the window, and show it."""
        self.setup()
        self._reposition()
        self.show()

    def get_overlaid_widget(self):
        """Return the container box holding the overlaid widget."""
        return self._box

    def setup(self):
        # Hook for subclasses to customise the overlay before it is shown.
        pass
| gpl-2.0 |
Maistho/CouchPotatoServer | libs/rsa/pkcs1.py | 110 | 13153 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions for PKCS#1 version 1.5 encryption and signing
This module implements certain functionality from PKCS#1 version 1.5. For a
very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
At least 8 bytes of random padding is used when encrypting a message. This makes
these methods much more secure than the ones in the ``rsa`` module.
WARNING: this module leaks information when decryption or verification fails.
The exceptions that are raised contain the Python traceback information, which
can be used to deduce where in the process the failure occurred. DO NOT PASS
SUCH INFORMATION to your users.
'''
import hashlib
import os
from rsa._compat import b
from rsa import common, transform, core, varblock
# ASN.1 DigestInfo prefixes: for each supported hash, the DER-encoded
# AlgorithmIdentifier blob that must precede the raw digest inside a
# PKCS#1 v1.5 signature block.
HASH_ASN1 = {
    'MD5': b('\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
    'SHA-1': b('\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
    'SHA-256': b('\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'),
    'SHA-384': b('\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30'),
    'SHA-512': b('\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40'),
}

# hashlib constructor for each supported algorithm name; keys must match
# HASH_ASN1 exactly, since sign()/verify() look up both tables by name.
HASH_METHODS = {
    'MD5': hashlib.md5,
    'SHA-1': hashlib.sha1,
    'SHA-256': hashlib.sha256,
    'SHA-384': hashlib.sha384,
    'SHA-512': hashlib.sha512,
}
class CryptoError(Exception):
    '''Base class for all exceptions in this module.'''


class DecryptionError(CryptoError):
    '''Raised when decryption fails.'''


class VerificationError(CryptoError):
    '''Raised when signature verification fails.'''
def _pad_for_encryption(message, target_length):
    r'''Pads the message for encryption, returning the padded message.

    The result is a full encryption block: a 00 02 marker, at least some
    non-zero random padding, a 00 separator, then the message.

    :return: 00 02 RANDOM_DATA 00 MESSAGE

    >>> block = _pad_for_encryption('hello', 16)
    >>> len(block)
    16
    >>> block[0:2]
    '\x00\x02'
    >>> block[-6:]
    '\x00hello'
    '''

    msglength = len(message)
    max_msglength = target_length - 11
    if msglength > max_msglength:
        raise OverflowError('%i bytes needed for message, but there is only'
                            ' space for %i' % (msglength, max_msglength))

    padding_length = target_length - msglength - 3

    # The padding must be non-zero.  os.urandom can return zero bytes, which
    # are stripped out, so keep drawing random data until the quota is met.
    padding = b('')
    while len(padding) < padding_length:
        shortfall = padding_length - len(padding)

        # Draw a few spare bytes so that, after stripping the zeros, a
        # single pass usually produces enough material.
        fresh = os.urandom(shortfall + 5)
        fresh = fresh.replace(b('\x00'), b(''))
        padding = padding + fresh[:shortfall]

    assert len(padding) == padding_length

    return b('').join([b('\x00\x02'),
                       padding,
                       b('\x00'),
                       message])
def _pad_for_signing(message, target_length):
    r'''Pads the message for signing, returning the padded message.

    The padding is deterministic: a repetition of FF bytes.

    :return: 00 01 PADDING 00 MESSAGE

    >>> block = _pad_for_signing('hello', 16)
    >>> len(block)
    16
    >>> block[0:2]
    '\x00\x01'
    >>> block[-6:]
    '\x00hello'
    >>> block[2:-6]
    '\xff\xff\xff\xff\xff\xff\xff\xff'
    '''

    msglength = len(message)
    max_msglength = target_length - 11
    if msglength > max_msglength:
        raise OverflowError('%i bytes needed for message, but there is only'
                            ' space for %i' % (msglength, max_msglength))

    # Three bytes go to the 00 01 marker and the 00 separator.
    padding_length = target_length - msglength - 3

    return b('').join([b('\x00\x01'),
                       padding_length * b('\xff'),
                       b('\x00'),
                       message])
def encrypt(message, pub_key):
    '''Encrypts the given message using PKCS#1 v1.5

    :param message: the message to encrypt. Must be a byte string no longer than
        ``k-11`` bytes, where ``k`` is the number of bytes needed to encode
        the ``n`` component of the public key.
    :param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
    :raise OverflowError: when the message is too large to fit in the padded
        block.

    >>> from rsa import key, common
    >>> (pub_key, priv_key) = key.newkeys(256)
    >>> message = 'hello'
    >>> crypto = encrypt(message, pub_key)

    The crypto text should be just as long as the public key 'n' component:

    >>> len(crypto) == common.byte_size(pub_key.n)
    True
    '''

    keylength = common.byte_size(pub_key.n)

    # Pad, convert to an integer, apply textbook RSA, and convert the
    # result back into a block exactly as wide as the modulus.
    padded = _pad_for_encryption(message, keylength)
    payload = transform.bytes2int(padded)

    return transform.int2bytes(
        core.encrypt_int(payload, pub_key.e, pub_key.n),
        keylength)
def decrypt(crypto, priv_key):
    r'''Decrypts the given message using PKCS#1 v1.5

    The decryption is considered 'failed' when the resulting cleartext doesn't
    start with the bytes 00 02, or when the 00 byte between the padding and
    the message cannot be found.

    :param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
    :param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
    :raise DecryptionError: when the decryption fails. No details are given as
        to why the code thinks the decryption fails, as this would leak
        information about the private key.

    >>> import rsa
    >>> (pub_key, priv_key) = rsa.newkeys(256)

    It works with strings:

    >>> crypto = encrypt('hello', pub_key)
    >>> decrypt(crypto, priv_key)
    'hello'

    And with binary data:

    >>> crypto = encrypt('\x00\x00\x00\x00\x01', pub_key)
    >>> decrypt(crypto, priv_key)
    '\x00\x00\x00\x00\x01'

    Altering the encrypted information will *likely* cause a
    :py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
    :py:func:`rsa.sign`.

    .. warning::

        Never display the stack trace of a
        :py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
        code the exception occurred, and thus leaks information about the key.
        It's only a tiny bit of information, but every bit makes cracking the
        keys easier.

    >>> crypto = encrypt('hello', pub_key)
    >>> crypto = crypto[0:5] + 'X' + crypto[6:] # change a byte
    >>> decrypt(crypto, priv_key)
    Traceback (most recent call last):
    ...
    DecryptionError: Decryption failed
    '''

    # Undo the RSA operation: bytes -> int, modular exponentiation with the
    # private exponent, then back to a block as wide as the modulus.
    blocksize = common.byte_size(priv_key.n)
    encrypted = transform.bytes2int(crypto)
    decrypted = core.decrypt_int(encrypted, priv_key.d, priv_key.n)
    cleartext = transform.int2bytes(decrypted, blocksize)

    # If we can't find the cleartext marker, decryption failed.
    # NOTE(review): the minimum-8-bytes-of-padding property of PKCS#1 v1.5 is
    # not verified here, and the early-exit checks are not constant-time —
    # confirm whether padding-oracle hardening is required for this use.
    if cleartext[0:2] != b('\x00\x02'):
        raise DecryptionError('Decryption failed')

    # Find the 00 separator between the padding and the message
    try:
        sep_idx = cleartext.index(b('\x00'), 2)
    except ValueError:
        raise DecryptionError('Decryption failed')

    # Everything after the separator is the original message.
    return cleartext[sep_idx+1:]
def sign(message, priv_key, hash):
    '''Signs the message with the private key.

    Hashes the message, then signs the hash with the given key. This is known
    as a "detached signature", because the message itself isn't altered.

    :param message: the message to sign. Can be an 8-bit string or a file-like
        object. If ``message`` has a ``read()`` method, it is assumed to be a
        file-like object.
    :param priv_key: the :py:class:`rsa.PrivateKey` to sign with
    :param hash: the hash method used on the message. Use 'MD5', 'SHA-1',
        'SHA-256', 'SHA-384' or 'SHA-512'.
    :return: a message signature block.
    :raise OverflowError: if the private key is too small to contain the
        requested hash.
    '''

    # NOTE: the parameter is called `hash` (shadowing the builtin) for
    # API compatibility; callers may pass it by keyword.
    if hash not in HASH_ASN1:
        raise ValueError('Invalid hash method: %s' % hash)
    asn1code = HASH_ASN1[hash]

    # Build the DigestInfo structure: ASN.1 algorithm prefix + raw digest.
    digest = _hash(message, hash)
    cleartext = asn1code + digest

    # Pad to the width of the modulus and apply RSA with the private exponent.
    keylength = common.byte_size(priv_key.n)
    padded = _pad_for_signing(cleartext, keylength)

    payload = transform.bytes2int(padded)
    signed = core.encrypt_int(payload, priv_key.d, priv_key.n)

    return transform.int2bytes(signed, keylength)
def verify(message, signature, pub_key):
    '''Verifies that the signature matches the message.

    The hash method is detected automatically from the signature.

    :param message: the signed message. Can be an 8-bit string or a file-like
        object. If ``message`` has a ``read()`` method, it is assumed to be a
        file-like object.
    :param signature: the signature block, as created with :py:func:`rsa.sign`.
    :param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
    :raise VerificationError: when the signature doesn't match the message.

    .. warning::

        Never display the stack trace of a
        :py:class:`rsa.pkcs1.VerificationError` exception. It shows where in
        the code the exception occurred, and thus leaks information about the
        key. It's only a tiny bit of information, but every bit makes cracking
        the keys easier.
    '''

    # Undo the RSA operation with the *public* exponent to recover the
    # padded DigestInfo block the signer produced.
    blocksize = common.byte_size(pub_key.n)
    encrypted = transform.bytes2int(signature)
    decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
    clearsig = transform.int2bytes(decrypted, blocksize)

    # If we can't find the signature marker, verification failed.
    if clearsig[0:2] != b('\x00\x01'):
        raise VerificationError('Verification failed')

    # Find the 00 separator between the padding and the payload
    try:
        sep_idx = clearsig.index(b('\x00'), 2)
    except ValueError:
        raise VerificationError('Verification failed')

    # Get the hash and the hash method
    (method_name, signature_hash) = _find_method_hash(clearsig[sep_idx+1:])
    message_hash = _hash(message, method_name)

    # Compare the real hash to the hash in the signature
    # NOTE(review): this is an ordinary (short-circuiting) comparison, not a
    # constant-time one — confirm whether timing side channels matter here.
    if message_hash != signature_hash:
        raise VerificationError('Verification failed')
def _hash(message, method_name):
    '''Returns the message digest.

    :param message: the signed message. Can be an 8-bit string or a file-like
        object. If ``message`` has a ``read()`` method, it is assumed to be a
        file-like object.
    :param method_name: the hash method, must be a key of
        :py:const:`HASH_METHODS`.
    '''

    if method_name not in HASH_METHODS:
        raise ValueError('Invalid hash method: %s' % method_name)

    hasher = HASH_METHODS[method_name]()

    if hasattr(message, 'read') and hasattr(message.read, '__call__'):
        # File-like object: feed it to the hasher in fixed-size chunks so
        # arbitrarily large inputs don't have to fit in memory.
        for chunk in varblock.yield_fixedblocks(message, 1024):
            hasher.update(chunk)
    else:
        # Plain string: hash it in one go.
        hasher.update(message)

    return hasher.digest()
def _find_method_hash(method_hash):
    '''Finds the hash method and the hash itself.

    :param method_hash: ASN1 code for the hash method concatenated with the
        hash itself.
    :return: tuple (method, hash) where ``method`` is the used hash method, and
        ``hash`` is the hash itself.
    :raise VerificationFailed: when the hash method cannot be found
    '''

    # The ASN.1 prefix uniquely identifies the algorithm; whatever follows
    # it is the digest.
    for (hashname, asn1code) in HASH_ASN1.items():
        if method_hash.startswith(asn1code):
            return (hashname, method_hash[len(asn1code):])

    raise VerificationError('Verification failed')
# Public API of this module.
__all__ = ['encrypt', 'decrypt', 'sign', 'verify',
           'DecryptionError', 'VerificationError', 'CryptoError']

if __name__ == '__main__':
    print('Running doctests 1000x or until failure')
    import doctest

    # The doctests use freshly generated random keys and random padding, so
    # run them repeatedly to catch intermittent failures; stop on the first
    # failing round.
    for count in range(1000):
        (failures, tests) = doctest.testmod()
        if failures:
            break

        # Progress indicator every 100 successful rounds.
        if count and count % 100 == 0:
            print('%i times' % count)

    print('Doctests done')
| gpl-3.0 |
bartvm/pylearn2 | pylearn2/sandbox/rnn/costs/tests/test_gradient_clipping.py | 44 | 1513 | """
Unit tests for the gradient clipping cost
"""
import unittest
import numpy as np
from theano import function
from pylearn2.costs.mlp import Default
from pylearn2.models.mlp import MLP, Linear
from pylearn2.sandbox.rnn.costs.gradient_clipping import GradientClipping
class TestGradientClipping(unittest.TestCase):
    """
    Test cases for the gradient clipping cost

    Parameters
    ----------
    None
    """
    def test_gradient_clipping(self):
        """
        Create a known gradient and check whether it is being clipped
        correctly
        """
        # Single linear unit with a known weight so the gradient is exactly
        # predictable; irange=0 makes initialisation deterministic.
        mlp = MLP(layers=[Linear(dim=1, irange=0, layer_name='linear')],
                  nvis=1)
        W, b = mlp.layers[0].get_params()
        W.set_value([[10]])

        # Symbolic input/target batches for building the cost expressions.
        X = mlp.get_input_space().make_theano_batch()
        y = mlp.get_output_space().make_theano_batch()

        cost = Default()
        gradients, _ = cost.get_gradients(mlp, (X, y))

        # Same cost wrapped with clipping at norm 20.
        clipped_cost = GradientClipping(20, Default())
        clipped_gradients, _ = clipped_cost.get_gradients(mlp, (X, y))

        # The MLP defines f(x) = (x W)^2, with df/dW = 2 W x^2
        f = function([X, y], [gradients[W].sum(), clipped_gradients[W].sum()],
                     allow_input_downcast=True)

        # df/dW = df/db = 20 for W = 10, x = 1, so the norm is 20 * sqrt(2)
        # and the gradients should be clipped to 20 / sqrt(2)
        np.testing.assert_allclose(f([[1]], [[0]]), [20, 20 / np.sqrt(2)])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Mac/Modules/cf/cfsupport.py | 35 | 26266 | # This script generates a Python interface for an Apple Macintosh Manager.
# It uses the "bgen" package to generate C code.
# The function specifications are generated by scanning the mamager's header file,
# using the "scantools" package (customized for this particular manager).
#error missing SetActionFilter
import string
# Declarations that change for each manager
MODNAME = '_CF'                # The name of the module
# The following is *usually* unchanged but may still require tuning
MODPREFIX = 'CF'               # The prefix for module-wide routines
INPUTFILE = string.lower(MODPREFIX) + 'gen.py'  # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c"               # The file generated by this program
from macsupport import *
# Special case generator for the functions that have an AllocatorRef first argument,
# which we skip anyway, and the object as the second arg.
class MethodSkipArg1(MethodGenerator):
    """Similar to MethodGenerator, but has self as last argument.

    Used for toolbox calls whose first C argument is a CFAllocatorRef (which
    we always pass as NULL) and whose second argument is the object itself.
    """

    def parseArgumentList(self, args):
        if len(args) < 2:
            raise ValueError("MethodSkipArg1 expects at least 2 args")
        a0, a1, args = args[0], args[1], args[2:]
        t0, n0, m0 = a0
        # BUG FIX: the first argument must be a CFAllocatorRef passed InMode.
        # The original used `and`, which only raised when *both* the type and
        # the mode were wrong, so a wrong mode slipped through silently.
        if t0 != "CFAllocatorRef" or m0 != InMode:
            raise ValueError("MethodSkipArg1 should have dummy AllocatorRef first arg")
        t1, n1, m1 = a1
        if m1 != InMode:
            raise ValueError("method's 'self' must be 'InMode'")
        # Keep the dummy allocator in the list, then bind the second argument
        # to the wrapped object itself (_self->ob_itself).
        dummy = Variable(t0, n0, m0)
        self.argumentList.append(dummy)
        self.itself = Variable(t1, "_self->ob_itself", SelfMode)
        self.argumentList.append(self.itself)
        FunctionGenerator.parseArgumentList(self, args)
# Create the type objects
# C prologue for the generated module: CF headers, optional toolbox-glue
# prototypes, CFRange conversion helpers, and the optional-URL converter.
includestuff = includestuff + """
#include <CoreServices/CoreServices.h>
#include "pycfbridge.h"

#ifdef USE_TOOLBOX_OBJECT_GLUE
extern PyObject *_CFObj_New(CFTypeRef);
extern int _CFObj_Convert(PyObject *, CFTypeRef *);
#define CFObj_New _CFObj_New
#define CFObj_Convert _CFObj_Convert

extern PyObject *_CFTypeRefObj_New(CFTypeRef);
extern int _CFTypeRefObj_Convert(PyObject *, CFTypeRef *);
#define CFTypeRefObj_New _CFTypeRefObj_New
#define CFTypeRefObj_Convert _CFTypeRefObj_Convert

extern PyObject *_CFStringRefObj_New(CFStringRef);
extern int _CFStringRefObj_Convert(PyObject *, CFStringRef *);
#define CFStringRefObj_New _CFStringRefObj_New
#define CFStringRefObj_Convert _CFStringRefObj_Convert

extern PyObject *_CFMutableStringRefObj_New(CFMutableStringRef);
extern int _CFMutableStringRefObj_Convert(PyObject *, CFMutableStringRef *);
#define CFMutableStringRefObj_New _CFMutableStringRefObj_New
#define CFMutableStringRefObj_Convert _CFMutableStringRefObj_Convert

extern PyObject *_CFArrayRefObj_New(CFArrayRef);
extern int _CFArrayRefObj_Convert(PyObject *, CFArrayRef *);
#define CFArrayRefObj_New _CFArrayRefObj_New
#define CFArrayRefObj_Convert _CFArrayRefObj_Convert

extern PyObject *_CFMutableArrayRefObj_New(CFMutableArrayRef);
extern int _CFMutableArrayRefObj_Convert(PyObject *, CFMutableArrayRef *);
#define CFMutableArrayRefObj_New _CFMutableArrayRefObj_New
#define CFMutableArrayRefObj_Convert _CFMutableArrayRefObj_Convert

extern PyObject *_CFDataRefObj_New(CFDataRef);
extern int _CFDataRefObj_Convert(PyObject *, CFDataRef *);
#define CFDataRefObj_New _CFDataRefObj_New
#define CFDataRefObj_Convert _CFDataRefObj_Convert

extern PyObject *_CFMutableDataRefObj_New(CFMutableDataRef);
extern int _CFMutableDataRefObj_Convert(PyObject *, CFMutableDataRef *);
#define CFMutableDataRefObj_New _CFMutableDataRefObj_New
#define CFMutableDataRefObj_Convert _CFMutableDataRefObj_Convert

extern PyObject *_CFDictionaryRefObj_New(CFDictionaryRef);
extern int _CFDictionaryRefObj_Convert(PyObject *, CFDictionaryRef *);
#define CFDictionaryRefObj_New _CFDictionaryRefObj_New
#define CFDictionaryRefObj_Convert _CFDictionaryRefObj_Convert

extern PyObject *_CFMutableDictionaryRefObj_New(CFMutableDictionaryRef);
extern int _CFMutableDictionaryRefObj_Convert(PyObject *, CFMutableDictionaryRef *);
#define CFMutableDictionaryRefObj_New _CFMutableDictionaryRefObj_New
#define CFMutableDictionaryRefObj_Convert _CFMutableDictionaryRefObj_Convert

extern PyObject *_CFURLRefObj_New(CFURLRef);
extern int _CFURLRefObj_Convert(PyObject *, CFURLRef *);
extern int _OptionalCFURLRefObj_Convert(PyObject *, CFURLRef *);
#define CFURLRefObj_New _CFURLRefObj_New
#define CFURLRefObj_Convert _CFURLRefObj_Convert
#define OptionalCFURLRefObj_Convert _OptionalCFURLRefObj_Convert
#endif

/*
** Parse/generate CFRange records
*/
PyObject *CFRange_New(CFRange *itself)
{
    return Py_BuildValue("ll", (long)itself->location, (long)itself->length);
}

int
CFRange_Convert(PyObject *v, CFRange *p_itself)
{
    long location, length;

    if( !PyArg_ParseTuple(v, "ll", &location, &length) )
        return 0;
    p_itself->location = (CFIndex)location;
    p_itself->length = (CFIndex)length;
    return 1;
}

/* Optional CFURL argument or None (passed as NULL) */
int
OptionalCFURLRefObj_Convert(PyObject *v, CFURLRef *p_itself)
{
    if ( v == Py_None ) {
        /* BUG FIX: the original did `p_itself = NULL;`, assigning the local
           pointer and leaving the caller's CFURLRef uninitialized.  NULL
           must be stored *through* the pointer. */
        *p_itself = NULL;
        return 1;
    }
    return CFURLRefObj_Convert(v, p_itself);
}
"""
finalstuff = finalstuff + """
/* Routines to convert any CF type to/from the corresponding CFxxxObj */
PyObject *CFObj_New(CFTypeRef itself)
{
if (itself == NULL)
{
PyErr_SetString(PyExc_RuntimeError, "cannot wrap NULL");
return NULL;
}
if (CFGetTypeID(itself) == CFArrayGetTypeID()) return CFArrayRefObj_New((CFArrayRef)itself);
if (CFGetTypeID(itself) == CFDictionaryGetTypeID()) return CFDictionaryRefObj_New((CFDictionaryRef)itself);
if (CFGetTypeID(itself) == CFDataGetTypeID()) return CFDataRefObj_New((CFDataRef)itself);
if (CFGetTypeID(itself) == CFStringGetTypeID()) return CFStringRefObj_New((CFStringRef)itself);
if (CFGetTypeID(itself) == CFURLGetTypeID()) return CFURLRefObj_New((CFURLRef)itself);
/* XXXX Or should we use PyCF_CF2Python here?? */
return CFTypeRefObj_New(itself);
}
int CFObj_Convert(PyObject *v, CFTypeRef *p_itself)
{
if (v == Py_None) { *p_itself = NULL; return 1; }
/* Check for other CF objects here */
if (!CFTypeRefObj_Check(v) &&
!CFArrayRefObj_Check(v) &&
!CFMutableArrayRefObj_Check(v) &&
!CFDictionaryRefObj_Check(v) &&
!CFMutableDictionaryRefObj_Check(v) &&
!CFDataRefObj_Check(v) &&
!CFMutableDataRefObj_Check(v) &&
!CFStringRefObj_Check(v) &&
!CFMutableStringRefObj_Check(v) &&
!CFURLRefObj_Check(v) )
{
/* XXXX Or should we use PyCF_Python2CF here?? */
PyErr_SetString(PyExc_TypeError, "CF object required");
return 0;
}
*p_itself = ((CFTypeRefObject *)v)->ob_itself;
return 1;
}
"""
# Module-init code: register every wrapper's New/Convert pair with the
# toolbox-object glue so other extension modules can exchange CF objects.
initstuff = initstuff + """
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFTypeRef, CFObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFTypeRef, CFObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFTypeRef, CFTypeRefObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFTypeRef, CFTypeRefObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFStringRef, CFStringRefObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFStringRef, CFStringRefObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFMutableStringRef, CFMutableStringRefObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFMutableStringRef, CFMutableStringRefObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFArrayRef, CFArrayRefObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFArrayRef, CFArrayRefObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFMutableArrayRef, CFMutableArrayRefObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFMutableArrayRef, CFMutableArrayRefObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFDictionaryRef, CFDictionaryRefObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFDictionaryRef, CFDictionaryRefObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFMutableDictionaryRef, CFMutableDictionaryRefObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFMutableDictionaryRef, CFMutableDictionaryRefObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(CFURLRef, CFURLRefObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(CFURLRef, CFURLRefObj_Convert);
"""
# Module-level constants: export the standard CFPreferences string constants
# to Python as wrapped CFStringRef objects.
variablestuff="""
#define _STRINGCONST(name) PyModule_AddObject(m, #name, CFStringRefObj_New(name))
_STRINGCONST(kCFPreferencesAnyApplication);
_STRINGCONST(kCFPreferencesCurrentApplication);
_STRINGCONST(kCFPreferencesAnyHost);
_STRINGCONST(kCFPreferencesCurrentHost);
_STRINGCONST(kCFPreferencesAnyUser);
_STRINGCONST(kCFPreferencesCurrentUser);
"""
# Scalar CF types, mapped to simple PyArg_Parse format codes.
Boolean = Type("Boolean", "l")
CFTypeID = Type("CFTypeID", "l") # XXXX a guess, seems better than OSTypeType.
CFHashCode = Type("CFHashCode", "l")
CFIndex = Type("CFIndex", "l")
CFRange = OpaqueByValueType('CFRange', 'CFRange')
CFOptionFlags = Type("CFOptionFlags", "l")
CFStringEncoding = Type("CFStringEncoding", "l")
CFComparisonResult = Type("CFComparisonResult", "l") # a bit dangerous, it's an enum
CFURLPathStyle = Type("CFURLPathStyle", "l") # a bit dangerous, it's an enum

char_ptr = stringptr
return_stringptr = Type("char *", "s") # ONLY FOR RETURN VALUES!!

# Arguments we always supply ourselves rather than taking from Python.
CFAllocatorRef = FakeType("(CFAllocatorRef)NULL")
CFArrayCallBacks_ptr = FakeType("&kCFTypeArrayCallBacks")
CFDictionaryKeyCallBacks_ptr = FakeType("&kCFTypeDictionaryKeyCallBacks")
CFDictionaryValueCallBacks_ptr = FakeType("&kCFTypeDictionaryValueCallBacks")

# The real objects.
# BUG FIX: the original declared CFArrayRef and CFMutableArrayRef twice in a
# row with identical arguments; the redundant duplicates are removed.
CFTypeRef = OpaqueByValueType("CFTypeRef", "CFTypeRefObj")
CFArrayRef = OpaqueByValueType("CFArrayRef", "CFArrayRefObj")
CFMutableArrayRef = OpaqueByValueType("CFMutableArrayRef", "CFMutableArrayRefObj")
CFDataRef = OpaqueByValueType("CFDataRef", "CFDataRefObj")
CFMutableDataRef = OpaqueByValueType("CFMutableDataRef", "CFMutableDataRefObj")
CFDictionaryRef = OpaqueByValueType("CFDictionaryRef", "CFDictionaryRefObj")
CFMutableDictionaryRef = OpaqueByValueType("CFMutableDictionaryRef", "CFMutableDictionaryRefObj")
CFStringRef = OpaqueByValueType("CFStringRef", "CFStringRefObj")
CFMutableStringRef = OpaqueByValueType("CFMutableStringRef", "CFMutableStringRefObj")
CFURLRef = OpaqueByValueType("CFURLRef", "CFURLRefObj")
OptionalCFURLRef = OpaqueByValueType("CFURLRef", "OptionalCFURLRefObj")
##CFPropertyListRef = OpaqueByValueType("CFPropertyListRef", "CFTypeRefObj")
# ADD object type here
# Our (opaque) objects
class MyGlobalObjectDefinition(PEP253Mixin, GlobalObjectDefinition):
    """Shared behaviour for all generated CF wrapper types.

    Each wrapper owns its CFTypeRef (ob_freeit/CFRelease) and supports
    compare, hash, repr and a tp_init that accepts any CF object.
    """

    def outputCheckNewArg(self):
        # Refuse to wrap NULL pointers.
        Output('if (itself == NULL)')
        OutLbrace()
        Output('PyErr_SetString(PyExc_RuntimeError, "cannot wrap NULL");')
        Output('return NULL;')
        OutRbrace()

    def outputStructMembers(self):
        GlobalObjectDefinition.outputStructMembers(self)
        # Destructor slot; normally CFRelease, so each wrapper owns a reference.
        Output("void (*ob_freeit)(CFTypeRef ptr);")

    def outputInitStructMembers(self):
        GlobalObjectDefinition.outputInitStructMembers(self)
##        Output("it->ob_freeit = NULL;")
        Output("it->ob_freeit = CFRelease;")

    def outputCheckConvertArg(self):
        Out("""
        if (v == Py_None) { *p_itself = NULL; return 1; }
        /* Check for other CF objects here */
        """)

    def outputCleanupStructMembers(self):
        # Release the wrapped object exactly once on deallocation.
        Output("if (self->ob_freeit && self->ob_itself)")
        OutLbrace()
        Output("self->ob_freeit((CFTypeRef)self->ob_itself);")
        Output("self->ob_itself = NULL;")
        OutRbrace()

    def outputCompare(self):
        # Orders by pointer identity, not CF value equality.
        Output()
        Output("static int %s_compare(%s *self, %s *other)", self.prefix, self.objecttype, self.objecttype)
        OutLbrace()
        Output("/* XXXX Or should we use CFEqual?? */")
        Output("if ( self->ob_itself > other->ob_itself ) return 1;")
        Output("if ( self->ob_itself < other->ob_itself ) return -1;")
        Output("return 0;")
        OutRbrace()

    def outputHash(self):
        # Hashes the pointer value (consistent with the identity compare).
        Output()
        Output("static int %s_hash(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("/* XXXX Or should we use CFHash?? */")
        Output("return (int)self->ob_itself;")
        OutRbrace()

    def outputRepr(self):
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFTypeRef type-%%d object at 0x%%8.8x for 0x%%8.8x>", (int)CFGetTypeID(self->ob_itself), (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()

    def output_tp_newBody(self):
        Output("PyObject *self;")
        # BUG FIX: the original had a bare `Output` (a no-op expression);
        # the intent was clearly a call emitting a blank line.
        Output()
        Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
        Output("((%s *)self)->ob_itself = NULL;", self.objecttype)
        Output("((%s *)self)->ob_freeit = CFRelease;", self.objecttype)
        Output("return self;")

    def output_tp_initBody(self):
        Output("%s itself;", self.itselftype)
        Output("char *kw[] = {\"itself\", 0};")
        Output()
        Output("if (PyArg_ParseTupleAndKeywords(_args, _kwds, \"O&\", kw, %s_Convert, &itself))",
               self.prefix)
        OutLbrace()
        Output("((%s *)_self)->ob_itself = itself;", self.objecttype)
        Output("return 0;")
        OutRbrace()
        if self.prefix != 'CFTypeRefObj':
            # Subtypes also accept any generic CFTypeRef as initializer.
            Output()
            Output("/* Any CFTypeRef descendent is allowed as initializer too */")
            Output("if (PyArg_ParseTupleAndKeywords(_args, _kwds, \"O&\", kw, CFTypeRefObj_Convert, &itself))")
            OutLbrace()
            Output("((%s *)_self)->ob_itself = itself;", self.objecttype)
            Output("return 0;")
            OutRbrace()
        Output("return -1;")
class CFTypeRefObjectDefinition(MyGlobalObjectDefinition):
    """Wrapper for a plain CFTypeRef; inherits all behaviour unchanged."""
    pass
class CFArrayRefObjectDefinition(MyGlobalObjectDefinition):
    # Subtype of the generic CFTypeRef wrapper.
    basetype = "CFTypeRef_Type"

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFArrayRef)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFArrayRef object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class CFMutableArrayRefObjectDefinition(MyGlobalObjectDefinition):
    # Mutable arrays subtype the immutable array wrapper.
    basetype = "CFArrayRef_Type"

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFMutableArrayRef)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFMutableArrayRef object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class CFDictionaryRefObjectDefinition(MyGlobalObjectDefinition):
    # Subtype of the generic CFTypeRef wrapper.
    basetype = "CFTypeRef_Type"

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFDictionaryRef)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFDictionaryRef object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class CFMutableDictionaryRefObjectDefinition(MyGlobalObjectDefinition):
    # Mutable dictionaries subtype the immutable dictionary wrapper.
    basetype = "CFDictionaryRef_Type"

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFMutableDictionaryRef)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFMutableDictionaryRef object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class CFDataRefObjectDefinition(MyGlobalObjectDefinition):
    # Subtype of the generic CFTypeRef wrapper.
    basetype = "CFTypeRef_Type"

    def outputCheckConvertArg(self):
        """Also accept a plain Python string, converting it to new CFData."""
        # NOTE(review): the generated C declares `int cLen` but
        # PyString_AsStringAndSize takes Py_ssize_t* on later 2.x releases —
        # confirm the targeted Python version.
        Out("""
        if (v == Py_None) { *p_itself = NULL; return 1; }
        if (PyString_Check(v)) {
            char *cStr;
            int cLen;
            if( PyString_AsStringAndSize(v, &cStr, &cLen) < 0 ) return 0;
            *p_itself = CFDataCreate((CFAllocatorRef)NULL, (unsigned char *)cStr, cLen);
            return 1;
        }
        """)

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFDataRef)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFDataRef object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class CFMutableDataRefObjectDefinition(MyGlobalObjectDefinition):
    # Mutable data subtypes the immutable data wrapper.
    basetype = "CFDataRef_Type"

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFMutableDataRef)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFMutableDataRef object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class CFStringRefObjectDefinition(MyGlobalObjectDefinition):
    # Subtype of the generic CFTypeRef wrapper.
    basetype = "CFTypeRef_Type"

    def outputCheckConvertArg(self):
        """Also accept Python str (ASCII) and unicode objects, converting
        them to new CFString objects."""
        # BUG FIX: the converter function returns int, but the original
        # generated `return NULL;` on PyArg_Parse failure; it must be
        # `return 0;` (the conversion-failed status).
        Out("""
        if (v == Py_None) { *p_itself = NULL; return 1; }
        if (PyString_Check(v)) {
            char *cStr;
            if (!PyArg_Parse(v, "es", "ascii", &cStr))
                return 0;
            *p_itself = CFStringCreateWithCString((CFAllocatorRef)NULL, cStr, kCFStringEncodingASCII);
            PyMem_Free(cStr);
            return 1;
        }
        if (PyUnicode_Check(v)) {
            /* We use the CF types here, if Python was configured differently that will give an error */
            CFIndex size = PyUnicode_GetSize(v);
            UniChar *unichars = PyUnicode_AsUnicode(v);
            if (!unichars) return 0;
            *p_itself = CFStringCreateWithCharacters((CFAllocatorRef)NULL, unichars, size);
            return 1;
        }
        """)

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFStringRef)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFStringRef object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class CFMutableStringRefObjectDefinition(CFStringRefObjectDefinition):
    # Mutable strings subtype the immutable string wrapper.
    basetype = "CFStringRef_Type"

    def outputCheckConvertArg(self):
        # Mutable, don't allow Python strings: fall back to the generic
        # (None-or-CF-object-only) converter from MyGlobalObjectDefinition.
        return MyGlobalObjectDefinition.outputCheckConvertArg(self)

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFMutableStringRef)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFMutableStringRef object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
class CFURLRefObjectDefinition(MyGlobalObjectDefinition):
    # Subtype of the generic CFTypeRef wrapper.
    basetype = "CFTypeRef_Type"

    def outputRepr(self):
        """Emit a C repr() naming the concrete type (CFURL)."""
        Output()
        Output("static PyObject * %s_repr(%s *self)", self.prefix, self.objecttype)
        OutLbrace()
        Output("char buf[100];")
        Output("""sprintf(buf, "<CFURL object at 0x%%8.8x for 0x%%8.8x>", (unsigned)self, (unsigned)self->ob_itself);""")
        Output("return PyString_FromString(buf);")
        OutRbrace()
# ADD object class here

# From here on it's basically all boiler plate...

# Create the generator groups and link them.
# Each *ObjectDefinition instance below generates one Python extension type
# wrapping the corresponding CoreFoundation handle type.
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff, variablestuff)
CFTypeRef_object = CFTypeRefObjectDefinition('CFTypeRef', 'CFTypeRefObj', 'CFTypeRef')
CFArrayRef_object = CFArrayRefObjectDefinition('CFArrayRef', 'CFArrayRefObj', 'CFArrayRef')
CFMutableArrayRef_object = CFMutableArrayRefObjectDefinition('CFMutableArrayRef', 'CFMutableArrayRefObj', 'CFMutableArrayRef')
CFDictionaryRef_object = CFDictionaryRefObjectDefinition('CFDictionaryRef', 'CFDictionaryRefObj', 'CFDictionaryRef')
CFMutableDictionaryRef_object = CFMutableDictionaryRefObjectDefinition('CFMutableDictionaryRef', 'CFMutableDictionaryRefObj', 'CFMutableDictionaryRef')
CFDataRef_object = CFDataRefObjectDefinition('CFDataRef', 'CFDataRefObj', 'CFDataRef')
CFMutableDataRef_object = CFMutableDataRefObjectDefinition('CFMutableDataRef', 'CFMutableDataRefObj', 'CFMutableDataRef')
CFStringRef_object = CFStringRefObjectDefinition('CFStringRef', 'CFStringRefObj', 'CFStringRef')
CFMutableStringRef_object = CFMutableStringRefObjectDefinition('CFMutableStringRef', 'CFMutableStringRefObj', 'CFMutableStringRef')
CFURLRef_object = CFURLRefObjectDefinition('CFURLRef', 'CFURLRefObj', 'CFURLRef')
# ADD object here

# Registration order determines the order of type definitions in the output.
module.addobject(CFTypeRef_object)
module.addobject(CFArrayRef_object)
module.addobject(CFMutableArrayRef_object)
module.addobject(CFDictionaryRef_object)
module.addobject(CFMutableDictionaryRef_object)
module.addobject(CFDataRef_object)
module.addobject(CFMutableDataRef_object)
module.addobject(CFStringRef_object)
module.addobject(CFMutableStringRef_object)
module.addobject(CFURLRef_object)
# ADD addobject call here

# Create the generator classes used to populate the lists
Function = OSErrWeakLinkFunctionGenerator
Method = OSErrWeakLinkMethodGenerator

# Create and populate the lists.
# These lists are filled in by executing INPUTFILE (the scanner output) below.
functions = []
CFTypeRef_methods = []
CFArrayRef_methods = []
CFMutableArrayRef_methods = []
CFDictionaryRef_methods = []
CFMutableDictionaryRef_methods = []
CFDataRef_methods = []
CFMutableDataRef_methods = []
CFStringRef_methods = []
CFMutableStringRef_methods = []
CFURLRef_methods = []
# ADD _methods initializer here
# Executing the scanner-generated input file appends Function/Method
# generators to the lists declared above.
execfile(INPUTFILE)

# add the populated lists to the generator groups
# (in a different world the scan program would generate this)
for f in functions: module.add(f)
for f in CFTypeRef_methods: CFTypeRef_object.add(f)
for f in CFArrayRef_methods: CFArrayRef_object.add(f)
for f in CFMutableArrayRef_methods: CFMutableArrayRef_object.add(f)
for f in CFDictionaryRef_methods: CFDictionaryRef_object.add(f)
for f in CFMutableDictionaryRef_methods: CFMutableDictionaryRef_object.add(f)
for f in CFDataRef_methods: CFDataRef_object.add(f)
for f in CFMutableDataRef_methods: CFMutableDataRef_object.add(f)
for f in CFStringRef_methods: CFStringRef_object.add(f)
for f in CFMutableStringRef_methods: CFMutableStringRef_object.add(f)
for f in CFURLRef_methods: CFURLRef_object.add(f)
# Manual generators for getting data out of strings.
# Each ManualGenerator below injects a hand-written C method body that the
# scanner cannot produce automatically. The C bodies are kept verbatim; the
# only change here is dropping the redundant trailing semicolons on the
# Python assignment statements.
getasstring_body = """
int size = CFStringGetLength(_self->ob_itself)+1;
char *data = malloc(size);
if( data == NULL ) return PyErr_NoMemory();
if ( CFStringGetCString(_self->ob_itself, data, size, 0) ) {
    _res = (PyObject *)PyString_FromString(data);
} else {
    PyErr_SetString(PyExc_RuntimeError, "CFStringGetCString could not fit the string");
    _res = NULL;
}
free(data);
return _res;
"""
f = ManualGenerator("CFStringGetString", getasstring_body)
f.docstring = lambda: "() -> (string _rv)"
CFStringRef_object.add(f)

getasunicode_body = """
int size = CFStringGetLength(_self->ob_itself)+1;
Py_UNICODE *data = malloc(size*sizeof(Py_UNICODE));
CFRange range;
range.location = 0;
range.length = size;
if( data == NULL ) return PyErr_NoMemory();
CFStringGetCharacters(_self->ob_itself, range, data);
_res = (PyObject *)PyUnicode_FromUnicode(data, size-1);
free(data);
return _res;
"""
f = ManualGenerator("CFStringGetUnicode", getasunicode_body)
f.docstring = lambda: "() -> (unicode _rv)"
CFStringRef_object.add(f)

# Get data from CFDataRef
getasdata_body = """
int size = CFDataGetLength(_self->ob_itself);
char *data = (char *)CFDataGetBytePtr(_self->ob_itself);
_res = (PyObject *)PyString_FromStringAndSize(data, size);
return _res;
"""
f = ManualGenerator("CFDataGetData", getasdata_body)
f.docstring = lambda: "() -> (string _rv)"
CFDataRef_object.add(f)

# Manual generator for CFPropertyListCreateFromXMLData because of funny error return
fromxml_body = """
CFTypeRef _rv;
CFOptionFlags mutabilityOption;
CFStringRef errorString;
if (!PyArg_ParseTuple(_args, "l",
                      &mutabilityOption))
    return NULL;
_rv = CFPropertyListCreateFromXMLData((CFAllocatorRef)NULL,
    _self->ob_itself,
    mutabilityOption,
    &errorString);
if (errorString)
    CFRelease(errorString);
if (_rv == NULL) {
    PyErr_SetString(PyExc_RuntimeError, "Parse error in XML data");
    return NULL;
}
_res = Py_BuildValue("O&",
                     CFTypeRefObj_New, _rv);
return _res;
"""
f = ManualGenerator("CFPropertyListCreateFromXMLData", fromxml_body)
f.docstring = lambda: "(CFOptionFlags mutabilityOption) -> (CFTypeRefObj)"
CFTypeRef_object.add(f)

# Convert CF objects to Python objects
toPython_body = """
_res = PyCF_CF2Python(_self->ob_itself);
return _res;
"""
f = ManualGenerator("toPython", toPython_body)
f.docstring = lambda: "() -> (python_object)"
CFTypeRef_object.add(f)

# toCF returns the most specific wrapper type it can determine from the
# runtime CFTypeID, falling back to the generic CFTypeRef wrapper.
toCF_body = """
CFTypeRef rv;
CFTypeID typeid;
if (!PyArg_ParseTuple(_args, "O&", PyCF_Python2CF, &rv))
    return NULL;
typeid = CFGetTypeID(rv);
if (typeid == CFStringGetTypeID())
    return Py_BuildValue("O&", CFStringRefObj_New, rv);
if (typeid == CFArrayGetTypeID())
    return Py_BuildValue("O&", CFArrayRefObj_New, rv);
if (typeid == CFDictionaryGetTypeID())
    return Py_BuildValue("O&", CFDictionaryRefObj_New, rv);
if (typeid == CFURLGetTypeID())
    return Py_BuildValue("O&", CFURLRefObj_New, rv);
_res = Py_BuildValue("O&", CFTypeRefObj_New, rv);
return _res;
"""
f = ManualGenerator("toCF", toCF_body)
f.docstring = lambda: "(python_object) -> (CF_object)"
module.add(f)

# ADD add forloop here

# generate output (open the output file as late as possible)
SetOutputFileName(OUTPUTFILE)
module.generate()
drpngx/tensorflow | tensorflow/contrib/cudnn_rnn/python/layers/cudnn_rnn.py | 12 | 22234 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cudnn RNN operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
CUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
CUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
CUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU
CUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU
CUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH
# Half for cell input, half for hidden states.
CUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER
CUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER
CUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER
CUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER
CUDNN_INPUT_LINEAR_MODE = cudnn_rnn_ops.CUDNN_INPUT_LINEAR_MODE
CUDNN_INPUT_SKIP_MODE = cudnn_rnn_ops.CUDNN_INPUT_SKIP_MODE
CUDNN_INPUT_AUTO_MODE = cudnn_rnn_ops.CUDNN_INPUT_AUTO_MODE
__all__ = ["CudnnLSTM", "CudnnGRU", "CudnnRNNTanh", "CudnnRNNRelu"]
class _CudnnRNN(base_layer.Layer):
  # pylint:disable=line-too-long
  """Abstract class for RNN layers with Cudnn implementation.

  Cudnn RNNs have two major differences from other platform-independent RNNs tf
  provides:
  * Cudnn LSTM and GRU are mathematically different from their tf counterparts.
    (e.g. @{tf.contrib.rnn.LSTMBlockCell} and @{tf.nn.rnn_cell.GRUCell}.
  * Cudnn-trained checkpoints are not directly compatible with tf RNNs:
    * They use a single opaque parameter buffer for the entire (possibly)
      multi-layer multi-directional RNN; Whereas tf RNN weights are per-cell and
      layer.
    * The size and layout of the parameter buffers may change between
      CUDA/CuDNN/GPU generations. Because of that, the opaque parameter variable
      does not have a static shape and is not partitionable. Instead of using
      partitioning to alleviate the PS's traffic load, try building a
      multi-tower model and do gradient aggregation locally within the host
      before updating the PS. See https://www.tensorflow.org/performance/performance_models#parameter_server_variables
      for a detailed performance guide.

  Consequently, if one plans to use Cudnn trained models on both GPU and CPU
  for inference and training, one needs to:
  * Create a CudnnOpaqueParamsSaveable subclass object to save RNN params in
    canonical format. (This is done for you automatically during layer building
    process.)
  * When not using a Cudnn RNN class, use CudnnCompatibleRNN classes to load the
    checkpoints. These classes are platform-independent and perform the same
    computation as Cudnn for training and inference.
  Similarly, CudnnCompatibleRNN-trained checkpoints can be loaded by CudnnRNN
  classes seamlessly.

  Below is a typical workflow(using LSTM as an example):

  # Use Cudnn-trained checkpoints with CudnnCompatibleRNNs
  ```python
  with tf.Graph().as_default():
    lstm = CudnnLSTM(num_layers, num_units, direction, ...)
    outputs, output_states = lstm(inputs, initial_states, training=True)
    # If user plans to delay calling the cell with inputs, one can do
    # lstm.build(input_shape)
    saver = Saver()
    # training subgraph
    ...
    # Once in a while save the model.
    saver.save(save_path)

  # Inference subgraph for unidirectional RNN on, e.g., CPU or mobile.
  with tf.Graph().as_default():
    single_cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTM(num_units)
    # NOTE: Even if there's only one layer, the cell needs to be wrapped in
    # MultiRNNCell.
    cell = tf.nn.rnn_cell.MultiRNNCell(
        [single_cell() for _ in range(num_layers)])
    # Leave the scope arg unset.
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state, ...)
    saver = Saver()
    # Create session
    sess = ...
    # Restores
    saver.restore(sess, save_path)

  # Inference subgraph for bidirectional RNN
  with tf.Graph().as_default():
    single_cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTM(num_units)
    cells_fw = [single_cell() for _ in range(num_layers)]
    cells_bw = [single_cell() for _ in range(num_layers)]
    # Leave the scope arg unset.
    (outputs, output_state_fw,
     output_state_bw) = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
         cells_fw, cells_bw, inputs, ...)
    saver = Saver()
    # Create session
    sess = ...
    # Restores
    saver.restore(sess, save_path)
  ```
  """
  # pylint:enable=line-too-long
  # TODO(allenl): Document object-based saving and checkpoint compatibility once
  # it's implemented for more cuDNN Layers.
  # The following are constants defined by subclasses.
  # Type of RNN cell.
  _rnn_mode = None
  # Number of cell weights(or biases) per layer.
  _num_params_per_layer = None
  # Custom SaveableObject class for the CudnnRNN class.
  _saveable_cls = None
  def __init__(self,
               num_layers,
               num_units,
               input_mode=CUDNN_INPUT_LINEAR_MODE,
               direction=CUDNN_RNN_UNIDIRECTION,
               dropout=0.,
               seed=None,
               dtype=dtypes.float32,
               kernel_initializer=None,
               bias_initializer=None,
               name=None):
    """Creates a CudnnRNN model from model spec.

    Args:
      num_layers: the number of layers for the RNN model.
      num_units: the number of units within the RNN model.
      input_mode: indicate whether there is a linear projection between the
          input and the actual computation before the first layer. It can be
          'linear_input', 'skip_input' or 'auto_select'.
          'linear_input' (default) always applies a linear projection of input
          onto RNN hidden state. (standard RNN behavior).
          'skip_input' is only allowed when input_size == num_units;
          'auto_select' implies 'skip_input' when input_size == num_units;
          otherwise, it implies 'linear_input'.
      direction: the direction model that the model operates. Can be either
          'unidirectional' or 'bidirectional'
      dropout: dropout rate, a number between [0, 1]. Dropout is applied between
          each layer (no dropout is applied for a model with a single layer).
          When set to 0, dropout is disabled.
      seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
          for behavior.
      dtype: tf.float16, tf.float32 or tf.float64
      kernel_initializer: starting value to initialize the weight.
      bias_initializer: starting value to initialize the bias
        (default is all zeros).
      name: VariableScope for the created subgraph; defaults to class name.
        This only serves the default scope if later no scope is specified when
        invoking __call__().

    Raises:
      ValueError: if direction is invalid. Or dtype is not supported.
    """
    super(_CudnnRNN, self).__init__(dtype=dtype, name=name)
    cudnn_rnn_ops.check_direction(direction)
    cudnn_rnn_ops.check_input_mode(input_mode)
    if dtype not in [dtypes.float16, dtypes.float32, dtypes.float64]:
      raise ValueError(
          "Only support float16, float32, float64, provided %s" % dtype)
    # Layer self.dtype is type name, the original DType object is kept here.
    self._plain_dtype = dtype
    self._num_layers = num_layers
    self._num_units = num_units
    self._input_mode = input_mode
    self._direction = direction
    self._dropout = dropout
    self._seed = seed
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    # Init input_size to None, which will be set after build().
    self._input_size = None
    self._saveable = None
  @property
  def num_layers(self):
    return self._num_layers
  @property
  def num_units(self):
    return self._num_units
  @property
  def input_mode(self):
    """Input mode of first layer.

    Indicates whether there is a linear projection between the input and the
    actual computation before the first layer. It can be
    * 'linear_input': (default) always applies a linear projection of input
      onto RNN hidden state. (standard RNN behavior)
    * 'skip_input': 'skip_input' is only allowed when input_size == num_units.
    * 'auto_select'. implies 'skip_input' when input_size == num_units;
      otherwise, it implies 'linear_input'.

    Returns:
      'linear_input', 'skip_input' or 'auto_select'.
    """
    return self._input_mode
  @property
  def input_size(self):
    if not self._input_size:
      raise ValueError(
          "\'input_size\' is unknown since layer has not been built.")
    return self._input_size
  @property
  def rnn_mode(self):
    """Type of RNN cell used.

    Returns:
      `lstm`, `gru`, `rnn_relu` or `rnn_tanh`.
    """
    return self._rnn_mode
  @property
  def direction(self):
    """Returns `unidirectional` or `bidirectional`."""
    return self._direction
  @property
  def num_dirs(self):
    return 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
  @property
  def saveable(self):
    return self._saveable
  @property
  def canonical_weight_shapes(self):
    """Shapes of Cudnn canonical weight tensors."""
    if not self._input_size:
      raise RuntimeError(
          "%s.canonical_weight_shapes invoked before input shape is known" %
          type(self).__name__)
    shapes = []
    for i in range(self._num_layers):
      shapes.extend(self._canonical_weight_shape(i))
    return shapes
  @property
  def canonical_bias_shapes(self):
    """Shapes of Cudnn canonical bias tensors."""
    return self._canonical_bias_shape(0) * self._num_layers
  def _update_trainable_weights(self, getter, *args, **kwargs):
    """Custom getter for layer variables."""
    # Add variables to layer's `(non_)trainable_weights` list(s).
    variable = getter(*args, **kwargs)
    trainable = kwargs.get("trainable", True)
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable
  def build(self, input_shape):
    """Create variables of the Cudnn RNN.

    It can be called manually before `__call__()` or automatically through
    `__call__()`. In the former case, subsequent `__call__()`s will skip
    creating variables.

    Args:
      input_shape: network input tensor shape, a python list or a TensorShape
        object with 3 dimensions.

    Raises:
      ValueError: if input_shape has wrong dimension or unknown 3rd dimension.
    """
    if self.built:
      return
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape.ndims != 3:
      raise ValueError("Expecting input_shape with 3 dims, got %d" %
                       input_shape.ndims)
    if input_shape[-1].value is None:
      raise ValueError("The last dimension of the inputs to `CudnnRNN` "
                       "should be defined. Found `None`.")
    self._input_size = input_shape[-1].value
    self.input_spec = base_layer.InputSpec(ndim=3, axes={-1: self._input_size})
    self._set_scope(None)
    # Not using base class `add_variable()` since the it calls
    # `tf.get_variable()` with a callable initializer whereas here with a
    # tensor. The difference is mandated to support forward-compatibility with
    # Cudnn.
    with vs.variable_scope(
        self._scope,
        reuse=self.built,
        custom_getter=self._update_trainable_weights):
      if self._kernel_initializer is None:
        self._kernel_initializer = init_ops.glorot_uniform_initializer(
            seed=self._seed, dtype=self._plain_dtype)
      if self._bias_initializer is None:
        self._bias_initializer = init_ops.constant_initializer(
            0.0, dtype=self._plain_dtype)
      # Materialize per-gate canonical weights/biases, then pack them into the
      # single opaque Cudnn parameter buffer.
      weights = [
          self._kernel_initializer(sp, dtype=self._plain_dtype)
          for sp in self.canonical_weight_shapes
      ]
      biases = [
          self._bias_initializer(sp, dtype=self._plain_dtype)
          for sp in self.canonical_bias_shapes
      ]
      opaque_params_t = self._canonical_to_opaque(weights, biases)
      if vs.get_variable_scope().partitioner is not None:
        logging.warn(
            "Partitioner is not supported for Cudnn RNN layer variables, using "
            "it will create forward-compatibility issues with future "
            "CUDA/CuDNN generations.")
      # Initialize opaque params with a tensor.
      self.kernel = vs.get_variable(
          "opaque_kernel", dtype=self._plain_dtype,
          initializer=opaque_params_t, validate_shape=False)
    # Create saveable in the outer scope of the cudnn subgraph, such that
    # alternative subgraph with platform-independent rnn cells can load the
    # checkpoints directly.
    if not (self.built or vs.get_variable_scope().reuse is True):
      self._create_saveable()
    self.built = True
  def _gather_saveables_for_checkpoint(self):
    raise NotImplementedError(
        "This cell does not yet support object-based saving. File a feature "
        "request if this limitation bothers you.")
  def call(self, inputs, initial_state=None, training=True):
    """Runs the forward step for the RNN model.

    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
      initial_state: a tuple of tensor(s) of shape
        `[num_layers * num_dirs, batch_size, num_units]`. If not provided, use
        zero initial states. The tuple size is 2 for LSTM and 1 for other RNNs.
      training: whether this operation will be used in training or inference.

    Returns:
      output: a tensor of shape `[time_len, batch_size, num_dirs * num_units]`.
        It is a `concat([fwd_output, bak_output], axis=2)`.
      output_states: a tuple of tensor(s) of the same shape and structure as
        `initial_state`.

    Raises:
      ValueError: initial_state is not a tuple.
    """
    if initial_state is not None and not isinstance(initial_state, tuple):
      raise ValueError("Invalid initial_state type: %s, expecting tuple.",
                       type(initial_state))
    dtype = self.dtype
    inputs = ops.convert_to_tensor(inputs, dtype=dtype)
    batch_size = array_ops.shape(inputs)[1]
    if initial_state is None:
      initial_state = self._zero_state(batch_size)
    if self._rnn_mode == CUDNN_LSTM:
      h, c = initial_state  # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
    else:
      h, = initial_state  # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
    h = ops.convert_to_tensor(h, dtype=dtype)
    if self._rnn_mode == CUDNN_LSTM:
      c = ops.convert_to_tensor(c, dtype=dtype)
    else:
      # For model that doesn't take input_c, replace with a dummy tensor.
      c = array_ops.constant([], dtype=dtype)
    outputs, (output_h, output_c) = self._forward(inputs, h, c, self.kernel,
                                                  training)
    if self._rnn_mode == CUDNN_LSTM:
      return outputs, (output_h, output_c)
    else:
      return outputs, (output_h,)
  def state_shape(self, batch_size):
    # Abstract: subclasses return the tuple of per-state shapes.
    raise NotImplementedError
  def _zero_state(self, batch_size):
    # Build zero-filled initial state tensors matching state_shape().
    res = []
    for sp in self.state_shape(batch_size):
      res.append(array_ops.zeros(sp, dtype=self.dtype))
    return tuple(res)
  def _canonical_weight_shape(self, layer):
    """Shapes of Cudnn canonical weight tensors for given layer."""
    if layer < 0 or layer >= self._num_layers:
      raise ValueError("\'layer\' is not valid, got %s, expecting [%d, %d]" %
                       (layer, 0, self._num_layers-1))
    if not self._input_size:
      raise RuntimeError(
          "%s._canonical_weight_shape invoked before input shape is known" %
          type(self).__name__)
    input_size = self._input_size
    num_units = self._num_units
    num_gates = self._num_params_per_layer // 2
    is_bidi = self._direction == CUDNN_RNN_BIDIRECTION
    # Layers above the first consume the previous layer's output, which is
    # twice as wide in the bidirectional case.
    if layer == 0:
      wts_applied_on_inputs = [(num_units, input_size)] * num_gates
    else:
      if is_bidi:
        wts_applied_on_inputs = [(num_units, 2 * num_units)] * num_gates
      else:
        wts_applied_on_inputs = [(num_units, num_units)] * num_gates
    wts_applied_on_hidden_states = [(num_units, num_units)] * num_gates
    tf_wts = wts_applied_on_inputs + wts_applied_on_hidden_states
    return tf_wts if not is_bidi else tf_wts * 2
  def _canonical_bias_shape(self, unused_layer):
    """Shapes of Cudnn canonical bias tensors for given layer."""
    num_dirs = 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
    return [[self._num_units]] * num_dirs * self._num_params_per_layer
  def _canonical_to_opaque(self, cu_weights, cu_biases):
    # Pack canonical weights/biases into the Cudnn opaque buffer; the op must
    # be placed on GPU.
    if not self._input_size:
      raise RuntimeError(
          "%s._canonical_to_opaque invoked before input shape is known" %
          type(self).__name__)
    with ops.device("/gpu:0"):
      return cudnn_rnn_ops.cudnn_rnn_canonical_to_opaque_params(
          rnn_mode=self._rnn_mode,
          num_layers=self._num_layers,
          num_units=self._num_units,
          input_size=self._input_size,
          weights=cu_weights,
          biases=cu_biases,
          input_mode=self._input_mode,
          seed=self._seed,
          dropout=self._dropout,
          direction=self._direction)
  def _forward(self, inputs, h, c, opaque_params, training):
    """Invokes the underlying cudnn_rnn op for one forward pass."""
    output, output_h, output_c = cudnn_rnn_ops._cudnn_rnn(  # pylint:disable=protected-access
        inputs,
        h,
        c,
        opaque_params,
        training,
        self._rnn_mode,
        input_mode=self._input_mode,
        direction=self._direction,
        dropout=self._dropout,
        seed=self._seed)
    return output, (output_h, output_c)
  def _create_saveable(self):
    """Create custom saveable for the Cudnn layer.

    Called during layer building process to make sharing checkpoints between
    Cudnn and Cudnn-compatible RNNs easy.

    Returns:
      a `CudnnOpaqueParamsSaveable` object.

    Raises:
      RuntimeError: if any custom saveable is already created for this layer.
    """
    if self._saveable is not None:
      raise RuntimeError("Cudnn saveable already created.")
    self._saveable = self._saveable_cls(  # pylint:disable=not-callable
        opaque_params=self.trainable_variables[0],
        num_layers=self.num_layers,
        num_units=self.num_units,
        input_size=self.input_size,
        input_mode=self.input_mode,
        direction=self.direction,
        scope=vs.get_variable_scope(),
        name="%s_saveable" % self.trainable_variables[0].name.split(":")[0])
    self._saveable._add_checkpointable_dependencies(  # pylint: disable=protected-access
        checkpointable=self, dtype=self._plain_dtype)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
class CudnnLSTM(_CudnnRNN):
  """Cudnn implementation of LSTM layer."""
  _rnn_mode = CUDNN_LSTM
  _num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER
  _saveable_cls = cudnn_rnn_ops.CudnnLSTMSaveable
  def state_shape(self, batch_size):
    """Shape of Cudnn LSTM states.

    Shape is a 2-element tuple. Each is
    [num_layers * num_dirs, batch_size, num_units]

    Args:
      batch_size: an int

    Returns:
      a tuple of python arrays.
    """
    # LSTM carries both h and c states, hence two shape entries.
    return ([self.num_layers * self.num_dirs, batch_size, self.num_units],
            [self.num_layers * self.num_dirs, batch_size, self.num_units])
  @property
  def _gather_saveables_for_checkpoint(self):
    if self._direction == CUDNN_RNN_UNIDIRECTION:
      # Skip one inheritance level to avoid NotImplementedError.
      return super(_CudnnRNN, self)._gather_saveables_for_checkpoint
    else:
      raise NotImplementedError(
          "Object-based saving does not currently support bidirectional LSTM "
          "cells. File a feature request if this limitation bothers you.")
class _CudnnRNNNoInputC(_CudnnRNN):
  """Abstract simple CudnnRNN layer without input_c."""
  def state_shape(self, batch_size):
    """Shape of the state of Cudnn RNN cells w/o. input_c.

    Shape is a 1-element tuple,
    [num_layers * num_dirs, batch_size, num_units]

    Args:
      batch_size: an int

    Returns:
      a tuple of python arrays.
    """
    # The trailing comma is intentional: it makes the return value a 1-element
    # tuple, matching the (h,) state structure unpacked in _CudnnRNN.call().
    return [self.num_layers * self.num_dirs, batch_size, self.num_units],
class CudnnGRU(_CudnnRNNNoInputC):
  """Cudnn implementation of the GRU layer."""
  # Concrete subclass: only the cell-type constants differ.
  _rnn_mode = CUDNN_GRU
  _num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER
  _saveable_cls = cudnn_rnn_ops.CudnnGRUSaveable
class CudnnRNNTanh(_CudnnRNNNoInputC):
  """Cudnn implementation of the RNN-tanh layer."""
  # Concrete subclass: only the cell-type constants differ.
  _rnn_mode = CUDNN_RNN_TANH
  _num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
  _saveable_cls = cudnn_rnn_ops.CudnnRNNTanhSaveable
class CudnnRNNRelu(_CudnnRNNNoInputC):
  """Cudnn implementation of the RNN-relu layer."""
  # Concrete subclass: only the cell-type constants differ.
  _rnn_mode = CUDNN_RNN_RELU
  _num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
  _saveable_cls = cudnn_rnn_ops.CudnnRNNReluSaveable
| apache-2.0 |
danakj/chromium | third_party/closure_linter/closure_linter/errors.py | 99 | 4184 | #!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error codes for JavaScript style checker."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
def ByName(name):
  """Look up an error code by its symbolic name.

  Args:
    name: The name of the error constant (e.g. 'EXTRA_SPACE').

  Returns:
    The error code bound to that name in this module.
  """
  # The error constants live at module level, so the module namespace doubles
  # as the name -> code lookup table. A missing name raises KeyError, same as
  # a direct subscript would.
  module_namespace = globals()
  return module_namespace[name]
# Error codes are stable identifiers consumed by the checker and by user
# flags; the numeric gaps between groups are intentional, leaving room for
# new checks within each category.
# "File-fatal" errors - these errors stop further parsing of a single file
FILE_NOT_FOUND = -1
FILE_DOES_NOT_PARSE = -2
# Spacing
EXTRA_SPACE = 1
MISSING_SPACE = 2
EXTRA_LINE = 3
MISSING_LINE = 4
ILLEGAL_TAB = 5
WRONG_INDENTATION = 6
WRONG_BLANK_LINE_COUNT = 7
# Semicolons
MISSING_SEMICOLON = 10
MISSING_SEMICOLON_AFTER_FUNCTION = 11
ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
REDUNDANT_SEMICOLON = 13
# Miscellaneous
ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
LINE_TOO_LONG = 110
LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
UNUSED_LOCAL_VARIABLE = 133
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
# JsDoc
INVALID_JSDOC_TAG = 200
INVALID_USE_OF_DESC_TAG = 201
NO_BUG_NUMBER_AFTER_BUG_TAG = 202
MISSING_PARAMETER_DOCUMENTATION = 210
EXTRA_PARAMETER_DOCUMENTATION = 211
WRONG_PARAMETER_DOCUMENTATION = 212
MISSING_JSDOC_TAG_TYPE = 213
MISSING_JSDOC_TAG_DESCRIPTION = 214
MISSING_JSDOC_PARAM_NAME = 215
OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
MISSING_RETURN_DOCUMENTATION = 217
UNNECESSARY_RETURN_DOCUMENTATION = 218
MISSING_BRACES_AROUND_TYPE = 219
MISSING_MEMBER_DOCUMENTATION = 220
MISSING_PRIVATE = 221
EXTRA_PRIVATE = 222
INVALID_OVERRIDE_PRIVATE = 223
INVALID_INHERIT_DOC_PRIVATE = 224
MISSING_JSDOC_TAG_THIS = 225
UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_MISSING_VAR_ARGS_TYPE = 234
JSDOC_MISSING_VAR_ARGS_NAME = 235
# TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INVALID_SUPPRESS_TYPE = 251
UNNECESSARY_SUPPRESS = 252
# File ending
FILE_MISSING_NEWLINE = 300
FILE_IN_BLOCK = 301
# Interfaces
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# goog.scope - Namespace aliasing
# TODO(nnaze) Add additional errors here and in aliaspass.py
INVALID_USE_OF_GOOG_SCOPE = 600
EXTRA_GOOG_SCOPE_USAGE = 601
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
# All ActionScript specific errors should have error number at least 1000.
FUNCTION_MISSING_RETURN_TYPE = 1132
PARAMETER_MISSING_TYPE = 1133
VAR_MISSING_TYPE = 1134
PARAMETER_MISSING_DEFAULT_VALUE = 1135
IMPORTS_NOT_ALPHABETIZED = 1140
IMPORT_CONTAINS_WILDCARD = 1141
UNUSED_IMPORT = 1142
INVALID_TRACE_SEVERITY_LEVEL = 1250
MISSING_TRACE_SEVERITY_LEVEL = 1251
MISSING_TRACE_MESSAGE = 1252
REMOVE_TRACE_BEFORE_SUBMIT = 1253
REMOVE_COMMENT_BEFORE_SUBMIT = 1254
# End of list of ActionScript specific errors.
# Checks introduced after a release are opt-in for a while so existing code
# bases are not suddenly flagged; this set tracks them per version.
NEW_ERRORS = frozenset([
    # Errors added after 2.0.2:
    WRONG_INDENTATION,
    MISSING_SEMICOLON,
    # Errors added after 2.3.9:
    JSDOC_MISSING_VAR_ARGS_TYPE,
    JSDOC_MISSING_VAR_ARGS_NAME,
    # Errors added after 2.3.13:
    ])
| bsd-3-clause |
ingadhoc/openerp-travel | travel_rental_car/travel_rental_car.py | 2 | 3163 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
class travel_car_rental(orm.Model):
    """Car Rentals for travel"""
    _name = 'travel.rental.car'
    _description = _(__doc__)

    @staticmethod
    def _check_dep_arr_dates(start, end):
        # A rental interval is valid when either bound is unset or the start
        # does not come after the end.
        return not start or not end or start <= end

    def on_change_times(self, cr, uid, ids, start, end, context=None):
        """Onchange handler for the start/end datetimes.

        Returns an empty dict when the interval is valid; otherwise resets
        'end' and returns a warning for the user.
        """
        if self._check_dep_arr_dates(start, end):
            return {}
        return {
            'value': {
                'end': False,
            },
            'warning': {
                # Bug fix: the title used flight terminology
                # ("Arrival after Departure") and the message interpolated
                # (start, end) into "End (%s) ... Start (%s)", printing the
                # values in swapped positions.
                'title': 'End before Start',
                'message': ('End of rental (%s) cannot be before Start (%s).' %
                            (end, start)),
            },
        }

    def check_date(self, cr, uid, ids, context=None):
        """Constraint helper: validate start <= end on the first record."""
        if not ids:
            return False
        rental = self.browse(cr, uid, ids[0], context=context)
        return self._check_dep_arr_dates(rental.start, rental.end)

    _columns = {
        'pickup_loc': fields.char('Pick-up Location',
                                  help="Location of car pick-up."),
        'dropoff_loc': fields.char('Drop-off Location',
                                   help="Location of car drop-off."),
        'type': fields.many2one('vehicle.vehicle', 'Vehicle type',
                                help="Make and model of the car."),
        'start': fields.datetime('Start', required=True,
                                 help='Start date and time of car rental.'),
        'end': fields.datetime('End', required=True,
                               help='End date and time of car rental.'),
        'driver': fields.boolean('With Chauffeur',
                                 help='Will the car rental require a driver.'),
        'passenger_id': fields.many2one('travel.passenger', 'Passenger',
                                        required=True,
                                        help='Passenger on this car rental.'),
    }
    _constraints = [
        # Bug fix: the check rejects end < start, so the message must say
        # "before", not "after".
        (check_date, _('End date cannot be before Start date for car rental.'),
         ['start', 'end']),
    ]
| agpl-3.0 |
tyagiarpit/servo | tests/wpt/css-tests/css-fonts-3_dev/html/reference/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
# TTX (XML font) template used as the starting point for both test fonts
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
# temporary compiled copy of the shell font; removed after each build
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
# list of OpenType feature tags to generate test glyphs for
featureList = os.path.join(directory, "gsubtest-features.txt")
# JavaScript file mapping feature tags to their base codepoints
javascriptData = os.path.join(directory, "gsubtest-features.js")
# output font path pattern; %d is the GSUB lookup type (1 or 3)
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
# first Private Use Area codepoint assigned to generated test glyphs
baseCodepoint = 0xe000
# --------
# Features
# --------

# Read the feature-tag list: one tab-separated record per line, the first
# field being the OpenType feature tag.  Blank lines and "#" comment lines
# are skipped.  (Python 2: bytes read from the file behave as str here.)
with open(featureList, "rb") as feature_file:
    text = feature_file.read()

mapping = []
for raw_line in text.splitlines():
    tag_line = raw_line.strip()
    if not tag_line or tag_line.startswith("#"):
        continue
    # keep only the feature tag (first tab-separated field)
    mapping.append(tag_line.split("\t")[0])
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
    """Register one glyph in the font's CFF table.

    Builds a Type 2 charstring from *program*, appends it to the
    charstrings index, records the glyph's ID in the CharStrings mapping,
    and adds *glyphName* to the font's charset.
    """
    glyph_id = len(topDict.charset)
    charstring = T2CharString(program=program, private=private,
                              globalSubrs=globalSubrs)
    charStringsIndex.append(charstring)
    charStrings.charStrings[glyphName] = glyph_id
    topDict.charset.append(glyphName)
def makeLookup1():
    """Build the GSUB lookup-type-1 (single substitution) test font.

    Compiles the shell TTX template into an OTF, adds a PASS/FAIL glyph
    pair for every feature tag in ``mapping``, and writes a GSUB table
    whose lookups swap each pair, then saves gsubtest-lookup1.otf.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data (charstring programs and metrics of the
    # template's "P" and "F" glyphs, reused for every generated glyph)
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        # bump this up so that the sequence is the same as the lookup 3 font
        cp += 3
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list (single DFLT script exposing every feature)
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    # 65535 = no required feature, per the OpenType spec
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list: one feature per tag, each pointing at the
    # lookup with the same index
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups: type 1 (single substitution) swapping pass<->fail,
    # so applying the feature flips which glyph is shown
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 1
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = SingleSubst()
        subtable.Format = 2
        subtable.LookupType = 1
        subtable.mapping = {
            "%s.pass" % tag : "%s.fail" % tag,
            "%s.fail" % tag : "%s.pass" % tag,
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 1 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeLookup3():
    """Build the GSUB lookup-type-3 (alternate substitution) test font.

    For every feature tag in ``mapping``, creates default/alt1/alt2/alt3
    glyphs (four consecutive codepoints) plus unencoded pass/fail glyphs,
    and a GSUB table where selecting alternate N makes exactly one
    codepoint render the PASS outline.  Saves gsubtest-lookup3.otf.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data (charstring programs and metrics of the
    # template's "P" and "F" glyphs, reused for every generated glyph)
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass (unencoded; only reachable through the GSUB alternates)
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        # tag.fail (unencoded as well)
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        # tag.default
        glyphName = "%s.default" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.alt1,2,3
        for i in range(1,4):
            glyphName = "%s.alt%d" % (tag, i)
            glyphOrder.append(glyphName)
            addGlyphToCFF(
                glyphName=glyphName,
                program=failGlyphProgram,
                private=private,
                globalSubrs=globalSubrs,
                charStringsIndex=charStringsIndex,
                topDict=topDict,
                charStrings=charStrings
            )
            hmtx[glyphName] = failGlyphMetrics
            for table in cmap.tables:
                if table.format == 4:
                    table.cmap[cp] = glyphName
                else:
                    raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
            cp += 1
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list (single DFLT script exposing every feature)
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    # 65535 = no required feature, per the OpenType spec
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list: one feature per tag, each pointing at the
    # lookup with the same index
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups: type 3 (alternate substitution); each source glyph
    # maps to [alt1, alt2, alt3], arranged so that selecting alternate N
    # yields PASS only for the glyph named for that alternate
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 3
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = AlternateSubst()
        subtable.Format = 1
        subtable.LookupType = 3
        subtable.alternates = {
            "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
            "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 3 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeJavascriptData():
    """Write gsubtest-features.js, mapping each feature tag to its base codepoint."""
    features = sorted(mapping)
    outStr = []
    # documentation header explaining the pass/fail codepoint layout
    outStr.append("")
    outStr.append("/* This file is autogenerated by makegsubfonts.py */")
    outStr.append("")
    outStr.append("/* ")
    outStr.append("  Features defined in gsubtest fonts with associated base")
    outStr.append("  codepoints for each feature:")
    outStr.append("")
    outStr.append("  cp = codepoint for feature featX")
    outStr.append("")
    outStr.append("  cp   default   PASS")
    outStr.append("  cp   featX=1   FAIL")
    outStr.append("  cp   featX=2   FAIL")
    outStr.append("")
    outStr.append("  cp+1 default   FAIL")
    outStr.append("  cp+1 featX=1   PASS")
    outStr.append("  cp+1 featX=2   FAIL")
    outStr.append("")
    outStr.append("  cp+2 default   FAIL")
    outStr.append("  cp+2 featX=1   FAIL")
    outStr.append("  cp+2 featX=2   PASS")
    outStr.append("")
    outStr.append("*/")
    outStr.append("")
    outStr.append("var gFeatures = {");
    cp = baseCodepoint
    taglist = []
    for tag in features:
        taglist.append("\"%s\": 0x%x" % (tag, cp))
        # four codepoints are reserved per feature (default + three alternates)
        cp += 4
    outStr.append(textwrap.fill(", ".join(taglist), initial_indent="  ", subsequent_indent="  "))
    outStr.append("};");
    outStr.append("");
    if os.path.exists(javascriptData):
        os.remove(javascriptData)
    f = open(javascriptData, "wb")
    f.write("\n".join(outStr))
    f.close()
# build fonts
# NOTE: this is a Python 2 script (print statements, bytes-as-str file I/O).
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |
jgmize/bedrock | lib/l10n_utils/management/commands/l10n_extract.py | 11 | 7162 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from textwrap import dedent
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from babel.messages.catalog import Catalog
from babel.messages.extract import extract_from_file
from babel.messages.pofile import write_po
from babel.util import pathmatch
from puente.commands import generate_options_map
from puente.settings import get_setting
from lib.l10n_utils.gettext import pot_to_langfiles
DOMAIN = 'django'
METHODS = settings.PUENTE['DOMAIN_METHODS'][DOMAIN]
def gettext_extract():
    """Run puente's ``extract`` management command, creating POT files."""
    call_command('extract', create=True)
def extract_callback(filename, method, options):
if method != 'ignore':
print " %s" % filename
# NOTE: the default arguments below are evaluated once at import time
# (generate_options_map(), get_setting(...)); that is intentional here.
def extract_from_files(filenames,
                       method_map=METHODS,
                       options_map=generate_options_map(),
                       keywords=get_setting('KEYWORDS'),
                       comment_tags=get_setting('COMMENT_TAGS'),
                       callback=extract_callback,
                       strip_comment_tags=False):
    """Extract messages from any source files found in the given iterable.
    This function generates tuples of the form:
    ``(filename, lineno, message, comments)``
    Which extraction method is used per file is determined by the `method_map`
    parameter, which maps extended glob patterns to extraction method names.
    For example, the following is the default mapping:
    >>> method_map = [
    ...     ('**.py', 'python')
    ... ]
    This basically says that files with the filename extension ".py"
    should be processed by the "python" extraction
    method. Files that don't match any of the mapping patterns are ignored. See
    the documentation of the `pathmatch` function for details on the pattern
    syntax.
    The following extended mapping would also use the "genshi" extraction
    method on any file in "templates" subdirectory:
    >>> method_map = [
    ...     ('**/templates/**.*', 'genshi'),
    ...     ('**.py', 'python')
    ... ]
    The dictionary provided by the optional `options_map` parameter augments
    these mappings. It uses extended glob patterns as keys, and the values are
    dictionaries mapping options names to option values (both strings).
    The glob patterns of the `options_map` do not necessarily need to be the
    same as those used in the method mapping. For example, while all files in
    the ``templates`` folders in an application may be Genshi applications, the
    options for those files may differ based on extension:
    >>> options_map = {
    ...     '**/templates/**.txt': {
    ...         'template_class': 'genshi.template:TextTemplate',
    ...         'encoding': 'latin-1'
    ...     },
    ...     '**/templates/**.html': {
    ...         'include_attrs': ''
    ...     }
    ... }
    :param filenames: an iterable of filenames relative to the ROOT of
                      the project
    :param method_map: a list of ``(pattern, method)`` tuples that maps of
                       extraction method names to extended glob patterns
    :param options_map: a dictionary of additional options (optional)
    :param keywords: a dictionary mapping keywords (i.e. names of functions
                     that should be recognized as translation functions) to
                     tuples that specify which of their arguments contain
                     localizable strings
    :param comment_tags: a list of tags of translator comments to search for
                         and include in the results
    :param callback: a function that is called for every file that message are
                     extracted from, just before the extraction itself is
                     performed; the function is passed the filename, the name
                     of the extraction method and and the options dictionary as
                     positional arguments, in that order
    :param strip_comment_tags: a flag that if set to `True` causes all comment
                               tags to be removed from the collected comments.
    :return: an iterator over ``(filename, lineno, funcname, message)`` tuples
    :rtype: ``iterator``
    :see: `pathmatch`
    """
    # adapted from babel.messages.extract.extract_from_dir
    for filename in filenames:
        matched = False
        # only the first pattern that matches a file is applied
        for pattern, method in method_map:
            if pathmatch(pattern, filename):
                matched = True
                filepath = os.path.join(settings.ROOT, filename)
                if not os.path.exists(filepath):
                    print '! %s does not exist!' % filename
                    break
                # later option patterns overwrite earlier ones
                options = {}
                for opattern, odict in options_map.items():
                    if pathmatch(opattern, filename):
                        options = odict
                if callback:
                    callback(filename, method, options)
                for lineno, message, comments, context in\
                        extract_from_file(method, filepath,
                                          keywords=keywords,
                                          comment_tags=comment_tags,
                                          options=options,
                                          strip_comment_tags=strip_comment_tags):
                    yield filename, lineno, message, comments, context
                break
        if not matched:
            print '! %s does not match any domain methods!' % filename
class Command(BaseCommand):
    args = '<filename filename ...>'
    help = dedent("""
        Extracts a .lang file with new translations from all source files.
        If <filename>s are provided only extract from those files.
    """).strip()
    def handle(self, *args, **options):
        """Extract translatable strings, then convert the POT to .lang files.

        With filename arguments, extract only from those files; otherwise
        delegate the whole extraction to puente's ``extract`` command.
        """
        if args:
            # mimics puente.management.commands.extract for a list of files
            outputdir = os.path.join(settings.ROOT, 'locale', 'templates',
                                     'LC_MESSAGES')
            if not os.path.isdir(outputdir):
                os.makedirs(outputdir)
            catalog = Catalog(
                header_comment='',
                project=get_setting('PROJECT'),
                version=get_setting('VERSION'),
                msgid_bugs_address=get_setting('MSGID_BUGS_ADDRESS'),
                charset='utf-8',
            )
            for filename, lineno, msg, cmts, ctxt in extract_from_files(args):
                catalog.add(msg, None, [(filename, lineno)], auto_comments=cmts,
                            context=ctxt)
            # overwrite the existing POT for this domain
            with open(os.path.join(outputdir, '%s.pot' % DOMAIN), 'wb') as fp:
                write_po(fp, catalog, width=80)
        else:
            # This is basically a wrapper around the puente extract
            # command, we might want to do some things around this in the
            # future
            gettext_extract()
        pot_to_langfiles(DOMAIN)
| mpl-2.0 |
Andrew-McNab-UK/DIRAC | ResourceStatusSystem/PolicySystem/Actions/BaseAction.py | 4 | 1493 | # $HeadURL: $
''' BaseAction
Base class for Actions.
'''
from DIRAC import gLogger
__RCSID__ = '$Id: $'
class BaseAction(object):
    """Common base for Policy System actions.

    Keeps the parameters shared by every action and provides a default
    ``run`` method that concrete actions are expected to override.
    """

    def __init__(self, name, decisionParams, enforcementResult, singlePolicyResults, clients):
        # Expected shapes, per the calling code (informational):
        #   enforcementResult : { 'Status' : <str>, 'Reason' : <str>,
        #                         'PolicyActions' : <list>, [ 'EndDate' : <str> ] }
        #   decisionParams    : { 'element', 'name', 'elementType', 'statusType',
        #                         'status', 'reason', 'tokenOwner' }
        self.actionName = name
        self.decisionParams = decisionParams
        self.enforcementResult = enforcementResult
        self.singlePolicyResults = singlePolicyResults
        self.clients = clients
        self.log = gLogger.getSubLogger(self.__class__.__name__)
        self.log.verbose("Running %s action" % self.__class__.__name__)

    def run(self):
        """Default no-op implementation; real actions must override this."""
        self.log.warn('%s: you may want to overwrite this method' % self.actionName)
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 |
kuiwei/edx-platform | common/test/acceptance/performance/test_lms_performance.py | 62 | 4375 | """
Single page performance tests for LMS.
"""
from bok_choy.web_app_test import WebAppTest, with_cache
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.dashboard import DashboardPage
from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.login import LoginPage
from ..pages.lms.progress import ProgressPage
from ..pages.common.logout import LogoutPage
from ..fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
from ..tests.helpers import UniqueCourseTest, load_data_str
from nose.plugins.attrib import attr
@attr(har_mode='explicit')
class LmsPerformanceTest(UniqueCourseTest):
    """
    Base class to capture LMS performance with HTTP Archives.
    """
    # Credentials of the auto-auth user created and enrolled in setUp.
    username = 'test_student'
    email = 'student101@example.com'
    def setUp(self):
        """
        Install a fixture course (sections, problems, HTML blocks and
        course updates) and log in an auto-auth test user enrolled in it.
        """
        super(LmsPerformanceTest, self).setUp()
        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        course_fix.add_update(CourseUpdateDesc(date='January 29, 2014', content='Test course update1'))
        course_fix.add_update(CourseUpdateDesc(date='January 30, 2014', content='Test course update2'))
        course_fix.add_update(CourseUpdateDesc(date='January 31, 2014', content='Test course update3'))
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
                    XBlockFixtureDesc('html', 'Test HTML', data="<html>Html child text</html>"),
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('html', 'Html Child', data="<html>Html child text</html>")
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 3').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 3').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 3')
                )
            )
        ).install()
        AutoAuthPage(self.browser, username=self.username, email=self.email, course_id=self.course_id).visit()
    def _make_har_file(self, page):
        """
        Visit *page* while recording its network traffic into a HAR file
        named after the page class and the course number.
        """
        har_name = '{page}_{course}'.format(page=type(page).__name__, course=self.course_info['number'])
        self.har_capturer.add_page(self.browser, har_name)
        page.visit()
        self.har_capturer.save_har(self.browser, har_name)
    @with_cache
    def test_visit_coursware(self):
        """
        Produce a HAR for loading the Coursware page.
        """
        courseware_page = CoursewarePage(self.browser, self.course_id)
        self._make_har_file(courseware_page)
    @with_cache
    def test_visit_dashboard(self):
        """
        Produce a HAR for loading the Dashboard page.
        """
        dashboard_page = DashboardPage(self.browser)
        self._make_har_file(dashboard_page)
    @with_cache
    def test_visit_course_info(self):
        """
        Produce a HAR for loading the Course Info page.
        """
        course_info_page = CourseInfoPage(self.browser, self.course_id)
        self._make_har_file(course_info_page)
    @with_cache
    def test_visit_login_page(self):
        """
        Produce a HAR for loading the Login page.
        """
        login_page = LoginPage(self.browser)
        # Logout previously logged in user to be able to see Login page.
        LogoutPage(self.browser).visit()
        self._make_har_file(login_page)
    @with_cache
    def test_visit_progress_page(self):
        """
        Produce a HAR for loading the Progress page.
        """
        progress_page = ProgressPage(self.browser, self.course_id)
        self._make_har_file(progress_page)
| agpl-3.0 |
alphafoobar/intellij-community | python/lib/Lib/site-packages/django/core/mail/backends/base.py | 660 | 1164 | """Base email backend class."""
class BaseEmailBackend(object):
    """Abstract base class for email backend implementations.

    Concrete backends must implement ``send_messages``.  ``open`` and
    ``close`` are optional hooks for managing a network connection.
    """

    def __init__(self, fail_silently=False, **kwargs):
        # When True, backends are expected to swallow delivery errors
        # instead of raising them to the caller.
        self.fail_silently = fail_silently

    def open(self):
        """Open a network connection.

        Backends that talk to a server may override this to establish a
        connection; tracking the connection state is the backend's job.
        Applications can call it to force a single connection to be reused
        across several sends (see the SMTP backend's ``send_messages`` for
        a reference implementation).  The default implementation is a
        no-op.
        """
        pass

    def close(self):
        """Close a network connection.  No-op by default."""
        pass

    def send_messages(self, email_messages):
        """Send one or more EmailMessage objects.

        Must return the number of messages sent.  Subclasses are required
        to provide an implementation.
        """
        raise NotImplementedError
| apache-2.0 |
harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/beanstalk/layer1.py | 7 | 56243 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):
APIVersion = '2010-12-01'
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None,
                 proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 api_version=None, security_token=None, profile_name=None):
        """Create a connection to the Elastic Beanstalk service.

        Defaults to the us-east-1 endpoint when *region* is not given.
        *api_version* is accepted for signature compatibility but is not
        used by this constructor.
        """
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        self.region = region
        # Credentials, proxy settings and the resolved endpoint are handled
        # by the AWSQueryConnection base class.
        super(Layer1, self).__init__(aws_access_key_id,
                                     aws_secret_access_key,
                                     is_secure, port, proxy, proxy_port,
                                     proxy_user, proxy_pass,
                                     self.region.endpoint, debug,
                                     https_connection_factory, path,
                                     security_token, profile_name=profile_name)
    def _required_auth_capability(self):
        # Elastic Beanstalk requests are signed with AWS Signature V4.
        return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _get_response(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
def check_dns_availability(self, cname_prefix):
"""Checks if the specified CNAME is available.
:type cname_prefix: string
:param cname_prefix: The prefix used when this CNAME is
reserved.
"""
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
def create_application(self, application_name, description=None):
"""
Creates an application that has one configuration template
named default and no application versions.
:type application_name: string
:param application_name: The name of the application.
Constraint: This name must be unique within your account. If the
specified name already exists, the action returns an
InvalidParameterValue error.
:type description: string
:param description: Describes the application.
:raises: TooManyApplicationsException
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('CreateApplication', params)
def create_application_version(self, application_name, version_label,
description=None, s3_bucket=None,
s3_key=None, auto_create_application=None):
"""Creates an application version for the specified application.
:type application_name: string
:param application_name: The name of the application. If no
application is found with this name, and AutoCreateApplication is
false, returns an InvalidParameterValue error.
:type version_label: string
:param version_label: A label identifying this version. Constraint:
Must be unique per application. If an application version already
exists with this label for the specified application, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type description: string
:param description: Describes this version.
:type s3_bucket: string
:param s3_bucket: The Amazon S3 bucket where the data is located.
:type s3_key: string
:param s3_key: The Amazon S3 key where the data is located. Both
s3_bucket and s3_key must be specified in order to use a specific
source bundle. If both of these values are not specified the
sample application will be used.
:type auto_create_application: boolean
:param auto_create_application: Determines how the system behaves if
the specified application for this version does not already exist:
true: Automatically creates the specified application for this
version if it does not already exist. false: Returns an
InvalidParameterValue if the specified application for this version
does not already exist. Default: false Valid Values: true | false
:raises: TooManyApplicationsException,
TooManyApplicationVersionsException,
InsufficientPrivilegesException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
if s3_bucket and s3_key:
params['SourceBundle.S3Bucket'] = s3_bucket
params['SourceBundle.S3Key'] = s3_key
if auto_create_application:
params['AutoCreateApplication'] = self._encode_bool(
auto_create_application)
return self._get_response('CreateApplicationVersion', params)
def create_configuration_template(self, application_name, template_name,
solution_stack_name=None,
source_configuration_application_name=None,
source_configuration_template_name=None,
environment_id=None, description=None,
option_settings=None):
"""Creates a configuration template.
Templates are associated with a specific application and are used to
deploy different versions of the application with the same
configuration settings.
:type application_name: string
:param application_name: The name of the application to associate with
this configuration template. If no application is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template.
Constraint: This name must be unique per application. Default: If
a configuration template already exists with this name, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack used by this
configuration. The solution stack specifies the operating system,
architecture, and application server for a configuration template.
It determines the set of configuration options as well as the
possible and default values. Use ListAvailableSolutionStacks to
obtain a list of available solution stacks. Default: If the
SolutionStackName is not specified and the source configuration
parameter is blank, AWS Elastic Beanstalk uses the default solution
stack. If not specified and the source configuration parameter is
specified, AWS Elastic Beanstalk uses the same solution stack as
the source configuration template.
:type source_configuration_application_name: string
:param source_configuration_application_name: The name of the
application associated with the configuration.
:type source_configuration_template_name: string
:param source_configuration_template_name: The name of the
configuration template.
:type environment_id: string
:param environment_id: The ID of the environment used with this
configuration template.
:type description: string
:param description: Describes this configuration.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration option to the requested value. The new
value overrides the value obtained from the solution stack or the
source configuration template.
:raises: InsufficientPrivilegesException,
TooManyConfigurationTemplatesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if source_configuration_application_name:
params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
if source_configuration_template_name:
params['SourceConfiguration.TemplateName'] = source_configuration_template_name
if environment_id:
params['EnvironmentId'] = environment_id
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
return self._get_response('CreateConfigurationTemplate', params)
def create_environment(self, application_name, environment_name,
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""Launches an environment for the application using a configuration.
:type application_name: string
:param application_name: The name of the application that contains the
version to be deployed. If no application is found with this name,
CreateEnvironment returns an InvalidParameterValue error.
:type environment_name: string
:param environment_name: A unique name for the deployment environment.
Used in the application URL. Constraint: Must be from 4 to 23
characters in length. The name can contain only letters, numbers,
and hyphens. It cannot start or end with a hyphen. This name must
be unique in your account. If the specified name already exists,
AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment
name becomes part of the CNAME, and therefore part of the visible
URL for your application.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
AWS Elastic Beanstalk UpdateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type template_name: string
:param template_name: The name of the configuration template to
use in deployment. If no configuration template is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Condition: You must specify either this parameter or a
SolutionStackName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type solution_stack_name: string
:param solution_stack_name: This is an alternative to specifying a
configuration name. If specified, AWS Elastic Beanstalk sets the
configuration values to the default values associated with the
specified solution stack. Condition: You must specify either this
or a TemplateName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type cname_prefix: string
:param cname_prefix: If specified, the environment attempts to use this
value as the prefix for the CNAME. If not specified, the
environment uses the environment name.
:type description: string
:param description: Describes this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration options to the requested value in the
configuration set for the new environment. These override the
values obtained from the solution stack or the configuration
template. Each element in the list is a tuple of (Namespace,
OptionName, Value), for example::
[('aws:autoscaling:launchconfiguration',
'Ec2KeyName', 'mykeypair')]
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this new
environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and a ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
:type tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if cname_prefix:
params['CNAMEPrefix'] = cname_prefix
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
def create_storage_location(self):
"""
Creates the Amazon S3 storage location for the account. This
location is used to store user log files.
:raises: TooManyBucketsException,
S3SubscriptionRequiredException,
InsufficientPrivilegesException
"""
return self._get_response('CreateStorageLocation', params={})
def delete_application(self, application_name,
terminate_env_by_force=None):
"""
Deletes the specified application along with all associated
versions and configurations. The application versions will not
be deleted from your Amazon S3 bucket.
:type application_name: string
:param application_name: The name of the application to delete.
:type terminate_env_by_force: boolean
:param terminate_env_by_force: When set to true, running
environments will be terminated before deleting the application.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name}
if terminate_env_by_force:
params['TerminateEnvByForce'] = self._encode_bool(
terminate_env_by_force)
return self._get_response('DeleteApplication', params)
def delete_application_version(self, application_name, version_label,
delete_source_bundle=None):
"""Deletes the specified version from the specified application.
:type application_name: string
:param application_name: The name of the application to delete
releases from.
:type version_label: string
:param version_label: The label of the version to delete.
:type delete_source_bundle: boolean
:param delete_source_bundle: Indicates whether to delete the
associated source bundle from Amazon S3. Valid Values: true |
false
:raises: SourceBundleDeletionException,
InsufficientPrivilegesException,
OperationInProgressException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if delete_source_bundle:
params['DeleteSourceBundle'] = self._encode_bool(
delete_source_bundle)
return self._get_response('DeleteApplicationVersion', params)
def delete_configuration_template(self, application_name, template_name):
"""Deletes the specified configuration template.
:type application_name: string
:param application_name: The name of the application to delete
the configuration template from.
:type template_name: string
:param template_name: The name of the configuration template to
delete.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
return self._get_response('DeleteConfigurationTemplate', params)
def delete_environment_configuration(self, application_name,
environment_name):
"""
Deletes the draft configuration associated with the running
environment. Updating a running environment with any
configuration changes creates a draft configuration set. You can
get the draft configuration using DescribeConfigurationSettings
while the update is in progress or if the update fails. The
DeploymentStatus for the draft configuration indicates whether
the deployment is in process or has failed. The draft
configuration remains in existence until it is deleted with this
action.
:type application_name: string
:param application_name: The name of the application the
environment is associated with.
:type environment_name: string
:param environment_name: The name of the environment to delete
the draft configuration from.
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
return self._get_response('DeleteEnvironmentConfiguration', params)
def describe_application_versions(self, application_name=None,
version_labels=None):
"""Returns descriptions for existing application versions.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include ones that are associated
with the specified application.
:type version_labels: list
:param version_labels: If specified, restricts the returned
descriptions to only include ones that have the specified version
labels.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_labels:
self.build_list_params(params, version_labels,
'VersionLabels.member')
return self._get_response('DescribeApplicationVersions', params)
def describe_applications(self, application_names=None):
"""Returns the descriptions of existing applications.
:type application_names: list
:param application_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include those with the specified
names.
"""
params = {}
if application_names:
self.build_list_params(params, application_names,
'ApplicationNames.member')
return self._get_response('DescribeApplications', params)
def describe_configuration_options(self, application_name=None,
template_name=None,
environment_name=None,
solution_stack_name=None, options=None):
"""Describes configuration options used in a template or environment.
Describes the configuration options that are used in a
particular configuration template or environment, or that a
specified solution stack defines. The description includes the
values the options, their default values, and an indication of
the required action on a running environment if an option value
is changed.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template or environment. Only needed if you want
to describe the configuration options associated with either the
configuration template or environment.
:type template_name: string
:param template_name: The name of the configuration template whose
configuration options you want to describe.
:type environment_name: string
:param environment_name: The name of the environment whose
configuration options you want to describe.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack whose
configuration options you want to describe.
:type options: list
:param options: If specified, restricts the descriptions to only
the specified options.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if options:
self.build_list_params(params, options, 'Options.member')
return self._get_response('DescribeConfigurationOptions', params)
def describe_configuration_settings(self, application_name,
template_name=None,
environment_name=None):
"""
Returns a description of the settings for the specified
configuration set, that is, either a configuration template or
the configuration set associated with a running environment.
When describing the settings for the configuration set
associated with a running environment, it is possible to receive
two sets of setting descriptions. One is the deployed
configuration set, and the other is a draft configuration of an
environment that is either in the process of deployment or that
failed to deploy.
:type application_name: string
:param application_name: The application for the environment or
configuration template.
:type template_name: string
:param template_name: The name of the configuration template to
describe. Conditional: You must specify either this parameter or
an EnvironmentName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to describe.
Condition: You must specify either this or a TemplateName, but not
both. If you specify both, AWS Elastic Beanstalk returns an
InvalidParameterCombination error. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
def describe_environment_resources(self, environment_id=None,
environment_name=None):
"""Returns AWS resources for this environment.
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
resource usage data. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
AWS resource usage data. Condition: You must specify either this
or an EnvironmentId, or both. If you do not specify either, AWS
Elastic Beanstalk returns MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
"""Returns descriptions for existing environments.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that are associated
with this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to include only those that are associated
with this application version.
:type environment_ids: list
:param environment_ids: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified IDs.
:type environment_names: list
:param environment_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
environments: true: Environments that have been deleted after
IncludedDeletedBackTo are displayed. false: Do not include deleted
environments.
:type included_deleted_back_to: timestamp
:param included_deleted_back_to: If specified when IncludeDeleted is
set to true, then environments deleted after this date are
displayed.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
self.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those associated with
this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those associated with this application
version.
:type template_name: string
:param template_name: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that are associated with this
environment configuration.
:type environment_id: string
:param environment_id: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type environment_name: string
:param environment_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type request_id: string
:param request_id: If specified, AWS Elastic Beanstalk restricts the
described events to include only those associated with this request
ID.
:type severity: string
:param severity: If specified, limits the events returned from this
call to include only those with the specified severity or higher.
:type start_time: timestamp
:param start_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur on or after this time.
:type end_time: timestamp
:param end_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur up to, but not including,
the EndTime.
:type max_records: integer
:param max_records: Specifies the maximum number of events that can be
returned, beginning with the most recent event.
:type next_token: string
:param next_token: Pagination token. If specified, the events return
the next batch of results.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
def list_available_solution_stacks(self):
"""Returns a list of the available solution stack names."""
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
"""
Deletes and recreates all of the AWS resources (for example:
the Auto Scaling group, load balancer, etc.) for a specified
environment and forces a restart.
:type environment_id: string
:param environment_id: The ID of the environment to rebuild.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to rebuild.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RebuildEnvironment', params)
def request_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Initiates a request to compile the specified type of
information of the deployed environment. Setting the InfoType
to tail compiles the last lines from the application server log
files of every Amazon EC2 instance in your environment. Use
RetrieveEnvironmentInfo to access the compiled information.
:type info_type: string
:param info_type: The type of information to request.
:type environment_id: string
:param environment_id: The ID of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
"""
Causes the environment to restart the application container
server running on each Amazon EC2 instance.
:type environment_id: string
:param environment_id: The ID of the environment to restart the server
for. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to restart the
server for. Condition: You must specify either this or an
EnvironmentId, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Retrieves the compiled information from a RequestEnvironmentInfo
request.
:type info_type: string
:param info_type: The type of information to retrieve.
:type environment_id: string
:param environment_id: The ID of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentId, you must specify the
DestinationEnvironmentId.
:type source_environment_name: string
:param source_environment_name: The name of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentName, you must specify the
DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentId with
the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentName with
the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
"""Terminates the specified environment.
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to terminate.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
resources should shut down when the environment is terminated:
true: (default) The user AWS resources (for example, the Auto
Scaling group, LoadBalancer, etc.) are terminated along with the
environment. false: The environment is removed from the AWS
Elastic Beanstalk but the AWS resources continue to operate. For
more information, see the AWS Elastic Beanstalk User Guide.
Default: true Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
"""
Updates the specified application to have the specified
properties.
:type application_name: string
:param application_name: The name of the application to update.
If no such application is found, UpdateApplication returns an
InvalidParameterValue error.
:type description: string
:param description: A new description for the application. Default: If
not specified, AWS Elastic Beanstalk does not update the
description.
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
"""Updates the application version to have the properties.
:type application_name: string
:param application_name: The name of the application associated with
this version. If no application is found with this name,
UpdateApplication returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
application version is found with this label, UpdateApplication
returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
                                  description=None, option_settings=None,
                                  options_to_remove=None):
    """
    Update the properties or option values of a configuration template.

    :type application_name: string
    :param application_name: Name of the application that owns the
        template; an unknown name yields an InvalidParameterValue error.

    :type template_name: string
    :param template_name: Name of the configuration template to update;
        an unknown name yields an InvalidParameterValue error.

    :type description: string
    :param description: New description for the configuration.

    :type option_settings: list
    :param option_settings: List of (Namespace, OptionName, Value)
        tuples to set on the configuration.

    :type options_to_remove: list
    :param options_to_remove: Configuration options to remove from the
        configuration set.  Constraint: only UserDefined options can be
        removed.

    :raises: InsufficientPrivilegesException
    """
    params = {'ApplicationName': application_name,
              'TemplateName': template_name}
    if description:
        params['Description'] = description
    if option_settings:
        # Tuple-expanding helper defined on this class.
        self._build_list_params(params, option_settings,
                                'OptionSettings.member',
                                ('Namespace', 'OptionName', 'Value'))
    if options_to_remove:
        # NOTE(review): build_list_params (no leading underscore) is
        # presumably inherited from the base AWS connection class --
        # confirm it exists on the superclass.
        self.build_list_params(params, options_to_remove,
                               'OptionsToRemove.member')
    return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
                       version_label=None, template_name=None,
                       description=None, option_settings=None,
                       options_to_remove=None, tier_name=None,
                       tier_type=None, tier_version='1.0'):
    """
    Update an environment: its description, the deployed application
    version, its configuration template, or individual configuration
    option values.  Updating both the release and the configuration in
    one call is not allowed (AWS returns InvalidParameterCombination).
    When configuration settings are updated, a draft configuration is
    created and DescribeConfigurationSettings returns two setting
    descriptions with different DeploymentStatus values.

    :type environment_id: string
    :param environment_id: ID of the environment to update.  You must
        supply this and/or environment_name; supplying neither yields a
        MissingRequiredParameter error, an unknown ID an
        InvalidParameterValue error.

    :type environment_name: string
    :param environment_name: Name of the environment to update (same
        condition/error behavior as environment_id).

    :type version_label: string
    :param version_label: If given, deploy this application version to
        the environment (InvalidParameterValue if not found).

    :type template_name: string
    :param template_name: If given, deploy this configuration template
        to the environment (InvalidParameterValue if not found).

    :type description: string
    :param description: New description for the environment.

    :type option_settings: list
    :param option_settings: (Namespace, OptionName, Value) tuples to set
        on the running environment's configuration.

    :type options_to_remove: list
    :param options_to_remove: Custom user-defined configuration options
        to remove from this environment's configuration set.

    :type tier_name: string
    :param tier_name: Tier name, "WebServer" (default) or "Worker".
        Must pair with tier_type: ("WebServer", "Standard") or
        ("Worker", "SQS/HTTP").

    :type tier_type: string
    :param tier_type: Tier type, "Standard" or "SQS/HTTP" (see
        tier_name).  Defaults to "Standard".

    :type tier_version: string
    :param tier_version: Tier version; currently only "1.0" is valid.
        Note: all three Tier.* values must be supplied for the tier
        parameters to be sent at all.

    :raises: InsufficientPrivilegesException
    """
    params = {}
    if environment_id:
        params['EnvironmentId'] = environment_id
    if environment_name:
        params['EnvironmentName'] = environment_name
    if version_label:
        params['VersionLabel'] = version_label
    if template_name:
        params['TemplateName'] = template_name
    if description:
        params['Description'] = description
    if option_settings:
        self._build_list_params(params, option_settings,
                                'OptionSettings.member',
                                ('Namespace', 'OptionName', 'Value'))
    if options_to_remove:
        # NOTE(review): no-underscore variant presumably inherited from
        # the base connection class -- confirm.
        self.build_list_params(params, options_to_remove,
                               'OptionsToRemove.member')
    if tier_name and tier_type and tier_version:
        params['Tier.Name'] = tier_name
        params['Tier.Type'] = tier_type
        params['Tier.Version'] = tier_version
    return self._get_response('UpdateEnvironment', params)
def validate_configuration_settings(self, application_name,
                                    option_settings, template_name=None,
                                    environment_name=None):
    """
    Determine whether a set of configuration option values is valid
    against either a configuration template or an environment.  Returns
    messages describing any errors or warnings for the selection.

    :type application_name: string
    :param application_name: Name of the application the template or
        environment belongs to.

    :type option_settings: list
    :param option_settings: (Namespace, OptionName, Value) tuples to
        evaluate.

    :type template_name: string
    :param template_name: Configuration template to validate against.
        Condition: mutually exclusive with environment_name.

    :type environment_name: string
    :param environment_name: Environment to validate against.
        Condition: mutually exclusive with template_name.

    :raises: InsufficientPrivilegesException
    """
    params = {'ApplicationName': application_name}
    self._build_list_params(params, option_settings,
                            'OptionSettings.member',
                            ('Namespace', 'OptionName', 'Value'))
    if template_name:
        params['TemplateName'] = template_name
    if environment_name:
        params['EnvironmentName'] = environment_name
    return self._get_response('ValidateConfigurationSettings', params)
def _build_list_params(self, params, user_values, prefix, tuple_names):
# For params such as the ConfigurationOptionSettings,
# they can specify a list of tuples where each tuple maps to a specific
# arg. For example:
# user_values = [('foo', 'bar', 'baz']
# prefix=MyOption.member
# tuple_names=('One', 'Two', 'Three')
# would result in:
# MyOption.member.1.One = foo
# MyOption.member.1.Two = bar
# MyOption.member.1.Three = baz
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
| gpl-3.0 |
bsmr-misc-forks/letsencrypt | certbot-apache/certbot_apache/tests/obj_test.py | 11 | 5342 | """Tests for certbot_apache.obj."""
import unittest
class VirtualHostTest(unittest.TestCase):
    """Test the VirtualHost class."""

    def setUp(self):
        # Imports are local so the module can be collected even when
        # certbot_apache is not importable.
        from certbot_apache.obj import Addr
        from certbot_apache.obj import VirtualHost
        self.addr1 = Addr.fromstring("127.0.0.1")
        self.addr2 = Addr.fromstring("127.0.0.1:443")
        self.addr_default = Addr.fromstring("_default_:443")
        # vhost1 and vhost1b are deliberately identical (equality checks).
        self.vhost1 = VirtualHost(
            "filep", "vh_path", set([self.addr1]), False, False, "localhost")
        self.vhost1b = VirtualHost(
            "filep", "vh_path", set([self.addr1]), False, False, "localhost")
        self.vhost2 = VirtualHost(
            "fp", "vhp", set([self.addr2]), False, False, "localhost")

    def test_repr(self):
        """repr() of an Addr pins the module-qualified format."""
        self.assertEqual(repr(self.addr2), "certbot_apache.obj.Addr(('127.0.0.1', '443'))")

    def test_eq(self):
        """Equality: same fields -> equal; non-VirtualHost -> unequal."""
        self.assertTrue(self.vhost1b == self.vhost1)
        self.assertFalse(self.vhost1 == self.vhost2)
        self.assertEqual(str(self.vhost1b), str(self.vhost1))
        self.assertFalse(self.vhost1b == 1234)

    def test_ne(self):
        """!= is the negation of ==."""
        self.assertTrue(self.vhost1 != self.vhost2)
        self.assertFalse(self.vhost1 != self.vhost1b)

    def test_conflicts(self):
        """conflicts(): _default_ addresses do not clash with concrete ones."""
        from certbot_apache.obj import Addr
        from certbot_apache.obj import VirtualHost
        complex_vh = VirtualHost(
            "fp", "vhp",
            set([Addr.fromstring("*:443"), Addr.fromstring("1.2.3.4:443")]),
            False, False)
        self.assertTrue(complex_vh.conflicts([self.addr1]))
        self.assertTrue(complex_vh.conflicts([self.addr2]))
        self.assertFalse(complex_vh.conflicts([self.addr_default]))
        self.assertTrue(self.vhost1.conflicts([self.addr2]))
        self.assertFalse(self.vhost1.conflicts([self.addr_default]))
        self.assertFalse(self.vhost2.conflicts([self.addr1,
                                                self.addr_default]))

    def test_same_server(self):
        """same_server(): named vs unnamed hosts and _default_ handling."""
        from certbot_apache.obj import VirtualHost
        no_name1 = VirtualHost(
            "fp", "vhp", set([self.addr1]), False, False, None)
        no_name2 = VirtualHost(
            "fp", "vhp", set([self.addr2]), False, False, None)
        no_name3 = VirtualHost(
            "fp", "vhp", set([self.addr_default]),
            False, False, None)
        no_name4 = VirtualHost(
            "fp", "vhp", set([self.addr2, self.addr_default]),
            False, False, None)
        self.assertTrue(self.vhost1.same_server(self.vhost2))
        self.assertTrue(no_name1.same_server(no_name2))
        self.assertFalse(self.vhost1.same_server(no_name1))
        self.assertFalse(no_name1.same_server(no_name3))
        self.assertFalse(no_name1.same_server(no_name4))
class AddrTest(unittest.TestCase):
    """Test obj.Addr."""

    def setUp(self):
        from certbot_apache.obj import Addr
        self.addr = Addr.fromstring("*:443")
        self.addr1 = Addr.fromstring("127.0.0.1")          # no port
        self.addr2 = Addr.fromstring("127.0.0.1:*")        # wildcard port
        self.addr_defined = Addr.fromstring("127.0.0.1:443")
        self.addr_default = Addr.fromstring("_default_:443")

    def test_wildcard(self):
        """An Addr with a missing or '*' port is a wildcard."""
        self.assertFalse(self.addr.is_wildcard())
        self.assertTrue(self.addr1.is_wildcard())
        self.assertTrue(self.addr2.is_wildcard())

    def test_get_sni_addr(self):
        """get_sni_addr() swaps the port, unless no port was set."""
        from certbot_apache.obj import Addr
        self.assertEqual(
            self.addr.get_sni_addr("443"), Addr.fromstring("*:443"))
        self.assertEqual(
            self.addr.get_sni_addr("225"), Addr.fromstring("*:225"))
        self.assertEqual(
            self.addr1.get_sni_addr("443"), Addr.fromstring("127.0.0.1"))

    def test_conflicts(self):
        # Note: Defined IP is more important than defined port in match
        self.assertTrue(self.addr.conflicts(self.addr1))
        self.assertTrue(self.addr.conflicts(self.addr2))
        self.assertTrue(self.addr.conflicts(self.addr_defined))
        self.assertFalse(self.addr.conflicts(self.addr_default))
        self.assertFalse(self.addr1.conflicts(self.addr))
        self.assertTrue(self.addr1.conflicts(self.addr_defined))
        self.assertFalse(self.addr1.conflicts(self.addr_default))
        self.assertFalse(self.addr_defined.conflicts(self.addr1))
        self.assertFalse(self.addr_defined.conflicts(self.addr2))
        self.assertFalse(self.addr_defined.conflicts(self.addr))
        self.assertFalse(self.addr_defined.conflicts(self.addr_default))
        self.assertTrue(self.addr_default.conflicts(self.addr))
        self.assertTrue(self.addr_default.conflicts(self.addr1))
        self.assertTrue(self.addr_default.conflicts(self.addr_defined))
        # Self test
        self.assertTrue(self.addr.conflicts(self.addr))
        self.assertTrue(self.addr1.conflicts(self.addr1))
        # This is a tricky one...
        self.assertTrue(self.addr1.conflicts(self.addr2))

    def test_equal(self):
        """Equality treats a missing port and '*' as the same thing."""
        self.assertTrue(self.addr1 == self.addr2)
        self.assertFalse(self.addr == self.addr1)
        self.assertFalse(self.addr == 123)

    def test_not_equal(self):
        """!= mirrors ==."""
        self.assertFalse(self.addr1 != self.addr2)
        self.assertTrue(self.addr != self.addr1)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
| apache-2.0 |
gbaty/pyside2 | tests/QtWidgets/qabstracttextdocumentlayout_test.py | 3 | 1258 | import unittest
import py3kcompat as py3k
from PySide2.QtCore import QSizeF, QTimer
from PySide2.QtGui import QTextFormat, QTextCharFormat, QPyTextObject
from PySide2.QtWidgets import QTextEdit
from helper import UsesQApplication
class Foo(QPyTextObject):
    """Minimal text-object handler that records when Qt calls it back."""
    # Class-level flag flipped by intrinsicSize() so the test can observe
    # that the registered handler was actually invoked by the layout.
    called = False

    def intrinsicSize(self, doc, posInDocument, format):
        # Called by the document layout to size the custom inline object.
        Foo.called = True
        return QSizeF(10, 10)

    def drawObject(self, painter, rect, doc, posInDocument, format):
        # Rendering is irrelevant for this test; draw nothing.
        pass
class QAbstractTextDocumentLayoutTest(UsesQApplication):
    """Check that a Python handler registered on the document layout runs."""
    # Custom object type id; user types start at QTextFormat.UserObject.
    objectType = QTextFormat.UserObject + 1

    def foo(self):
        # Insert the object-replacement character (U+FFFC) tagged with our
        # custom object type, then close the widget to end the event loop.
        fmt = QTextCharFormat()
        fmt.setObjectType(QAbstractTextDocumentLayoutTest.objectType)
        cursor = self.textEdit.textCursor()
        cursor.insertText(py3k.unichr(0xfffc), fmt)
        self.textEdit.setTextCursor(cursor)
        self.textEdit.close()

    def testIt(self):
        self.textEdit = QTextEdit()
        self.textEdit.show()
        interface = Foo()
        # Route layout/drawing of objectType through the Python handler.
        self.textEdit.document().documentLayout().registerHandler(QAbstractTextDocumentLayoutTest.objectType, interface)
        QTimer.singleShot(0, self.foo)  # runs once the event loop starts
        self.app.exec_()
        self.assertTrue(Foo.called)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| lgpl-2.1 |
karlito40/servo | tests/wpt/css-tests/css21_dev/xhtml1/reference/support/fonts/makegsubfonts.py | 820 | 14309 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")

# First Private Use Area codepoint assigned to generated test glyphs.
baseCodepoint = 0xe000

# -------
# Features
# -------

# Read the feature list: one OpenType feature tag per line; columns are
# tab-separated and only the first column (the tag) is used.  Blank
# lines and '#' comment lines are skipped.
f = open(featureList, "rb")
text = f.read()
f.close()

mapping = []

for line in text.splitlines():
    line = line.strip()
    if not line:
        continue
    if line.startswith("#"):
        continue
    # parse
    values = line.split("\t")
    tag = values.pop(0)
    mapping.append(tag);

# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
    """Register one glyph in the CFF table.

    The glyph ID is the next free slot (current charset length); the new
    charstring is appended to the CharStrings INDEX and the glyph name is
    added to both the CharStrings mapping and the charset.
    """
    glyphID = len(topDict.charset)  # next free glyph id
    charStringsIndex.append(
        T2CharString(program=program, private=private,
                     globalSubrs=globalSubrs))
    charStrings.charStrings[glyphName] = glyphID
    topDict.charset.append(glyphName)
def makeLookup1():
    """Build gsubtest-lookup1.otf.

    For every feature tag read from the feature list, two glyphs
    (tag.pass, tag.fail) are added and a GSUB lookup type 1 (single
    substitution) is created that swaps them, so applying the feature
    flips the rendered result.  Python 2 script.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        # bump this up so that the sequence is the same as the lookup 3 font
        cp += 3
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups: one type-1 (single substitution) lookup per
    # feature, swapping tag.pass <-> tag.fail
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 1
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = SingleSubst()
        subtable.Format = 2
        subtable.LookupType = 1
        subtable.mapping = {
            "%s.pass" % tag : "%s.fail" % tag,
            "%s.fail" % tag : "%s.pass" % tag,
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 1 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeLookup3():
    """Build gsubtest-lookup3.otf.

    For every feature tag, glyphs tag.pass/.fail/.default/.alt1-3 are
    added and a GSUB lookup type 3 (alternate substitution) maps each of
    default/alt1/alt2/alt3 to a list of alternates such that exactly one
    alternate index renders PASS.  Python 2 script.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass (substitution target only; not mapped in cmap)
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        # tag.fail (substitution target only; not mapped in cmap)
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        # tag.default
        glyphName = "%s.default" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.alt1,2,3
        for i in range(1,4):
            glyphName = "%s.alt%d" % (tag, i)
            glyphOrder.append(glyphName)
            addGlyphToCFF(
                glyphName=glyphName,
                program=failGlyphProgram,
                private=private,
                globalSubrs=globalSubrs,
                charStringsIndex=charStringsIndex,
                topDict=topDict,
                charStrings=charStrings
            )
            hmtx[glyphName] = failGlyphMetrics
            for table in cmap.tables:
                if table.format == 4:
                    table.cmap[cp] = glyphName
                else:
                    raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
            cp += 1
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups: one type-3 (alternate substitution) lookup per
    # feature; alternate index n renders PASS only for glyph tag.altn
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 3
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = AlternateSubst()
        subtable.Format = 1
        subtable.LookupType = 3
        subtable.alternates = {
            "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
            "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 3 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeJavascriptData():
    """Write gsubtest-features.js: a JS map from feature tag to the base
    codepoint of that feature's glyph block (4 codepoints per feature,
    matching the cp += 4 stride used by the font builders)."""
    features = sorted(mapping)
    outStr = []
    outStr.append("")
    outStr.append("/* This file is autogenerated by makegsubfonts.py */")
    outStr.append("")
    outStr.append("/* ")
    outStr.append(" Features defined in gsubtest fonts with associated base")
    outStr.append(" codepoints for each feature:")
    outStr.append("")
    outStr.append(" cp = codepoint for feature featX")
    outStr.append("")
    outStr.append(" cp default PASS")
    outStr.append(" cp featX=1 FAIL")
    outStr.append(" cp featX=2 FAIL")
    outStr.append("")
    outStr.append(" cp+1 default FAIL")
    outStr.append(" cp+1 featX=1 PASS")
    outStr.append(" cp+1 featX=2 FAIL")
    outStr.append("")
    outStr.append(" cp+2 default FAIL")
    outStr.append(" cp+2 featX=1 FAIL")
    outStr.append(" cp+2 featX=2 PASS")
    outStr.append("")
    outStr.append("*/")
    outStr.append("")
    outStr.append("var gFeatures = {");
    cp = baseCodepoint
    taglist = []
    for tag in features:
        taglist.append("\"%s\": 0x%x" % (tag, cp))
        cp += 4
    outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
    outStr.append("};");
    outStr.append("");
    if os.path.exists(javascriptData):
        os.remove(javascriptData)
    f = open(javascriptData, "wb")
    f.write("\n".join(outStr))
    f.close()
# build fonts
# NOTE: Python 2 script (print statements; py2-style raise elsewhere).
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData() | mpl-2.0 |
jonathanverner/brython | www/src/Lib/html/parser.py | 737 | 19605 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import _markupbase
import re
import warnings
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
# Note:
# 1) the strict attrfind isn't really strict, but we can't make it
# correctly strict without breaking backward compatibility;
# 2) if you change attrfind remember to update locatestarttagend too;
# 3) if you change attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    Carries the message plus the (1-based) line number and (0-based)
    column offset where the error occurred, when known.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # offset is 0-based internally; report a 1-based column
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
class HTMLParser(_markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self, strict=False):
    """Initialize and reset this instance.

    If strict is False (the default) the parser accepts invalid
    markup; if True it raises errors instead.  Strict mode is
    deprecated and triggers a DeprecationWarning here.
    """
    if strict:
        warnings.warn("The strict mode is deprecated.",
                      DeprecationWarning, stacklevel=2)
    self.strict = strict
    self.reset()
def reset(self):
    """Reset this instance.  Loses all unprocessed data."""
    self.rawdata = ''           # buffered, not-yet-parsed input
    self.lasttag = '???'        # last start-tag name seen (lowercased)
    self.interesting = interesting_normal   # scanner for '<' / '&'
    self.cdata_elem = None      # set while inside <script>/<style>
    _markupbase.ParserBase.reset(self)
def feed(self, data):
    r"""Feed text to the parser.

    May be called repeatedly with arbitrarily small or large chunks
    (chunks may contain '\n'); unconsumed input is buffered between
    calls.
    """
    self.rawdata += data
    self.goahead(0)
def close(self):
    """Handle any buffered data as if it were followed by EOF."""
    self.goahead(1)
# Internal -- report a parse error at the current source position.
def error(self, message):
    raise HTMLParseError(message, self.getpos())
# Raw text of the most recently parsed start tag, or None.
__starttag_text = None

def get_starttag_text(self):
    """Return full source of start tag: '<...>'."""
    return self.__starttag_text

def set_cdata_mode(self, elem):
    # Inside <script>/<style>, only the matching end tag is special;
    # scan directly for it (case-insensitively).
    self.cdata_elem = elem.lower()
    self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

def clear_cdata_mode(self):
    # Back to normal scanning for '<' and '&'.
    self.interesting = interesting_normal
    self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
    """Main parse loop: dispatch buffered input to the parse_*/handle_*
    methods.  Consumed input is dropped from self.rawdata; incomplete
    constructs at the end of the buffer are kept for the next feed()
    unless ``end`` is true."""
    rawdata = self.rawdata
    i = 0
    n = len(rawdata)
    while i < n:
        # Find the next markup start ('<' or '&'; in CDATA mode, the
        # closing tag) and flush everything before it as data.
        match = self.interesting.search(rawdata, i) # < or &
        if match:
            j = match.start()
        else:
            if self.cdata_elem:
                break
            j = n
        if i < j: self.handle_data(rawdata[i:j])
        i = self.updatepos(i, j)
        if i == n: break
        startswith = rawdata.startswith
        if startswith('<', i):
            # Dispatch on what follows the '<'.
            if starttagopen.match(rawdata, i): # < + letter
                k = self.parse_starttag(i)
            elif startswith("</", i):
                k = self.parse_endtag(i)
            elif startswith("<!--", i):
                k = self.parse_comment(i)
            elif startswith("<?", i):
                k = self.parse_pi(i)
            elif startswith("<!", i):
                if self.strict:
                    k = self.parse_declaration(i)
                else:
                    k = self.parse_html_declaration(i)
            elif (i + 1) < n:
                self.handle_data("<")
                k = i + 1
            else:
                break
            if k < 0:
                # Construct not terminated in the buffer.
                if not end:
                    break
                if self.strict:
                    self.error("EOF in middle of construct")
                # Tolerant recovery at EOF: emit up to the next
                # plausible boundary as plain data.
                k = rawdata.find('>', i + 1)
                if k < 0:
                    k = rawdata.find('<', i + 1)
                    if k < 0:
                        k = i + 1
                else:
                    k += 1
                self.handle_data(rawdata[i:k])
            i = self.updatepos(i, k)
        elif startswith("&#", i):
            match = charref.match(rawdata, i)
            if match:
                name = match.group()[2:-1]
                self.handle_charref(name)
                k = match.end()
                if not startswith(';', k-1):
                    k = k - 1
                i = self.updatepos(i, k)
                continue
            else:
                if ";" in rawdata[i:]: #bail by consuming &#
                    # NOTE(review): rawdata[0:2] / updatepos(i, 2) look
                    # like they should be rawdata[i:i+2] /
                    # updatepos(i, i+2) -- confirm against upstream.
                    self.handle_data(rawdata[0:2])
                    i = self.updatepos(i, 2)
                break
        elif startswith('&', i):
            match = entityref.match(rawdata, i)
            if match:
                name = match.group(1)
                self.handle_entityref(name)
                k = match.end()
                if not startswith(';', k-1):
                    k = k - 1
                i = self.updatepos(i, k)
                continue
            match = incomplete.match(rawdata, i)
            if match:
                # match.group() will contain at least 2 chars
                if end and match.group() == rawdata[i:]:
                    if self.strict:
                        self.error("EOF in middle of entity or char ref")
                    else:
                        k = match.end()
                        if k <= i:
                            k = n
                        i = self.updatepos(i, i + 1)
                # incomplete
                break
            elif (i + 1) < n:
                # not the end of the buffer, and can't be confused
                # with some other construct
                self.handle_data("&")
                i = self.updatepos(i, i + 1)
            else:
                break
        else:
            assert 0, "interesting.search() lied"
    # end while
    if end and i < n and not self.cdata_elem:
        # Flush the remainder as data on close().
        self.handle_data(rawdata[i:n])
        i = self.updatepos(i, n)
    self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
    """Tolerant-mode dispatch for '<!...' constructs: comments, marked
    sections, doctype declarations; anything else becomes a bogus
    comment.  Returns the index past the construct, or -1 if it is not
    yet terminated."""
    rawdata = self.rawdata
    assert rawdata[i:i+2] == '<!', ('unexpected call to '
                                    'parse_html_declaration()')
    if rawdata[i:i+4] == '<!--':
        # this case is actually already handled in goahead()
        return self.parse_comment(i)
    elif rawdata[i:i+3] == '<![':
        return self.parse_marked_section(i)
    elif rawdata[i:i+9].lower() == '<!doctype':
        # find the closing >
        gtpos = rawdata.find('>', i+9)
        if gtpos == -1:
            return -1
        self.handle_decl(rawdata[i+2:gtpos])
        return gtpos+1
    else:
        return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
    """Parse a bogus comment: a '<!' or '</' run that is not real markup.

    Everything up to (but not including) the next '>' is treated as
    comment text.  Returns the index just past the '>', or -1 when no
    '>' is available in the buffer yet.  A false |report| suppresses the
    handle_comment callback.
    """
    rawdata = self.rawdata
    assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
                                            'parse_comment()')
    close = rawdata.find('>', i+2)
    if close == -1:
        return -1
    if report:
        self.handle_comment(rawdata[i+2:close])
    return close + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
    """Parse a processing instruction ('<?...') starting at rawdata[i].

    Reports the text between '<?' and the terminator via handle_pi and
    returns the index just past the instruction, or -1 if the terminator
    has not arrived in the buffer yet.
    """
    rawdata = self.rawdata
    assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
    match = piclose.search(rawdata, i+2) # >
    if not match:
        return -1
    self.handle_pi(rawdata[i+2:match.start()])
    return match.end()
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
    """Parse a start tag beginning at rawdata[i].

    Fires handle_starttag (or handle_startendtag for XHTML-style '<x/>'),
    records the raw tag text for get_starttag_text(), and returns the
    index just past the tag, or -1 if the tag is not yet complete in the
    buffer.
    """
    self.__starttag_text = None
    endpos = self.check_for_whole_start_tag(i)
    if endpos < 0:
        # Incomplete tag: wait for more data.
        return endpos
    rawdata = self.rawdata
    self.__starttag_text = rawdata[i:endpos]

    # Now parse the data between i+1 and j into a tag and attrs
    attrs = []
    match = tagfind.match(rawdata, i+1)
    assert match, 'unexpected call to parse_starttag()'
    k = match.end()
    self.lasttag = tag = match.group(1).lower()
    # Collect attribute name/value pairs until the closing '>' or '/>'.
    while k < endpos:
        if self.strict:
            m = attrfind.match(rawdata, k)
        else:
            m = attrfind_tolerant.match(rawdata, k)
        if not m:
            break
        attrname, rest, attrvalue = m.group(1, 2, 3)
        if not rest:
            # Bare attribute (no '='): value is None.
            attrvalue = None
        elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
             attrvalue[:1] == '"' == attrvalue[-1:]:
            # Strip matching single or double quotes.
            attrvalue = attrvalue[1:-1]
        if attrvalue:
            attrvalue = self.unescape(attrvalue)
        attrs.append((attrname.lower(), attrvalue))
        k = m.end()

    end = rawdata[k:endpos].strip()
    if end not in (">", "/>"):
        # Junk between the last attribute and the closing bracket; adjust
        # the reported position past the tag text before complaining.
        lineno, offset = self.getpos()
        if "\n" in self.__starttag_text:
            lineno = lineno + self.__starttag_text.count("\n")
            offset = len(self.__starttag_text) \
                     - self.__starttag_text.rfind("\n")
        else:
            offset = offset + len(self.__starttag_text)
        if self.strict:
            self.error("junk characters in start tag: %r"
                       % (rawdata[k:endpos][:20],))
        # Tolerant mode: emit the whole malformed tag as data.
        self.handle_data(rawdata[i:endpos])
        return endpos
    if end.endswith('/>'):
        # XHTML-style empty tag: <span attr="value" />
        self.handle_startendtag(tag, attrs)
    else:
        self.handle_starttag(tag, attrs)
        if tag in self.CDATA_CONTENT_ELEMENTS:
            # script/style content is raw text until the matching end tag.
            self.set_cdata_mode(tag)
    return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
    """Return the index just past a complete start tag at rawdata[i],
    or -1 when the buffer may still be missing part of the tag."""
    rawdata = self.rawdata
    if self.strict:
        m = locatestarttagend.match(rawdata, i)
    else:
        m = locatestarttagend_tolerant.match(rawdata, i)
    if m:
        j = m.end()
        # Character right after the scanned tag prefix (may be '' at EOF).
        next = rawdata[j:j+1]
        if next == ">":
            return j + 1
        if next == "/":
            if rawdata.startswith("/>", j):
                # XHTML-style empty-tag close.
                return j + 2
            if rawdata.startswith("/", j):
                # buffer boundary
                return -1
            # else bogus input
            if self.strict:
                self.updatepos(i, j + 1)
                self.error("malformed empty start tag")
            if j > i:
                return j
            else:
                return i + 1
        if next == "":
            # end of input
            return -1
        if next in ("abcdefghijklmnopqrstuvwxyz=/"
                    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
            # end of input in or before attribute value, or we have the
            # '/' from a '/>' ending
            return -1
        # Anything else after the prefix is malformed input.
        if self.strict:
            self.updatepos(i, j)
            self.error("malformed start tag")
        if j > i:
            return j
        else:
            return i + 1
    raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
    """Parse an end tag ('</...') starting at rawdata[i].

    Fires handle_endtag (or handle_data inside cdata elements, or the
    bogus-comment parser for garbage) and returns the index just past
    the tag, or -1 when the tag is not yet complete in the buffer.
    """
    rawdata = self.rawdata
    assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
    match = endendtag.search(rawdata, i+1) # >
    if not match:
        return -1
    gtpos = match.end()
    match = endtagfind.match(rawdata, i) # </ + tag + >
    if not match:
        # Not a well-formed end tag.
        if self.cdata_elem is not None:
            # Inside <script>/<style>: pass the raw text through.
            self.handle_data(rawdata[i:gtpos])
            return gtpos
        if self.strict:
            self.error("bad end tag: %r" % (rawdata[i:gtpos],))
        # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
        namematch = tagfind_tolerant.match(rawdata, i+2)
        if not namematch:
            # w3.org/TR/html5/tokenization.html#end-tag-open-state
            if rawdata[i:i+3] == '</>':
                return i+3
            else:
                return self.parse_bogus_comment(i)
        tagname = namematch.group().lower()
        # consume and ignore other stuff between the name and the >
        # Note: this is not 100% correct, since we might have things like
        # </tag attr=">">, but looking for > after the name should cover
        # most of the cases and is much simpler
        gtpos = rawdata.find('>', namematch.end())
        self.handle_endtag(tagname)
        return gtpos+1

    elem = match.group(1).lower() # script or style
    if self.cdata_elem is not None:
        if elem != self.cdata_elem:
            # End tag for a different element inside cdata: still raw text.
            self.handle_data(rawdata[i:gtpos])
            return gtpos

    self.handle_endtag(elem.lower())
    self.clear_cdata_mode()
    return gtpos
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
    """Handle an XHTML-style empty element (<tag attr='v' />).

    Default implementation reports it as a start tag immediately
    followed by an end tag; override either hook as needed.
    """
    self.handle_starttag(tag, attrs)
    self.handle_endtag(tag)

# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
    """Called for each start tag; tag is lowercased, attrs is a list of
    (name, value) pairs.  Default: do nothing."""
    pass

# Overridable -- handle end tag
def handle_endtag(self, tag):
    """Called for each end tag (lowercased).  Default: do nothing."""
    pass

# Overridable -- handle character reference
def handle_charref(self, name):
    """Called for numeric character references ('&#...;'); name is the
    part between '&#' and ';'.  Default: do nothing."""
    pass

# Overridable -- handle entity reference
def handle_entityref(self, name):
    """Called for named entity references ('&name;').  Default: do
    nothing."""
    pass

# Overridable -- handle data
def handle_data(self, data):
    """Called for runs of text between markup.  Default: do nothing."""
    pass

# Overridable -- handle comment
def handle_comment(self, data):
    """Called for comments; data excludes the '<!--'/'-->' delimiters.
    Default: do nothing."""
    pass

# Overridable -- handle declaration
def handle_decl(self, decl):
    """Called for declarations such as '<!DOCTYPE ...>'; decl excludes
    the '<!' and '>'.  Default: do nothing."""
    pass

# Overridable -- handle processing instruction
def handle_pi(self, data):
    """Called for processing instructions ('<?...'); data excludes the
    leading '<?'.  Default: do nothing."""
    pass
def unknown_decl(self, data):
    """Called for a marked section the parser does not recognize; an
    error in strict mode, otherwise ignored."""
    if self.strict:
        self.error("unknown declaration: %r" % (data,))
# Internal -- helper to remove special character quoting
def unescape(self, s):
    """Return |s| with HTML character and entity references replaced.

    Handles decimal ('&#65;') and hex ('&#x41;') character references,
    HTML5 named references (with or without trailing ';', including
    longest-prefix matches for semicolon-less forms), and leaves
    unresolvable references untouched.
    """
    if '&' not in s:
        return s

    def resolve(m):
        ref = m.groups()[0]
        try:
            if ref[0] == "#":
                ref = ref[1:]
                if ref[0] in ['x', 'X']:
                    code = int(ref[1:].rstrip(';'), 16)
                else:
                    code = int(ref.rstrip(';'))
                return chr(code)
        except ValueError:
            # Not a valid code point spelling: leave the reference as-is.
            return '&#' + ref
        else:
            from html.entities import html5
            if ref in html5:
                return html5[ref]
            elif ref.endswith(';'):
                # Unknown named reference: keep it verbatim.
                return '&' + ref
            # No semicolon: try the longest matching named-reference
            # prefix and re-append the leftover characters.
            for x in range(2, len(ref)):
                if ref[:x] in html5:
                    return html5[ref[:x]] + ref[x:]
            else:
                return '&' + ref

    return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
                  resolve, s, flags=re.ASCII)
| bsd-3-clause |
kokogaga/arducopter | mk/PX4/Tools/genmsg/test/test_genmsg_gentools.py | 215 | 9526 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
TEST_CTX = 'rosgraph_msgs'
def get_test_dir():
    """Absolute path of the 'md5tests' fixture directory next to this file."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, 'md5tests'))
def get_test_msg_dir():
    """Absolute path of the 'files' fixture directory next to this file."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, 'files'))
def get_search_path():
    """Build the package -> [msg directory] search path for the test
    message fixtures."""
    base = get_test_msg_dir()
    packages = ('std_msgs', 'rosgraph_msgs', 'test_ros', 'geometry_msgs')
    return {pkg: [os.path.join(base, pkg, 'msg')] for pkg in packages}
def _load_md5_tests(dir_name):
    """Group the *.txt fixtures under md5tests/<dir_name> by test name.

    A file's test name is its basename without the '.txt' suffix and
    without any trailing digits (so 'foo1.txt' and 'foo2.txt' both land
    under 'foo').  Returns {name: [paths]}.
    """
    test_dir = os.path.join(get_test_dir(), dir_name)
    tests = {}
    for fname in os.listdir(test_dir):
        if not fname.endswith('.txt'):
            continue
        path = os.path.join(test_dir, fname)
        name = fname[:-4]
        # Strip the trailing run of digits that distinguishes variants.
        while name and name[-1].isdigit():
            name = name[:-1]
        assert bool(name)
        tests.setdefault(name, []).append(path)
    return tests
def _compute_md5(msg_context, f):
    """Load the message file |f| (in the rosgraph_msgs test package),
    resolve its dependencies, and return its md5 sum."""
    from genmsg import load_depends, compute_md5
    from genmsg.msg_loader import load_msg_from_string
    # BUG FIX: the file handle was previously opened without ever being
    # closed; use a context manager.
    with open(f, 'r') as fh:
        text = fh.read()
    short_name = os.path.basename(f)[:-len('.msg')]
    full_name = "%s/%s" % (TEST_CTX, short_name)
    spec = load_msg_from_string(msg_context, text, full_name)
    search_path = get_search_path()
    load_depends(msg_context, spec, search_path)
    return compute_md5(msg_context, spec)
def _compute_md5_text(msg_context, f):
    """Load the message file |f| (in the rosgraph_msgs test package),
    resolve its dependencies, and return the text whose md5 defines the
    message checksum."""
    from genmsg import compute_md5_text, load_depends
    from genmsg.msg_loader import load_msg_from_string
    # BUG FIX: the file handle was previously opened without ever being
    # closed; use a context manager.
    with open(f, 'r') as fh:
        text = fh.read()
    short_name = os.path.basename(f)[:-len('.msg')]
    full_name = "%s/%s" % (TEST_CTX, short_name)
    spec = load_msg_from_string(msg_context, text, full_name)
    search_path = get_search_path()
    load_depends(msg_context, spec, search_path)
    return compute_md5_text(msg_context, spec)
def test_compute_md5_text():
    """Regression-test compute_md5_text against the recorded fixtures."""
    from genmsg import MsgContext
    msg_context = MsgContext.create_default()
    # this test is just verifying that the md5sum is what it was for
    # cturtle->electric
    Header_md5 = "2176decaecbce78abc3b96ef049fabed"
    rg_msg_dir = os.path.join(get_test_msg_dir(), TEST_CTX, 'msg')
    clock_msg = os.path.join(rg_msg_dir, 'Clock.msg')
    # a bit gory, but go ahead and regression test these important messages
    assert "time clock" == _compute_md5_text(msg_context, clock_msg)
    log_msg = os.path.join(rg_msg_dir, 'Log.msg')
    assert "byte DEBUG=1\nbyte INFO=2\nbyte WARN=4\nbyte ERROR=8\nbyte FATAL=16\n%s header\nbyte level\nstring name\nstring msg\nstring file\nstring function\nuint32 line\nstring[] topics"%Header_md5 == _compute_md5_text(msg_context, log_msg)

    tests = _load_md5_tests('md5text')
    # text file #1 is the reference
    for k, files in tests.items():
        print("running tests", k)
        ref_file = [f for f in files if f.endswith('%s1.txt'%k)]
        if not ref_file:
            assert False, "failed to load %s"%k
        ref_file = ref_file[0]
        # BUG FIX: close the fixture file instead of leaking the handle.
        with open(ref_file, 'r') as fh:
            ref_text = fh.read().strip()
        print("KEY", k)
        others = [f for f in files if not f.endswith('%s1.txt'%k)]
        # BUG FIX: this used to iterate others[1:], which silently skipped
        # the first non-reference fixture and left it untested.
        for f in others:
            f_text = _compute_md5_text(msg_context, f)
            assert ref_text == f_text, "failed on %s\n%s\n%s: \n[%s]\nvs.\n[%s]\n"%(k, ref_file, f, ref_text, f_text)
def test_md5_equals():
    """All fixture variants under md5tests/same must share one md5 sum."""
    from genmsg import MsgContext
    msg_context = MsgContext.create_default()
    # NOTE: removed an unused `search_path = get_search_path()` local;
    # _compute_md5 builds its own search path internally.
    tests = _load_md5_tests('same')
    for k, files in tests.items():
        print("running tests", k)
        md5sum = _compute_md5(msg_context, files[0])
        for f in files[1:]:
            assert md5sum == _compute_md5(msg_context, f), "failed on %s: \n[%s]\nvs.\n[%s]\n"%(k, _compute_md5_text(msg_context, files[0]), _compute_md5_text(msg_context, f))
def test_md5_not_equals():
    """All fixtures under md5tests/different must have pairwise-distinct
    md5 sums."""
    from genmsg import MsgContext
    msg_context = MsgContext.create_default()
    tests = _load_md5_tests('different')
    for k, files in tests.items():
        print("running tests", k)
        md5s = set()
        # NOTE: removed a stray `md6md5sum = _compute_md5(...)` line that
        # recomputed files[0]'s md5 and discarded the result.
        for f in files:
            md5s.add(_compute_md5(msg_context, f))
        # each md5 should be unique
        assert len(md5s) == len(files)
twist_with_covariance_stamped_full_text = """# This represents an estimate twist with reference coordinate frame and timestamp.
Header header
TwistWithCovariance twist
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/TwistWithCovariance
# This expresses velocity in free space with uncertianty.
Twist twist
# Row-major representation of the 6x6 covariance matrix
# The orientation parameters use a fixed-axis representation.
# In order, the parameters are:
# (x, y, z, rotation about X axis, rotation about Y axis, rotation about Z axis)
float64[36] covariance
================================================================================
MSG: geometry_msgs/Twist
# This expresses velocity in free space broken into it's linear and angular parts.
Vector3 linear
Vector3 angular
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
float64 x
float64 y
float64 z"""
log_full_text = """##
## Severity level constants
##
byte DEBUG=1 #debug level
byte INFO=2 #general level
byte WARN=4 #warning level
byte ERROR=8 #error level
byte FATAL=16 #fatal/critical level
##
## Fields
##
Header header
byte level
string name # name of the node
string msg # message
string file # file the message came from
string function # function the message came from
uint32 line # line the message came from
string[] topics # topic names that the node publishes
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
def test_compute_full_text():
    """compute_full_text must reproduce the recorded full-text fixtures
    (values used for cturtle-electric)."""
    from genmsg import MsgContext, compute_full_text, load_msg_by_type, load_depends
    msg_context = MsgContext.create_default()
    search_path = get_search_path()

    cases = (
        ('rosgraph_msgs/Log', log_full_text),
        ('geometry_msgs/TwistWithCovarianceStamped',
         twist_with_covariance_stamped_full_text),
    )
    for msg_type, expected in cases:
        spec = load_msg_by_type(msg_context, msg_type, search_path)
        load_depends(msg_context, spec, search_path)
        actual = compute_full_text(msg_context, spec)
        assert actual == expected, "[%s][%s]" % (actual, expected)
| gpl-3.0 |
wangzhe0417/shadowsocks | utils/autoban.py | 22 | 2086 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import argparse
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='See README')
    parser.add_argument('-c', '--count', default=3, type=int,
                        help='with how many failure times it should be '
                             'considered as an attack')
    config = parser.parse_args()

    def is_ipv4(text):
        """True iff |text| is a plain dotted-quad IPv4 address."""
        parts = text.split('.')
        if len(parts) != 4:
            return False
        return all(part.isdigit() and int(part) <= 255 for part in parts)

    ips = {}       # failure count per candidate address
    banned = set() # addresses already handed to iptables
    for line in sys.stdin:
        if 'can not parse header when' in line:
            ip = line.split()[-1].split(':')[0]
            # SECURITY FIX: the address is interpolated into a shell
            # command below, so a crafted log line could otherwise inject
            # arbitrary shell syntax; only accept dotted-quad IPv4.
            if not is_ipv4(ip):
                continue
            if ip not in ips:
                ips[ip] = 1
                print(ip)
            else:
                ips[ip] += 1
                if ip not in banned and ips[ip] >= config.count:
                    banned.add(ip)
                    cmd = 'iptables -A INPUT -s %s -j DROP' % ip
                    print(cmd, file=sys.stderr)
                    os.system(cmd)
NikiStanchev/SoftUni | AngularFundamentals/Angular2/Redux/node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
class memoize(object):
  """A minimal memoizing decorator.

  Caches results keyed on the positional argument tuple, so arguments
  must be hashable (it will blow up otherwise, among other "problems").
  """

  def __init__(self, func):
    self.func = func
    self.cache = {}

  def __call__(self, *args):
    if args not in self.cache:
      self.cache[args] = self.func(*args)
    return self.cache[args]
class GypError(Exception):
  """A user-facing error; the main entry point catches this and displays
  its message instead of a traceback."""
def ExceptionAppend(e, msg):
  """Append |msg| to exception |e|'s message, preserving any extra args."""
  if not e.args:
    e.args = (msg,)
    return
  combined = str(e.args[0]) + ' ' + msg
  if len(e.args) == 1:
    e.args = (combined,)
  else:
    e.args = (combined,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
  """Given a list of qualified targets, return the ones whose target-name
  component equals |target|."""
  return [qualified for qualified in qualified_list
          if ParseQualifiedTarget(qualified)[1] == target]
def ParseQualifiedTarget(target):
  """Split a qualified target into [build_file, target, toolset].

  Missing components come back as None.  rsplit is used so a Windows
  drive-letter colon in the build-file path is not mistaken for the
  file/target separator.
  """
  pieces = target.rsplit(':', 1)
  if len(pieces) == 2:
    build_file, remainder = pieces
  else:
    build_file, remainder = None, pieces[0]

  pieces = remainder.rsplit('#', 1)
  if len(pieces) == 2:
    name, toolset = pieces
  else:
    name, toolset = pieces[0], None

  return [build_file, name, toolset]
def ResolveTarget(build_file, target, toolset):
  """Resolve a possibly-qualified |target| into canonical form.

  Returns [build_file, target, toolset] where build_file is either
  absolute or relative to the current directory, using |build_file| as
  the base for relative references and |toolset| as the default toolset.
  """
  # This function resolves a target into a canonical form:
  # - a fully defined build file, either absolute or relative to the current
  #   directory
  # - a target name
  # - a toolset
  #
  # build_file is the file relative to which 'target' is defined.
  # target is the qualified target.
  # toolset is the default toolset for that target.
  [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)

  if parsed_build_file:
    if build_file:
      # If a relative path, parsed_build_file is relative to the directory
      # containing build_file.  If build_file is not in the current directory,
      # parsed_build_file is not a usable path as-is.  Resolve it by
      # interpreting it as relative to build_file.  If parsed_build_file is
      # absolute, it is usable as a path regardless of the current directory,
      # and os.path.join will return it as-is.
      build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
                                                 parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd)
      if not os.path.isabs(build_file):
        build_file = RelativePath(build_file, '.')
    else:
      build_file = parsed_build_file

  if parsed_toolset:
    toolset = parsed_toolset

  return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
  """Return the build-file component of a fully qualified target."""
  build_file, _, _ = ParseQualifiedTarget(fully_qualified_target)
  return build_file
def GetEnvironFallback(var_list, default):
  """Look up the first variable in |var_list| that is set in the
  environment; fall back to |default| when none is."""
  for name in var_list:
    try:
      return os.environ[name]
    except KeyError:
      continue
  return default
def QualifiedTarget(build_file, target, toolset):
  """Build the canonical qualified-target string.

  "Qualified" means /path/to/file.gyp:target_name, suffixed with
  '#toolset' when a toolset is given.
  """
  result = build_file + ':' + target
  if toolset:
    result = result + '#' + toolset
  return result
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
  """Return a relative path identifying |path| relative to |relative_to|,
  assuming both are relative to the current directory.

  If |follow_path_symlink| is true (default) and |path| is a symlink,
  the returned path points at the real file; otherwise it points at the
  symlink itself.  On Windows, paths on different drives come back as
  the absolute |path| since no relative form exists.
  """
  # Convert to normalized (and therefore absolute paths).
  if follow_path_symlink:
    path = os.path.realpath(path)
  else:
    path = os.path.abspath(path)
  relative_to = os.path.realpath(relative_to)

  # On Windows, we can't create a relative path to a different drive, so just
  # use the absolute path.
  if sys.platform == 'win32':
    if (os.path.splitdrive(path)[0].lower() !=
        os.path.splitdrive(relative_to)[0].lower()):
      return path

  # Split the paths into components.
  path_split = path.split(os.path.sep)
  relative_to_split = relative_to.split(os.path.sep)

  # Determine how much of the prefix the two paths share.
  prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))

  # Put enough ".." components to back up out of relative_to to the common
  # prefix, and then append the part of path_split after the common prefix.
  relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
                   path_split[prefix_len:]

  if len(relative_split) == 0:
    # The paths were the same.
    return ''

  # Turn it back into a string and we're done.
  return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
  """Given a path like foo/bar that is relative to toplevel_dir, return
  the inverse relative path back to the toplevel_dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string, unless the path contains
  symlinks.
  """
  if not path:
    return path
  base = '.' if toplevel_dir is None else toplevel_dir
  return RelativePath(base, os.path.join(base, path))
def FixIfRelativePath(path, relative_to):
  """Like RelativePath, but returns |path| unchanged if it is absolute."""
  return path if os.path.isabs(path) else RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
  """Given |relative_to| relative to the current directory and |path|
  relative to the dirname of |relative_to|, return a path identifying
  |path| relative to the current directory."""
  base_dir = os.path.dirname(relative_to)
  return os.path.normpath(os.path.join(base_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
# Matches any argument that must be wrapped in double quotes (including
# the empty string); see the character-by-character rationale above.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
# Captures each character that needs backslash-escaping ("  \  `) so
# re.sub can prepend the backslash; see the rationale above.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  The result is quoted and escaped as necessary so a POSIX shell treats
  it as a literal for the original argument, except that parameter
  (variable) expansions beginning with $ are left intact so the shell
  can still expand them.
  """
  if not isinstance(argument, str):
    argument = str(argument)

  needs_quoting = _quote.search(argument) is not None
  quote_char = '"' if needs_quoting else ''
  escaped = _escape.sub(r'\\\1', argument)
  return quote_char + escaped + quote_char
def EncodePOSIXShellList(list):
  """Encodes |list| suitably for consumption by POSIX shells: each item
  is encoded with EncodePOSIXShellArgument and the results are joined
  with single spaces."""
  # NOTE: the parameter name shadows the builtin `list`; it is kept for
  # API compatibility with existing keyword callers.
  return ' '.join(EncodePOSIXShellArgument(argument) for argument in list)
def DeepDependencyTargets(target_dicts, roots):
  """Returns the recursive list of target dependencies for |roots|,
  excluding the roots themselves."""
  visited = set()
  to_visit = set(roots)
  while to_visit:
    target = to_visit.pop()
    if target in visited:
      continue
    visited.add(target)
    # Queue both the live and original dependency lists.
    spec = target_dicts[target]
    to_visit.update(spec.get('dependencies', []))
    to_visit.update(spec.get('dependencies_original', []))
  return list(visited - set(roots))
def BuildFileTargets(target_list, build_file):
  """From a target_list, returns the subset defined in |build_file|."""
  return [target for target in target_list
          if BuildFile(target) == build_file]
def AllTargets(target_list, target_dicts, build_file):
  """Returns all targets (direct and dependencies) for the specified
  build_file."""
  direct = BuildFileTargets(target_list, build_file)
  return direct + DeepDependencyTargets(target_dicts, direct)
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.

  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """

  class Writer(object):
    """Wrapper around file which only covers the target if it differs."""

    def __init__(self):
      """Open a temporary file next to |filename| to collect the writes."""
      # Pick temporary file.
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise

    def __getattr__(self, attrname):
      """Delegate everything else (write, flush, ...) to self.tmp_file."""
      return getattr(self.tmp_file, attrname)

    def close(self):
      """Finish the write: keep the old file if identical, else replace
      it with the temporary file.  The temp file is always cleaned up."""
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:
          # A missing target file simply means "different".
          if e.errno != errno.ENOENT:
            raise
        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one?  Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove. Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise

  return Writer()
def EnsureDirExists(path):
  """Make sure the directory for |path| exists."""
  dirname = os.path.dirname(path)
  if not dirname:
    # |path| has no directory component; nothing to create.
    return
  try:
    os.makedirs(dirname)
  except OSError as e:
    # BUG FIX: this used to swallow *every* OSError.  Only "already
    # exists" is expected here; real failures (permissions, read-only
    # filesystem) are re-raised instead of surfacing later as confusing
    # write errors.
    if e.errno != errno.EEXIST:
      raise
def GetFlavor(params):
  """Returns |params['flavor']| if it's set, the system's default flavor
  otherwise (derived from sys.platform)."""
  if 'flavor' in params:
    return params['flavor']

  # Exact platform-string matches first.
  exact = {
      'cygwin': 'win',
      'win32': 'win',
      'darwin': 'mac',
  }
  if sys.platform in exact:
    return exact[sys.platform]

  # Then prefix matches (e.g. 'sunos5', 'freebsd10').
  for prefix, flavor in (('sunos', 'solaris'),
                         ('freebsd', 'freebsd'),
                         ('openbsd', 'openbsd'),
                         ('netbsd', 'netbsd'),
                         ('aix', 'aix')):
    if sys.platform.startswith(prefix):
      return flavor

  return 'linux'
def CopyTool(flavor, out_path):
  """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
  to |out_path|.

  Flavors without a support script are a no-op.  The copied file gets a
  'generated' header and is marked executable.
  """
  # aix and solaris just need flock emulation. mac and win use more complicated
  # support scripts.
  prefix = {
      'aix': 'flock',
      'solaris': 'flock',
      'mac': 'mac',
      'win': 'win'
      }.get(flavor, None)
  if not prefix:
    return

  # Slurp input file.
  source_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
  with open(source_path) as source_file:
    source = source_file.readlines()

  # Add header and write it out.
  tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
  with open(tool_path, 'w') as tool_file:
    tool_file.write(
        ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))

  # Make file executable.
  os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Return the items of |seq| with duplicates removed, keeping the first
  occurrence of each. |idfun| optionally maps an item to its identity key
  (the key must be hashable); the default is the item itself."""
  if idfun is None:
    idfun = lambda x: x
  seen = set()
  unique = []
  for element in seq:
    key = idfun(element)
    if key not in seen:
      seen.add(key)
      unique.append(element)
  return unique
# Based on http://code.activestate.com/recipes/576694/.
# BUG fix: collections.MutableSet moved to collections.abc in Python 3.3
# and the legacy alias was removed in Python 3.10. Resolve the base class
# once, keeping Python 2 compatibility.
try:
  _MutableSet = collections.abc.MutableSet
except AttributeError:  # Python 2: no collections.abc module attribute
  _MutableSet = collections.MutableSet


# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(_MutableSet):
  """A mutable set that remembers insertion order.

  Backed by a dict plus a circular doubly linked list with a sentinel
  node, giving O(1) add/discard/pop while iteration follows insertion
  order.
  """

  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end]  # sentinel node for doubly linked list
    self.map = {}            # key --> [key, prev, next]
    if iterable is not None:
      self |= iterable

  def __len__(self):
    return len(self.map)

  def __contains__(self, key):
    return key in self.map

  def add(self, key):
    # Append at the tail (just before the sentinel) if not already present.
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]

  def discard(self, key):
    # Unlink the node; silently a no-op when the key is absent.
    if key in self.map:
      key, prev_item, next_item = self.map.pop(key)
      prev_item[2] = next_item
      next_item[1] = prev_item

  def __iter__(self):
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]

  def __reversed__(self):
    end = self.end
    curr = end[1]
    while curr is not end:
      yield curr[0]
      curr = curr[1]

  # The second argument is an addition that causes a pylint warning.
  def pop(self, last=True):  # pylint: disable=W0221
    if not self:
      raise KeyError('set is empty')
    key = self.end[1][0] if last else self.end[2][0]
    self.discard(key)
    return key

  def __repr__(self):
    if not self:
      return '%s()' % (self.__class__.__name__,)
    return '%s(%r)' % (self.__class__.__name__, list(self))

  def __eq__(self, other):
    # Order matters only when comparing against another OrderedSet.
    if isinstance(other, OrderedSet):
      return len(self) == len(other) and list(self) == list(other)
    return set(self) == set(other)

  # Extensions to the recipe.
  def update(self, iterable):
    for i in iterable:
      if i not in self:
        self.add(i)
class CycleError(Exception):
  """An exception raised when an unexpected cycle is detected."""

  def __init__(self, nodes):
    # The set/collection of nodes participating in the detected cycle.
    self.nodes = nodes

  def __str__(self):
    return 'CycleError: cycle involving: %s' % (str(self.nodes),)
def TopologicallySorted(graph, get_edges):
  r"""Topologically sort based on a user provided edge definition.

  Args:
    graph: A list of node names.
    get_edges: A function mapping from node name to a hashable collection
               of node names which this node has outgoing edges to.
  Returns:
    A list containing all of the node in graph in topological order.
    It is assumed that calling get_edges once for each node and caching is
    cheaper than repeatedly calling get_edges.
  Raises:
    CycleError in the event of a cycle.
  Example:
    graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
    def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', b']
  """
  # `memoize` is defined elsewhere in this module; it caches get_edges
  # results so each node's edges are computed at most once.
  get_edges = memoize(get_edges)
  visited = set()    # nodes whose subtree is fully processed (or in progress)
  visiting = set()   # nodes on the current DFS path, for cycle detection
  ordered_nodes = []

  def Visit(node):
    # A node already on the current DFS path means a back edge: a cycle.
    if node in visiting:
      raise CycleError(visiting)
    if node in visited:
      return
    visited.add(node)
    visiting.add(node)
    for neighbor in get_edges(node):
      Visit(neighbor)
    visiting.remove(node)
    # Prepending after all descendants are placed yields dependencies-last
    # order (each node precedes the nodes it points to).
    ordered_nodes.insert(0, node)

  # Iterate in sorted order so the output is deterministic for a given graph.
  for node in sorted(graph):
    Visit(node)
  return ordered_nodes
def CrossCompileRequested():
  # TODO: figure out how to not build extra host objects in the
  # non-cross-compile case when this is enabled, and enable unconditionally.
  # Returns the first truthy cross-compile environment value, or the value
  # of CXX_target (possibly None) when none is set.
  for var in ('GYP_CROSSCOMPILE',
              'AR_host', 'CC_host', 'CXX_host',
              'AR_target', 'CC_target'):
    value = os.environ.get(var)
    if value:
      return value
  return os.environ.get('CXX_target')
| mit |
vikas1885/test1 | common/lib/xmodule/xmodule/modulestore/tests/test_mongo_call_count.py | 91 | 7866 | """
Tests to verify correct number of MongoDB calls during course import/export and traversal
when using the Split modulestore.
"""
from tempfile import mkdtemp
from shutil import rmtree
from unittest import TestCase, skip
import ddt
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.tests.factories import check_mongo_calls
from xmodule.modulestore.tests.test_cross_modulestore_import_export import (
MixedModulestoreBuilder, VersioningModulestoreBuilder,
MongoModulestoreBuilder, TEST_DATA_DIR
)
MIXED_OLD_MONGO_MODULESTORE_BUILDER = MixedModulestoreBuilder([('draft', MongoModulestoreBuilder())])
MIXED_SPLIT_MODULESTORE_BUILDER = MixedModulestoreBuilder([('split', VersioningModulestoreBuilder())])
@ddt.ddt
@skip("Fix call counts below - sometimes the counts are off by 1.")
class CountMongoCallsXMLRoundtrip(TestCase):
    """
    This class exists to test XML import and export to/from Split.

    Each ddt case pins the expected number of MongoDB reads/writes for an
    import -> export -> re-import round trip against a modulestore flavor,
    so regressions in call counts are caught.
    """

    def setUp(self):
        super(CountMongoCallsXMLRoundtrip, self).setUp()
        # Temporary directory for the exported course; removed on cleanup.
        self.export_dir = mkdtemp()
        self.addCleanup(rmtree, self.export_dir, ignore_errors=True)

    @ddt.data(
        # (store builder, export reads, import reads,
        #  writes on first import, writes on second import)
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 287, 779, 702, 702),
        (MIXED_SPLIT_MODULESTORE_BUILDER, 37, 16, 190, 189),
    )
    @ddt.unpack
    def test_import_export(self, store_builder, export_reads, import_reads, first_import_writes, second_import_writes):
        with store_builder.build() as (source_content, source_store):
            with store_builder.build() as (dest_content, dest_store):
                source_course_key = source_store.make_course_key('a', 'course', 'course')
                dest_course_key = dest_store.make_course_key('a', 'course', 'course')

                # An extra import write occurs in the first Split import due to the mismatch between
                # the course id and the wiki_slug in the test XML course. The course must be updated
                # with the correct wiki_slug during import.
                with check_mongo_calls(import_reads, first_import_writes):
                    import_course_from_xml(
                        source_store,
                        'test_user',
                        TEST_DATA_DIR,
                        source_dirs=['manual-testing-complete'],
                        static_content_store=source_content,
                        target_id=source_course_key,
                        create_if_not_present=True,
                        raise_on_failure=True,
                    )

                with check_mongo_calls(export_reads):
                    export_course_to_xml(
                        source_store,
                        source_content,
                        source_course_key,
                        self.export_dir,
                        'exported_source_course',
                    )

                # Re-import the just-exported course into a second store and
                # verify the write count there as well.
                with check_mongo_calls(import_reads, second_import_writes):
                    import_course_from_xml(
                        dest_store,
                        'test_user',
                        self.export_dir,
                        source_dirs=['exported_source_course'],
                        static_content_store=dest_content,
                        target_id=dest_course_key,
                        create_if_not_present=True,
                        raise_on_failure=True,
                    )
@ddt.ddt
class CountMongoCallsCourseTraversal(TestCase):
    """
    Tests the number of Mongo calls made when traversing a course tree from the top course root
    to the leaf nodes.
    """

    # Suppose you want to traverse a course - maybe accessing the fields of each XBlock in the course,
    # maybe not. What parameters should one use for get_course() in order to minimize the number of
    # mongo calls? The tests below both ensure that code changes don't increase the number of mongo calls
    # during traversal -and- demonstrate how to minimize the number of calls.
    @ddt.data(
        # Each case: (store builder, depth, lazy, access_all_block_fields,
        #             expected number of mongo calls).
        # These two lines show the way this traversal *should* be done
        # (if you'll eventually access all the fields and load all the definitions anyway).
        # 'lazy' does not matter in old Mongo.
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, True, 189),
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, True, 189),
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, True, 387),
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, True, 387),
        # As shown in these two lines: whether or not the XBlock fields are accessed,
        # the same number of mongo calls are made in old Mongo for depth=None.
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, False, False, 189),
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, None, True, False, 189),
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, False, False, 387),
        (MIXED_OLD_MONGO_MODULESTORE_BUILDER, 0, True, False, 387),
        # The line below shows the way this traversal *should* be done
        # (if you'll eventually access all the fields and load all the definitions anyway).
        (MIXED_SPLIT_MODULESTORE_BUILDER, None, False, True, 4),
        (MIXED_SPLIT_MODULESTORE_BUILDER, None, True, True, 143),
        (MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, True, 143),
        (MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, True, 143),
        (MIXED_SPLIT_MODULESTORE_BUILDER, None, False, False, 4),
        (MIXED_SPLIT_MODULESTORE_BUILDER, None, True, False, 4),
        # TODO: The call count below seems like a bug - should be 4?
        # Seems to be related to using self.lazy in CachingDescriptorSystem.get_module_data().
        (MIXED_SPLIT_MODULESTORE_BUILDER, 0, False, False, 143),
        (MIXED_SPLIT_MODULESTORE_BUILDER, 0, True, False, 4)
    )
    @ddt.unpack
    def test_number_mongo_calls(self, store, depth, lazy, access_all_block_fields, num_mongo_calls):
        with store.build() as (source_content, source_store):
            source_course_key = source_store.make_course_key('a', 'course', 'course')

            # First, import a course.
            import_course_from_xml(
                source_store,
                'test_user',
                TEST_DATA_DIR,
                source_dirs=['manual-testing-complete'],
                static_content_store=source_content,
                target_id=source_course_key,
                create_if_not_present=True,
                raise_on_failure=True,
            )

            # Course traversal modeled after the traversal done here:
            # lms/djangoapps/mobile_api/video_outlines/serializers.py:BlockOutline
            # Starting at the root course block, do a breadth-first traversal using
            # get_children() to retrieve each block's children.
            with check_mongo_calls(num_mongo_calls):
                with source_store.bulk_operations(source_course_key):
                    start_block = source_store.get_course(source_course_key, depth=depth, lazy=lazy)
                    all_blocks = []
                    stack = [start_block]
                    while stack:
                        curr_block = stack.pop()
                        all_blocks.append(curr_block)
                        if curr_block.has_children:
                            # Reverse so children are visited left-to-right.
                            for block in reversed(curr_block.get_children()):
                                stack.append(block)

                    if access_all_block_fields:
                        # Read the fields on each block in order to ensure each block and its definition is loaded.
                        for xblock in all_blocks:
                            for __, field in xblock.fields.iteritems():
                                if field.is_set_on(xblock):
                                    __ = field.read_from(xblock)
| agpl-3.0 |
wangyum/tensorflow | tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py | 36 | 8009 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparsemaxLossOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.sparsemax import sparsemax, sparsemax_loss
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
test_obs = 10
class SparsemaxLossTest(test.TestCase):
  """Validates the TF sparsemax-loss kernel against NumPy reference
  implementations and the propositions of Martins & Astudillo (2016)."""

  def _np_sparsemax(self, z):
    # NumPy reference implementation of the sparsemax projection.
    z = z - np.mean(z, axis=1)[:, np.newaxis]

    # sort z
    z_sorted = np.sort(z, axis=1)[:, ::-1]

    # calculate k(z)
    z_cumsum = np.cumsum(z_sorted, axis=1)
    k = np.arange(1, z.shape[1] + 1)
    z_check = 1 + k * z_sorted > z_cumsum
    # use argmax to get the index by row as .nonzero() doesn't
    # take an axis argument. np.argmax return the first index, but the last
    # index is required here, use np.flip to get the last index and
    # `z.shape[axis]` to compensate for np.flip afterwards.
    k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)

    # calculate tau(z)
    tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
    tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)

    # calculate p
    return np.maximum(0, z - tau_z)

  def _np_sparsemax_loss(self, z, q):
    # NumPy reference implementation of the sparsemax loss; q is a one-hot
    # label matrix.
    z = z - np.mean(z, axis=1)[:, np.newaxis]

    # Calculate q^T * z
    z_k = np.sum(q * z, axis=1)

    # calculate sum over S(z)
    p = self._np_sparsemax(z)
    s = p > 0
    # z_i^2 - tau(z)^2 = p_i (2 * z_i - p_i) for i \in S(z)
    S_sum = np.sum(s * p * (2 * z - p), axis=1)

    # because q is binary, sum([q_1^2, q_2^2, ...]) is just sum(q)
    q_norm = np.sum(q, axis=1)

    return -z_k + 0.5 * S_sum + 0.5 * q_norm

  def _np_sparsemax_loss_grad(self, z, q):
    # chain rule
    grad = 1
    return grad * (-q + self._np_sparsemax(z))

  def _tf_sparsemax(self, z, dtype, use_gpu):
    # Run the TF sparsemax op and return (op, evaluated output).
    with self.test_session(use_gpu=use_gpu):
      tf_sparsemax_op = sparsemax(z.astype(dtype))
      tf_sparsemax_out = tf_sparsemax_op.eval()

    return tf_sparsemax_op, tf_sparsemax_out

  def _tf_sparsemax_loss(self, z, q, dtype, use_gpu):
    # Run the TF sparsemax-loss op and return (op, evaluated output).
    z = z.astype(dtype)
    q = q.astype(dtype)

    with self.test_session(use_gpu=use_gpu):
      tf_sparsemax_op = sparsemax(z)
      tf_loss_op = sparsemax_loss(z, tf_sparsemax_op, q)
      tf_loss_out = tf_loss_op.eval()

    return tf_loss_op, tf_loss_out

  def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu):
    """check sparsemax-loss kernel against numpy"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    q = np.zeros((test_obs, 10))
    q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1

    tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
    np_loss = self._np_sparsemax_loss(z, q).astype(dtype)

    self.assertAllCloseAccordingToType(np_loss, tf_loss_out,
                                       half_atol=1e-2, half_rtol=5e-3)
    self.assertShapeEqual(np_loss, tf_loss_op)

  def _test_constant_add(self, dtype, random, use_gpu):
    """check sparsemax-loss proposition 3"""
    # Proposition 3: the loss is invariant to adding a per-row constant.
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    c = random.uniform(low=-3, high=3, size=(test_obs, 1))
    q = np.zeros((test_obs, 10))
    q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1

    _, tf_loss_zpc = self._tf_sparsemax_loss(
        z + c, q, dtype, use_gpu
    )
    _, tf_loss_z = self._tf_sparsemax_loss(
        z, q, dtype, use_gpu
    )

    self.assertAllCloseAccordingToType(tf_loss_zpc, tf_loss_z,
                                       float_atol=5e-6, float_rtol=5e-6,
                                       half_atol=1e-2, half_rtol=1e-2)

  def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):
    """check sparsemax-loss proposition 4"""
    # Proposition 4: the loss is always non-negative.
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    q = np.zeros((test_obs, 10))
    q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1

    tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)

    self.assertAllCloseAccordingToType(np.abs(tf_loss_out), tf_loss_out)
    self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)

  def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):
    """check sparsemax-loss proposition 5"""
    # construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for
    # delta_0 = 1.
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    z[:, 0] = np.max(z, axis=1) + 1.05

    q = np.zeros((test_obs, 10))
    q[:, 0] = 1

    tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)
    tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)

    self.assertAllCloseAccordingToType(np.zeros(test_obs), tf_loss_out)
    self.assertShapeEqual(np.zeros(test_obs), tf_loss_op)

    self.assertAllCloseAccordingToType(q, tf_sparsemax_out)
    self.assertShapeEqual(q, tf_sparsemax_op)

  def _test_gradient_against_estimate(self, dtype, random, use_gpu):
    """check sparsemax-loss Rop, aginst estimated-loss Rop"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
    q = np.zeros((test_obs, 10)).astype(dtype)
    q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1

    logits = array_ops.placeholder(dtype, name='z')
    sparsemax_op = sparsemax(logits)
    loss_op = sparsemax_loss(logits, sparsemax_op, q)

    with self.test_session(use_gpu=use_gpu):
      err = gradient_checker.compute_gradient_error(
          logits, z.shape,
          loss_op, (test_obs, ),
          x_init_value=z, delta=1e-9
      )

    self.assertLess(err, 1e-4)

  def _test_gradient_against_numpy(self, dtype, random, use_gpu):
    """check sparsemax-loss Rop, aginst numpy Rop"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    q = np.zeros((test_obs, 10))
    q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1

    logits = constant_op.constant(z.astype(dtype), name='z')
    sparsemax_op = sparsemax(logits)
    loss_op = sparsemax_loss(logits, sparsemax_op, q.astype(dtype))
    loss_grad_op = gradients_impl.gradients(loss_op, [logits])[0]

    with self.test_session(use_gpu=use_gpu):
      tf_grad = loss_grad_op.eval()
      np_grad = self._np_sparsemax_loss_grad(z, q).astype(dtype)

      self.assertAllCloseAccordingToType(np_grad, tf_grad,
                                         half_atol=1e-2, half_rtol=5e-3)
      self.assertShapeEqual(np_grad, loss_grad_op)

  def _test_dtype(self, dtype):
    # Fixed seed so every dtype sees the same inputs.
    random = np.random.RandomState(1)

    self._test_sparsemax_loss_against_numpy(dtype, random, use_gpu=False)

    self._test_constant_add(dtype, random, use_gpu=False)

    self._test_sparsemax_loss_positive(dtype, random, use_gpu=False)

    self._test_sparsemax_loss_zero(dtype, random, use_gpu=False)

    # sparsemax is not a smooth function so gradient estimation is only
    # possibol for float64.
    if dtype == 'float64':
      self._test_gradient_against_estimate(dtype, random, use_gpu=False)

    self._test_gradient_against_numpy(dtype, random, use_gpu=False)

  def testFloat(self):
    self._test_dtype('float32')

  def testDouble(self):
    self._test_dtype('float64')
# Allow running this test file directly (outside a test runner).
if __name__ == "__main__":
  test.main()
| apache-2.0 |
burakbayramli/quant_at | data/simple.py | 1 | 7677 | from pymongo import MongoClient
import logging, Quandl, random, os
import datetime, glob, pandas as pd
from pandas_datareader import data, wb
import numpy as np, sys
from memo import *
MONGO_STAT = "C:\\Progra~1\\MongoDB\\Server\\3.2\\bin\\mongostat.exe /rowcount:1"
@memo # so that we dont constantly read the .quandl file
def get_quandl_auth():
    """Return the Quandl auth token stored in $HOME/.quandl.

    Exits the process with a hint message when the file is missing.
    The @memo decorator caches the token after the first read.
    """
    fname = '%s/.quandl' % os.environ['HOME']
    if not os.path.isfile(fname):
        # BUG fix: this was a Python-2-only `print` statement; the function
        # call form produces identical output and is valid on Python 3 too.
        print('Please create a %s file ' % fname)
        exit()
    auth = open(fname).read()
    return auth
def web_load(symbol, backend, start, end):
    """
    Outside interface to get all the data.

    Fetches `symbol` from the given pandas-datareader `backend`
    ("fred", "google" or "yahoo") for the [start, end] window.
    Returns a DataFrame, or None when the backend raises IOError
    (logged as a missing symbol) or the backend is unknown.
    """
    auth = get_quandl_auth()  # kept for parity; not used by these backends
    try:
        if backend == "fred":
            return data.DataReader(symbol, backend, start, end)
        if backend == "google":
            # BUG fix: the original built `market + ":" + symbol`, but
            # `market` is undefined in this scope and raised NameError.
            # Callers must pass an exchange-qualified symbol themselves
            # (e.g. "NASDAQ:GOOG") — TODO confirm against callers.
            return data.DataReader(symbol, backend, start, end)
        if backend == "yahoo":
            return data.DataReader(symbol, backend, start, end)
    except IOError:
        logging.debug("cant find " + symbol)
def get_beginning_of_time():
    """Return the earliest date for which history is downloaded."""
    return datetime.datetime(year=2006, month=1, day=1)
def get_today():
    """Return yesterday at midnight, both as a datetime and as a YYYYMMDD int.

    "Today" is deliberately yesterday's calendar date so that only fully
    closed trading days are requested.
    """
    #today=datetime.datetime(2016, 2, 15) # hack, freeze the end time
    yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
    today = datetime.datetime(yesterday.year, yesterday.month, yesterday.day)
    return today, int(today.strftime('%Y%m%d'))
def get_last_date_in_db(symbol, db, today):
    """Return the newest stored date for `symbol` as a pandas Timestamp.

    Returns None when the symbol has no records. `today` is unused but
    kept for interface compatibility with existing callers.
    """
    cursor = db.simple.find({"_id.sym": symbol})
    if cursor.count() == 0:
        return None
    query = {"$query": {"_id.sym": symbol}, "$orderby": {"_id.dt": -1}}
    newest = list(db.simple.find(query).limit(1))
    last_date_in_db = int(newest[0]['_id']['dt'])
    return pd.to_datetime(str(last_date_in_db), format='%Y%m%d')
def do_download(items):
    """
    Download a given list of (market,symbol,name) triplets.
    This list would have been prepared outside of this call, probably
    a chunk of bigger list of symbols. This way this function has no
    knowledge of what all symbols are, it only works on the piece given
    to it.

    Records are upserted into the local MongoDB `findb.simple` collection,
    keyed by {"sym": symbol, "dt": YYYYMMDD int}. Only "fred" and "yahoo"
    markets are handled; other markets are silently skipped.
    """
    connection = MongoClient()
    db = connection.findb
    tickers = db.simple
    beginning_of_time=get_beginning_of_time()
    today, today_int = get_today()
    logging.debug ("%d items" % len(items))
    for market,symbol,name in items:
        logging.debug("%s %s" % (symbol, name))
        s = None; last_date_in_db = None
        if market == "fred":
            # Skip symbols already current as of "today" (yesterday's date).
            last_date = get_last_date_in_db(symbol, db, today)
            logging.debug('last %s' % last_date)
            logging.debug('today %s' % today)
            if last_date and last_date >= today:
                logging.debug('no need')
                continue
            # NOTE(review): fred always re-downloads the full history from
            # beginning_of_time rather than resuming from last_date.
            s = web_load(symbol, "fred", beginning_of_time, today)
            # web_load returns None on failure; duck-type check for a frame.
            if 'DataFrame' not in str(type(s)): continue
            for srow in s.iterrows():
                # Convert the timestamp index "YYYY-MM-DD..." to an int key.
                dt = str(srow[0])[0:10]
                dt = int(dt.replace("-",""))
                # FRED series carry a single value, stored under "a".
                new_row = {"_id": {"sym": symbol, "dt": dt },"a": float(srow[1])}
                tickers.save(new_row)
        elif market == "yahoo" :
            start = beginning_of_time; end = today
            last_date = get_last_date_in_db(symbol,db,today)
            logging.debug('last %s' % last_date)
            logging.debug('today %s' % today)
            if last_date and last_date >= today:
                logging.debug('no need')
                continue
            # Resume from the last stored date when one exists.
            if last_date: start = last_date
            logging.debug("" + repr(start) + " " + repr(end))
            s = web_load(symbol, market, start, end)
            # symbol could not be found
            if 'DataFrame' not in str(type(s)): continue
            for srow in s.iterrows():
                dt = int((str(srow[0])[0:10]).replace("-",""))
                # Full OHLCV bar plus adjusted close ("a") for equities.
                new_row = {"_id": {"sym": symbol, "dt": dt },
                           "o": srow[1].Open,
                           "h": srow[1].High,
                           "l": srow[1].Low,
                           "c": srow[1].Close,
                           "v": srow[1].Volume,
                           "a": srow[1]['Adj Close']}
                tickers.save(new_row)
def chunks(l, n):
    """Yield successive n-sized chunks from l.

    The final chunk is shorter when len(l) is not a multiple of n.
    """
    # BUG fix: `xrange` only exists on Python 2; `range` yields the same
    # indices on both Python 2 and 3.
    for i in range(0, len(l), n):
        yield l[i:i + n]
def download_data(ith_chunk=0, no_chunks=1,base_dir="."):
    """
    Download data for the ith chunk of no_chunks. The chunks
    come from a list of all available stock, etf symbols

    Reads (Engine, Symbol, Name) rows from simple.csv in the current
    directory, deterministically shuffles them (fixed seed so every
    worker computes the same partition), splits them into no_chunks
    pieces and downloads only piece ith_chunk.

    NOTE(review): `base_dir` is accepted but never used — presumably
    intended to locate simple.csv; verify against callers.
    """
    res = []
    df = pd.read_csv("simple.csv")
    for line in df.iterrows():
        res.append((line[1].Engine, line[1].Symbol, line[1].Name))
    # Fixed seed: all processes agree on the same shuffled order.
    random.seed(0)
    random.shuffle(res)
    s = int(len(res) / no_chunks)
    res = list(chunks(res, s))
    do_download(res[ith_chunk])
def get(symbol):
    """
    Returns all data for symbol in a pandas dataframe

    Rows come from the local MongoDB `findb.simple` collection, ordered
    by date ascending, indexed by a DatetimeIndex. Stock tickers yield
    columns a/c/h/l/o; macro series yield only "a". An unknown symbol
    yields an empty DataFrame.
    """
    connection = MongoClient()
    db = connection.findb
    q = {"$query" :{"_id.sym": symbol},"$orderby":{"_id.dt" : 1}}
    res = list(db.simple.find( q )); res1 = []
    if len(res) == 0: return pd.DataFrame()
    if 'c' in res[0]: # then we have a stock ticker, this series does not have 'closed' or 'open'
        for x in res: res1.append( { 'a': x['a'],'c': x['c'],'h':x['h'], 'l': x['l'],'o': x['o'],'Date':x['_id']['dt']} )
    else: # we have a macro timeseries, 'a' always exists in all time series
        for x in res: res1.append( { 'a': x['a'],'Date':x['_id']['dt']} )
    df = pd.DataFrame(res1, columns=res1[0].keys())
    df['Date'] = pd.to_datetime(df.Date,format='%Y%m%d')
    df = df.set_index('Date')
    return df
def get_multi(symbols):
    """
    Returns all data for symbols

    Builds one DataFrame whose columns are the adjusted-close ("a")
    series of each requested symbol.
    """
    frames = [get(sym).a for sym in symbols]
    combined = pd.concat(frames, axis=1)
    combined.columns = symbols
    return combined
def get_hft(symbol, date):
    """
    Return minute level high-frequency data for the given symbol and date

    `date` is a YYYYMMDD integer. Returns a transposed DataFrame of the
    stored 'hft' sub-document, or None when the record is missing or has
    no 'hft' field.
    """
    db = MongoClient().findb
    q = {"$query": {"_id.sym": symbol, "_id.dt": date}}
    res = list(db.simple.find(q).limit(1))
    if len(res) > 0 and 'hft' in res[0].keys():
        return pd.DataFrame(res[0]['hft']).T
def get_hft_for_dates(symbol, start, end):
    """
    Return minute level high-frequency data for a time period
    between start and end.

    `start` and `end` are YYYYMMDD integers (inclusive). Days with no
    stored data are skipped; an empty DataFrame is returned when no day
    in the range has data.
    """
    start = pd.to_datetime(str(start), format='%Y%m%d')
    end = pd.to_datetime(str(end), format='%Y%m%d')
    dates = [(start + datetime.timedelta(days=i)).strftime('%Y%m%d')
             for i in range((end - start).days + 1)]
    res = []
    for dt in dates:
        df = get_hft(symbol, int(dt))
        # get_hft returns a DataFrame or None.
        if df is not None:
            df['Date'] = dt
            df = df.set_index('Date', append=True)
            res.append(df)
    if not res:
        # BUG fix: pd.concat([]) raises "No objects to concatenate" when
        # no day in the range has data; return an empty frame instead.
        return pd.DataFrame()
    return pd.concat(res)
def check_mongo():
    """Abort the process when the local MongoDB server is unreachable.

    Runs `mongostat` once and looks for its "no reachable servers"
    diagnostic in the combined stdout/stderr output.
    """
    pipe = os.popen(MONGO_STAT + ' 2>&1', 'r')
    text = pipe.read()
    if 'no reachable servers' in text:
        # BUG fix: this was a Python-2-only `print` statement; the function
        # call form prints the same single string and is valid on Python 3.
        print("\n\n**** Mongo is not running ****\n\n")
        exit()
if __name__ == "__main__":
    # Fail fast if MongoDB is down before doing any network work.
    check_mongo()
    f = '%(asctime)-15s: %(message)s'
    # Usage: simple.py [ith_chunk no_chunks] — with two args, this worker
    # downloads only its chunk and logs to a per-chunk file.
    # NOTE(review): logs go under %TEMP% (Windows); get_quandl_auth uses
    # $HOME — the two only coexist in environments defining both.
    if len(sys.argv) == 3:
        logging.basicConfig(filename='%s/simple-%d.log' % (os.environ['TEMP'],int(sys.argv[1])),level=logging.DEBUG,format=f)
        download_data(int(sys.argv[1]),int(sys.argv[2]))
    else:
        logging.basicConfig(filename='%s/simple.log' % os.environ['TEMP'],level=logging.DEBUG, format=f)
        download_data()
| gpl-3.0 |
40223142/2015cad0623 | static/Brython3.1.3-20150514-095342/Lib/unittest/test/test_result.py | 788 | 19069 | import io
import sys
import textwrap
from test import support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
# Note: there are not separate tests for TestResult.wasSuccessful(),
# TestResult.errors, TestResult.failures, TestResult.testsRun or
# TestResult.shouldStop because these only have meaning in terms of
# other TestResult methods.
#
# Accordingly, tests for the aforenamed attributes are incorporated
# in with the tests for the defining methods.
################################################################
def test_init(self):
result = unittest.TestResult()
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 0)
self.assertEqual(result.shouldStop, False)
self.assertIsNone(result._stdout_buffer)
self.assertIsNone(result._stderr_buffer)
# "This method can be called to signal that the set of tests being
# run should be aborted by setting the TestResult's shouldStop
# attribute to True."
def test_stop(self):
result = unittest.TestResult()
result.stop()
self.assertEqual(result.shouldStop, True)
# "Called when the test case test is about to be run. The default
# implementation simply increments the instance's testsRun counter."
def test_startTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# "Called after the test case test has been executed, regardless of
# the outcome. The default implementation does nothing."
def test_stopTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# Same tests as above; make sure nothing has changed
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "Called before and after tests are run. The default implementation does nothing."
def test_startTestRun_stopTestRun(self):
result = unittest.TestResult()
result.startTestRun()
result.stopTestRun()
# "addSuccess(test)"
# ...
# "Called when the test case test succeeds"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addSuccess(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "addFailure(test, err)"
# ...
# "Called when the test case test signals a failure. err is a tuple of
# the form returned by sys.exc_info(): (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addFailure(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
test.fail("foo")
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addFailure(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.failures[0]
self.assertTrue(test_case is test)
self.assertIsInstance(formatted_exc, str)
# "addError(test, err)"
# ...
# "Called when the test case test raises an unexpected exception err
# is a tuple of the form returned by sys.exc_info():
# (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addError(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
raise TypeError()
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addError(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
self.assertTrue(test_case is test)
self.assertIsInstance(formatted_exc, str)
def testGetDescriptionWithoutDocstring(self):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
'testGetDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult)')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithOneLineDocstring(self):
"""Tests getDescription() for a method with a docstring."""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithOneLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithMultiLineDocstring(self):
"""Tests getDescription() for a method with a longer docstring.
The second line of the docstring.
"""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithMultiLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a longer '
'docstring.'))
def testStackFrameTrimming(self):
class Frame(object):
class tb_frame(object):
f_globals = {}
result = unittest.TestResult()
self.assertFalse(result._is_relevant_tb_level(Frame))
Frame.tb_frame.f_globals['__unittest'] = True
self.assertTrue(result._is_relevant_tb_level(Frame))
def testFailFast(self):
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addError(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addFailure(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addUnexpectedSuccess(None)
self.assertTrue(result.shouldStop)
    def testFailFastSetByRunner(self):
        # A bare callable can stand in for a test suite; the runner must hand
        # it a result object whose failfast flag mirrors the runner's own.
        runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
        def test(result):
            self.assertTrue(result.failfast)
        result = runner.run(test)
# Build OldResult: a legacy (pre-2.7 style) TestResult lookalike used by the
# tests below.  Start from the modern class's namespace and strip the newer
# API so the compatibility shims in unittest get exercised.
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
           '__init__'):
    del classDict[m]

def __init__(self, stream=None, descriptions=None, verbosity=None):
    # Minimal old-style initialiser: only the attributes TestResult exposes.
    self.failures = []
    self.errors = []
    self.testsRun = 0
    self.shouldStop = False
    self.buffer = False

classDict['__init__'] = __init__
# Synthesise the legacy class from the trimmed namespace.
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
    """Verify unittest copes with legacy result objects lacking the
    addSkip/addExpectedFailure/addUnexpectedSuccess methods (it should warn
    and degrade gracefully rather than crash)."""

    def assertOldResultWarning(self, test, failures):
        # Running against OldResult must emit the compatibility
        # RuntimeWarning and record exactly `failures` failures.
        with support.check_warnings(("TestResult has no add.+ method,",
                                     RuntimeWarning)):
            result = OldResult()
            test.run(result)
            self.assertEqual(len(result.failures), failures)

    def testOldTestResult(self):
        class Test(unittest.TestCase):
            def testSkip(self):
                self.skipTest('foobar')
            @unittest.expectedFailure
            def testExpectedFail(self):
                raise TypeError
            @unittest.expectedFailure
            def testUnexpectedSuccess(self):
                pass

        # An unexpected success is downgraded to a failure on old results.
        for test_name, should_pass in (('testSkip', True),
                                       ('testExpectedFail', True),
                                       ('testUnexpectedSuccess', False)):
            test = Test(test_name)
            self.assertOldResultWarning(test, int(not should_pass))

    # NOTE(review): "Tesult" typo is long-standing; renaming would change the
    # discovered test id, so it is kept as-is.
    def testOldTestTesultSetup(self):
        class Test(unittest.TestCase):
            def setUp(self):
                self.skipTest('no reason')
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)

    def testOldTestResultClass(self):
        @unittest.skip('no reason')
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)

    def testOldResultWithRunner(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        runner = unittest.TextTestRunner(resultclass=OldResult,
                                         stream=io.StringIO())
        # This will raise an exception if TextTestRunner can't handle old
        # test result objects
        runner.run(Test('testFoo'))
class MockTraceback(object):
    """Stand-in for the stdlib ``traceback`` module: format_exception()
    ignores its arguments and returns a fixed, recognisable value."""
    @staticmethod
    def format_exception(*args):
        return ['A traceback']
def restore_traceback():
    """Re-point unittest.result at the real traceback module after a test
    has substituted MockTraceback."""
    setattr(unittest.result, 'traceback', traceback)
class TestOutputBuffering(unittest.TestCase):
    """Tests for TestResult's buffer mode (capturing sys.stdout/stderr
    during a test and attaching the captured text to failure reports)."""

    def setUp(self):
        # Remember the real streams so tearDown can always restore them,
        # even when a test leaves sys.stdout/sys.stderr replaced.
        self._real_out = sys.stdout
        self._real_err = sys.stderr

    def tearDown(self):
        sys.stdout = self._real_out
        sys.stderr = self._real_err

    def testBufferOutputOff(self):
        # With buffering off, startTest() must leave the streams untouched.
        real_out = self._real_out
        real_err = self._real_err

        result = unittest.TestResult()
        self.assertFalse(result.buffer)

        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)

        result.startTest(self)

        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)

    def testBufferOutputStartTestAddSuccess(self):
        real_out = self._real_out
        real_err = self._real_err

        result = unittest.TestResult()
        self.assertFalse(result.buffer)

        result.buffer = True

        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)

        # startTest() swaps in two distinct StringIO buffers.
        result.startTest(self)

        self.assertIsNot(real_out, sys.stdout)
        self.assertIsNot(real_err, sys.stderr)
        self.assertIsInstance(sys.stdout, io.StringIO)
        self.assertIsInstance(sys.stderr, io.StringIO)
        self.assertIsNot(sys.stdout, sys.stderr)

        out_stream = sys.stdout
        err_stream = sys.stderr

        # Replace the "original" streams too, so we can check nothing leaks
        # through to them while the test is running.
        result._original_stdout = io.StringIO()
        result._original_stderr = io.StringIO()

        print('foo')
        print('bar', file=sys.stderr)

        self.assertEqual(out_stream.getvalue(), 'foo\n')
        self.assertEqual(err_stream.getvalue(), 'bar\n')

        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')

        # On success the captured output is simply discarded.
        result.addSuccess(self)
        result.stopTest(self)

        self.assertIs(sys.stdout, result._original_stdout)
        self.assertIs(sys.stderr, result._original_stderr)

        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')

        self.assertEqual(out_stream.getvalue(), '')
        self.assertEqual(err_stream.getvalue(), '')

    def getStartedResult(self):
        # Helper: a buffering result with this test already started.
        result = unittest.TestResult()
        result.buffer = True
        result.startTest(self)
        return result

    def testBufferOutputAddErrorOrFailure(self):
        # Swap in a predictable traceback formatter for the duration.
        unittest.result.traceback = MockTraceback
        self.addCleanup(restore_traceback)

        # Each (errors/failures) case appears twice, exercising repeated
        # reporting; include_error controls whether stderr output exists.
        for message_attr, add_attr, include_error in [
            ('errors', 'addError', True),
            ('failures', 'addFailure', False),
            ('errors', 'addError', True),
            ('failures', 'addFailure', False)
        ]:
            result = self.getStartedResult()
            buffered_out = sys.stdout
            buffered_err = sys.stderr
            result._original_stdout = io.StringIO()
            result._original_stderr = io.StringIO()

            print('foo', file=sys.stdout)
            if include_error:
                print('bar', file=sys.stderr)

            addFunction = getattr(result, add_attr)
            addFunction(self, (None, None, None))
            result.stopTest(self)

            result_list = getattr(result, message_attr)
            self.assertEqual(len(result_list), 1)

            test, message = result_list[0]
            expectedOutMessage = textwrap.dedent("""
                Stdout:
                foo
            """)
            expectedErrMessage = ''
            if include_error:
                expectedErrMessage = textwrap.dedent("""
                Stderr:
                bar
            """)
            expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)

            # On failure the captured output is both echoed to the original
            # streams and appended to the stored traceback message.
            self.assertIs(test, self)
            self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
            self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
            self.assertMultiLineEqual(message, expectedFullMessage)

    def testBufferSetupClass(self):
        # Buffer mode must not break error reporting from setUpClass.
        result = unittest.TestResult()
        result.buffer = True

        class Foo(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)

    def testBufferTearDownClass(self):
        result = unittest.TestResult()
        result.buffer = True

        class Foo(unittest.TestCase):
            @classmethod
            def tearDownClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)

    def testBufferSetUpModule(self):
        # Module-level fixtures are located via sys.modules, so register a
        # fake module and clean it up afterwards.
        result = unittest.TestResult()
        result.buffer = True

        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                1/0

        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)

    def testBufferTearDownModule(self):
        result = unittest.TestResult()
        result.buffer = True

        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def tearDownModule():
                1/0

        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| gpl-3.0 |
rossjones/ScraperWikiX | web/frontend/migrations/0014_add_message.py | 2 | 7348 |
from south.db import db
from django.db import models
from frontend.models import *
class Migration:
    """South schema migration: adds the ``frontend.Message`` model."""

    def forwards(self, orm):
        # Adding model 'Message'
        db.create_table('frontend_message', (
            ('id', orm['frontend.message:id']),
            ('text', orm['frontend.message:text']),
            ('start', orm['frontend.message:start']),
            ('finish', orm['frontend.message:finish']),
        ))
        db.send_create_signal('frontend', ['Message'])

    def backwards(self, orm):
        # Deleting model 'Message'
        db.delete_table('frontend_message')

    # Frozen ORM snapshot South uses to reconstruct model state at this point
    # in the migration history.  Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'frontend.alerts': {
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event_id': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_alerts_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'historicalgroup': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message_level': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'message_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'message_value': ('django.db.models.fields.CharField', [], {'max_length': '5000', 'null': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
            'object_id': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'frontend.alerttypes': {
            'applies_to': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'})
        },
        'frontend.message': {
            'finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'frontend.userprofile': {
            'alert_frequency': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'alert_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['frontend.AlertTypes']"}),
            'alerts_last_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'frontend.usertouserrole': {
            'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_user'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_user'", 'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['frontend']
| agpl-3.0 |
rarbg/ZeroNet | src/lib/geventwebsocket/resource.py | 21 | 1865 | import re
from .protocols.base import BaseProtocol
from .exceptions import WebSocketError
class WebSocketApplication(object):
    """Base WebSocket application.

    Wraps a websocket and pumps incoming messages through ``protocol`` until
    the socket raises WebSocketError.  Subclasses typically override the
    on_open/on_close/on_message hooks.
    """
    protocol_class = BaseProtocol

    def __init__(self, ws):
        self.ws = ws
        self.protocol = self.protocol_class(self)

    def handle(self):
        self.protocol.on_open()

        while True:
            try:
                data = self.ws.receive()
            except WebSocketError:
                # The socket is gone: notify the protocol and stop pumping.
                self.protocol.on_close()
                break

            self.protocol.on_message(data)

    def on_open(self, *args, **kwargs):
        pass

    def on_close(self, *args, **kwargs):
        pass

    def on_message(self, message, *args, **kwargs):
        # Default behaviour: echo the message straight back to the peer.
        self.ws.send(message, **kwargs)

    @classmethod
    def protocol_name(cls):
        return cls.protocol_class.PROTOCOL_NAME
class Resource(object):
    """Dispatch WSGI/WebSocket requests to applications registered by URL
    pattern.

    ``apps`` maps regex pattern strings to application objects; the first
    pattern matching PATH_INFO (in dict iteration order) wins.
    """

    def __init__(self, apps=None):
        # Default to an empty *dict*: the original defaulted to a list,
        # which would crash in _app_by_path (lists have no items()).
        self.apps = apps if apps else {}

    def _app_by_path(self, environ_path):
        # Which app matched the current path?
        # (.items() instead of the Python2-only .iteritems(): works on both.)
        for path, app in self.apps.items():
            if re.match(path, environ_path):
                return app
        return None

    def app_protocol(self, path):
        """Return the matching app's subprotocol name, or '' if none."""
        app = self._app_by_path(path)

        if hasattr(app, 'protocol_name'):
            return app.protocol_name()
        else:
            return ''

    def __call__(self, environ, start_response):
        current_app = self._app_by_path(environ['PATH_INFO'])

        if current_app is None:
            raise Exception("No apps defined")

        if 'wsgi.websocket' in environ:
            ws = environ['wsgi.websocket']
            current_app = current_app(ws)
            current_app.ws = ws  # TODO: needed?
            current_app.handle()

            return None
        else:
            return current_app(environ, start_response)
| gpl-2.0 |
Mirdrack/4chanscrapper | lib/python2.7/site-packages/setuptools/archive_util.py | 409 | 6601 | """Utilities for extracting common archive formats"""
__all__ = [
"unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
"UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]
import zipfile, tarfile, os, shutil, posixpath
from pkg_resources import ensure_directory
from distutils.errors import DistutilsError
class UnrecognizedFormat(DistutilsError):
    """Couldn't recognize the archive type"""
    # Raised by individual extraction drivers (and by unpack_archive itself
    # when every driver has declined the file).
def default_filter(src,dst):
    """The default progress/filter callback: accept every entry.

    Returns *dst* (a true value), which tells the extraction drivers to
    extract the entry to its proposed destination unchanged; *src* is the
    archive-internal path and is ignored here.
    """
    return dst
def unpack_archive(filename, extract_dir, progress_filter=default_filter,
    drivers=None
):
    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``

    `progress_filter` is a function taking two arguments: a source path
    internal to the archive ('/'-separated), and a filesystem path where it
    will be extracted.  The callback must return the desired extract path
    (which may be the same as the one passed in), or else ``None`` to skip
    that file or directory.  The callback can thus be used to report on the
    progress of the extraction, as well as to filter the items extracted or
    alter their extraction paths.

    `drivers`, if supplied, must be a non-empty sequence of functions with the
    same signature as this function (minus the `drivers` argument), that raise
    ``UnrecognizedFormat`` if they do not support extracting the designated
    archive type.  The `drivers` are tried in sequence until one is found that
    does not raise an error, or until all are exhausted (in which case
    ``UnrecognizedFormat`` is raised).  If you do not supply a sequence of
    drivers, the module's ``extraction_drivers`` constant will be used, which
    means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
    order.
    """
    # Try each driver in turn; a driver signals "not my format" by raising
    # UnrecognizedFormat, in which case the next one is tried.
    for driver in (drivers or extraction_drivers):
        try:
            driver(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            continue
        # Driver accepted the file and finished extracting: we are done.
        return

    raise UnrecognizedFormat(
        "Not a recognized archive type: %s" % filename
    )
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
    """"Unpack" a directory, using the same interface as for archives

    Raises ``UnrecognizedFormat`` if `filename` is not a directory
    """
    if not os.path.isdir(filename):
        raise UnrecognizedFormat("%s is not a directory" % (filename,))

    # Map each walked directory to its (archive-relative '/' prefix,
    # destination directory).  Seeded with the root; extended as os.walk
    # descends, so children can derive their paths from the parent's entry.
    paths = {filename:('',extract_dir)}
    for base, dirs, files in os.walk(filename):
        src,dst = paths[base]
        for d in dirs:
            paths[os.path.join(base,d)] = src+d+'/', os.path.join(dst,d)
        for f in files:
            name = src+f
            target = os.path.join(dst,f)
            # The filter may redirect the target or return None to skip.
            target = progress_filter(src+f, target)
            if not target:
                continue    # skip non-files
            ensure_directory(target)
            f = os.path.join(base,f)
            # Copy contents and then stat info (mtime, permission bits).
            shutil.copyfile(f, target)
            shutil.copystat(f, target)
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack zip `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
    by ``zipfile.is_zipfile()``).  See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """

    if not zipfile.is_zipfile(filename):
        raise UnrecognizedFormat("%s is not a zip file" % (filename,))

    z = zipfile.ZipFile(filename)
    try:
        for info in z.infolist():
            name = info.filename

            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name.split('/'):
                continue

            target = os.path.join(extract_dir, *name.split('/'))
            target = progress_filter(name, target)
            if not target:
                continue
            if name.endswith('/'):
                # directory entries in a zip are marked by a trailing slash
                ensure_directory(target)
            else:
                # file
                ensure_directory(target)
                data = z.read(info.filename)
                f = open(target,'wb')
                try:
                    f.write(data)
                finally:
                    f.close()
                    del data
            # High 16 bits of external_attr hold the Unix permission bits
            # (when the archive was created on a Unix host); restore them.
            unix_attributes = info.external_attr >> 16
            if unix_attributes:
                os.chmod(target, unix_attributes)
    finally:
        z.close()
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
    by ``tarfile.open()``).  See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError:
        raise UnrecognizedFormat(
            "%s is not a compressed or uncompressed tar file" % (filename,)
        )
    try:
        tarobj.chown = lambda *args: None # don't do any chowning!
        for member in tarobj:
            name = member.name
            # don't extract absolute paths or ones with .. in them
            if not name.startswith('/') and '..' not in name.split('/'):
                prelim_dst = os.path.join(extract_dir, *name.split('/'))

                # resolve any links and to extract the link targets as normal files
                # (follow chains of hard/symlinks until a real file/dir is found;
                # links pointing outside the archive resolve to None)
                while member is not None and (member.islnk() or member.issym()):
                    linkpath = member.linkname
                    if member.issym():
                        # symlink targets are relative to the link's directory
                        linkpath = posixpath.join(posixpath.dirname(member.name), linkpath)
                        linkpath = posixpath.normpath(linkpath)
                    member = tarobj._getmember(linkpath)

                if member is not None and (member.isfile() or member.isdir()):
                    final_dst = progress_filter(name, prelim_dst)
                    if final_dst:
                        if final_dst.endswith(os.sep):
                            final_dst = final_dst[:-1]
                        try:
                            # XXX Ugh: private tarfile API, used so the
                            # filtered destination path can be forced.
                            tarobj._extract_member(member, final_dst) # XXX Ugh
                        except tarfile.ExtractError:
                            pass    # chown/chmod/mkfifo/mknode/makedev failed
        # NOTE(review): returns True while the other drivers return None --
        # unpack_archive() ignores the return value, so this is harmless.
        return True
    finally:
        tarobj.close()
# Drivers tried, in order, by unpack_archive() when none are supplied.
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
| mit |
jjlee3/openthread | tests/scripts/thread-cert/ipv6.py | 5 | 35645 | #!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import abc
import io
import struct
import sys
from binascii import hexlify
from ipaddress import ip_address
try:
from itertools import izip_longest as zip_longest
except ImportError:
from itertools import zip_longest
# IPv6 Next Header values (protocol numbers) for the protocols this module
# understands.
IPV6_NEXT_HEADER_HOP_BY_HOP = 0
IPV6_NEXT_HEADER_TCP = 6
IPV6_NEXT_HEADER_UDP = 17
IPV6_NEXT_HEADER_ICMP = 58

# Next Header values that denote an upper-layer protocol (as opposed to an
# extension header).
UPPER_LAYER_PROTOCOLS = [
    IPV6_NEXT_HEADER_TCP,
    IPV6_NEXT_HEADER_UDP,
    IPV6_NEXT_HEADER_ICMP,
]

# ICMPv6 message type codes
ICMP_DESTINATION_UNREACHABLE = 1
ICMP_ECHO_REQUEST = 128
ICMP_ECHO_RESPONSE = 129

# Default hop limit for IPv6
HOP_LIMIT_DEFAULT = 64
def calculate_checksum(data):
    """ Calculate checksum from data bytes.

    How to calculate checksum (RFC 2460):
    https://tools.ietf.org/html/rfc2460#page-27

    Args:
        data (bytes): input data from which checksum will be calculated

    Returns:
        int: calculated checksum
    """
    # Create halfwords from data bytes. Example: data[0] = 0x01, data[1] = 0xb2 => 0x01b2
    halfwords = [((byte0 << 8) | byte1) for byte0, byte1 in zip_longest(data[::2], data[1::2], fillvalue=0x00)]

    checksum = sum(halfwords)

    # Fold the carry back into the low 16 bits until the sum fits.  A single
    # fold (as the previous version did) is not always enough: e.g. a sum of
    # 0x1FFFF folds to 0x10000, which still carries (RFC 1071, section 4.1).
    while checksum >> 16:
        checksum = (checksum & 0xFFFF) + (checksum >> 16)

    checksum ^= 0xFFFF

    if checksum == 0:
        # Zero is reserved to mean "no checksum"; transmit all-ones instead.
        return 0xFFFF
    else:
        return checksum
class PacketFactory(object):

    """ Interface for classes that produce objects from data. """

    def parse(self, data, message_info):
        """ Convert data to object.

        Args:
            data (BytesIO)
            message_info (MessageInfo)

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class BuildableFromBytes(object):

    """ Interface for classes which can be built from bytes. """

    @classmethod
    def from_bytes(cls, data):
        """ Convert data to object.

        Args:
            data (bytes)

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class ConvertibleToBytes(object):

    """ Interface for classes which can be converted to bytes. """

    def to_bytes(self):
        """ Convert object to data.

        Returns:
            bytes

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError

    def __len__(self):
        """ Length of data (in bytes).

        Returns:
            int

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class Header(object):

    """ Interface for header classes. """

    # NOTE: abstractproperty is the py2-compatible spelling; enforcement
    # (refusing instantiation) requires the ABCMeta metaclass set here.
    __metaclass__ = abc.ABCMeta

    @abc.abstractproperty
    def type(self):
        """ Number which can be used in the next header field in IPv6 header or next headers.

        Returns:
            int
        """
class ExtensionHeader(object):

    """ Base for classes representing Extension Headers in IPv6 packets.

    Stores the two leading fields shared by all extension headers: the
    Next Header byte and the Hdr Ext Len byte.
    """

    def __init__(self, next_header, hdr_ext_len=0):
        self.hdr_ext_len = hdr_ext_len
        self.next_header = next_header
class UpperLayerProtocol(Header, ConvertibleToBytes):

    """ Base for classes representing upper layer protocol payload in IPv6 packets. """

    def __init__(self, header):
        self.header = header

    @property
    def checksum(self):
        """ Checksum currently stored in the upper layer protocol header. """
        return self.header.checksum

    @checksum.setter
    def checksum(self, value):
        self.header.checksum = value

    def is_valid_checksum(self):
        """ Tell whether the stored checksum could be a genuine one.

        A checksum computation never yields zero (0xFFFF is produced
        instead), so a stored value of 0 marks the checksum as unset/invalid.

        Returns:
            bool
        """
        return bool(self.checksum)
class IPv6PseudoHeader(ConvertibleToBytes):

    """ Class representing IPv6 pseudo header which is required to calculate
    upper layer protocol (like e.g. UDP or ICMPv6) checksum.

    This class is used only during upper layer protocol checksum calculation. Do not use it outside of this module.

    """

    def __init__(self, source_address, destination_address, payload_length, next_header):
        self._source_address = self._convert_to_ipaddress(source_address)
        self._destination_address = self._convert_to_ipaddress(destination_address)
        self.payload_length = payload_length
        self.next_header = next_header

    def _convert_to_ipaddress(self, value):
        """ Normalize bytearray/bytes/str input into an ipaddress object. """
        if isinstance(value, bytearray):
            value = bytes(value)

        elif isinstance(value, str) and sys.version_info[0] == 2:
            value = value.decode("utf-8")

        return ip_address(value)

    @property
    def source_address(self):
        return self._source_address

    @source_address.setter
    def source_address(self, value):
        self._source_address = self._convert_to_ipaddress(value)

    @property
    def destination_address(self):
        return self._destination_address

    @destination_address.setter
    def destination_address(self, value):
        # BUG FIX: this setter previously assigned to self._source_address,
        # silently clobbering the source and leaving the destination stale.
        self._destination_address = self._convert_to_ipaddress(value)

    def to_bytes(self):
        """ Serialize per RFC 2460 section 8.1: src, dst, 32-bit upper-layer
        packet length, then next header (">I" yields the required three zero
        bytes followed by the next header byte). """
        data = bytearray()
        data += self.source_address.packed
        data += self.destination_address.packed
        data += struct.pack(">I", self.payload_length)
        data += struct.pack(">I", self.next_header)

        return data
class IPv6Header(ConvertibleToBytes, BuildableFromBytes):

    """ Class representing IPv6 packet header (RFC 2460, fixed 40 bytes). """

    _version = 6

    _header_length = 40

    def __init__(self, source_address, destination_address, traffic_class=0, flow_label=0, hop_limit=64,
                 payload_length=0, next_header=0):
        self.version = self._version
        self._source_address = self._convert_to_ipaddress(source_address)
        self._destination_address = self._convert_to_ipaddress(destination_address)
        self.traffic_class = traffic_class
        self.flow_label = flow_label
        self.hop_limit = hop_limit
        self.payload_length = payload_length
        self.next_header = next_header

    def _convert_to_ipaddress(self, value):
        """ Normalize bytearray/bytes/str input into an ipaddress object. """
        if isinstance(value, bytearray):
            value = bytes(value)

        elif isinstance(value, str) and sys.version_info[0] == 2:
            value = value.decode("utf-8")

        return ip_address(value)

    @property
    def source_address(self):
        return self._source_address

    @source_address.setter
    def source_address(self, value):
        self._source_address = self._convert_to_ipaddress(value)

    @property
    def destination_address(self):
        return self._destination_address

    @destination_address.setter
    def destination_address(self, value):
        # Added for symmetry with source_address (and with IPv6PseudoHeader);
        # previously the destination could not be reassigned after
        # construction.  Backward-compatible: existing reads are unchanged.
        self._destination_address = self._convert_to_ipaddress(value)

    def to_bytes(self):
        """ Serialize the 40-byte fixed header in network byte order. """
        # First 4 bytes pack version (4 bits), traffic class (8 bits) and
        # flow label (20 bits).
        data = bytearray([
            ((self.version & 0x0F) << 4) | ((self.traffic_class >> 4) & 0x0F),
            ((self.traffic_class & 0x0F) << 4) | ((self.flow_label >> 16) & 0x0F),
            ((self.flow_label >> 8) & 0xFF),
            ((self.flow_label & 0xFF))
        ])
        data += struct.pack(">H", self.payload_length)
        data += bytearray([self.next_header, self.hop_limit])
        data += self.source_address.packed
        data += self.destination_address.packed
        return data

    @classmethod
    def from_bytes(cls, data):
        """ Parse a header from a binary stream (e.g. io.BytesIO).

        Note: the version field is decoded but the constructor always stores
        protocol version 6; no validation is performed here.
        """
        b = bytearray(data.read(4))

        version = (b[0] >> 4) & 0x0F
        traffic_class = ((b[0] & 0x0F) << 4) | ((b[1] >> 4) & 0x0F)
        flow_label = ((b[1] & 0x0F) << 16) | (b[2] << 8) | b[3]

        payload_length = struct.unpack(">H", data.read(2))[0]
        next_header = ord(data.read(1))
        hop_limit = ord(data.read(1))
        src_addr = bytearray(data.read(16))
        dst_addr = bytearray(data.read(16))

        return cls(src_addr,
                   dst_addr,
                   traffic_class,
                   flow_label,
                   hop_limit,
                   payload_length,
                   next_header)

    def __repr__(self):
        return "IPv6Header(source_address={}, destination_address={}, next_header={}, payload_length={}, \
            hop_limit={}, traffic_class={}, flow_label={})".format(self.source_address.compressed,
                                                                   self.destination_address.compressed,
                                                                   self.next_header,
                                                                   self.payload_length,
                                                                   self.hop_limit,
                                                                   self.traffic_class,
                                                                   self.flow_label)

    def __len__(self):
        return self._header_length
class IPv6Packet(ConvertibleToBytes):

    """ Class representing IPv6 packet.

    IPv6 packet consists of IPv6 header, optional extension header, and upper layer protocol.

    IPv6 packet

    +-------------+----------------------------------+----------------------------------------------+
    |             |                                  |                                              |
    | IPv6 header | extension headers (zero or more) | upper layer protocol (e.g. UDP, TCP, ICMPv6) |
    |             |                                  |                                              |
    +-------------+----------------------------------+----------------------------------------------+

    Extension headers:
        - HopByHop
        - Routing header (not implemented in this module)

    Upper layer protocols:
        - ICMPv6
        - UDP
        - TCP (not implemented in this module)

    Example:
        IPv6 packet construction without extension headers:

        ipv6_packet = IPv6Packet(IPv6Header("fd00:1234:4555::ff:fe00:1800", "ff03::1"),
                                 ICMPv6(ICMPv6Header(128, 0),
                                        ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
                                                                    0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
                                                                    0x41, 0x41]))))

        IPv6 packet construction with extension headers:

        ipv6_packet = IPv6Packet(IPv6Header("fd00:1234:4555::ff:fe00:1800", "ff03::1"),
                                 ICMPv6(ICMPv6Header(128, 0),
                                        ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
                                                                    0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
                                                                    0x41, 0x41])),
                                 [HopByHop(options=[
                                     HopByHopOption(HopByHopOptionHeader(_type=0x6d),
                                                    MPLOption(S=1, M=0, V=0, sequence=2, seed_id=bytes([0x00, 0x18])))
                                 ])])

    """

    def __init__(self, ipv6_header, upper_layer_protocol, extension_headers=None):
        self.ipv6_header = ipv6_header
        self.upper_layer_protocol = upper_layer_protocol
        self.extension_headers = extension_headers if extension_headers is not None else []

        # Chain the next-header fields first so the pseudo-header checksum
        # below is computed over consistent values.
        self._update_next_header_values_in_headers()

        # A zero checksum means "not set yet" (see is_valid_checksum).
        if not upper_layer_protocol.is_valid_checksum():
            self.upper_layer_protocol.checksum = self.calculate_checksum()

    # NOTE(review): not called anywhere within this class; presumably kept
    # for external callers -- confirm before removing.
    def _validate_checksum(self):
        checksum = self.calculate_checksum()

        if self.upper_layer_protocol.checksum != checksum:
            raise RuntimeError("Could not create IPv6 packet. "
                               "Invalid checksum: {}!={}".format(self.upper_layer_protocol.checksum, checksum))

        self.upper_layer_protocol.checksum = checksum

    def _update_payload_length_value_in_ipv6_header(self):
        # Payload length covers extension headers plus upper layer protocol.
        self.ipv6_header.payload_length = len(self.upper_layer_protocol) + \
            sum([len(extension_header) for extension_header in self.extension_headers])

    def _update_next_header_values_in_headers(self):
        # Walk header -> extension headers -> upper layer protocol, setting
        # each header's next_header field to the type of its successor.
        last_header = self.ipv6_header

        for extension_header in self.extension_headers:
            last_header.next_header = extension_header.type
            last_header = extension_header

        last_header.next_header = self.upper_layer_protocol.type

    def calculate_checksum(self):
        # Temporarily zero the checksum field while serializing, per the
        # standard Internet-checksum procedure.
        saved_checksum = self.upper_layer_protocol.checksum

        self.upper_layer_protocol.checksum = 0

        upper_layer_protocol_bytes = self.upper_layer_protocol.to_bytes()

        self.upper_layer_protocol.checksum = saved_checksum

        pseudo_header = IPv6PseudoHeader(self.ipv6_header.source_address,
                                         self.ipv6_header.destination_address,
                                         len(upper_layer_protocol_bytes),
                                         self.upper_layer_protocol.type)

        return calculate_checksum(pseudo_header.to_bytes() + upper_layer_protocol_bytes)

    def to_bytes(self):
        # Refresh derived fields before serializing; the checksum must be
        # recomputed last since it depends on the next-header chaining.
        self._update_payload_length_value_in_ipv6_header()
        self._update_next_header_values_in_headers()
        self.upper_layer_protocol.checksum = self.calculate_checksum()

        ipv6_packet = self.ipv6_header.to_bytes()

        for extension_header in self.extension_headers:
            ipv6_packet += extension_header.to_bytes()

        ipv6_packet += self.upper_layer_protocol.to_bytes()

        return ipv6_packet

    def __repr__(self):
        return "IPv6Packet(header={}, upper_layer_protocol={})".format(self.ipv6_header, self.upper_layer_protocol)
class UDPHeader(ConvertibleToBytes, BuildableFromBytes):

    """ Class representing UDP datagram header.

    This header is required to construct UDP datagram.

    Note the payload_length property's asymmetry: the getter returns the
    stored total datagram length (UDP "Length" field), while the setter takes
    only the payload size and adds the 8-byte header length itself.
    """

    _header_length = 8

    def __init__(self, src_port, dst_port, payload_length=0, checksum=0):
        self.src_port = src_port
        self.dst_port = dst_port
        self._payload_length = payload_length
        self.checksum = checksum

    @property
    def type(self):
        # IPv6 next-header value for UDP (== IPV6_NEXT_HEADER_UDP).
        return 17

    @property
    def payload_length(self):
        return self._payload_length

    @payload_length.setter
    def payload_length(self, value):
        # Stores header + payload: the UDP Length field counts both.
        self._payload_length = self._header_length + value

    def to_bytes(self):
        """ Serialize the 8-byte UDP header in network byte order. """
        data = struct.pack(">H", self.src_port)
        data += struct.pack(">H", self.dst_port)
        data += struct.pack(">H", self.payload_length)
        data += struct.pack(">H", self.checksum)

        return data

    @classmethod
    def from_bytes(cls, data):
        """ Parse a UDP header from a binary stream (e.g. io.BytesIO). """
        src_port = struct.unpack(">H", data.read(2))[0]
        dst_port = struct.unpack(">H", data.read(2))[0]
        payload_length = struct.unpack(">H", data.read(2))[0]

        checksum = struct.unpack(">H", data.read(2))[0]

        return cls(src_port, dst_port, payload_length, checksum)

    def __len__(self):
        return self._header_length
class UDPDatagram(UpperLayerProtocol):
    """UDP datagram: a UDPHeader followed by an arbitrary payload.

    UDP is an upper layer protocol for IPv6, so instances can be passed to
    IPv6Packet as upper_layer_protocol.

    Example:

        udp_dgram = UDPDatagram(UDPHeader(src_port=19788, dst_port=19788),
                                bytes([0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                       0x00, 0x00, 0x01, 0x09, 0x01, 0x01, 0x0b, 0x03,
                                       0x04, 0xc6, 0x69, 0x73, 0x51, 0x0e, 0x01, 0x80,
                                       0x12, 0x02, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef]))
    """

    def __init__(self, header, payload):
        super(UDPDatagram, self).__init__(header)
        self.payload = payload

    @property
    def type(self):
        # IANA protocol number for UDP.
        return 17

    def to_bytes(self):
        # Keep the header's length field consistent with the payload before
        # serializing the two parts.
        self.header.payload_length = len(self.payload)
        serialized = bytearray(self.header.to_bytes())
        serialized += self.payload.to_bytes()
        return serialized

    def __len__(self):
        return len(self.header) + len(self.payload)
class ICMPv6Header(ConvertibleToBytes, BuildableFromBytes):
    """ICMPv6 message header: type and code octets plus a 16-bit checksum.

    Required to construct an ICMPv6 message.
    """

    _header_length = 4

    def __init__(self, _type, code, checksum=0):
        self.type = _type
        self.code = code
        self.checksum = checksum

    def to_bytes(self):
        # One byte each for type and code, then a big-endian checksum.
        return bytearray(struct.pack(">BBH", self.type, self.code, self.checksum))

    @classmethod
    def from_bytes(cls, data):
        msg_type = ord(data.read(1))
        msg_code = ord(data.read(1))
        checksum = struct.unpack(">H", data.read(2))[0]
        return cls(msg_type, msg_code, checksum)

    def __len__(self):
        return self._header_length
class ICMPv6(UpperLayerProtocol):
    """ICMPv6 message: an ICMPv6Header followed by a message body.

    ICMPv6 is an upper layer protocol for IPv6, so instances can be passed
    to IPv6Packet as upper_layer_protocol.

    Example:

        icmpv6_msg = ICMPv6(ICMPv6Header(128, 0),
                            ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
                                                        0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
                                                        0x41, 0x41])))
    """

    def __init__(self, header, body):
        super(ICMPv6, self).__init__(header)
        self.body = body

    @property
    def type(self):
        # IANA protocol number for ICMPv6.
        return 58

    def to_bytes(self):
        combined = self.header.to_bytes() + self.body.to_bytes()
        return bytearray(combined)

    def __len__(self):
        return len(self.header) + len(self.body)
class HopByHop(ExtensionHeader):
    """ Class representing HopByHop extension header.

    HopByHop extension header consists of:
    - next_header type
    - extension header length which is multiple of 8
    - options
    """

    # Option type values used for padding (RFC 2460 section 4.2).
    _one_byte_padding = 0x00
    _many_bytes_padding = 0x01

    @property
    def type(self):
        # IPv6 next-header value that identifies a Hop-by-Hop Options header.
        return 0

    def __init__(self, next_header=None, options=None, hdr_ext_len=None):
        super(HopByHop, self).__init__(next_header, hdr_ext_len)
        self.options = options if options is not None else []
        if hdr_ext_len is not None:
            self.hdr_ext_len = hdr_ext_len
        else:
            # Derive the header-extension-length field from the options.
            payload_length = self._calculate_payload_length()
            self.hdr_ext_len = self._calculate_hdr_ext_len(payload_length)

    def _calculate_payload_length(self):
        # Two bytes for the next_header and hdr_ext_len octets, plus the
        # encoded length of every option.
        payload_length = 2
        for option in self.options:
            payload_length += len(option)

        return payload_length

    def _calculate_hdr_ext_len(self, payload_length):
        # hdr_ext_len is expressed in 8-octet units, not counting the first
        # 8 octets, so an exact multiple of 8 maps to count - 1.
        count = payload_length >> 3
        if (payload_length & 0x7) == 0 and count > 0:
            return count - 1

        return count

    def to_bytes(self):
        data = bytearray([self.next_header, self.hdr_ext_len])
        for option in self.options:
            data += option.to_bytes()

        # Padding
        #
        # More details:
        #   https://tools.ietf.org/html/rfc2460#section-4.2
        #
        excess_bytes = len(data) & 0x7

        if excess_bytes > 0:
            padding_length = 8 - excess_bytes

            if padding_length == 1:
                # Pad1 option: a single zero octet.
                data += bytearray([self._one_byte_padding])
            else:
                # PadN option: type octet, length octet, then zero octets.
                padding_length -= 2
                data += bytearray([self._many_bytes_padding, padding_length])
                data += bytearray([0x00 for _ in range(padding_length)])

        return data

    def __len__(self):
        """ HopByHop extension header length

        More details:
          https://tools.ietf.org/html/rfc2460#section-4.3
        """
        return (self.hdr_ext_len + 1) * 8
class HopByHopOptionHeader(ConvertibleToBytes, BuildableFromBytes):
    """Header (type and length octets) of a single HopByHop option."""

    _header_length = 2

    def __init__(self, _type, length=None):
        self.type = _type
        self.length = 0 if length is None else length

    def to_bytes(self):
        return bytearray((self.type, self.length))

    @classmethod
    def from_bytes(cls, data):
        option_type = ord(data.read(1))
        option_length = ord(data.read(1))
        return cls(option_type, option_length)

    def __len__(self):
        return self._header_length

    def __repr__(self):
        return "HopByHopOptionHeader(type={}, length={})".format(self.type, self.length)
class HopByHopOption(ConvertibleToBytes):
    """A single HopByHop option: a HopByHopOptionHeader plus its value
    (e.g. an MPLOption).

    Example:

        HopByHop(next_header=0x3a,
                 options=[HopByHopOption(HopByHopOptionHeader(_type=0x6d),
                          MPLOption(S=1, M=0, V=0, sequence=2, seed_id=bytes([0x00, 0x18])))
    """

    def __init__(self, header, value):
        self.value = value
        self.header = header
        # Keep the advertised option length in sync with the value size.
        self.header.length = len(self.value)

    def to_bytes(self):
        return self.header.to_bytes() + self.value.to_bytes()

    def __len__(self):
        return len(self.header) + len(self.value)

    def __repr__(self):
        return "HopByHopOption(header={}, value={})".format(self.header, self.value)
class MPLOption(ConvertibleToBytes):
    """ Class representing MPL option. """

    _header_length = 2

    # Mapping from the 2-bit S field to the seed-id length in bytes.
    _seed_id_length = {
        0: 0,
        1: 2,
        2: 8,
        3: 16
    }

    def __init__(self, S, M, V, sequence, seed_id):
        # S selects the seed-id length (see _seed_id_length); M and V are
        # single-bit flags; sequence is one octet; seed_id is raw bytes.
        self.S = S
        self.M = M
        self.V = V
        self.sequence = sequence
        self.seed_id = seed_id

    def to_bytes(self):
        # Pack S (2 bits), M (1 bit) and V (1 bit) into the high nibble of
        # the first octet; the low nibble stays zero.
        smv = ((self.S & 0x03) << 6) | ((self.M & 0x01) << 5) | ((self.V & 0x01) << 4)
        return bytearray([smv, self.sequence]) + self.seed_id

    @classmethod
    def from_bytes(cls, data):
        b = ord(data.read(1))
        # Unpack the flag bits from the first octet.
        s = ((b >> 6) & 0x03)
        m = ((b >> 5) & 0x01)
        v = ((b >> 4) & 0x01)
        sequence = ord(data.read(1))
        # The number of seed-id bytes to read is implied by the S field.
        seed_id = data.read(cls._seed_id_length[s])
        return cls(s, m, v, sequence, seed_id)

    def __len__(self):
        return self._header_length + self._seed_id_length[self.S]

    def __repr__(self):
        return "MPLOption(S={}, M={}, V={}, sequence={}, seed_id={})".format(self.S, self.M, self.V, self.sequence, hexlify(self.seed_id))
class IPv6PacketFactory(PacketFactory):
    """ Factory that produces IPv6 packets from data.

    This factory must be initialized with factories which allow to parse extension headers and upper layer protocols.

    The following example shows preferable setup of IPv6PacketFactory.

    Header types:
        0: HopByHop
        17: UDP
        58: ICMPv6

    Option types:
        109: MPL

    ICMPv6 body types:
        128: Echo request
        129: Echo response

    Example usage:

        ipv6_factory = IPv6PacketFactory(
            ehf={
                0: HopByHopFactory(options_factories={
                    109: MPLOptionFactory()
                })
            },
            ulpf={
                17: UDPDatagramFactory(dst_port_factories={
                    19788: MLEMessageFactory(),
                    19789: CoAPMessageFactory()
                }),
                58: ICMPv6Factory(body_factories={
                    128: ICMPv6EchoBodyFactory(),
                    129: ICMPv6EchoBodyFactory()
                })
            }
        )
    """

    def __init__(self, ehf=None, ulpf=None):
        """
        ehf - Extension Header Factory
        ulpf - Upper Layer Protocol Factory

        Args:
            ehf(dict[int: PacketFactory]): Dictionary mapping extension header types on specialized factories.
            ulpf(dict[int: PacketFactory]): Dictionary mapping upper layer protocol types on specialized factories.
        """
        self._ehf = ehf if ehf is not None else {}
        self._ulpf = ulpf if ulpf is not None else {}

    def _is_extension_header(self, header_type):
        # Anything that is not a known upper layer protocol number is treated
        # as an extension header.  (Fixed PEP 8 E713: use "not in" rather
        # than "not x in".)
        return header_type not in UPPER_LAYER_PROTOCOLS

    def _get_extension_header_factory_for(self, next_header):
        """Return the registered extension header factory or raise RuntimeError."""
        try:
            return self._ehf[next_header]
        except KeyError:
            raise RuntimeError("Could not get Extension Header factory for next_header={}.".format(next_header))

    def _get_upper_layer_protocol_factory_for(self, next_header):
        """Return the registered upper layer protocol factory or raise RuntimeError."""
        try:
            return self._ulpf[next_header]
        except KeyError:
            raise RuntimeError("Could not get Upper Layer Protocol factory for next_header={}.".format(next_header))

    def _parse_extension_headers(self, data, next_header, message_info):
        """Parse consecutive extension headers, following the next_header chain.

        Returns the first non-extension next_header value together with the
        list of parsed extension headers.
        """
        extension_headers = []

        while self._is_extension_header(next_header):
            factory = self._get_extension_header_factory_for(next_header)

            extension_header = factory.parse(data, message_info)

            next_header = extension_header.next_header

            extension_headers.append(extension_header)

        return next_header, extension_headers

    def _parse_upper_layer_protocol(self, data, next_header, message_info):
        factory = self._get_upper_layer_protocol_factory_for(next_header)

        return factory.parse(data, message_info)

    def parse(self, data, message_info):
        """Parse a full IPv6 packet from *data*, updating *message_info*
        with the source/destination addresses as a side effect."""
        ipv6_header = IPv6Header.from_bytes(data)

        message_info.source_ipv6 = ipv6_header.source_address
        message_info.destination_ipv6 = ipv6_header.destination_address

        next_header, extension_headers = self._parse_extension_headers(data, ipv6_header.next_header, message_info)

        upper_layer_protocol = self._parse_upper_layer_protocol(data, next_header, message_info)

        return IPv6Packet(ipv6_header, upper_layer_protocol, extension_headers)
class HopByHopOptionsFactory(object):
    """Factory that parses the option list carried by a HopByHop header."""

    _one_byte_padding = 0x00
    _many_bytes_padding = 0x01

    def __init__(self, options_factories=None):
        self._options_factories = {} if options_factories is None else options_factories

    def _get_HopByHopOption_value_factory(self, _type):
        # Registered factories only; unknown option types are an error.
        if _type not in self._options_factories:
            raise RuntimeError("Could not find HopByHopOption value factory for type={}.".format(_type))
        return self._options_factories[_type]

    def parse(self, data, message_info):
        parsed_options = []
        end = len(data.getvalue())

        while data.tell() < end:
            option_header = HopByHopOptionHeader.from_bytes(data)

            if option_header.type == self._one_byte_padding:
                # skip one byte padding
                data.read(1)
            elif option_header.type == self._many_bytes_padding:
                # skip n bytes padding
                data.read(option_header.length)
            else:
                value_factory = self._get_HopByHopOption_value_factory(option_header.type)
                option_bytes = data.read(option_header.length)
                value = value_factory.parse(io.BytesIO(option_bytes), message_info)
                parsed_options.append(HopByHopOption(option_header, value))

        return parsed_options
class HopByHopFactory(PacketFactory):
    """Factory that produces HopByHop extension headers from data."""

    def __init__(self, hop_by_hop_options_factory):
        self._hop_by_hop_options_factory = hop_by_hop_options_factory

    def _calculate_extension_header_length(self, hdr_ext_len):
        # hdr_ext_len counts 8-octet units beyond the first 8 octets.
        return (hdr_ext_len + 1) * 8

    def parse(self, data, message_info):
        next_header = ord(data.read(1))
        hdr_ext_len = ord(data.read(1))

        # The next_header and hdr_ext_len octets have already been consumed,
        # so subtract those two bytes from the total extension header length.
        remaining = self._calculate_extension_header_length(hdr_ext_len) - 2
        options_stream = io.BytesIO(data.read(remaining))

        options = self._hop_by_hop_options_factory.parse(options_stream, message_info)

        hop_by_hop = HopByHop(next_header, options, hdr_ext_len)
        message_info.payload_length += len(hop_by_hop)
        return hop_by_hop
class MPLOptionFactory(PacketFactory):
    """ Factory that produces MPL options for HopByHop extension header. """

    def parse(self, data, message_info):
        # message_info is unused; the option is fully described by its bytes.
        return MPLOption.from_bytes(data)
class UDPHeaderFactory:
    """ Factory that produces UDP header. """

    def parse(self, data, message_info):
        # message_info is unused; the header is fully described by its bytes.
        return UDPHeader.from_bytes(data)
class UdpBasedOnSrcDstPortsPayloadFactory:

    # TODO: Unittests

    """Factory that parses a UDP payload with a port-specific sub-factory."""

    def __init__(self, src_dst_port_based_payload_factories):
        """
        Args:
            src_dst_port_based_payload_factories (PacketFactory): Factories parse UDP payload based on source or destination port.
        """
        self._factories = src_dst_port_based_payload_factories

    def parse(self, data, message_info):
        # The source port wins when both ports have a registered factory.
        chosen = self._factories.get(message_info.dst_port)
        if message_info.src_port in self._factories:
            chosen = self._factories[message_info.src_port]

        if chosen is None:
            raise RuntimeError("Could not find factory to build UDP payload.")

        return chosen.parse(data, message_info)
class UDPDatagramFactory(PacketFactory):

    # TODO: Unittests

    """Factory that produces UDP datagrams."""

    def __init__(self, udp_header_factory, udp_payload_factory):
        self._udp_header_factory = udp_header_factory
        self._udp_payload_factory = udp_payload_factory

    def parse(self, data, message_info):
        header = self._udp_header_factory.parse(data, message_info)

        # Update message payload length: UDP header (8B) + payload length
        remaining = len(data.getvalue()) - data.tell()
        message_info.payload_length += len(header) + remaining

        # Expose the ports so port-based payload factories can dispatch.
        message_info.src_port = header.src_port
        message_info.dst_port = header.dst_port

        payload = self._udp_payload_factory.parse(data, message_info)
        return UDPDatagram(header, payload)
class ICMPv6Factory(PacketFactory):
    """ Factory that produces ICMPv6 messages from data. """

    def __init__(self, body_factories=None):
        self._body_factories = body_factories if body_factories is not None else {}

    def _get_icmpv6_body_factory(self, _type):
        # Look up a body parser by ICMPv6 message type; fall back to the
        # factory registered under the "default" key when one exists.
        try:
            return self._body_factories[_type]

        except KeyError:
            if "default" not in self._body_factories:
                raise RuntimeError("Could not find specialized factory to parse ICMP body. "
                                   "Unsupported ICMP type: {}".format(_type))

            default_factory = self._body_factories["default"]

            print("Could not find specialized factory to parse ICMP body. "
                  "Take the default one: {}".format(type(default_factory)))

            return default_factory

    def parse(self, data, message_info):
        header = ICMPv6Header.from_bytes(data)

        factory = self._get_icmpv6_body_factory(header.type)

        # Record the ICMPv6 header length plus the remaining (body) bytes.
        message_info.payload_length += len(header) + (len(data.getvalue()) - data.tell())

        return ICMPv6(header, factory.parse(data, message_info))
class ICMPv6EchoBodyFactory(PacketFactory):
    """ Factory that produces ICMPv6 echo message body. """

    def parse(self, data, message_info):
        # message_info is unused; the body is fully described by its bytes.
        return ICMPv6EchoBody.from_bytes(data)
class BytesPayload(ConvertibleToBytes, BuildableFromBytes):
    """ Class representing bytes payload. """

    def __init__(self, data):
        self.data = data

    def to_bytes(self):
        return bytearray(self.data)

    @classmethod
    def from_bytes(cls, data):
        # No parsing required; the raw bytes are kept as-is.
        return cls(data)

    def __len__(self):
        return len(self.data)
class BytesPayloadFactory(PacketFactory):
    """ Factory that produces bytes payload. """

    def parse(self, data, message_info):
        # Consume everything left in the stream as an opaque payload.
        return BytesPayload(data.read())
class ICMPv6EchoBody(ConvertibleToBytes, BuildableFromBytes):
    """Body of ICMPv6 echo messages: identifier, sequence number and data."""

    _header_length = 4

    def __init__(self, identifier, sequence_number, data):
        self.identifier = identifier
        self.sequence_number = sequence_number
        self.data = data

    def to_bytes(self):
        # Identifier and sequence number are 16-bit big-endian fields,
        # followed by the opaque echo data.
        return struct.pack(">HH", self.identifier, self.sequence_number) + self.data

    @classmethod
    def from_bytes(cls, data):
        identifier, sequence_number = struct.unpack(">HH", data.read(4))
        return cls(identifier, sequence_number, data.read())

    def __len__(self):
        return self._header_length + len(self.data)
class ICMPv6DestinationUnreachableFactory(PacketFactory):
    # NOTE(review): docstring copied from the echo factory; this actually
    # parses Destination Unreachable bodies.
    """ Factory that produces ICMPv6 echo message body. """

    def parse(self, data, message_info):
        return ICMPv6DestinationUnreachable.from_bytes(data)
class ICMPv6DestinationUnreachable(ConvertibleToBytes, BuildableFromBytes):
    """Body of ICMPv6 Destination Unreachable messages."""

    _header_length = 4
    # The first four body octets are reserved and must be zero.
    _unused = 0

    def __init__(self, data):
        self.data = data

    def to_bytes(self):
        serialized = bytearray(struct.pack(">I", self._unused))
        serialized += self.data
        return serialized

    @classmethod
    def from_bytes(cls, data):
        unused = struct.unpack(">I", data.read(4))[0]
        if unused != 0:
            raise RuntimeError(
                "Invalid value of unused field in the ICMPv6 Destination Unreachable data. Expected value: 0.")
        return cls(bytearray(data.read()))

    def __len__(self):
        return self._header_length + len(self.data)
| bsd-3-clause |
wgwoods/lorax | src/pylorax/api/bisect.py | 2 | 1682 | #
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def insort_left(a, x, key=None, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    :param a: sorted list
    :type a: list
    :param x: item to insert into the list
    :type x: object
    :param key: Function to use to compare items in the list
    :type key: function
    :returns: index where the item was inserted
    :rtype: int

    If x is already in a, insert it to the left of the leftmost x.
    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.

    This is a modified version of bisect.insort_left that can use a
    function for the compare, and returns the index position where it
    was inserted.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    if key is None:
        key = lambda item: item

    # Classic binary search for the leftmost position whose key is >= key(x).
    target = key(x)
    while lo < hi:
        mid = (lo + hi) // 2
        if key(a[mid]) < target:
            lo = mid + 1
        else:
            hi = mid

    a.insert(lo, x)
    return lo
| gpl-2.0 |
stackforge/monasca-api | monasca_api/tests/test_alarms.py | 1 | 94731 | # -*- coding: utf-8 -*-
# Copyright 2015 Cray Inc.
# (C) Copyright 2015,2017 Hewlett Packard Enterprise Development LP
# Copyright 2016-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import copy
import datetime
import json
import falcon.testing
import fixtures
import testtools.matchers as matchers
from unittest.mock import Mock
import oslo_config.fixture
import six
from monasca_api.common.repositories.model import sub_alarm_definition
from monasca_api.tests import base
from monasca_api.v2.common.exceptions import HTTPUnprocessableEntityError
from monasca_api.v2.reference import alarm_definitions
from monasca_api.v2.reference import alarms
# Shorthand for the global oslo.config configuration object.
CONF = oslo_config.cfg.CONF

# Tenant/project id used by every simulated request in these tests.
TENANT_ID = u"fedcba9876543210fedcba9876543210"

# Canonical alarm-history record.  It is a superset of what InfluxDB stores
# and what the API returns; the fixtures/tests below delete the fields that
# do not apply to each representation (see the inline notes).
ALARM_HISTORY = OrderedDict((
    # Only present in data returned from InfluxDB:
    (u"time", u"2015-01-01T00:00:00.000Z"),
    # Only present in data returned from API:
    (u"timestamp", u"2015-01-01T00:00:00.000Z"),
    (u"alarm_id", u"10000000-1000-1000-1000-10000000000"),
    (u"metrics", [{
        u"id": None,
        u"name": u"test.metric",
        u"dimensions": {u"dim1": u"dval1", u"dim2": u"dval2"}
    }]),
    (u"new_state", u"ALARM"),
    (u"old_state", u"OK"),
    (u"reason", u"Alarm reason"),
    (u"reason_data", u"{}"),
    (u"sub_alarms", [{
        u"sub_alarm_expression": {
            u"function": u"MAX",
            # Only present in data returned from InfluxDB:
            u"metric_definition": {
                u"id": None,
                u"name": u"test.metric",
                u"dimensions": {u"dim1": u"dval1"},
            },
            # Only present in data returned from API:
            u'metric_name': u'test.metric',
            # Only present in data returned from API:
            u'dimensions': {u'dim1': u'dval1'},
            u"operator": u"GT",
            u"threshold": 50.0,
            u"period": 60,
            u"periods": 1
        },
        u"sub_alarm_state": u"ALARM",
        u"current_values": [50.1],
    }]),
    # Only present in data returned from InfluxDB:
    (u"tenant_id", TENANT_ID),
    # Only present in data returned from API:
    (u"id", u"1420070400000"),
))
class InfluxClientAlarmHistoryResponseFixture(fixtures.MockPatch):
    """Mock-patch fixture that makes the patched InfluxDB client return a
    canned alarm-state-history query result derived from ALARM_HISTORY."""

    def _build_series(self, name, column_dict):
        # Mimic the raw InfluxDB "series" structure: a series name, the
        # column names, and a list of value rows (a single row here).
        return {
            "name": name,
            "columns": column_dict.keys(),
            "values": [column_dict.values(), ],
        }

    def _setUp(self):
        super(InfluxClientAlarmHistoryResponseFixture, self)._setUp()

        mock_data = copy.deepcopy(ALARM_HISTORY)

        # Strip the API-only fields and the expanded sub-alarm metric fields
        # so the canned data matches what InfluxDB actually stores.
        del mock_data[u"id"]
        del mock_data[u"timestamp"]
        del mock_data[u"sub_alarms"][0][u"sub_alarm_expression"][u"metric_name"]
        del mock_data[u"sub_alarms"][0][u"sub_alarm_expression"][u"dimensions"]
        # InfluxDB stores these columns as JSON-encoded strings.
        mock_data[u"sub_alarms"] = json.dumps(mock_data[u"sub_alarms"])
        mock_data[u"metrics"] = json.dumps(mock_data[u"metrics"])

        self.mock.return_value.query.return_value.raw = {
            "series": [self._build_series("alarm_state_history", mock_data)]
        }
class RESTResponseEquals(object):
    """Match if the supplied data contains a single string containing a JSON
    object which decodes to match expected_data, excluding the contents of
    the 'links' key.
    """

    def __init__(self, expected_data):
        self.expected_data = expected_data

        # Hypermedia links vary with deployment, so they are excluded from
        # the comparison.  NOTE: this mutates the caller's dict in place.
        if u"links" in expected_data:
            del expected_data[u"links"]

    def __str__(self):
        # Bug fix: this previously referenced the non-existent attribute
        # "self.expected", raising AttributeError whenever the matcher was
        # rendered (e.g. in a mismatch report).
        return 'RESTResponseEquals(%s)' % (self.expected_data,)

    def match(self, actual):
        """Return None on match, or a testtools Mismatch object on failure."""
        response_data = actual.json

        if u"links" in response_data:
            del response_data[u"links"]

        return matchers.Equals(self.expected_data).match(response_data)
class AlarmTestBase(base.BaseApiTestCase):
    """Common setup for alarm API tests: mocks the Kafka publisher and
    points every repository driver at its concrete implementation."""

    def setUp(self):
        super(AlarmTestBase, self).setUp()
        # No messages should actually reach Kafka during these tests.
        self.useFixture(fixtures.MockPatch(
            'monasca_api.common.messaging.kafka_publisher.KafkaPublisher'))

        # [messaging]
        self.conf_override(
            driver='monasca_api.common.messaging.'
                   'kafka_publisher:KafkaPublisher',
            group='messaging')

        # [repositories]
        self.conf_override(
            alarms_driver='monasca_api.common.repositories.sqla.'
                          'alarms_repository:AlarmsRepository',
            group='repositories')
        self.conf_override(
            alarm_definitions_driver='monasca_api.common.repositories.'
                                     'alarm_definitions_repository:'
                                     'AlarmDefinitionsRepository',
            group='repositories')
        self.conf_override(
            metrics_driver='monasca_api.common.repositories.influxdb.'
                           'metrics_repository:MetricsRepository',
            group='repositories')
class TestAlarmsStateHistory(AlarmTestBase):
    """Tests for the /v2.0/alarms/.../state-history endpoints, backed by a
    mocked SQL alarms repository and a canned InfluxDB response."""

    def setUp(self):
        super(TestAlarmsStateHistory, self).setUp()

        self.useFixture(fixtures.MockPatch(
            'monasca_api.common.repositories.sqla.'
            'alarms_repository.AlarmsRepository'))
        self.useFixture(InfluxClientAlarmHistoryResponseFixture(
            'monasca_api.common.repositories.influxdb.'
            'metrics_repository.client.InfluxDBClient'))

        self.alarms_resource = alarms.AlarmsStateHistory()

        self.app.add_route(
            '/v2.0/alarms/{alarm_id}/state-history/', self.alarms_resource)
        self.app.add_route(
            '/v2.0/alarms/state-history/', self.alarms_resource)

    def test_alarm_state_history(self):
        # The API response is ALARM_HISTORY minus the InfluxDB-only fields.
        expected_elements = {u"elements": [dict(ALARM_HISTORY)]}
        del expected_elements[u"elements"][0][u"time"]
        del (expected_elements[u"elements"][0][u"sub_alarms"][0]
             [u"sub_alarm_expression"][u"metric_definition"])
        del expected_elements[u"elements"][0][u"tenant_id"]

        response = self.simulate_request(
            path=u'/v2.0/alarms/%s/state-history/' % ALARM_HISTORY[u"alarm_id"],
            headers={
                'X-Roles': CONF.security.default_authorized_roles[0],
                'X-Tenant-Id': TENANT_ID,
            })

        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

    def test_alarm_state_history_no_alarm_id(self):
        # Without an alarm id in the path the endpoint returns no elements.
        expected_elements = {u'elements': []}

        response = self.simulate_request(
            path=u'/v2.0/alarms/state-history/',
            headers={
                'X-Roles': CONF.security.default_authorized_roles[0],
                'X-Tenant-Id': TENANT_ID,
            })

        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))
class TestAlarmsCount(AlarmTestBase):
    """Tests for the GET /v2.0/alarms/count endpoint.

    The SQL repository is mocked, so each test stubs get_alarms_count and
    asserts on the HTTP status and the JSON counts/columns payload.
    """

    def setUp(self):
        super(TestAlarmsCount, self).setUp()

        self.alarms_get_alarms_count_mock = self.useFixture(fixtures.MockPatch(
            'monasca_api.common.repositories.sqla.alarms_repository.AlarmsRepository'
        )).mock

        self.alarms_count_resource = alarms.AlarmsCount()
        self.app.add_route('/v2.0/alarms/count',
                           self.alarms_count_resource)

    def test_get_alarm_count(self):
        return_value = self.alarms_get_alarms_count_mock.return_value
        expected_elements = {'counts': [[4]], 'columns': ['count']}
        return_value.get_alarms_count.return_value = [{'count': 4}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

    def test_get_alarm_count_state_parameter(self):
        return_value = self.alarms_get_alarms_count_mock.return_value
        expected_elements = {'counts': [[4]], 'columns': ['count']}
        return_value.get_alarms_count.return_value = [{'count': 4}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='state=OK')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

    def test_get_alarm_count_severity_parameter(self):
        return_value = self.alarms_get_alarms_count_mock.return_value
        expected_elements = {'counts': [[4]], 'columns': ['count']}
        return_value.get_alarms_count.return_value = [{'count': 4}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='severity=LOW')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

    def test_get_alarm_count_group_by_parameter(self):
        # Grouping adds one column per group_by field, plus the count column.
        return_value = self.alarms_get_alarms_count_mock.return_value
        expected_elements = {'columns': ['count', 'metric_name'],
                             'counts': [[2, 'cpu.idle_perc'],
                                        [1, 'cpu.sys_mem']]}
        return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc', 'count': 2},
                                                      {'metric_name': u'cpu.sys_mem', 'count': 1}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='group_by=metric_name')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

        expected_elements = {'columns': ['count', 'metric_name', 'dimension_name'],
                             'counts': [[2, 'cpu.idle_perc', 'hostname'],
                                        [1, 'cpu.sys_mem', 'hostname']]}
        return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc',
                                                       'dimension_name': 'hostname',
                                                       'count': 2},
                                                      {'metric_name': u'cpu.sys_mem',
                                                       'dimension_name': 'hostname',
                                                       'count': 1}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='group_by=metric_name,dimension_name')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

    def test_get_alarm_count_incorrect_group_by_parameter(self):
        # An unknown group_by field is rejected with 422 before the
        # repository result matters.
        return_value = self.alarms_get_alarms_count_mock.return_value
        return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc', 'count': 2},
                                                      {'metric_name': u'cpu.sys_mem', 'count': 1}]

        response = self.simulate_request(
            path='/v2.0/alarms/count',
            headers={'X-Roles': CONF.security.default_authorized_roles[0],
                     'X-Tenant-Id': TENANT_ID},
            method='GET',
            query_string='group_by=hahahah')
        self.assertEqual(response.status, falcon.HTTP_422)

    def test_get_alarm_count_offset(self):
        return_value = self.alarms_get_alarms_count_mock.return_value
        expected_elements = {'columns': ['count', 'metric_name'],
                             'counts': [[2, 'cpu.idle_perc']]}
        return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc', 'count': 2}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='group_by=metric_name&offset=1')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

    def test_get_alarm_count_incorrect_offset(self):
        # A non-integer offset yields 422 with a descriptive error payload.
        return_value = self.alarms_get_alarms_count_mock.return_value
        expected_elements = {'description': 'Offset must be a valid integer, was hahahah',
                             'title': 'Unprocessable Entity'}
        return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc', 'count': 2}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='group_by=metric_name&offset=hahahah')
        self.assertEqual(response.status, falcon.HTTP_422)
        self.assertThat(response, RESTResponseEquals(expected_elements))

    def test_get_alarm_count_limit_parameter(self):
        return_value = self.alarms_get_alarms_count_mock.return_value
        expected_elements = {'counts': [[4]], 'columns': ['count']}
        return_value.get_alarms_count.return_value = [{'count': 4}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='limit=1')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

        # limit=0 returns an empty counts list even though the repository
        # reported a count.
        return_value.get_alarms_count.return_value = [{'count': 4}]
        expected_elements = {'counts': [], 'columns': ['count']}

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='limit=0')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

    def test_get_alarm_count_when_count_is_zero(self):
        # With group_by, a zero count still produces one row whose group
        # column is None.
        return_value = self.alarms_get_alarms_count_mock.return_value
        expected_elements = {'columns': ['count', 'metric_name'], 'counts': [[0, None]]}
        return_value.get_alarms_count.return_value = [{'count': 0}]

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET',
                                         query_string='group_by=metric_name')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))

        expected_elements = {'columns': ['count'], 'counts': [[0]]}

        response = self.simulate_request(path='/v2.0/alarms/count',
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method='GET')
        self.assertEqual(response.status, falcon.HTTP_200)
        self.assertThat(response, RESTResponseEquals(expected_elements))
class TestAlarms(AlarmTestBase):
def setUp(self):
    """Mock the SQL alarms repository and mount the Alarms resource on both
    the collection and single-alarm routes."""
    super(TestAlarms, self).setUp()

    self.alarms_repo_mock = self.useFixture(fixtures.MockPatch(
        'monasca_api.common.repositories.sqla.alarms_repository.AlarmsRepository'
    )).mock

    self.alarms_resource = alarms.Alarms()
    self.app.add_route('/v2.0/alarms',
                       self.alarms_resource)
    self.app.add_route('/v2.0/alarms/{alarm_id}',
                       self.alarms_resource)
def test_alarms_get_alarms(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {
'elements': [{
'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}]}
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarm(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarm.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}
response = self.simulate_request(path='/v2.0/alarms/1',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarms_state_parameter(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {
'elements': [{
'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}]}
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='state=OK')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarms_severity_parameter(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {
'elements': [{
'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}]}
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='severity=LOW')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarms_with_offset(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {
'elements': [{
'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}]}
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='offset=1')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarms_with_incorrect_offset(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
response = self.simulate_request(
path='/v2.0/alarms',
headers={'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='offset=ninccorect_offset')
self.assertEqual(response.status, falcon.HTTP_422)
def test_alarms_get_alarms_sort_by_parameter(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {
'elements': [{
'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}]}
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='sort_by=alarm_id')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarms_incorrect_sort_by_parameter(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='sort_by=random_string')
self.assertEqual(response.status, falcon.HTTP_422)
def test_alarms_delete_alarms(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarm_metrics.return_value = \
[{'alarm_id': u'2',
'name': u'cpu.idle_perc',
'dimensions': u'instance_id=123,service=monitoring'}]
return_value.get_sub_alarms.return_value = \
[{'sub_alarm_id': u'1',
'alarm_id': u'2',
'expression': u'avg(cpu.idle_perc{instance_id=123, service=monitoring}) > 10',
'alarm_definition_id': u'1'}]
response = self.simulate_request(path='/v2.0/alarms/2',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='DELETE')
self.assertEqual(response.status, falcon.HTTP_204)
def test_alarms_put(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarm_metrics.return_value = \
[{'alarm_id': u'2',
'name': u'cpu.idle_perc',
'dimensions': u'instance_id=123,service=monitoring'}]
return_value.get_sub_alarms.return_value = \
[{'sub_alarm_id': u'1',
'alarm_id': u'2',
'expression': u'avg(cpu.idle_perc{instance_id=123, service=monitoring}) > 10',
'alarm_definition_id': u'1'}]
return_value.update_alarm.return_value = \
({'state': u'UNDETERMINED',
'link': u'http://somesite.com/this-alarm-info',
'lifecycle_state': u'OPEN'},
1550835096962)
return_value.get_alarm_definition.return_value = \
{'description': None,
'tenant_id': u'bob',
'created_at': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'updated_at': datetime.datetime(2019, 2, 22, 12, 44, 25, 850963),
'name': u'90% CPU',
'actions_enabled': False,
'match_by': None,
'deleted_at': None,
'expression': u'avg(cpu.idle_perc{instance_id=123, service=monitoring}) > 10',
'id': u'1',
'severity': u'LOW'}
return_value.get_alarm.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'updated_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'alarm_id': '1',
'lifecycle_state': 'ALARM'}]
alarm_new_fields = {'state': 'ALARM',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info'}
expected_alarm = {u'alarm_definition': {u'id': u'1',
u'links': [
{u'href': u'http://falconframework.org'
u'/v2.0/alarm-definitions/1',
u'rel': u'self'}],
u'name': u'90% CPU',
u'severity': u'LOW'},
u'created_timestamp': u'2019-02-22T12:44:25.850947Z',
u'id': u'1',
u'lifecycle_state': u'ALARM',
u'link': u'http://somesite.com/this-alarm-info',
u'metrics': [{u'dimensions': {u'instance_id': u'123',
u'service': u'monitoring'},
u'name': u'cpu.idle_perc'}],
u'state': u'OK',
u'state_updated_timestamp': u'2019-02-22T12:44:25.850947Z',
u'updated_timestamp': u'2019-02-22T12:44:25.850947Z'}
response = self.simulate_request(path='/v2.0/alarms/2',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='PUT',
body=json.dumps(alarm_new_fields))
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarm))
def test_alarms_put_without_link(self):
alarm_new_fields = {'state': 'ALARM',
'lifecycle_state': 'OPEN'}
expected_response = {u'description': u"Field 'link' is required",
u'title': u'Unprocessable Entity'}
response = self.simulate_request(path='/v2.0/alarms/2',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='PUT',
body=json.dumps(alarm_new_fields))
self.assertEqual(response.status, falcon.HTTP_422)
self.assertThat(response, RESTResponseEquals(expected_response))
def test_alarms_put_without_lifecycle_state(self):
alarm_new_fields = {'state': 'ALARM',
'link': 'http://somesite.com/this-alarm-info'}
expected_response = {u'description': u"Field 'lifecycle_state' is required",
u'title': u'Unprocessable Entity'}
response = self.simulate_request(path='/v2.0/alarms/2',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='PUT',
body=json.dumps(alarm_new_fields))
self.assertEqual(response.status, falcon.HTTP_422)
self.assertThat(response, RESTResponseEquals(expected_response))
def test_alarms_put_without_state(self):
alarm_new_fields = {'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info'}
expected_response = {u'description': u"Field 'state' is required",
u'title': u'Unprocessable Entity'}
response = self.simulate_request(path='/v2.0/alarms/2',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='PUT',
body=json.dumps(alarm_new_fields))
self.assertEqual(response.status, falcon.HTTP_422)
self.assertThat(response, RESTResponseEquals(expected_response))
def test_alarms_patch(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarm_metrics.return_value = \
[{'alarm_id': u'2',
'name': u'cpu.idle_perc',
'dimensions': u'instance_id=123,service=monitoring'}]
return_value.get_sub_alarms.return_value = \
[{'sub_alarm_id': u'1',
'alarm_id': u'2',
'expression': u'avg(cpu.idle_perc{instance_id=123, service=monitoring}) > 10',
'alarm_definition_id': u'1'}]
return_value.update_alarm.return_value = \
({'state': u'UNDETERMINED',
'link': u'http://somesite.com/this-alarm-info',
'lifecycle_state': u'OPEN'},
1550835096962)
return_value.get_alarm_definition.return_value = \
{'description': None,
'tenant_id': u'bob',
'created_at': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'updated_at': datetime.datetime(2019, 2, 22, 12, 44, 25, 850963),
'name': u'90% CPU',
'actions_enabled': False,
'match_by': None,
'deleted_at': None,
'expression': u'avg(cpu.idle_perc{instance_id=123, service=monitoring}) > 10',
'id': u'1',
'severity': u'LOW'}
return_value.get_alarm.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'updated_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'alarm_id': '1',
'lifecycle_state': 'ALARM'}]
alarm_new_fields = {'state': 'ALARM',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info'}
expected_alarm = {u'alarm_definition': {u'id': u'1',
u'links': [
{u'href': u'http://falconframework.org'
u'/v2.0/alarm-definitions/1',
u'rel': u'self'}],
u'name': u'90% CPU',
u'severity': u'LOW'},
u'created_timestamp': u'2019-02-22T12:44:25.850947Z',
u'id': u'1',
u'lifecycle_state': u'ALARM',
u'link': u'http://somesite.com/this-alarm-info',
u'metrics': [{u'dimensions': {u'instance_id': u'123',
u'service': u'monitoring'},
u'name': u'cpu.idle_perc'}],
u'state': u'OK',
u'state_updated_timestamp': u'2019-02-22T12:44:25.850947Z',
u'updated_timestamp': u'2019-02-22T12:44:25.850947Z'}
response = self.simulate_request(path='/v2.0/alarms/2',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='PATCH',
body=json.dumps(alarm_new_fields))
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarm))
def test_alarms_patch_without_new_fields(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarm_metrics.return_value = \
[{'alarm_id': u'2',
'name': u'cpu.idle_perc',
'dimensions': u'instance_id=123,service=monitoring'}]
return_value.get_sub_alarms.return_value = \
[{'sub_alarm_id': u'1',
'alarm_id': u'2',
'expression': u'avg(cpu.idle_perc{instance_id=123, service=monitoring}) > 10',
'alarm_definition_id': u'1'}]
return_value.update_alarm.return_value = \
({'state': u'UNDETERMINED',
'link': u'http://somesite.com/this-alarm-info',
'lifecycle_state': u'OPEN'},
1550835096962)
return_value.get_alarm_definition.return_value = \
{'description': None,
'tenant_id': u'bob',
'created_at': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'updated_at': datetime.datetime(2019, 2, 22, 12, 44, 25, 850963),
'name': u'90% CPU',
'actions_enabled': False,
'match_by': None,
'deleted_at': None,
'expression': u'avg(cpu.idle_perc{instance_id=123, service=monitoring}) > 10',
'id': u'1',
'severity': u'LOW'}
return_value.get_alarm.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'updated_timestamp': datetime.datetime(2019, 2, 22, 12, 44, 25, 850947),
'alarm_id': '1',
'lifecycle_state': 'ALARM'}]
alarm_new_fields = {}
expected_alarm = {u'alarm_definition': {u'id': u'1',
u'links': [
{u'href': u'http://falconframework.org'
u'/v2.0/alarm-definitions/1',
u'rel': u'self'}],
u'name': u'90% CPU',
u'severity': u'LOW'},
u'created_timestamp': u'2019-02-22T12:44:25.850947Z',
u'id': u'1',
u'lifecycle_state': u'ALARM',
u'link': u'http://somesite.com/this-alarm-info',
u'metrics': [{u'dimensions': {u'instance_id': u'123',
u'service': u'monitoring'},
u'name': u'cpu.idle_perc'}],
u'state': u'OK',
u'state_updated_timestamp': u'2019-02-22T12:44:25.850947Z',
u'updated_timestamp': u'2019-02-22T12:44:25.850947Z'}
response = self.simulate_request(path='/v2.0/alarms/2',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='PATCH',
body=json.dumps(alarm_new_fields))
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarm))
class TestAlarmDefinition(AlarmTestBase):
    def setUp(self):
        """Wire an AlarmDefinitions resource against a mocked repository."""
        super(TestAlarmDefinition, self).setUp()
        # Patch the repository class so no database is needed.
        self.alarm_def_repo_mock = self.useFixture(fixtures.MockPatch(
            'monasca_api.common.repositories.'
            'alarm_definitions_repository.AlarmDefinitionsRepository'
        )).mock
        self.alarm_definition_resource = alarm_definitions.AlarmDefinitions()
        # Stub out event publishing so tests never touch the message queue;
        # keep a handle for assertions on what would have been sent.
        self.alarm_definition_resource.send_event = Mock()
        self._send_event = self.alarm_definition_resource.send_event
        self.app.add_route("/v2.0/alarm-definitions/",
                           self.alarm_definition_resource)
        self.app.add_route("/v2.0/alarm-definitions/{alarm_definition_id}",
                           self.alarm_definition_resource)
    def test_alarm_definition_create(self):
        """POST a minimal alarm definition and verify the 201 payload.

        Fields omitted from the request (actions, description, match_by,
        severity, deterministic) must come back with the defaults shown
        in ``expected_data``.
        """
        return_value = self.alarm_def_repo_mock.return_value
        # No existing definition with this name, so creation proceeds.
        return_value.get_alarm_definitions.return_value = []
        return_value.create_alarm_definition.return_value = u"00000001-0001-0001-0001-000000000001"
        alarm_def = {
            "name": "Test Definition",
            "expression": "test.metric > 10"
        }
        expected_data = {
            u'alarm_actions': [],
            u'ok_actions': [],
            u'description': u'',
            u'match_by': [],
            u'name': u'Test Definition',
            u'actions_enabled': True,
            u'undetermined_actions': [],
            u'expression': u'test.metric > 10',
            u'deterministic': False,
            u'id': u'00000001-0001-0001-0001-000000000001',
            u'severity': u'LOW',
        }
        response = self.simulate_request(path="/v2.0/alarm-definitions/",
                                         headers={'X-Roles':
                                                  CONF.security.default_authorized_roles[0],
                                                  'X-Tenant-Id': TENANT_ID},
                                         method="POST",
                                         body=json.dumps(alarm_def))
        self.assertEqual(response.status, falcon.HTTP_201)
        self.assertThat(response, RESTResponseEquals(expected_data))
    def test_alarm_definition_create_with_valid_expressions(self):
        """Every expression in the list must be accepted (201).

        The expressions exercise non-ASCII metric/dimension names, nested
        boolean operators, optional periods, and whitespace variations.
        """
        return_value = self.alarm_def_repo_mock.return_value
        return_value.get_alarm_definitions.return_value = []
        return_value.create_alarm_definition.return_value = u"00000001-0001-0001-0001-000000000001"
        valid_expressions = [
            u"max(-_.千幸福的笑脸{घोड़ा=馬, "
            u"dn2=dv2,千幸福的笑脸घ=千幸福的笑脸घ}) gte 100 "
            u"times 3 && "
            u"(min(ເຮືອນ{dn3=dv3,家=дом}) < 10 or sum(biz{dn5=dv5}) >99 and "
            u"count(fizzle) lt 0or count(baz) > 1)",
            u"max(foo{hostname=mini-mon,千=千}, 120) > 100 and (max(bar)>100 "
            u" or max(biz)>100)",
            u"max(foo)>=100",
            u"test_metric{this=that, that = this} < 1",
            u"max ( 3test_metric5 { this = that }) lt 5 times 3",
            u"3test_metric5 lt 3",
            u"ntp.offset > 1 or ntp.offset < -5",
        ]
        alarm_def = {
            u'name': u'Test Definition',
            u'expression': u'test.metric > 10'
        }
        expected_data = {
            u'alarm_actions': [],
            u'ok_actions': [],
            u'description': u'',
            u'match_by': [],
            u'name': u'Test Definition',
            u'actions_enabled': True,
            u'undetermined_actions': [],
            u'expression': u'test.metric > 10',
            u'deterministic': False,
            u'id': u'00000001-0001-0001-0001-000000000001',
            u'severity': u'LOW',
        }
        for expression in valid_expressions:
            # The expression is echoed back, so update the expectation too.
            alarm_def[u'expression'] = expression
            expected_data[u'expression'] = expression
            response = self.simulate_request(path="/v2.0/alarm-definitions/",
                                             headers={'X-Roles':
                                                      CONF.security.default_authorized_roles[0],
                                                      'X-Tenant-Id': TENANT_ID},
                                             method="POST",
                                             body=json.dumps(alarm_def))
            self.assertEqual(response.status, falcon.HTTP_201,
                             u'Expression {} should have passed'.format(expression))
            self.assertThat(response, RESTResponseEquals(expected_data))
def test_alarm_definition_create_with_invalid_expressions(self):
bad_expressions = [
"test=metric > 10",
"test.metric{dim=this=that} > 10",
"test_metric(5) > 2"
"test_metric > 10 and or alt_metric > 10"
]
alarm_def = {
u'name': 'Test Definition',
u'expression': 'test.metric > 10'
}
for expression in bad_expressions:
alarm_def[u'expression'] = expression
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="POST",
body=json.dumps(alarm_def))
self.assertEqual(response.status, '422 Unprocessable Entity',
u'Expression {} should have failed'.format(expression))
    def test_alarm_definition_create_with_occupied_alarm_definition_name(self):
        """Creating a definition whose name already exists returns 409."""
        # The repository reports an existing definition for this tenant.
        self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = [{
            'alarm_actions': None,
            'ok_actions': None,
            'description': None,
            'match_by': u'hostname',
            'name': u'Test Alarm',
            'actions_enabled': 1,
            'undetermined_actions': None,
            'expression': u'max(test.metric{hostname=host}) gte 1',
            'id': u'00000001-0001-0001-0001-000000000001',
            'severity': u'LOW'
        }]
        alarm_def = {
            u'name': u'Test Definition',
            u'expression': u'max(test.metric{hostname=host}) gte 1'
        }
        response = self.simulate_request(
            path="/v2.0/alarm-definitions/",
            headers={'X-Roles': CONF.security.default_authorized_roles[0],
                     'X-Tenant-Id': TENANT_ID},
            method="POST",
            body=json.dumps(alarm_def))
        self.assertEqual(response.status, falcon.HTTP_409)
    def test_alarm_definition_update(self):
        """PUT a full alarm definition and verify the serialized response.

        The repository mock returns the updated definition plus a
        sub-alarm diff (old/changed/new/unchanged); the response body is
        expected to include a self link and a list-form ``match_by``.
        """
        self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = []
        # update_or_patch returns (definition row, sub-alarm-definition diff).
        self.alarm_def_repo_mock.return_value.update_or_patch_alarm_definition.return_value = (
            {u'alarm_actions': [],
             u'ok_actions': [],
             u'description': u'Non-ASCII character: \u2603',
             u'match_by': u'hostname',
             u'name': u'Test Alarm',
             u'actions_enabled': True,
             u'undetermined_actions': [],
             u'is_deterministic': False,
             u'expression': u'max(test.metric{hostname=host}) gte 1',
             u'id': u'00000001-0001-0001-0001-000000000001',
             u'severity': u'LOW'},
            {'old': {'11111': sub_alarm_definition.SubAlarmDefinition(
                row={'id': '11111',
                     'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
                     'function': 'max',
                     'metric_name': 'test.metric',
                     'dimensions': 'hostname=host',
                     'operator': 'gte',
                     'threshold': 1,
                     'period': 60,
                     'is_deterministic': False,
                     'periods': 1})},
             'changed': {},
             'new': {},
             'unchanged': {'11111': sub_alarm_definition.SubAlarmDefinition(
                 row={'id': '11111',
                      'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
                      'function': 'max',
                      'metric_name': 'test.metric',
                      'dimensions': 'hostname=host',
                      'operator': 'gte',
                      'threshold': 1,
                      'period': 60,
                      'is_deterministic': False,
                      'periods': 1})}})
        expected_def = {
            u'id': u'00000001-0001-0001-0001-000000000001',
            u'alarm_actions': [],
            u'ok_actions': [],
            u'description': u'Non-ASCII character: \u2603',
            u'links': [{u'href': u'http://falconframework.org/v2.0/alarm-definitions/'
                                 u'00000001-0001-0001-0001-000000000001',
                        u'rel': u'self'}],
            u'match_by': [u'hostname'],
            u'name': u'Test Alarm',
            u'actions_enabled': True,
            u'undetermined_actions': [],
            u'deterministic': False,
            u'expression': u'max(test.metric{hostname=host}) gte 1',
            u'severity': u'LOW',
        }
        alarm_def = {
            u'alarm_actions': [],
            u'ok_actions': [],
            u'description': u'',
            u'match_by': [u'hostname'],
            u'name': u'Test Alarm',
            u'actions_enabled': True,
            u'undetermined_actions': [],
            u'deterministic': False,
            u'expression': u'max(test.metric{hostname=host}) gte 1',
            u'severity': u'LOW',
        }
        result = self.simulate_request(path="/v2.0/alarm-definitions/%s" % expected_def[u'id'],
                                       headers={'X-Roles':
                                                CONF.security.default_authorized_roles[0],
                                                'X-Tenant-Id': TENANT_ID},
                                       method="PUT",
                                       body=json.dumps(alarm_def))
        self.assertEqual(result.status, falcon.HTTP_200)
        result_def = result.json
        self.assertEqual(result_def, expected_def)
    def test_alarm_definition_patch_incorrect_id(self):
        """PATCH against an id that conflicts with an existing name is 409."""
        # An existing definition owns the target name.
        self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = [{
            'alarm_actions': None,
            'ok_actions': None,
            'description': None,
            'match_by': u'hostname',
            'name': u'Test Alarm',
            'actions_enabled': 1,
            'undetermined_actions': None,
            'expression': u'max(test.metric{hostname=host}) gte 1',
            'id': u'00000001-0001-0001-0001-000000000001',
            'severity': u'LOW'
        }]
        alarm_def = {
            u'name': u'Test Alarm Definition Updated',
        }
        # Note the path id differs from the mocked definition's id.
        response = self.simulate_request(
            path="/v2.0/alarm-definitions/9999999-0001-0001-0001-000000000001",
            headers={
                'X-Roles': CONF.security.default_authorized_roles[0],
                'X-Tenant-Id': TENANT_ID},
            method="PATCH",
            body=json.dumps(alarm_def))
        self.assertEqual(response.status, falcon.HTTP_409)
    def test_alarm_definition_put_incorrect_period_value(self):
        """An expression with 'times 0' (zero periods) is rejected with 422."""
        self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = []
        # Zero is not a valid repeat count for an expression's periods.
        period = 'times 0'
        alarm_def = {
            u'alarm_actions': [],
            u'ok_actions': [],
            u'description': u'',
            u'match_by': [u'hostname'],
            u'name': u'Test Alarm',
            u'actions_enabled': True,
            u'undetermined_actions': [],
            u'deterministic': False,
            u'expression': u'max(test.metric{hostname=host}) gte 1 ' + period,
            u'severity': u'LOW',
        }
        response = self.simulate_request(
            path="/v2.0/alarm-definitions/00000001-0001-0001-0001-000000000001",
            headers={'X-Roles': CONF.security.default_authorized_roles[0],
                     'X-Tenant-Id': TENANT_ID},
            method="PUT",
            body=json.dumps(alarm_def))
        self.assertEqual(response.status, falcon.HTTP_422)
def test_alarm_definition_patch_no_id(self):
alarm_def = {
u'name': u'Test Alarm Definition Updated',
}
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PATCH",
body=json.dumps(alarm_def))
self.assertEqual(response.status, falcon.HTTP_400)
def test_alarm_definition_update_no_id(self):
alarm_def = {
u'name': u'Test Alarm Definition Updated',
}
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PUT",
body=json.dumps(alarm_def))
self.assertEqual(response.status, falcon.HTTP_400)
    def test_alarm_definition_delete(self):
        """Deleting an existing alarm definition returns 204 No Content."""
        # Sub-alarm definitions, affected metrics, and sub-alarms the
        # handler looks up before deleting.
        self.alarm_def_repo_mock.return_value.get_get_sub_alarm_definitions.return_value = [{
            'alarm_definition_id': '123',
            'dimensions': 'flavor_id=777',
            'function': 'AVG',
            'id': '111',
            'metric_name': 'cpu.idle_perc',
            'operator': 'GT',
            'period': 60,
            'periods': 1,
            'is_deterministic': False,
            'threshold': 10.0}]
        self.alarm_def_repo_mock.return_value.get_alarm_metrics.return_value = [{
            'alarm_id': '1',
            'dimensions': 'flavor_id=777',
            'name': 'cpu.idle_perc'}]
        self.alarm_def_repo_mock.return_value.get_sub_alarms.return_value = [{
            'alarm_definition_id': '1',
            'alarm_id': '2',
            'expression': 'avg(cpu.idle_perc{flavor_id=777}) > 10',
            'sub_alarm_id': '43'}]
        # The repository reports the definition as found and deleted.
        self.alarm_def_repo_mock.return_value.delete_alarm_definition.return_value = True
        response = self.simulate_request(
            path='/v2.0/alarm-definitions/00000001-0001-0001-0001-000000000001',
            headers={
                'X-Roles': CONF.security.default_authorized_roles[0],
                'X-Tenant-Id': TENANT_ID},
            method='DELETE')
        self.assertEqual(response.status, falcon.HTTP_204)
def test_alarm_definition_delete_alarm_definition_not_exist(self):
self.alarm_def_repo_mock.return_value.get_get_sub_alarm_definitions.return_value = []
self.alarm_def_repo_mock.return_value.get_alarm_metrics.return_value = []
self.alarm_def_repo_mock.return_value.get_sub_alarms.return_value = []
self.alarm_def_repo_mock.return_value.delete_alarm_definition.return_value = False
response = self.simulate_request(
path='/v2.0/alarm-definitions/00000001-0001-0001-0001-000000000001',
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='DELETE')
self.assertEqual(response.status, falcon.HTTP_404)
def test_alarm_definition_delete_no_id(self):
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="DELETE")
self.assertEqual(response.status, falcon.HTTP_400)
def test_alarm_definition_patch(self):
    """PATCH with only a new name returns 200 and emits a complete
    alarm-definition-updated event.

    The repository mock returns the patched definition plus the
    old/changed/new/unchanged sub-alarm-definition breakdown; the test
    checks both the HTTP response body and the event payload captured
    from ``self._send_event``.
    """
    self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = []
    description = u'Non-ASCII character: \u2603'
    new_name = u'Test Alarm Updated'
    actions_enabled = True
    alarm_def_id = u'00000001-0001-0001-0001-000000000001'
    alarm_expression = u'max(test.metric{hostname=host}) gte 1'
    severity = u'LOW'
    match_by = u'hostname'
    # The repo returns a (definition, sub-alarm-defs diff) pair; the one
    # existing sub-expression is unchanged by the patch.
    self.alarm_def_repo_mock.return_value.update_or_patch_alarm_definition.return_value = (
        {u'alarm_actions': [],
         u'ok_actions': [],
         u'description': description,
         u'match_by': match_by,
         u'name': new_name,
         u'actions_enabled': actions_enabled,
         u'undetermined_actions': [],
         u'is_deterministic': False,
         u'expression': alarm_expression,
         u'id': alarm_def_id,
         u'severity': severity},
        {'old': {'11111': sub_alarm_definition.SubAlarmDefinition(
            row={'id': '11111',
                 'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
                 'function': 'max',
                 'metric_name': 'test.metric',
                 'dimensions': 'hostname=host',
                 'operator': 'gte',
                 'threshold': 1,
                 'period': 60,
                 'is_deterministic': False,
                 'periods': 1})},
         'changed': {},
         'new': {},
         'unchanged': {'11111': sub_alarm_definition.SubAlarmDefinition(
             row={'id': '11111',
                  'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
                  'function': 'max',
                  'metric_name': 'test.metric',
                  'dimensions': 'hostname=host',
                  'operator': 'gte',
                  'threshold': 1,
                  'period': 60,
                  'is_deterministic': False,
                  'periods': 1})}})
    expected_def = {
        u'id': alarm_def_id,
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': description,
        u'links': [{u'href': u'http://falconframework.org/v2.0/alarm-definitions/'
                             u'00000001-0001-0001-0001-000000000001',
                    u'rel': u'self'}],
        u'match_by': [match_by],
        u'name': new_name,
        u'actions_enabled': actions_enabled,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': alarm_expression,
        u'severity': severity,
    }
    # PATCH body carries only the field being changed.
    alarm_def = {
        u'name': u'Test Alarm Updated',
    }
    result = self.simulate_request(path="/v2.0/alarm-definitions/%s" % expected_def[u'id'],
                                   headers={'X-Roles':
                                            CONF.security.default_authorized_roles[0],
                                            'X-Tenant-Id': TENANT_ID},
                                   method="PATCH",
                                   body=json.dumps(alarm_def))
    self.assertEqual(result.status, falcon.HTTP_200)
    result_def = result.json
    self.assertEqual(result_def, expected_def)
    # If the alarm-definition-updated event does not have all of the
    # fields set, the Threshold Engine will get confused. For example,
    # if alarmActionsEnabled is none, thresh will read that as false
    # and pass that value onto the Notification Engine which will not
    # create a notification even actions_enabled is True in the
    # database. So, ensure all fields are set correctly
    ((_, event), _) = self._send_event.call_args
    expr = u'max(test.metric{hostname=host}, 60) gte 1 times 1'
    sub_expression = {'11111': {u'expression': expr,
                                u'function': 'max',
                                u'metricDefinition': {
                                    u'dimensions': {'hostname': 'host'},
                                    u'name': 'test.metric'},
                                u'operator': 'gte',
                                u'period': 60,
                                u'periods': 1,
                                u'threshold': 1}}
    fields = {u'alarmActionsEnabled': actions_enabled,
              u'alarmDefinitionId': alarm_def_id,
              u'alarmDescription': description,
              u'alarmExpression': alarm_expression,
              u'alarmName': new_name,
              u'changedSubExpressions': {},
              u'matchBy': [match_by],
              u'severity': severity,
              u'tenantId': u'fedcba9876543210fedcba9876543210',
              u'newAlarmSubExpressions': {},
              u'oldAlarmSubExpressions': sub_expression,
              u'unchangedSubExpressions': sub_expression}
    reference = {u'alarm-definition-updated': fields}
    self.assertEqual(reference, event)
def test_alarm_definition_update_missing_fields(self):
    """PUT succeeds with a complete body, then fails with 422 when any
    single required field is removed.

    The second half drops each key of the request body in turn,
    re-issues the PUT and expects 422 Unprocessable Entity.
    """
    self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = []
    self.alarm_def_repo_mock.return_value.update_or_patch_alarm_definition.return_value = (
        {u'alarm_actions': [],
         u'ok_actions': [],
         u'description': u'Non-ASCII character: \u2603',
         u'match_by': u'hostname',
         u'name': u'Test Alarm',
         u'actions_enabled': True,
         u'undetermined_actions': [],
         u'expression': u'max(test.metric{hostname=host}) gte 1',
         u'id': u'00000001-0001-0001-0001-000000000001',
         u'is_deterministic': False,
         u'severity': u'LOW'},
        {'old': {'11111': sub_alarm_definition.SubAlarmDefinition(
            row={'id': '11111',
                 'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
                 'function': 'max',
                 'metric_name': 'test.metric',
                 'dimensions': 'hostname=host',
                 'operator': 'gte',
                 'threshold': 1,
                 'period': 60,
                 'periods': 1,
                 'is_deterministic': False})},
         'changed': {},
         'new': {},
         'unchanged': {'11111': sub_alarm_definition.SubAlarmDefinition(
             row={'id': '11111',
                  'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
                  'function': 'max',
                  'metric_name': 'test.metric',
                  'dimensions': 'hostname=host',
                  'operator': 'gte',
                  'threshold': 1,
                  'period': 60,
                  'periods': 1,
                  'is_deterministic': False})}})
    expected_def = {
        u'id': u'00000001-0001-0001-0001-000000000001',
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': u'Non-ASCII character: \u2603',
        u'links': [{u'href': u'http://falconframework.org/v2.0/alarm-definitions/'
                             u'00000001-0001-0001-0001-000000000001',
                    u'rel': u'self'}],
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
        u'deterministic': False
    }
    alarm_def = {
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW'
    }
    result = self.simulate_request(path="/v2.0/alarm-definitions/%s" % expected_def[u'id'],
                                   headers={'X-Roles':
                                            CONF.security.default_authorized_roles[0],
                                            'X-Tenant-Id': TENANT_ID},
                                   method="PUT",
                                   body=json.dumps(alarm_def))
    self.assertEqual(result.status, falcon.HTTP_200)
    result_def = result.json
    self.assertEqual(result_def, expected_def)
    # Remove each field in turn; a full PUT must reject the body when
    # any of them is missing.
    for key, value in list(alarm_def.items()):
        del alarm_def[key]
        response = self.simulate_request(
            path="/v2.0/alarm-definitions/%s" % expected_def[u'id'],
            headers={'X-Roles':
                     CONF.security.default_authorized_roles[0],
                     'X-Tenant-Id': TENANT_ID},
            method="PUT",
            body=json.dumps(alarm_def))
        # Use the falcon constant (== "422 Unprocessable Entity") for
        # consistency with the other tests in this class.
        self.assertEqual(response.status, falcon.HTTP_422,
                         u"should have failed without key {}".format(key))
        alarm_def[key] = value
def test_alarm_definition_get_specific_alarm(self):
    """GET of one alarm definition normalizes the repository row.

    None action lists become [], the scalar match_by becomes a list and
    the integer actions_enabled flag becomes a boolean.
    """
    self.alarm_def_repo_mock.return_value.get_alarm_definition.return_value = {
        'alarm_actions': None,
        'ok_actions': None,
        # The description field was decoded to unicode when the
        # alarm_definition was created.
        'description': u'Non-ASCII character: \u2603',
        'match_by': u'hostname',
        'name': u'Test Alarm',
        'actions_enabled': 1,
        'undetermined_actions': None,
        'deterministic': False,
        'expression': u'max(test.metric{hostname=host}) gte 1',
        'id': u'00000001-0001-0001-0001-000000000001',
        'severity': u'LOW'
    }
    expected_data = {
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': u'Non-ASCII character: \u2603',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'id': u'00000001-0001-0001-0001-000000000001',
        u'severity': u'LOW',
    }
    response = self.simulate_request(
        path='/v2.0/alarm-definitions/%s' % (expected_data[u'id']),
        headers={
            'X-Roles': CONF.security.default_authorized_roles[0],
            'X-Tenant-Id': TENANT_ID,
        })
    self.assertEqual(response.status, falcon.HTTP_200)
    self.assertThat(response, RESTResponseEquals(expected_data))
def test_alarm_definition_get_specific_alarm_description_none(self):
    """GET preserves a None description (it is not coerced to '').

    The repository row also omits 'deterministic'; the response still
    reports deterministic=False.
    """
    self.alarm_def_repo_mock.return_value.get_alarm_definition.return_value = {
        'alarm_actions': None,
        'ok_actions': None,
        'description': None,
        'match_by': u'hostname',
        'name': u'Test Alarm',
        'actions_enabled': 1,
        'undetermined_actions': None,
        'expression': u'max(test.metric{hostname=host}) gte 1',
        'id': u'00000001-0001-0001-0001-000000000001',
        'severity': u'LOW'
    }
    expected_data = {
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': None,
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'id': u'00000001-0001-0001-0001-000000000001',
        u'severity': u'LOW',
    }
    response = self.simulate_request(
        path='/v2.0/alarm-definitions/%s' % (expected_data[u'id']),
        headers={
            'X-Roles': CONF.security.default_authorized_roles[0],
            'X-Tenant-Id': TENANT_ID,
        })
    self.assertEqual(response.status, falcon.HTTP_200)
    self.assertThat(response, RESTResponseEquals(expected_data))
def test_get_alarm_definitions_with_multibyte_character(self):
    """GET round-trips a definition name through the unicode path.

    NOTE(review): despite the test name, the fixture name here is plain
    ASCII ('alarm_definition'); presumably a multibyte name was
    simplified at some point — confirm against the project history.
    On Python 2 the name is decoded to a unicode object to exercise the
    text (non-bytes) code path.
    """
    def_name = 'alarm_definition'
    if six.PY2:
        def_name = def_name.decode('utf8')
    expected_data = {
        u'alarm_actions': [], u'ok_actions': [],
        u'description': None, u'match_by': [u'hostname'],
        u'actions_enabled': True, u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'id': u'00000001-0001-0001-0001-000000000001',
        u'severity': u'LOW', u'name': def_name
    }
    self.alarm_def_repo_mock.return_value.get_alarm_definition.return_value = {
        'alarm_actions': None,
        'ok_actions': None,
        'description': None,
        'match_by': u'hostname',
        'name': def_name,
        'actions_enabled': 1,
        'undetermined_actions': None,
        'expression': u'max(test.metric{hostname=host}) gte 1',
        'id': u'00000001-0001-0001-0001-000000000001',
        'severity': u'LOW'
    }
    response = self.simulate_request(
        path='/v2.0/alarm-definitions/%s' % (expected_data[u'id']),
        headers={
            'X-Roles': CONF.security.default_authorized_roles[0],
            'X-Tenant-Id': TENANT_ID,
        }
    )
    self.assertEqual(response.status, falcon.HTTP_200)
    self.assertThat(response, RESTResponseEquals(expected_data))
def test_alarm_definition_get_alarm_definition_list(self):
    """Listing alarm definitions returns the same normalized element
    for each supported query parameter.

    The identical expected payload is checked for the ``name``,
    ``sort_by``, ``severity`` and ``offset`` query strings.
    """
    self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = [{
        'alarm_actions': None,
        'ok_actions': None,
        'description': None,
        'match_by': u'hostname',
        'name': u'Test Alarm',
        'actions_enabled': 1,
        'undetermined_actions': None,
        'expression': u'max(test.metric{hostname=host}) gte 1',
        'id': u'00000001-0001-0001-0001-000000000001',
        'severity': u'LOW'
    }]
    link = 'http://falconframework.org/v2.0/alarm-definitions/' \
           '00000001-0001-0001-0001-000000000001'
    expected_data = {
        u'elements': [{
            u'alarm_actions': [],
            u'ok_actions': [],
            u'description': '',
            u'match_by': [u'hostname'],
            u'name': u'Test Alarm',
            u'actions_enabled': True,
            u'undetermined_actions': [],
            u'deterministic': False,
            u'expression': u'max(test.metric{hostname=host}) gte 1',
            u'id': u'00000001-0001-0001-0001-000000000001',
            'links': [{
                'href': link,
                'rel': 'self'}],
            u'severity': u'LOW'}]
    }
    # Filter by name.
    response = self.simulate_request(
        path='/v2.0/alarm-definitions',
        headers={
            'X-Roles': CONF.security.default_authorized_roles[0],
            'X-Tenant-Id': TENANT_ID
        },
        query_string='name=Test Alarm')
    self.assertEqual(response.status, falcon.HTTP_200)
    self.assertThat(response, RESTResponseEquals(expected_data))
    # Sort by name.
    response = self.simulate_request(
        path='/v2.0/alarm-definitions',
        headers={
            'X-Roles': CONF.security.default_authorized_roles[0],
            'X-Tenant-Id': TENANT_ID
        },
        query_string='sort_by=name')
    self.assertEqual(response.status, falcon.HTTP_200)
    self.assertThat(response, RESTResponseEquals(expected_data))
    # Filter by severity.
    response = self.simulate_request(
        path='/v2.0/alarm-definitions',
        headers={
            'X-Roles': CONF.security.default_authorized_roles[0],
            'X-Tenant-Id': TENANT_ID
        },
        query_string='severity=LOW')
    self.assertEqual(response.status, falcon.HTTP_200)
    self.assertThat(response, RESTResponseEquals(expected_data))
    # Paging offset.
    response = self.simulate_request(
        path='/v2.0/alarm-definitions',
        headers={
            'X-Roles': CONF.security.default_authorized_roles[0],
            'X-Tenant-Id': TENANT_ID
        },
        query_string='offset=1')
    self.assertEqual(response.status, falcon.HTTP_200)
    self.assertThat(response, RESTResponseEquals(expected_data))
def test_alarm_definition_get_alarm_definition_list_incorrect(self):
    """A non-integer ``offset`` query parameter is rejected with 422."""
    request_headers = {
        'X-Roles': CONF.security.default_authorized_roles[0],
        'X-Tenant-Id': TENANT_ID,
    }
    response = self.simulate_request(
        path='/v2.0/alarm-definitions',
        headers=request_headers,
        query_string='offset=definitelyNotINT')
    self.assertEqual(response.status, falcon.HTTP_422)
def test_alarm_definition_get_query_alarm_definition_name(self):
    """get_query_alarm_definition_name: present, missing, missing+return_none."""
    request_body = {
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
    }
    # Present: the stored name is returned verbatim.
    self.assertEqual(
        u'Test Alarm',
        alarm_definitions.get_query_alarm_definition_name(request_body))
    # Missing: raises by default ...
    del request_body['name']
    self.assertRaises(HTTPUnprocessableEntityError,
                      alarm_definitions.get_query_alarm_definition_name,
                      request_body)
    # ... but yields None when return_none is requested.
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_name(
            request_body, return_none=True))
def test_alarm_definition_get_query_alarm_definition_expression(self):
    """get_query_alarm_definition_expression: present, missing, return_none."""
    request_body = {
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
    }
    # Present: returned verbatim.
    self.assertEqual(
        u'max(test.metric{hostname=host}) gte 1',
        alarm_definitions.get_query_alarm_definition_expression(request_body))
    # Missing: raises by default ...
    del request_body['expression']
    self.assertRaises(HTTPUnprocessableEntityError,
                      alarm_definitions.get_query_alarm_definition_expression,
                      request_body)
    # ... but yields None with return_none.
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_expression(
            request_body, return_none=True))
def test_alarm_definition_get_query_alarm_definition_description(self):
    """get_query_alarm_definition_description: present, default '', return_none."""
    request_body = {
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': u'Short description',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
    }
    # Present: returned verbatim.
    self.assertEqual(
        u'Short description',
        alarm_definitions.get_query_alarm_definition_description(request_body))
    # Missing: defaults to the empty string ...
    del request_body['description']
    self.assertEqual(
        '',
        alarm_definitions.get_query_alarm_definition_description(request_body))
    # ... and to None when return_none is requested.
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_description(
            request_body, return_none=True))
def test_alarm_definition_get_query_alarm_definition_severity(self):
    """get_query_alarm_definition_severity: valid, invalid, default, return_none."""
    request_body = {
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'CRITICAL',
    }
    # A valid severity is returned verbatim.
    self.assertEqual(
        u'CRITICAL',
        alarm_definitions.get_query_alarm_definition_severity(request_body))
    # An unknown severity is rejected.
    request_body['severity'] = u'Why so serious'
    self.assertRaises(HTTPUnprocessableEntityError,
                      alarm_definitions.get_query_alarm_definition_severity,
                      request_body)
    # Missing: defaults to LOW ...
    del request_body['severity']
    self.assertEqual(
        'LOW',
        alarm_definitions.get_query_alarm_definition_severity(request_body))
    # ... and to None when return_none is requested.
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_severity(
            request_body, return_none=True))
def test_alarm_definition_get_query_alarm_definition_match_by(self):
    """get_query_alarm_definition_match_by: present, default [], return_none."""
    request_body = {
        u'alarm_actions': [],
        u'ok_actions': [],
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
    }
    # Present: returned verbatim.
    self.assertEqual(
        [u'hostname'],
        alarm_definitions.get_query_alarm_definition_match_by(request_body))
    # Missing: defaults to an empty list ...
    del request_body['match_by']
    self.assertEqual(
        [],
        alarm_definitions.get_query_alarm_definition_match_by(request_body))
    # ... and to None when return_none is requested.
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_match_by(
            request_body, return_none=True))
def test_alarm_definition_get_query_alarm_definition_alarm_actions(self):
    """get_query_alarm_definition_alarm_actions: present, default [], return_none."""
    action_id = 'c60ec47e-5038-4bf1-9f95-4046c6e9a759'
    request_body = {
        u'alarm_actions': action_id,
        u'ok_actions': [],
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
    }
    # Present: returned verbatim.
    self.assertEqual(
        action_id,
        alarm_definitions.get_query_alarm_definition_alarm_actions(request_body))
    # Missing: defaults to an empty list ...
    del request_body['alarm_actions']
    self.assertEqual(
        [],
        alarm_definitions.get_query_alarm_definition_alarm_actions(request_body))
    # ... and to None when return_none is requested.
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_alarm_actions(
            request_body, return_none=True))
def test_alarm_definition_get_query_alarm_definition_undetermined_actions(self):
    """get_query_alarm_definition_undetermined_actions: present, default [], return_none."""
    action_id = 'c60ec47e-5038-4bf1-9f95-4046c6e9a759'
    request_body = {
        u'alarm_actions': action_id,
        u'ok_actions': [],
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': action_id,
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
    }
    # Present: returned verbatim.
    self.assertEqual(
        action_id,
        alarm_definitions.get_query_alarm_definition_undetermined_actions(
            request_body))
    # Missing: defaults to an empty list ...
    del request_body['undetermined_actions']
    self.assertEqual(
        [],
        alarm_definitions.get_query_alarm_definition_undetermined_actions(
            request_body))
    # ... and to None when return_none is requested.
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_undetermined_actions(
            request_body, return_none=True))
def test_alarm_definition_get_query_alarm_definition_ok_actions(self):
    """get_query_ok_actions: present, default [], return_none."""
    action_id = 'c60ec47e-5038-4bf1-9f95-4046c6e9a759'
    request_body = {
        u'ok_actions': action_id,
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
    }
    # Present: returned verbatim.
    self.assertEqual(
        action_id,
        alarm_definitions.get_query_ok_actions(request_body))
    # Missing: defaults to an empty list ...
    del request_body['ok_actions']
    self.assertEqual(
        [],
        alarm_definitions.get_query_ok_actions(request_body))
    # ... and to None when return_none is requested.
    self.assertIsNone(
        alarm_definitions.get_query_ok_actions(request_body, return_none=True))
def test_alarm_definition_get_query_alarm_definition_actions_enabled(self):
    """get_query_alarm_definition_actions_enabled: present, defaults,
    and the required/return_none combinations."""
    request_body = {
        u'alarm_actions': 'c60ec47e-5038-4bf1-9f95-4046c6e9a759',
        u'ok_actions': [],
        u'description': u'',
        u'match_by': [u'hostname'],
        u'name': u'Test Alarm',
        u'actions_enabled': True,
        u'undetermined_actions': [],
        u'deterministic': False,
        u'expression': u'max(test.metric{hostname=host}) gte 1',
        u'severity': u'LOW',
    }
    # Present: returned verbatim.
    self.assertEqual(
        True,
        alarm_definitions.get_query_alarm_definition_actions_enabled(
            request_body))
    # Missing, not required: defaults to the empty string.
    del request_body['actions_enabled']
    self.assertEqual(
        '',
        alarm_definitions.get_query_alarm_definition_actions_enabled(
            request_body))
    # Missing with return_none: yields None whether required or not.
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_actions_enabled(
            request_body, return_none=True))
    self.assertIsNone(
        alarm_definitions.get_query_alarm_definition_actions_enabled(
            request_body, required=True, return_none=True))
    # Missing, required, return_none=False: raises.
    self.assertRaises(HTTPUnprocessableEntityError,
                      alarm_definitions.get_query_alarm_definition_actions_enabled,
                      request_body,
                      required=True,
                      return_none=False)
def test_alarm_definition_get_query_alarm_definition_is_definition_deterministic(self):
    """is_definition_deterministic reflects the 'deterministic' hint in
    the expression."""
    plain_expr = u'max(test.metric{hostname=host}) gte 1'
    self.assertEqual(
        False, alarm_definitions.is_definition_deterministic(plain_expr))
    hinted_expr = u'max(test.metric{hostname=host}, deterministic) gte 1'
    self.assertEqual(
        True, alarm_definitions.is_definition_deterministic(hinted_expr))
| apache-2.0 |
Cedev/a10-neutron-lbaas | a10_neutron_lbaas/db/migration/alembic_migrations/versions/2a280aba7701_create_orchestration.py | 4 | 2753 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""create orchestration
Revision ID: 2a280aba7701
Revises: 579f359e6e30
Create Date: 2016-04-12 18:25:17.910876
"""
# revision identifiers, used by Alembic.
revision = '2a280aba7701'
down_revision = '579f359e6e30'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the a10_device_instances and a10_slbs tables.

    a10_device_instances describes virtual A10 appliance instances;
    a10_slbs maps tenant load balancers/pools to a device name.
    """
    op.create_table(
        'a10_device_instances',
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('created_at', sa.DateTime, nullable=False),
        sa.Column('updated_at', sa.DateTime, nullable=False),
        sa.Column('tenant_id', sa.String(36), nullable=False),
        sa.Column('name', sa.String(1024), nullable=False),
        sa.Column('username', sa.String(255), nullable=False),
        sa.Column('password', sa.String(255), nullable=False),
        sa.Column('api_version', sa.String(255), nullable=False),
        sa.Column('protocol', sa.String(32), nullable=False),
        sa.Column('port', sa.Integer, nullable=False),
        sa.Column('autosnat', sa.Boolean(), nullable=False),
        sa.Column('v_method', sa.String(32), nullable=False),
        sa.Column('shared_partition', sa.String(1024), nullable=False),
        sa.Column('use_float', sa.Boolean(), nullable=False),
        sa.Column('default_virtual_server_vrid', sa.Integer, nullable=True),
        sa.Column('ipinip', sa.Boolean(), nullable=False),
        sa.Column('write_memory', sa.Boolean(), nullable=False),
        sa.Column('nova_instance_id', sa.String(36), nullable=False),
        sa.Column('host', sa.String(255), nullable=False)
    )
    op.create_table(
        'a10_slbs',
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('created_at', sa.DateTime, nullable=False),
        sa.Column('updated_at', sa.DateTime, nullable=False),
        sa.Column('tenant_id', sa.String(36), nullable=False),
        sa.Column('device_name', sa.String(1024), nullable=False),
        sa.Column('pool_id', sa.String(36)),
        sa.Column('loadbalancer_id', sa.String(36)),
    )
def downgrade():
    """Drop the tables created by upgrade(), in reverse creation order."""
    op.drop_table('a10_slbs')
    op.drop_table('a10_device_instances')
| apache-2.0 |
bealdav/OpenUpgrade | addons/project/res_config.py | 34 | 4484 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_configuration(osv.osv_memory):
    """Project application settings wizard.

    Transient model extending ``res.config.settings``: the ``module_*``
    booleans install the named modules, the ``group_*`` booleans toggle
    the implied security groups, and ``time_unit`` stores the working
    time unit of measure on the user's company.
    """
    _name = 'project.config.settings'
    _inherit = 'res.config.settings'

    _columns = {
        'module_project_mrp': fields.boolean('Generate tasks from sale orders',
            help='This feature automatically creates project tasks from service products in sale orders. '
                 'More precisely, tasks are created for procurement lines with product of type \'Service\', '
                 'procurement method \'Make to Order\', and supply method \'Manufacture\'.\n'
                 '-This installs the module project_mrp.'),
        'module_pad': fields.boolean("Use integrated collaborative note pads on task",
            help='Lets the company customize which Pad installation should be used to link to new pads '
                 '(for example: http://ietherpad.com/).\n'
                 '-This installs the module pad.'),
        'module_project_timesheet': fields.boolean("Record timesheet lines per tasks",
            help='This allows you to transfer the entries under tasks defined for Project Management to '
                 'the timesheet line entries for particular date and user, with the effect of creating, '
                 'editing and deleting either ways.\n'
                 '-This installs the module project_timesheet.'),
        'module_project_issue': fields.boolean("Track issues and bugs",
            help='Provides management of issues/bugs in projects.\n'
                 '-This installs the module project_issue.'),
        'time_unit': fields.many2one('product.uom', 'Working time unit', required=True,
            help="""This will set the unit of measure used in projects and tasks."""),
        'module_project_issue_sheet': fields.boolean("Invoice working time on issues",
            help='Provides timesheet support for the issues/bugs management in project.\n'
                 '-This installs the module project_issue_sheet.'),
        'group_tasks_work_on_tasks': fields.boolean("Log work activities on tasks",
            implied_group='project.group_tasks_work_on_tasks',
            help="Allows you to compute work on tasks."),
        'group_time_work_estimation_tasks': fields.boolean("Manage time estimation on tasks",
            implied_group='project.group_time_work_estimation_tasks',
            help="Allows you to compute Time Estimation on tasks."),
        'group_manage_delegation_task': fields.boolean("Allow task delegation",
            implied_group='project.group_delegate_task',
            help="Allows you to delegate tasks to other users."),
    }

    def get_default_time_unit(self, cr, uid, fields, context=None):
        # Default the wizard field from the current company's configured
        # project time unit.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return {'time_unit': user.company_id.project_time_mode_id.id}

    def set_time_unit(self, cr, uid, ids, context=None):
        # Persist the chosen time unit back onto the user's company.
        config = self.browse(cr, uid, ids[0], context)
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        user.company_id.write({'project_time_mode_id': config.time_unit.id})

    def onchange_time_estimation_project_timesheet(self, cr, uid, ids, group_time_work_estimation_tasks, module_project_timesheet):
        # Enabling time estimation or timesheets implies logging work on
        # tasks, so switch that option on as well.
        if group_time_work_estimation_tasks or module_project_timesheet:
            return {'value': {'group_tasks_work_on_tasks': True}}
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
apyrgio/ganeti | lib/rapi/client_utils.py | 3 | 3149 | #
#
# Copyright (C) 2010 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""RAPI client utilities.
"""
from ganeti import constants
from ganeti import cli
from ganeti.rapi import client
# Local constant to avoid importing ganeti.http
HTTP_NOT_FOUND = 404
class RapiJobPollCb(cli.JobPollCbBase):
    """Job-polling callback backed by a RAPI client.

    Adapts L{cli.JobPollCbBase} so that L{cli.GenericPollJob} can poll
    jobs over the remote API instead of the local Luxi interface.

    """
    def __init__(self, cl):
        """Initializes this class.

        @param cl: RAPI client instance

        """
        cli.JobPollCbBase.__init__(self)

        self.cl = cl

    def WaitForJobChangeOnce(self, job_id, fields,
                             prev_job_info, prev_log_serial):
        """Waits for changes on a job.

        @return: C{None} if the job disappeared (HTTP 404),
          L{constants.JOB_NOTCHANGED} on timeout, otherwise a
          C{(job_info, log_entries)} tuple

        """
        try:
            result = self.cl.WaitForJobChange(job_id, fields,
                                              prev_job_info, prev_log_serial)
        except client.GanetiApiError, err:
            # A 404 means the job no longer exists; report that to the
            # caller rather than raising.
            if err.code == HTTP_NOT_FOUND:
                return None

            raise

        if result is None:
            return constants.JOB_NOTCHANGED

        return (result["job_info"], result["log_entries"])

    def QueryJobs(self, job_ids, fields):
        """Returns the given fields for the selected job IDs.

        @type job_ids: list of numbers
        @param job_ids: Job IDs
        @type fields: list of strings
        @param fields: Fields
        @return: one-element list with the field values, or C{[None]} if
          the job was not found

        """
        # The RAPI GetJobStatus call handles a single job at a time.
        if len(job_ids) != 1:
            raise NotImplementedError("Only one job supported at this time")

        try:
            result = self.cl.GetJobStatus(job_ids[0])
        except client.GanetiApiError, err:
            if err.code == HTTP_NOT_FOUND:
                return [None]

            raise

        return [[result[name] for name in fields], ]
def PollJob(rapi_client, job_id, reporter):
    """Polls a job via the remote API until it finishes.

    @param rapi_client: RAPI client instance
    @type job_id: number
    @param job_id: Job ID
    @type reporter: L{cli.JobPollReportCbBase}
    @param reporter: PollJob reporter instance
    @return: the job result, as returned by L{cli.GenericPollJob}

    """
    poll_callback = RapiJobPollCb(rapi_client)
    return cli.GenericPollJob(job_id, poll_callback, reporter)
| bsd-2-clause |
eyalfa/spark | python/pyspark/java_gateway.py | 6 | 7460 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
import os
import sys
import select
import signal
import shlex
import shutil
import socket
import platform
import tempfile
import time
from subprocess import Popen, PIPE
if sys.version >= '3':
xrange = range
from py4j.java_gateway import java_import, JavaGateway, JavaObject, GatewayParameters
from pyspark.find_spark_home import _find_spark_home
from pyspark.serializers import read_int, write_with_length, UTF8Deserializer
def launch_gateway(conf=None):
    """Launch a Py4J JVM gateway and return a connected JavaGateway.

    If ``PYSPARK_GATEWAY_PORT`` is present in the environment an already
    running gateway is used; otherwise ``spark-submit`` is launched and its
    connection information (port + auth secret) is read back from a
    temporary file the JVM writes.

    :param conf: spark configuration passed to spark-submit
    :return: a connected :class:`py4j.java_gateway.JavaGateway`
    """
    if "PYSPARK_GATEWAY_PORT" in os.environ:
        gateway_port = int(os.environ["PYSPARK_GATEWAY_PORT"])
        gateway_secret = os.environ["PYSPARK_GATEWAY_SECRET"]
    else:
        SPARK_HOME = _find_spark_home()
        # Launch the Py4j gateway using Spark's run command so that we pick up the
        # proper classpath and settings from spark-env.sh
        on_windows = platform.system() == "Windows"
        script = "./bin/spark-submit.cmd" if on_windows else "./bin/spark-submit"
        command = [os.path.join(SPARK_HOME, script)]
        if conf:
            for k, v in conf.getAll():
                command += ['--conf', '%s=%s' % (k, v)]
        submit_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
        if os.environ.get("SPARK_TESTING"):
            submit_args = ' '.join([
                "--conf spark.ui.enabled=false",
                submit_args
            ])
        command = command + shlex.split(submit_args)

        # Create a temporary directory where the gateway server should write the
        # connection information.
        conn_info_dir = tempfile.mkdtemp()
        try:
            fd, conn_info_file = tempfile.mkstemp(dir=conn_info_dir)
            os.close(fd)
            os.unlink(conn_info_file)

            env = dict(os.environ)
            env["_PYSPARK_DRIVER_CONN_INFO_PATH"] = conn_info_file

            # Launch the Java gateway.
            # We open a pipe to stdin so that the Java gateway can die when the
            # pipe is broken.
            if not on_windows:
                # Don't send ctrl-c / SIGINT to the Java gateway:
                def preexec_func():
                    signal.signal(signal.SIGINT, signal.SIG_IGN)
                proc = Popen(command, stdin=PIPE, preexec_fn=preexec_func, env=env)
            else:
                # preexec_fn not supported on Windows
                proc = Popen(command, stdin=PIPE, env=env)

            # Wait for the file to appear, or for the process to exit, whichever
            # happens first. BUGFIX: poll() returns None while the child is
            # still running; the previous "not proc.poll()" test also treated a
            # clean exit (status 0) as "still running" and could spin forever.
            while proc.poll() is None and not os.path.isfile(conn_info_file):
                time.sleep(0.1)

            if not os.path.isfile(conn_info_file):
                raise Exception("Java gateway process exited before sending its port number")

            with open(conn_info_file, "rb") as info:
                gateway_port = read_int(info)
                gateway_secret = UTF8Deserializer().loads(info)
        finally:
            shutil.rmtree(conn_info_dir)

        # In Windows, ensure the Java child processes do not linger after Python has exited.
        # In UNIX-based systems, the child process can kill itself on broken pipe (i.e. when
        # the parent process' stdin sends an EOF). In Windows, however, this is not possible
        # because java.lang.Process reads directly from the parent process' stdin, contending
        # with any opportunity to read an EOF from the parent. Note that this is only best
        # effort and will not take effect if the python process is violently terminated.
        if on_windows:
            # In Windows, the child process here is "spark-submit.cmd", not the JVM itself
            # (because the UNIX "exec" command is not available). This means we cannot simply
            # call proc.kill(), which kills only the "spark-submit.cmd" process but not the
            # JVMs. Instead, we use "taskkill" with the tree-kill option "/t" to terminate all
            # child processes in the tree (http://technet.microsoft.com/en-us/library/bb491009.aspx)
            def killChild():
                Popen(["cmd", "/c", "taskkill", "/f", "/t", "/pid", str(proc.pid)])
            atexit.register(killChild)

    # Connect to the gateway using the port and secret obtained above.
    gateway = JavaGateway(
        gateway_parameters=GatewayParameters(port=gateway_port, auth_token=gateway_secret,
                                             auto_convert=True))

    # Import the classes used by PySpark
    java_import(gateway.jvm, "org.apache.spark.SparkConf")
    java_import(gateway.jvm, "org.apache.spark.api.java.*")
    java_import(gateway.jvm, "org.apache.spark.api.python.*")
    java_import(gateway.jvm, "org.apache.spark.ml.python.*")
    java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
    # TODO(davies): move into sql
    java_import(gateway.jvm, "org.apache.spark.sql.*")
    java_import(gateway.jvm, "org.apache.spark.sql.api.python.*")
    java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
    java_import(gateway.jvm, "scala.Tuple2")

    return gateway
def do_server_auth(conn, auth_secret):
    """Run the client side of the SocketAuthHelper handshake.

    Sends ``auth_secret`` over the file-like object ``conn`` and verifies
    that the peer answers "ok". Any other answer closes the connection and
    raises.
    """
    # Transmit the shared secret first.
    write_with_length(auth_secret.encode("utf-8"), conn)
    conn.flush()
    # A cooperative server acknowledges with the literal string "ok".
    server_reply = UTF8Deserializer().loads(conn)
    if server_reply == "ok":
        return
    conn.close()
    raise Exception("Unexpected reply from iterator server.")
def ensure_callback_server_started(gw):
    """
    Start callback server if not already started. The callback server is needed if the Java
    driver process needs to callback into the Python driver process to execute Python code.

    :param gw: a connected py4j ``JavaGateway`` whose ``_callback_server`` may
               not have been started yet.
    """

    # getattr will fallback to JVM, so we cannot test by hasattr()
    if "_callback_server" not in gw.__dict__ or gw._callback_server is None:
        gw.callback_server_parameters.eager_load = True
        gw.callback_server_parameters.daemonize = True
        gw.callback_server_parameters.daemonize_connections = True
        # Port 0 asks the OS for any free port; the real port is read back below.
        gw.callback_server_parameters.port = 0
        gw.start_callback_server(gw.callback_server_parameters)
        cbport = gw._callback_server.server_socket.getsockname()[1]
        gw._callback_server.port = cbport
        # gateway with real port
        gw._python_proxy_port = gw._callback_server.port
        # get the GatewayServer object in JVM by ID
        jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
        # update the port of CallbackClient with real port
        jgws.resetCallbackClient(jgws.getCallbackClient().getAddress(), gw._python_proxy_port)
| apache-2.0 |
chunyang-wen/orc | c++/libs/gmock-1.7.0/gtest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version information for use in the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line arguments: the input directory containing
# configure.ac and the output directory for Version.h.
if len(sys.argv) < 3:
    # Parenthesized print works under both Python 2 and Python 3.
    print("Usage: versiongenerate.py input_dir output_dir")
    sys.exit(1)
input_dir = sys.argv[1]
output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file; AC_INIT is
# assumed to appear within this prefix (see the module docstring).
buffer_size = 1024
config_file = open("%s/configure.ac" % input_dir, 'r')
try:
    opening_string = config_file.read(buffer_size)
finally:
    config_file.close()

# Extract the version string from the AC_INIT macro
# The following init_expression means:
#   Extract three integers separated by periods and surrounded by square
#   brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#   (*? is the non-greedy flag) since that would pull in everything between
#   the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
if version_values is None:
    # Fail with a clear message instead of an AttributeError traceback when
    # AC_INIT cannot be found in the scanned prefix.
    print("versiongenerate.py: no AC_INIT version found in configure.ac")
    sys.exit(1)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
try:
    version_file.write(file_data)
finally:
    version_file.close()
| apache-2.0 |
elijah513/ice | csharp/test/Ice/enums/run.py | 9 | 1064 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
# Locate the Ice test harness (scripts/TestUtil.py) relative to this script.
import os, sys
# Candidate relative paths from this script up to the repository top level.
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
    path = [os.path.join(head, p) for p in path]
# Keep only the candidates that actually contain scripts/TestUtil.py.
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
    raise RuntimeError("can't find toplevel directory!")
# sys.path must be extended before TestUtil can be imported.
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil

# Run the client/server test twice: once forcing the 1.0 wire encoding,
# once with the default (1.1) encoding.
print("Running test with 1.0 encoding.")
TestUtil.clientServerTest(additionalClientOptions="--Ice.Default.EncodingVersion=1.0",
                          additionalServerOptions="--Ice.Default.EncodingVersion=1.0")
print("Running test with 1.1 encoding.")
TestUtil.clientServerTest()
| gpl-2.0 |
atlashealth/ansible-modules-extras | cloud/lxc/lxc_container.py | 3 | 54951 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: lxc_container
short_description: Manage LXC Containers
version_added: 1.8.0
description:
- Management of LXC containers
author: '"Kevin Carter (@cloudnull)" <kevin.carter@rackspace.com>'
options:
name:
description:
- Name of a container.
required: true
backing_store:
choices:
- dir
- lvm
- loop
- btrfs
- overlayfs
description:
- Backend storage type for the container.
required: false
default: dir
template:
description:
- Name of the template to use within an LXC create.
required: false
default: ubuntu
template_options:
description:
- Template options when building the container.
required: false
config:
description:
- Path to the LXC configuration file.
required: false
default: /etc/lxc/default.conf
lv_name:
description:
- Name of the logical volume, defaults to the container name.
default: $CONTAINER_NAME
required: false
vg_name:
description:
- If Backend store is lvm, specify the name of the volume group.
default: lxc
required: false
thinpool:
description:
- Use LVM thin pool called TP.
required: false
fs_type:
description:
- Create fstype TYPE.
default: ext4
required: false
fs_size:
description:
- File system Size.
default: 5G
required: false
directory:
description:
- Place rootfs directory under DIR.
required: false
zfs_root:
description:
- Create zfs under given zfsroot.
required: false
container_command:
description:
- Run a command within a container.
required: false
lxc_path:
description:
- Place container under PATH
required: false
container_log:
choices:
- true
- false
description:
- Enable a container log for host actions to the container.
default: false
container_log_level:
choices:
- INFO
- ERROR
- DEBUG
description:
- Set the log level for a container where *container_log* was set.
required: false
default: INFO
clone_name:
version_added: "2.0"
description:
- Name of the new cloned server. This is only used when state is
clone.
required: false
default: false
clone_snapshot:
version_added: "2.0"
required: false
choices:
- true
- false
description:
- Create a snapshot a container when cloning. This is not supported
by all container storage backends. Enabling this may fail if the
backing store does not support snapshots.
default: false
archive:
choices:
- true
- false
description:
- Create an archive of a container. This will create a tarball of the
running container.
default: false
archive_path:
description:
- Path the save the archived container. If the path does not exist
the archive method will attempt to create it.
default: /tmp
archive_compression:
choices:
- gzip
- bzip2
- none
description:
- Type of compression to use when creating an archive of a running
container.
default: gzip
state:
choices:
- started
- stopped
- restarted
- absent
- frozen
description:
- Define the state of a container. If you clone a container using
`clone_name` the newly cloned container created in a stopped state.
The running container will be stopped while the clone operation is
happening and upon completion of the clone the original container
state will be restored.
required: false
default: started
container_config:
description:
- list of 'key=value' options to use when configuring a container.
required: false
requirements:
- 'lxc >= 1.0'
- 'python >= 2.6'
- 'python2-lxc >= 0.1'
notes:
- Containers must have a unique name. If you attempt to create a container
with a name that already exists in the users namespace the module will
simply return as "unchanged".
- The "container_command" can be used with any state except "absent". If
used with state "stopped" the container will be "started", the command
executed, and then the container "stopped" again. Likewise if the state
is "stopped" and the container does not exist it will be first created,
"started", the command executed, and then "stopped". If you use a "|"
in the variable you can use common script formatting within the variable
itself. The "container_command" option will always execute as BASH.
When using "container_command" a log file is created in the /tmp/ directory
which contains both stdout and stderr of any command executed.
- If "archive" is **true** the system will attempt to create a compressed
tarball of the running container. The "archive" option supports LVM backed
containers and will create a snapshot of the running container when
creating the archive.
- If your distro does not have a package for "python2-lxc", which is a
requirement for this module, it can be installed from source at
"https://github.com/lxc/python2-lxc"
"""
EXAMPLES = """
- name: Create a started container
lxc_container:
name: test-container-started
container_log: true
template: ubuntu
state: started
template_options: --release trusty
- name: Create a stopped container
lxc_container:
name: test-container-stopped
container_log: true
template: ubuntu
state: stopped
template_options: --release trusty
- name: Create a frozen container
lxc_container:
name: test-container-frozen
container_log: true
template: ubuntu
state: frozen
template_options: --release trusty
container_command: |
echo 'hello world.' | tee /opt/started-frozen
# Create filesystem container, configure it, and archive it, and start it.
- name: Create filesystem container
lxc_container:
name: test-container-config
backing_store: dir
container_log: true
template: ubuntu
state: started
archive: true
archive_compression: none
container_config:
- "lxc.aa_profile=unconfined"
- "lxc.cgroup.devices.allow=a *:* rmw"
template_options: --release trusty
# Create an lvm container, run a complex command in it, add additional
# configuration to it, create an archive of it, and finally leave the container
# in a frozen state. The container archive will be compressed using bzip2
- name: Create a frozen lvm container
lxc_container:
name: test-container-lvm
container_log: true
template: ubuntu
state: frozen
backing_store: lvm
template_options: --release trusty
container_command: |
apt-get update
apt-get install -y vim lxc-dev
echo 'hello world.' | tee /opt/started
if [[ -f "/opt/started" ]]; then
echo 'hello world.' | tee /opt/found-started
fi
container_config:
- "lxc.aa_profile=unconfined"
- "lxc.cgroup.devices.allow=a *:* rmw"
archive: true
archive_compression: bzip2
register: lvm_container_info
- name: Debug info on container "test-container-lvm"
debug: var=lvm_container_info
- name: Run a command in a container and ensure it is in a "stopped" state.
lxc_container:
name: test-container-started
state: stopped
container_command: |
echo 'hello world.' | tee /opt/stopped
- name: Run a command in a container and ensure it is in a "frozen" state.
lxc_container:
name: test-container-stopped
state: frozen
container_command: |
echo 'hello world.' | tee /opt/frozen
- name: Start a container
lxc_container:
name: test-container-stopped
state: started
- name: Run a command in a container and then restart it
lxc_container:
name: test-container-started
state: restarted
container_command: |
echo 'hello world.' | tee /opt/restarted
- name: Run a complex command within a "running" container
lxc_container:
name: test-container-started
container_command: |
apt-get update
apt-get install -y curl wget vim apache2
echo 'hello world.' | tee /opt/started
if [[ -f "/opt/started" ]]; then
echo 'hello world.' | tee /opt/found-started
fi
# Create an archive of an existing container, save the archive to a defined
# path and then destroy it.
- name: Archive container
lxc_container:
name: test-container-started
state: absent
archive: true
archive_path: /opt/archives
# Create a container using overlayfs, create an archive of it, create a
# snapshot clone of the container and and finally leave the container
# in a frozen state. The container archive will be compressed using gzip.
- name: Create an overlayfs container archive and clone it
lxc_container:
name: test-container-overlayfs
container_log: true
template: ubuntu
state: started
backing_store: overlayfs
template_options: --release trusty
clone_snapshot: true
clone_name: test-container-overlayfs-clone-snapshot
archive: true
archive_compression: gzip
register: clone_container_info
- name: debug info on container "test-container"
debug: var=clone_container_info
- name: Clone a container using snapshot
lxc_container:
name: test-container-overlayfs-clone-snapshot
backing_store: overlayfs
clone_name: test-container-overlayfs-clone-snapshot2
clone_snapshot: true
- name: Create a new container and clone it
lxc_container:
name: test-container-new-archive
backing_store: dir
clone_name: test-container-new-archive-clone
- name: Archive and clone a container then destroy it
lxc_container:
name: test-container-new-archive
state: absent
clone_name: test-container-new-archive-destroyed-clone
archive: true
archive_compression: gzip
- name: Start a cloned container.
lxc_container:
name: test-container-new-archive-destroyed-clone
state: started
- name: Destroy a container
lxc_container:
name: "{{ item }}"
state: absent
with_items:
- test-container-stopped
- test-container-started
- test-container-frozen
- test-container-lvm
- test-container-config
- test-container-overlayfs
- test-container-overlayfs-clone
- test-container-overlayfs-clone-snapshot
- test-container-overlayfs-clone-snapshot2
- test-container-new-archive
- test-container-new-archive-clone
- test-container-new-archive-destroyed-clone
"""
# Fail early, in a form Ansible can report, when the python2-lxc bindings
# are missing (see the module "requirements" documentation).
try:
    import lxc
except ImportError:
    msg = 'The lxc module is not importable. Check the requirements.'
    # Ansible modules communicate failures via key=value output on stdout.
    print("failed=True msg='%s'" % msg)
    raise SystemExit(msg)
# LXC_COMPRESSION_MAP is a map of available compression types when creating
# an archive of a container.
LXC_COMPRESSION_MAP = {
'gzip': {
'extension': 'tar.tgz',
'argument': '-czf'
},
'bzip2': {
'extension': 'tar.bz2',
'argument': '-cjf'
},
'none': {
'extension': 'tar',
'argument': '-cf'
}
}
# LXC_COMMAND_MAP is a map of variables that are available to a method based
# on the state the container is in.
LXC_COMMAND_MAP = {
'create': {
'variables': {
'config': '--config',
'template': '--template',
'backing_store': '--bdev',
'lxc_path': '--lxcpath',
'lv_name': '--lvname',
'vg_name': '--vgname',
'thinpool': '--thinpool',
'fs_type': '--fstype',
'fs_size': '--fssize',
'directory': '--dir',
'zfs_root': '--zfsroot'
}
},
'clone': {
'variables': {
'backing_store': '--backingstore',
'lxc_path': '--lxcpath',
'fs_size': '--fssize',
'name': '--orig',
'clone_name': '--new'
}
}
}
# LXC_BACKING_STORE is a map of available storage backends and options that
# are incompatible with the given storage backend.
LXC_BACKING_STORE = {
'dir': [
'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
],
'lvm': [
'zfs_root'
],
'btrfs': [
'lv_name', 'vg_name', 'thinpool', 'zfs_root'
],
'loop': [
'lv_name', 'vg_name', 'thinpool', 'zfs_root'
],
'overlayfs': [
'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
]
}
# LXC_LOGGING_LEVELS is a map of available log levels
LXC_LOGGING_LEVELS = {
'INFO': ['info', 'INFO', 'Info'],
'ERROR': ['error', 'ERROR', 'Error'],
'DEBUG': ['debug', 'DEBUG', 'Debug']
}
# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
# when a particular state is evoked.
LXC_ANSIBLE_STATES = {
'started': '_started',
'stopped': '_stopped',
'restarted': '_restarted',
'absent': '_destroyed',
'frozen': '_frozen',
'clone': '_clone'
}
# This is used to attach to a running container and execute commands from
# within the container on the host. This will provide local access to a
# container without using SSH. The template will attempt to work within the
# home directory of the user that was attached to the container and source
# that users environment variables by default.
ATTACH_TEMPLATE = """#!/usr/bin/env bash
pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
if [[ -f ".bashrc" ]];then
source .bashrc
fi
popd
# User defined command
%(container_command)s
"""
def create_script(command):
    """Write out a script onto a target and execute it.

    This method should be backward compatible with Python 2.6+ when
    executing from within the container.

    :param command: command to run, this can be a script and can use spacing
                    with newlines as separation.
    :type command: ``str``
    """
    # Imports are kept local on purpose: this function is handed to
    # ``Container.attach_wait`` and runs inside the container's namespace,
    # so it must resolve its own modules there.
    import os
    import os.path as path
    import subprocess
    import tempfile

    # Ensure that the directory /opt exists.
    if not path.isdir('/opt'):
        os.mkdir('/opt')

    # Create the script.
    script_file = path.join('/opt', '.lxc-attach-script')
    f = open(script_file, 'wb')
    try:
        f.write(ATTACH_TEMPLATE % {'container_command': command})
        f.flush()
    finally:
        f.close()

    # Ensure the script is executable. BUGFIX: the previous mode ``1755``
    # was a decimal literal (equal to octal 3333); 0o755 is the intended
    # rwxr-xr-x permission set.
    os.chmod(script_file, 0o755)

    # Get temporary directory.
    tempdir = tempfile.gettempdir()

    # Output log file.
    stdout_file = open(path.join(tempdir, 'lxc-attach-script.log'), 'ab')

    # Error log file.
    stderr_file = open(path.join(tempdir, 'lxc-attach-script.err'), 'ab')

    # Execute the script command.
    try:
        subprocess.Popen(
            [script_file],
            stdout=stdout_file,
            stderr=stderr_file
        ).communicate()
    finally:
        # Close the log files.
        stderr_file.close()
        stdout_file.close()

        # Remove the script file upon completion of execution.
        os.remove(script_file)
class LxcContainerManagement(object):
def __init__(self, module):
"""Management of LXC containers via Ansible.
:param module: Processed Ansible Module.
:type module: ``object``
"""
self.module = module
self.state = self.module.params.get('state', None)
self.state_change = False
self.lxc_vg = None
self.container_name = self.module.params['name']
self.container = self.get_container_bind()
self.archive_info = None
self.clone_info = None
    def get_container_bind(self):
        """Return an ``lxc.Container`` object bound to ``container_name``."""
        return lxc.Container(name=self.container_name)
@staticmethod
def _roundup(num):
"""Return a rounded floating point number.
:param num: Number to round up.
:type: ``float``
:returns: Rounded up number.
:rtype: ``int``
"""
num, part = str(num).split('.')
num = int(num)
if int(part) != 0:
num += 1
return num
@staticmethod
def _container_exists(container_name):
"""Check if a container exists.
:param container_name: Name of the container.
:type: ``str``
:returns: True or False if the container is found.
:rtype: ``bol``
"""
if [i for i in lxc.list_containers() if i == container_name]:
return True
else:
return False
@staticmethod
def _add_variables(variables_dict, build_command):
"""Return a command list with all found options.
:param variables_dict: Pre-parsed optional variables used from a
seed command.
:type variables_dict: ``dict``
:param build_command: Command to run.
:type build_command: ``list``
:returns: list of command options.
:rtype: ``list``
"""
for key, value in variables_dict.items():
build_command.append(
'%s %s' % (key, value)
)
else:
return build_command
def _get_vars(self, variables):
"""Return a dict of all variables as found within the module.
:param variables: Hash of all variables to find.
:type variables: ``dict``
"""
# Remove incompatible storage backend options.
variables = variables.copy()
for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
variables.pop(v, None)
return_dict = dict()
for k, v in variables.items():
_var = self.module.params.get(k)
if not [i for i in [None, ''] + BOOLEANS_FALSE if i == _var]:
return_dict[v] = _var
else:
return return_dict
    def _run_command(self, build_command, unsafe_shell=False, timeout=600):
        """Return information from running an Ansible Command.

        This will squash the build command list into a string and then
        execute the command via Ansible. The output is returned to the method.
        This output is returned as `return_code`, `stdout`, `stderr`.

        Prior to running the command the method will look to see if the LXC
        lockfile ("/var/lock/subsys/lxc") is present. While it exists the
        method sleeps one second per attempt, up to ``timeout`` attempts,
        before giving up.

        :param build_command: Used for the command and all options.
        :type build_command: ``list``
        :param unsafe_shell: Enable or Disable unsafe shell commands.
        :type unsafe_shell: ``bool``
        :param timeout: Number of one-second polls to wait for the lock
                        before failing.
        :type timeout: ``int``
        """
        lockfile = '/var/lock/subsys/lxc'

        for _ in xrange(timeout):
            if os.path.exists(lockfile):
                time.sleep(1)
            else:
                # Lock released: hand the squashed command to Ansible.
                return self.module.run_command(
                    ' '.join(build_command),
                    use_unsafe_shell=unsafe_shell
                )
        else:
            # NOTE(review): with the default timeout of 600 this waits up to
            # ten minutes, not the five the message claims — confirm the
            # intended duration before changing either.
            message = (
                'The LXC subsystem is locked and after 5 minutes it never'
                ' became unlocked. Lockfile [ %s ]' % lockfile
            )
            self.failure(
                error='LXC subsystem locked',
                rc=0,
                msg=message
            )
    def _config(self):
        """Configure an LXC container.

        Write new configuration values to the lxc config file. This will
        stop the container if it's running, write the new options, and then
        restart the container upon completion (restoring a frozen state).

        :returns: False when no "container_config" param was supplied,
                  otherwise None.
        """
        _container_config = self.module.params.get('container_config')
        if not _container_config:
            return False

        container_config_file = self.container.config_file_name
        # NOTE(review): opened in binary mode yet compared against str
        # entries below — presumably fine under the Python 2 this module
        # targets; would need attention on Python 3.
        with open(container_config_file, 'rb') as f:
            container_config = f.readlines()

        # Note used ast literal_eval because AnsibleModule does not provide for
        # adequate dictionary parsing.
        # Issue: https://github.com/ansible/ansible/issues/7679
        # TODO(cloudnull) adjust import when issue has been resolved.
        import ast
        options_dict = ast.literal_eval(_container_config)
        # Each option arrives as a "key=value" string; split on the first '='.
        parsed_options = [i.split('=', 1) for i in options_dict]

        config_change = False
        for key, value in parsed_options:
            new_entry = '%s = %s\n' % (key, value)
            for option_line in container_config:
                # Look for key in config
                if option_line.startswith(key):
                    _, _value = option_line.split('=')
                    # Normalise whitespace before comparing values.
                    config_value = ' '.join(_value.split())
                    line_index = container_config.index(option_line)
                    # If the sanitized values don't match replace them
                    if value != config_value:
                        line_index += 1
                        if new_entry not in container_config:
                            config_change = True
                            container_config.insert(line_index, new_entry)
                    # Break the flow as values are written or not at this point
                    break
            else:
                # Key not present at all: append it (for-else runs when the
                # inner loop completed without break).
                config_change = True
                container_config.append(new_entry)

        # If the config changed restart the container.
        if config_change:
            container_state = self._get_state()
            if container_state != 'stopped':
                self.container.stop()

            with open(container_config_file, 'wb') as f:
                f.writelines(container_config)

            self.state_change = True
            if container_state == 'running':
                self._container_startup()
            elif container_state == 'frozen':
                # Re-freeze after the restart to restore the prior state.
                self._container_startup()
                self.container.freeze()
    def _container_create_clone(self):
        """Clone a new LXC container from an existing container.

        This method will clone an existing container to a new container using
        the `clone_name` variable as the new container name. The method will
        create a container if the container `name` does not exist.

        Note that cloning a container will ensure that the original container
        is "stopped" before the clone can be done. Because this operation can
        require a state change the method will return the original container
        to its prior state upon completion of the clone.

        Once the clone is complete the new container will be left in a stopped
        state.
        """
        # Ensure that the state of the original container is stopped
        container_state = self._get_state()
        if container_state != 'stopped':
            self.state_change = True
            self.container.stop()

        build_command = [
            self.module.get_bin_path('lxc-clone', True),
        ]

        build_command = self._add_variables(
            variables_dict=self._get_vars(
                variables=LXC_COMMAND_MAP['clone']['variables']
            ),
            build_command=build_command
        )

        # Use a snapshot clone when explicitly requested.
        if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
            build_command.append('--snapshot')
        # Check for backing_store == overlayfs if so force the use of snapshot
        # If overlay fs is used and snapshot is unset the clone command will
        # fail with an unsupported type.
        elif self.module.params.get('backing_store') == 'overlayfs':
            build_command.append('--snapshot')

        rc, return_data, err = self._run_command(build_command)
        if rc != 0:
            message = "Failed executing lxc-clone."
            self.failure(
                err=err, rc=rc, msg=message, command=' '.join(
                    build_command
                )
            )
        else:
            self.state_change = True
            # Restore the original state of the origin container if it was
            # not in a stopped state.
            if container_state == 'running':
                self.container.start()
            elif container_state == 'frozen':
                self.container.start()
                self.container.freeze()

        return True
    def _create(self):
        """Create a new LXC container.

        This method will build and execute a shell command to build the
        container. It would have been nice to simply use the lxc python library
        however at the time this was written the python library, in both py2
        and py3 didn't support some of the more advanced container create
        processes. These missing processes mainly revolve around backing
        LXC containers with block devices.
        """
        build_command = [
            self.module.get_bin_path('lxc-create', True),
            '--name %s' % self.container_name,
            '--quiet'
        ]

        # Append every applicable "--flag value" pair from the module params.
        build_command = self._add_variables(
            variables_dict=self._get_vars(
                variables=LXC_COMMAND_MAP['create']['variables']
            ),
            build_command=build_command
        )

        # Load logging for the instance when creating it.
        # NOTE(review): BOOLEANS_TRUE presumably comes from Ansible's
        # module_utils boilerplate appended outside this chunk — confirm.
        if self.module.params.get('container_log') in BOOLEANS_TRUE:
            # Set the logging path to the /var/log/lxc if uid is root. else
            # set it to the home folder of the user executing.
            try:
                if os.getuid() != 0:
                    log_path = os.getenv('HOME')
                else:
                    if not os.path.isdir('/var/log/lxc/'):
                        os.makedirs('/var/log/lxc/')
                    log_path = '/var/log/lxc/'
            except OSError:
                log_path = os.getenv('HOME')

            build_command.extend([
                '--logfile %s' % os.path.join(
                    log_path, 'lxc-%s.log' % self.container_name
                ),
                '--logpriority %s' % self.module.params.get(
                    'container_log_level'
                ).upper()
            ])

        # Add the template commands to the end of the command if there are any
        template_options = self.module.params.get('template_options', None)
        if template_options:
            build_command.append('-- %s' % template_options)

        rc, return_data, err = self._run_command(build_command)
        if rc != 0:
            message = "Failed executing lxc-create."
            self.failure(
                err=err, rc=rc, msg=message, command=' '.join(build_command)
            )
        else:
            self.state_change = True
def _container_data(self):
    """Return a dict describing the container's runtime information.

    :returns: container data
    :rtype: ``dict``
    """
    container = self.container
    info = {
        'interfaces': container.get_interfaces(),
        'ips': container.get_ips(),
        'state': self._get_state(),
        'init_pid': int(container.init_pid),
    }
    return info
def _unfreeze(self):
    """Unfreeze the container.

    :returns: True if the container was unfrozen, otherwise False.
    :rtype: ``bool``
    """
    result = self.container.unfreeze()
    if result:
        # A successful unfreeze is a state change the module must report.
        self.state_change = True
    return result
def _get_state(self):
    """Return the container state as a lower-case string.

    "absent" is returned when the container does not exist.

    :rtype: ``str``
    """
    if not self._container_exists(container_name=self.container_name):
        return str('absent')
    return str(self.container.state).lower()
def _execute_command(self):
    """Execute a shell command.

    Runs ``container_command`` inside the container, unfreezing or
    starting it first when necessary.
    """
    container_command = self.module.params.get('container_command')
    if container_command:
        container_state = self._get_state()
        # The container must be running to attach; bring it up as needed.
        if container_state == 'frozen':
            self._unfreeze()
        elif container_state == 'stopped':
            self._container_startup()
        # NOTE(review): ``create_script`` is defined elsewhere in this
        # module (outside this chunk); presumably it writes/executes the
        # command inside the container — confirm against the full file.
        self.container.attach_wait(create_script, container_command)
        self.state_change = True
def _container_startup(self, timeout=60):
    """Ensure a container is started.

    :param timeout: Time before the start operation is abandoned.
    :type timeout: ``int``
    """
    self.container = self.get_container_bind()
    # Retry for up to ``timeout`` seconds. The for/else construct runs
    # the failure branch only when the loop exhausts without returning.
    for _ in xrange(timeout):
        if self._get_state() != 'running':
            self.container.start()
            self.state_change = True
            # post startup sleep for 1 second.
            time.sleep(1)
        else:
            return True
    else:
        self.failure(
            lxc_container=self._container_data(),
            error='Failed to start container'
            ' [ %s ]' % self.container_name,
            rc=1,
            msg='The container [ %s ] failed to start. Check to lxc is'
            ' available and that the container is in a functional'
            ' state.' % self.container_name
        )
def _check_archive(self):
    """Create a compressed archive of the container when requested.

    Stores the archive path in ``self.archive_info``.
    """
    if self.module.params.get('archive') in BOOLEANS_TRUE:
        tarball = self._container_create_tar()
        self.archive_info = {'archive': tarball}
def _check_clone(self):
    """Clone the container when ``clone_name`` is set.

    Stores the outcome in ``self.clone_info``; a pre-existing clone of
    the same name is reported as not cloned.
    """
    clone_name = self.module.params.get('clone_name')
    if not clone_name:
        return
    if self._container_exists(container_name=clone_name):
        cloned = False
    else:
        cloned = self._container_create_clone()
    self.clone_info = {'cloned': cloned}
def _destroyed(self, timeout=60):
    """Ensure a container is destroyed.

    :param timeout: Time before the destroy operation is abandoned.
    :type timeout: ``int``
    """
    # Retry destroy for up to ``timeout`` seconds; the for/else failure
    # branch runs only if the container still exists after all attempts.
    for _ in xrange(timeout):
        if not self._container_exists(container_name=self.container_name):
            break
        # Check if the container needs to have an archive created.
        self._check_archive()
        # Check if the container is to be cloned
        self._check_clone()
        # A container must be stopped before it can be destroyed.
        if self._get_state() != 'stopped':
            self.state_change = True
            self.container.stop()
        if self.container.destroy():
            self.state_change = True
        # post destroy attempt sleep for 1 second.
        time.sleep(1)
    else:
        self.failure(
            lxc_container=self._container_data(),
            error='Failed to destroy container'
            ' [ %s ]' % self.container_name,
            rc=1,
            msg='The container [ %s ] failed to be destroyed. Check'
            ' that lxc is available and that the container is in a'
            ' functional state.' % self.container_name
        )
def _frozen(self, count=0):
    """Ensure a container is frozen.

    If the container does not exist the container will be created, then
    this method recurses once to freeze it (``check_count`` guards
    against infinite recursion).

    :param count: number of times this command has been called by itself.
    :type count: ``int``
    """
    self.check_count(count=count, method='frozen')
    if self._container_exists(container_name=self.container_name):
        self._execute_command()
        # Perform any configuration updates
        self._config()
        container_state = self._get_state()
        if container_state == 'frozen':
            # Already frozen; nothing to do.
            pass
        elif container_state == 'running':
            self.container.freeze()
            self.state_change = True
        else:
            self._container_startup()
            self.container.freeze()
            self.state_change = True
        # Check if the container needs to have an archive created.
        self._check_archive()
        # Check if the container is to be cloned
        self._check_clone()
    else:
        self._create()
        count += 1
        self._frozen(count)
def _restarted(self, count=0):
    """Ensure a container is restarted.

    If the container does not exist the container will be created.

    :param count: number of times this command has been called by itself.
    :type count: ``int``
    """
    self.check_count(count=count, method='restart')
    if self._container_exists(container_name=self.container_name):
        self._execute_command()
        # Perform any configuration updates
        self._config()
        # NOTE(review): this handler only stops a running container and
        # never starts it again — verify that the "restarted" state is
        # intended to leave the container stopped.
        if self._get_state() != 'stopped':
            self.container.stop()
            self.state_change = True
        # Check if the container needs to have an archive created.
        self._check_archive()
        # Check if the container is to be cloned
        self._check_clone()
    else:
        self._create()
        count += 1
        self._restarted(count)
def _stopped(self, count=0):
    """Ensure a container is stopped.

    If the container does not exist the container will be created, then
    this method recurses once (guarded by ``check_count``).

    :param count: number of times this command has been called by itself.
    :type count: ``int``
    """
    self.check_count(count=count, method='stop')
    if self._container_exists(container_name=self.container_name):
        self._execute_command()
        # Perform any configuration updates
        self._config()
        if self._get_state() != 'stopped':
            self.container.stop()
            self.state_change = True
        # Check if the container needs to have an archive created.
        self._check_archive()
        # Check if the container is to be cloned
        self._check_clone()
    else:
        self._create()
        count += 1
        self._stopped(count)
def _started(self, count=0):
    """Ensure a container is started.

    If the container does not exist the container will be created, then
    this method recurses once (guarded by ``check_count``).

    :param count: number of times this command has been called by itself.
    :type count: ``int``
    """
    self.check_count(count=count, method='start')
    if self._container_exists(container_name=self.container_name):
        container_state = self._get_state()
        if container_state == 'running':
            # Already running; nothing to do.
            pass
        elif container_state == 'frozen':
            self._unfreeze()
        elif not self._container_startup():
            self.failure(
                lxc_container=self._container_data(),
                error='Failed to start container'
                ' [ %s ]' % self.container_name,
                rc=1,
                msg='The container [ %s ] failed to start. Check to lxc is'
                ' available and that the container is in a functional'
                ' state.' % self.container_name
            )
        # Return data
        self._execute_command()
        # Perform any configuration updates
        self._config()
        # Check if the container needs to have an archive created.
        self._check_archive()
        # Check if the container is to be cloned
        self._check_clone()
    else:
        self._create()
        count += 1
        self._started(count)
def _get_lxc_vg(self):
    """Return the name of the Volume Group used in LXC."""
    config_command = [
        self.module.get_bin_path('lxc-config', True),
        "lxc.bdev.lvm.vg"
    ]
    rc, vg, err = self._run_command(config_command)
    if rc == 0:
        return str(vg.strip())
    self.failure(
        err=err,
        rc=rc,
        msg='Failed to read LVM VG from LXC config',
        command=' '.join(config_command)
    )
def _lvm_lv_list(self):
    """Return the names of all logical volumes in the LXC volume group."""
    vg = self._get_lxc_vg()
    lvs_command = [
        self.module.get_bin_path('lvs', True)
    ]
    rc, stdout, err = self._run_command(lvs_command)
    if rc != 0:
        self.failure(
            err=err,
            rc=rc,
            msg='Failed to get list of LVs',
            command=' '.join(lvs_command)
        )
    lv_names = []
    # Skip the header row emitted by ``lvs``; column 0 is the LV name
    # and column 1 is its volume group.
    for line in stdout.splitlines()[1:]:
        fields = line.split()
        if fields[1] == vg:
            lv_names.append(fields[0])
    return lv_names
def _get_vg_free_pe(self, vg_name):
    """Return the free space of a given VG as a (size, unit) tuple.

    :param vg_name: Name of volume.
    :type vg_name: ``str``
    :returns: size and measurement of the VG's free space
    :rtype: ``tuple``
    """
    vg_command = [
        'vgdisplay',
        vg_name,
        '--units',
        'g'
    ]
    rc, stdout, err = self._run_command(vg_command)
    if rc != 0:
        self.failure(
            err=err,
            rc=rc,
            msg='failed to read vg %s' % vg_name,
            command=' '.join(vg_command)
        )
    # Drop the banner line, then take the first row starting with 'Free'.
    detail_lines = [line.strip() for line in stdout.splitlines()][1:]
    free_rows = [line for line in detail_lines if line.startswith('Free')]
    fields = free_rows[0].split()
    return float(fields[-2]), fields[-1]
def _get_lv_size(self, lv_name):
    """Return the size of a given LV as a (size, unit) tuple.

    :param lv_name: Name of volume.
    :type lv_name: ``str``
    :returns: size and measurement of the LV
    :rtype: ``tuple``
    """
    vg = self._get_lxc_vg()
    lv = os.path.join(vg, lv_name)
    lv_command = [
        'lvdisplay',
        lv,
        '--units',
        'g'
    ]
    rc, stdout, err = self._run_command(lv_command)
    if rc != 0:
        self.failure(
            err=err,
            rc=rc,
            msg='failed to read lv %s' % lv,
            command=' '.join(lv_command)
        )
    # Drop the banner line, then take the first 'LV Size' row.
    detail_lines = [line.strip() for line in stdout.splitlines()][1:]
    size_rows = [line for line in detail_lines if line.startswith('LV Size')]
    fields = size_rows[0].split()
    return self._roundup(float(fields[-2])), fields[-1]
def _lvm_snapshot_create(self, source_lv, snapshot_name,
                         snapshot_size_gb=5):
    """Create an LVM snapshot.

    Fails the module when the volume group does not have enough free
    space to hold the snapshot.

    :param source_lv: Name of lv to snapshot
    :type source_lv: ``str``
    :param snapshot_name: Name of lv snapshot
    :type snapshot_name: ``str``
    :param snapshot_size_gb: Size of snapshot to create
    :type snapshot_size_gb: ``int``
    """
    vg = self._get_lxc_vg()
    # The unit is always gigabytes because _get_vg_free_pe passes
    # '--units g' to vgdisplay; only the size is needed here.
    free_space, _unit = self._get_vg_free_pe(vg_name=vg)
    if free_space < float(snapshot_size_gb):
        # Fixed: the original message read "is > greater than" and
        # described the comparison backwards.
        message = (
            'Snapshot size [ %s ] is greater than the free space [ %s ]'
            ' available on volume group [ %s ]'
            % (snapshot_size_gb, free_space, vg)
        )
        self.failure(
            error='Not enough space to create snapshot',
            rc=2,
            msg=message
        )
    # Create LVM Snapshot
    build_command = [
        self.module.get_bin_path('lvcreate', True),
        "-n",
        snapshot_name,
        "-s",
        os.path.join(vg, source_lv),
        "-L%sg" % snapshot_size_gb
    ]
    rc, stdout, err = self._run_command(build_command)
    if rc != 0:
        self.failure(
            err=err,
            rc=rc,
            msg='Failed to Create LVM snapshot %s/%s --> %s'
            % (vg, source_lv, snapshot_name)
        )
def _lvm_lv_mount(self, lv_name, mount_point):
    """Mount an LVM logical volume.

    :param lv_name: name of the logical volume to mount
    :type lv_name: ``str``
    :param mount_point: path on the file system that is mounted.
    :type mount_point: ``str``
    """
    vg = self._get_lxc_vg()
    build_command = [
        self.module.get_bin_path('mount', True),
        "/dev/%s/%s" % (vg, lv_name),
        mount_point,
    ]
    rc, stdout, err = self._run_command(build_command)
    if rc != 0:
        # Fixed garbled user-facing message: was 'failed to mountlvm lv'.
        self.failure(
            err=err,
            rc=rc,
            msg='failed to mount LVM lv %s/%s to %s'
            % (vg, lv_name, mount_point)
        )
def _create_tar(self, source_dir):
    """Create an archive of a given ``source_dir`` to ``archive_path``.

    :param source_dir: Path to the directory to be archived.
    :type source_dir: ``str``
    :returns: path of the archive that was written
    :rtype: ``str``
    """
    archive_path = self.module.params.get('archive_path')
    if not os.path.isdir(archive_path):
        os.makedirs(archive_path)
    archive_compression = self.module.params.get('archive_compression')
    compression_type = LXC_COMPRESSION_MAP[archive_compression]
    # Archive file name: <archive_path>/<container_name>.<extension>.
    # NOTE(review): the original comment here claimed a trailing '/' is
    # removed, but nothing in this method does that.
    archive_name = '%s.%s' % (
        os.path.join(
            archive_path,
            self.container_name
        ),
        compression_type['extension']
    )
    build_command = [
        self.module.get_bin_path('tar', True),
        '--directory=%s' % os.path.realpath(
            os.path.expanduser(source_dir)
        ),
        compression_type['argument'],
        archive_name,
        '.'
    ]
    rc, stdout, err = self._run_command(
        build_command=build_command,
        unsafe_shell=True
    )
    if rc != 0:
        self.failure(
            err=err,
            rc=rc,
            msg='failed to create tar archive',
            command=' '.join(build_command)
        )
    return archive_name
def _lvm_lv_remove(self, lv_name):
    """Forcefully remove a logical volume.

    :param lv_name: The name of the logical volume
    :type lv_name: ``str``
    """
    vg = self._get_lxc_vg()
    remove_command = [
        self.module.get_bin_path('lvremove', True),
        "-f",
        "%s/%s" % (vg, lv_name),
    ]
    rc, stdout, err = self._run_command(remove_command)
    if rc == 0:
        return
    self.failure(
        err=err,
        rc=rc,
        msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
        command=' '.join(remove_command)
    )
def _rsync_data(self, container_path, temp_dir):
    """Sync the container directory to the temp directory.

    :param container_path: path to the container container
    :type container_path: ``str``
    :param temp_dir: path to the temporary local working directory
    :type temp_dir: ``str``
    """
    # This loop is created to support overlayfs archives. This should
    # squash all of the layers into a single archive.
    # An overlayfs rootfs looks like "overlayfs:<lower>:<upper>"; the
    # 'overlayfs' marker itself is dropped before syncing each layer.
    fs_paths = container_path.split(':')
    if 'overlayfs' in fs_paths:
        fs_paths.pop(fs_paths.index('overlayfs'))
    for fs_path in fs_paths:
        # Set the path to the container data.
        # NOTE(review): dirname() syncs the parent of each rootfs path so
        # the top-level directory itself is copied — confirm intended.
        fs_path = os.path.dirname(fs_path)
        # Run the sync command
        build_command = [
            self.module.get_bin_path('rsync', True),
            '-aHAX',
            fs_path,
            temp_dir
        ]
        rc, stdout, err = self._run_command(
            build_command,
            unsafe_shell=True
        )
        if rc != 0:
            self.failure(
                err=err,
                rc=rc,
                msg='failed to perform archive',
                command=' '.join(build_command)
            )
def _unmount(self, mount_point):
    """Unmount a file system.

    :param mount_point: path on the file system that is mounted.
    :type mount_point: ``str``
    """
    umount_command = [
        self.module.get_bin_path('umount', True),
        mount_point,
    ]
    rc, stdout, err = self._run_command(umount_command)
    if rc == 0:
        return
    self.failure(
        err=err,
        rc=rc,
        msg='failed to unmount [ %s ]' % mount_point,
        command=' '.join(umount_command)
    )
def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
    """Mount an overlayfs file system.

    :param lowerdir: name/path of the lower directory
    :type lowerdir: ``str``
    :param upperdir: name/path of the upper directory
    :type upperdir: ``str``
    :param mount_point: path on the file system that is mounted.
    :type mount_point: ``str``
    """
    mount_command = [
        self.module.get_bin_path('mount', True),
        '-t overlayfs',
        '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
        'overlayfs',
        mount_point,
    ]
    rc, stdout, err = self._run_command(mount_command)
    if rc == 0:
        return
    self.failure(
        err=err,
        rc=rc,
        msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
        % (lowerdir, upperdir, mount_point, mount_command)
    )
def _container_create_tar(self):
    """Create a tar archive from an LXC container.

    The process is as follows:
      * Stop or Freeze the container
      * Create temporary dir
      * Copy container and config to temporary directory
      * If LVM backed:
        * Create LVM snapshot of LV backing the container
        * Mount the snapshot to tmpdir/rootfs
      * Restore the state of the container
      * Create tar of tmpdir
      * Clean up

    :returns: path of the created archive
    :rtype: ``str``
    """
    # Create a temp dir
    temp_dir = tempfile.mkdtemp()
    # Set the name of the working dir, temp + container_name
    work_dir = os.path.join(temp_dir, self.container_name)
    # LXC container rootfs
    lxc_rootfs = self.container.get_config_item('lxc.rootfs')
    # Test if the containers rootfs is a block device
    block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
    # Test if the container is using overlayfs
    overlayfs_backed = lxc_rootfs.startswith('overlayfs')
    mount_point = os.path.join(work_dir, 'rootfs')
    # Set the snapshot name if needed
    snapshot_name = '%s_lxc_snapshot' % self.container_name
    # Remember the state so it can be restored in the finally block.
    container_state = self._get_state()
    try:
        # Ensure the original container is stopped or frozen
        if container_state not in ['stopped', 'frozen']:
            if container_state == 'running':
                self.container.freeze()
            else:
                self.container.stop()
        # Sync the container data from the container_path to work_dir
        self._rsync_data(lxc_rootfs, temp_dir)
        if block_backed:
            if snapshot_name not in self._lvm_lv_list():
                if not os.path.exists(mount_point):
                    os.makedirs(mount_point)
                # Take snapshot
                size, measurement = self._get_lv_size(
                    lv_name=self.container_name
                )
                self._lvm_snapshot_create(
                    source_lv=self.container_name,
                    snapshot_name=snapshot_name,
                    snapshot_size_gb=size
                )
                # Mount snapshot
                self._lvm_lv_mount(
                    lv_name=snapshot_name,
                    mount_point=mount_point
                )
            else:
                # Refuse to reuse a stale snapshot from a previous run.
                self.failure(
                    err='snapshot [ %s ] already exists' % snapshot_name,
                    rc=1,
                    msg='The snapshot [ %s ] already exists. Please clean'
                        ' up old snapshot of containers before continuing.'
                        % snapshot_name
                )
        elif overlayfs_backed:
            # rootfs format is "overlayfs:<lowerdir>:<upperdir>".
            lowerdir, upperdir = lxc_rootfs.split(':')[1:]
            self._overlayfs_mount(
                lowerdir=lowerdir,
                upperdir=upperdir,
                mount_point=mount_point
            )
        # Set the state as changed and set a new fact
        self.state_change = True
        return self._create_tar(source_dir=work_dir)
    finally:
        # Cleanup runs even when archiving failed part-way through.
        if block_backed or overlayfs_backed:
            # unmount snapshot
            self._unmount(mount_point)
        if block_backed:
            # Remove snapshot
            self._lvm_lv_remove(snapshot_name)
        # Restore original state of container
        if container_state == 'running':
            if self._get_state() == 'frozen':
                self.container.unfreeze()
            else:
                self.container.start()
        # Remove tmpdir
        shutil.rmtree(temp_dir)
def check_count(self, count, method):
    """Fail the module when *method* has already been retried once.

    The state handlers (_started, _stopped, ...) call themselves
    recursively after creating a missing container; this guard prevents
    infinite recursion.

    :param count: number of times the calling method has recursed.
    :type count: ``int``
    :param method: name of the state action being attempted.
    :type method: ``str``
    """
    if count > 1:
        # Fixed typo in the user-facing message:
        # 'Check to lxc is' -> 'Check that lxc is'.
        self.failure(
            error='Failed to %s container' % method,
            rc=1,
            msg='The container [ %s ] failed to %s. Check that lxc is'
            ' available and that the container is in a functional'
            ' state.' % (self.container_name, method)
        )
def failure(self, **kwargs):
    """Report a failure when running an Ansible command.

    All keyword arguments are forwarded verbatim to
    ``AnsibleModule.fail_json``, which terminates module execution.

    :param error: ``str`` Error that occurred.
    :param rc: ``int`` Return code while executing an Ansible command.
    :param msg: ``str`` Message to report.
    """
    self.module.fail_json(**kwargs)
def run(self):
    """Dispatch the requested state action and exit the module."""
    # LXC_ANSIBLE_STATES maps the requested state to a handler name.
    handler_name = LXC_ANSIBLE_STATES[self.state]
    getattr(self, handler_name)()
    result = self._container_data()
    # Merge archive then clone details into the reported facts.
    for extra in (self.archive_info, self.clone_info):
        if extra:
            result.update(extra)
    self.module.exit_json(
        changed=self.state_change,
        lxc_container=result
    )
def main():
    """Ansible Main module.

    Declares the module's argument spec, defaults ``lv_name`` to the
    container name when unset, and runs LxcContainerManagement.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                type='str',
                required=True
            ),
            template=dict(
                type='str',
                default='ubuntu'
            ),
            backing_store=dict(
                type='str',
                choices=LXC_BACKING_STORE.keys(),
                default='dir'
            ),
            template_options=dict(
                type='str'
            ),
            config=dict(
                type='str',
                default='/etc/lxc/default.conf'
            ),
            vg_name=dict(
                type='str',
                default='lxc'
            ),
            thinpool=dict(
                type='str'
            ),
            fs_type=dict(
                type='str',
                default='ext4'
            ),
            fs_size=dict(
                type='str',
                default='5G'
            ),
            directory=dict(
                type='str'
            ),
            zfs_root=dict(
                type='str'
            ),
            lv_name=dict(
                type='str'
            ),
            lxc_path=dict(
                type='str'
            ),
            state=dict(
                choices=LXC_ANSIBLE_STATES.keys(),
                default='started'
            ),
            container_command=dict(
                type='str'
            ),
            container_config=dict(
                type='str'
            ),
            container_log=dict(
                choices=BOOLEANS,
                default='false'
            ),
            # Flatten the per-library level aliases into a single list.
            container_log_level=dict(
                choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
                default='INFO'
            ),
            clone_name=dict(
                type='str',
                required=False
            ),
            clone_snapshot=dict(
                choices=BOOLEANS,
                default='false'
            ),
            archive=dict(
                choices=BOOLEANS,
                default='false'
            ),
            archive_path=dict(
                type='str',
                default='/tmp'
            ),
            archive_compression=dict(
                choices=LXC_COMPRESSION_MAP.keys(),
                default='gzip'
            )
        ),
        supports_check_mode=False,
    )
    # Default the logical volume name to the container name.
    lv_name = module.params.get('lv_name')
    if not lv_name:
        module.params['lv_name'] = module.params.get('name')
    lxc_manage = LxcContainerManagement(module=module)
    lxc_manage.run()
# import module bits
# NOTE: old-style Ansible modules import module_utils at the bottom with a
# star import; AnsibleModule, BOOLEANS, etc. come from this line.
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
sgml/popcorn_maker | vendor-local/lib/python/debug_toolbar/utils/sqlparse/lexer.py | 12 | 12456 | # -*- coding: utf-8 -*-
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""SQL Lexer"""
# This code is based on the SqlLexer in pygments.
# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
import re
from debug_toolbar.utils.sqlparse import tokens
from debug_toolbar.utils.sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
class include(str):
    """Marker string used in a state's rule list to pull in the rules of
    another state by name."""
    pass
class combined(tuple):
    """Indicates a state combined from multiple states."""
    def __new__(cls, *args):
        # Store the state names as the tuple's items.
        return tuple.__new__(cls, args)
    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass
def is_keyword(value):
    """Classify *value* as a keyword token type or a plain name.

    Lookup is case-insensitive; returns a ``(tokentype, value)`` pair.
    """
    upper = value.upper()
    ttype = KEYWORDS_COMMON.get(upper, KEYWORDS.get(upper, tokens.Name))
    return ttype, value
def apply_filters(stream, filters, lexer=None):
    """Apply an iterable of filters to a token stream.

    Each filter's ``filter`` method is called with *lexer* (or ``None``)
    and the current stream, and yields the transformed tokens. Filters
    are chained lazily in the given order.
    """
    def _chain(filter_, tokens_):
        for tok in filter_.filter(lexer, tokens_):
            yield tok

    for filter_ in filters:
        stream = _chain(filter_, stream)
    return stream
class LexerMeta(type):
    """
    Metaclass for Lexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """
    def _process_state(cls, unprocessed, processed, state):
        # Compile one state's rule list into (match-fn, tokentype,
        # new_state) triples, resolving include/combined/tuple targets.
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokenlist = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokenlist.extend(cls._process_state(
                    unprocessed, processed, str(tdef)))
                continue
            assert type(tdef) is tuple, "wrong rule def %r" % tdef
            try:
                # Bind the compiled pattern's .match for speed.
                rex = re.compile(tdef[0], rflags).match
            except Exception, err:  # Python 2 syntax preserved as-is.
                raise ValueError(("uncompilable regex %r in state"
                                  " %r of %r: %s"
                                  % (tdef[0], state, cls, err)))
            assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \
                ('token type must be simple type or callable, not %r'
                 % (tdef[1],))
            if len(tdef) == 2:
                # No state transition for this rule.
                new_state = None
            else:
                tdef2 = tdef[2]
                if isinstance(tdef2, str):
                    # an existing state
                    if tdef2 == '#pop':
                        new_state = -1
                    elif tdef2 in unprocessed:
                        new_state = (tdef2,)
                    elif tdef2 == '#push':
                        new_state = tdef2
                    elif tdef2[:5] == '#pop:':
                        # '#pop:N' pops N states at once.
                        new_state = -int(tdef2[5:])
                    else:
                        assert False, 'unknown new state %r' % tdef2
                elif isinstance(tdef2, combined):
                    # combine a new state from existing ones
                    new_state = '_tmp_%d' % cls._tmpname
                    cls._tmpname += 1
                    itokens = []
                    for istate in tdef2:
                        assert istate != state, \
                            'circular state ref %r' % istate
                        itokens.extend(cls._process_state(unprocessed,
                                                          processed, istate))
                    processed[new_state] = itokens
                    new_state = (new_state,)
                elif isinstance(tdef2, tuple):
                    # push more than one state
                    for state in tdef2:
                        assert (state in unprocessed or
                                state in ('#pop', '#push')), \
                            'unknown new state ' + state
                    new_state = tdef2
                else:
                    assert False, 'unknown new state def %r' % tdef2
            tokenlist.append((rex, tdef[1], new_state))
        return tokenlist
    def process_tokendef(cls):
        # Build the fully-compiled token table for this lexer class.
        cls._all_tokens = {}
        cls._tmpname = 0
        processed = cls._all_tokens[cls.__name__] = {}
        #tokendefs = tokendefs or cls.tokens[name]
        for state in cls.tokens.keys():
            cls._process_state(cls.tokens, processed, state)
        return processed
    def __call__(cls, *args, **kwds):
        # Compile the token table lazily on first instantiation.
        if not hasattr(cls, '_tokens'):
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef()
        return type.__call__(cls, *args, **kwds)
class Lexer(object):
    """Regex-driven SQL lexer.

    ``tokens`` maps lexer states to ``(regex, tokentype[, new_state])``
    rules; the LexerMeta metaclass compiles them into ``_tokens`` on the
    first instantiation.
    """
    __metaclass__ = LexerMeta
    encoding = 'utf-8'  # input decoding; 'guess' and 'chardet' are special
    stripall = False    # strip the whole text before lexing
    stripnl = False     # strip only leading/trailing newlines
    tabsize = 0         # expand tabs to this many spaces when > 0
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'--.*?(\r\n|\r|\n)', tokens.Comment.Single),
            # $ matches *before* newline, therefore we have two patterns
            # to match Comment.Single
            (r'--.*?$', tokens.Comment.Single),
            (r'(\r|\n|\r\n)', tokens.Newline),
            (r'\s+', tokens.Whitespace),
            (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
            (r':=', tokens.Assignment),
            (r'::', tokens.Punctuation),
            (r'[*]', tokens.Wildcard),
            (r'CASE\b', tokens.Keyword),  # extended CASE(foo)
            (r"`(``|[^`])*`", tokens.Name),
            (r"´(´´|[^´])*´", tokens.Name),
            (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', tokens.Name.Builtin),
            (r'\?{1}', tokens.Name.Placeholder),
            (r'[$:?%][a-zA-Z0-9_]+[^$:?%]?', tokens.Name.Placeholder),
            (r'@[a-zA-Z_][a-zA-Z0-9_]+', tokens.Name),
            (r'[a-zA-Z_][a-zA-Z0-9_]*(?=[.(])', tokens.Name),  # see issue39
            (r'[<>=~!]+', tokens.Operator.Comparison),
            (r'[+/@#%^&|`?^-]+', tokens.Operator),
            (r'0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
            (r'[0-9]*\.[0-9]+', tokens.Number.Float),
            (r'[0-9]+', tokens.Number.Integer),
            # TODO: Backslash escapes?
            (r"(''|'.*?[^\\]')", tokens.String.Single),
            # not a real string literal in ANSI SQL:
            (r'(""|".*?[^\\]")', tokens.String.Symbol),
            (r'(\[.*[^\]]\])', tokens.Name),
            (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN\b', tokens.Keyword),
            (r'END( IF| LOOP)?\b', tokens.Keyword),
            (r'NOT NULL\b', tokens.Keyword),
            (r'CREATE( OR REPLACE)?\b', tokens.Keyword.DDL),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', is_keyword),
            (r'[;:()\[\],\.]', tokens.Punctuation),
        ],
        'multiline-comments': [
            (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
            (r'\*/', tokens.Comment.Multiline, '#pop'),
            (r'[^/\*]+', tokens.Comment.Multiline),
            (r'[/*]', tokens.Comment.Multiline)
        ]}
    def __init__(self):
        self.filters = []
    def add_filter(self, filter_, **options):
        """Register a stream filter; a class is instantiated with *options*."""
        from debug_toolbar.utils.sqlparse.filters import Filter
        if not isinstance(filter_, Filter):
            filter_ = filter_(**options)
        self.filters.append(filter_)
    def get_tokens(self, text, unfiltered=False):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.
        Also preprocess the text, i.e. expand tabs and strip it if
        wanted and applies registered filters.
        """
        # Decode byte input according to self.encoding (Python 2 'unicode').
        if not isinstance(text, unicode):
            if self.encoding == 'guess':
                # Try UTF-8 (dropping a BOM), fall back to latin1.
                try:
                    text = text.decode('utf-8')
                    if text.startswith(u'\ufeff'):
                        text = text[len(u'\ufeff'):]
                except UnicodeDecodeError:
                    text = text.decode('latin1')
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/')
                enc = chardet.detect(text)
                text = text.decode(enc['encoding'])
            else:
                text = text.decode(self.encoding)
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        # if not text.endswith('\n'):
        #     text += '\n'
        def streamer():
            # Drop the position from the unprocessed triples.
            for i, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream
    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.
        ``stack`` is the inital stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        # Cache of value -> tokentype produced by callable actions
        # (e.g. is_keyword), so repeated names resolve without re-calling.
        known_names = {}
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    # print rex.pattern
                    value = m.group()
                    if value in known_names:
                        yield pos, known_names[value], value
                    elif type(action) is tokens._TokenType:
                        yield pos, action, value
                    elif hasattr(action, '__call__'):
                        ttype, value = action(value)
                        known_names[value] = ttype
                        yield pos, ttype, value
                    else:
                        # action is a rule callback yielding tokens itself.
                        for item in action(self, m):
                            yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # No rule matched at this position.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        pos += 1
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, tokens.Text, u'\n'
                        continue
                    yield pos, tokens.Error, text[pos]
                    pos += 1
                except IndexError:
                    # End of input reached.
                    break
def tokenize(sql):
    """Tokenize sql.

    Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
    of ``(token type, value)`` items.
    """
    return Lexer().get_tokens(sql)
| bsd-3-clause |
richm/designate | designate/openstack/common/systemd.py | 1 | 3061 | # Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import os
import socket
import sys
from designate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
    """Send *msg* to the systemd notification socket, if one is configured.

    When *unset_env* is true the NOTIFY_SOCKET variable is removed after a
    successful send so the notification only happens once.
    """
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if not notify_socket:
        return
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        sock.connect(_abstractify(notify_socket))
        sock.sendall(msg)
        if unset_env:
            del os.environ['NOTIFY_SOCKET']
    except EnvironmentError:
        LOG.debug("Systemd notification failed", exc_info=True)
    finally:
        sock.close()
def notify():
    """Send notification to Systemd that service is ready.

    For details see
    http://www.freedesktop.org/software/systemd/man/sd_notify.html
    """
    # NOTIFY_SOCKET is left in place so further notifications can be sent.
    _sd_notify(False, 'READY=1')
def notify_once():
    """Send notification once to Systemd that service is ready.

    Systemd sets NOTIFY_SOCKET environment variable with the name of the
    socket listening for notifications from services.
    This method removes the NOTIFY_SOCKET environment variable to ensure
    notification is sent only once.
    """
    _sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
    """Wait for systemd style notification on the socket.

    :param notify_socket: local socket address
    :type notify_socket: string
    :param timeout: socket timeout
    :type timeout: float
    :returns: 0 service ready
              1 service not ready
              2 timeout occurred
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    sock.bind(_abstractify(notify_socket))
    try:
        msg = sock.recv(512)
    except socket.timeout:
        return 2
    finally:
        sock.close()
    # recv() returns bytes on Python 3 ('READY=1' in msg would raise
    # TypeError there); a bytes literal also works on Python 2, where
    # bytes is an alias for str.
    if b'READY=1' in msg:
        return 0
    else:
        return 1
if __name__ == '__main__':
    # simple CLI for testing:
    #   no args            -> send READY=1 to NOTIFY_SOCKET
    #   <timeout> argument -> wait for READY=1 and exit with onready's code
    if len(sys.argv) == 1:
        notify()
    elif len(sys.argv) >= 2:
        timeout = float(sys.argv[1])
        notify_socket = os.getenv('NOTIFY_SOCKET')
        if notify_socket:
            retval = onready(notify_socket, timeout)
            sys.exit(retval)
| apache-2.0 |
adityachap/fabric-sdk-node | node_modules/grpc/third_party/boringssl/third_party/googletest/scripts/common.py | 1180 | 2919 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Shared utilities for writing scripts for Google Test/Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
# Matches the line from 'svn info .' output that describes what SVN
# path the current local directory corresponds to. For example, in
# a googletest SVN workspace's trunk/test directory, the output will be:
#
# URL: https://googletest.googlecode.com/svn/trunk/test
_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)')
def GetCommandOutput(command):
    """Runs the shell command and returns its stdout as a list of lines.

    Each line has its surrounding whitespace (including the trailing
    newline) stripped.
    """
    stream = os.popen(command, 'r')
    try:
        return [raw_line.strip() for raw_line in stream]
    finally:
        stream.close()
def GetSvnInfo():
    """Returns the project name and the current SVN workspace's root path.

    Returns (None, None) when 'svn info .' produces no recognizable URL
    line (e.g. outside an SVN workspace).
    """
    for info_line in GetCommandOutput('svn info .'):
        match = _SVN_INFO_URL_RE.match(info_line)
        if not match:
            continue
        project, rel_path = match.groups()  # e.g. 'googletest', '/trunk/test'
        # Walk up one '..' per path component to reach the workspace root.
        root = os.path.realpath(rel_path.count('/') * '../')
        return project, root
    return None, None
def GetSvnTrunk():
    """Returns the current SVN workspace's trunk root path, or None."""
    root = GetSvnInfo()[1]
    if root:
        return root + '/trunk'
    return None
def IsInGTestSvn():
    """True when the current SVN workspace belongs to googletest."""
    return GetSvnInfo()[0] == 'googletest'
def IsInGMockSvn():
    """True when the current SVN workspace belongs to googlemock."""
    return GetSvnInfo()[0] == 'googlemock'
| apache-2.0 |
xinjiguaike/edx-platform | openedx/core/djangoapps/user_api/tests/test_middleware.py | 152 | 4415 | """Tests for user API middleware"""
from mock import Mock, patch
from unittest import TestCase
from django.http import HttpResponse
from django.test.client import RequestFactory
from student.tests.factories import UserFactory, AnonymousUserFactory
from ..tests.factories import UserCourseTagFactory
from ..middleware import UserTagsEventContextMiddleware
class TagsMiddlewareTest(TestCase):
    """
    Test the UserTagsEventContextMiddleware
    """
    def setUp(self):
        """Create the middleware, two users, a course request and a patched
        event tracker for each test."""
        super(TagsMiddlewareTest, self).setUp()
        self.middleware = UserTagsEventContextMiddleware()
        self.user = UserFactory.create()
        # Second user whose tags must NOT leak into self.user's context.
        self.other_user = UserFactory.create()
        self.course_id = 'mock/course/id'
        self.request_factory = RequestFactory()
        # TODO: Make it so we can use reverse. Appears to fail depending on the order in which tests are run
        #self.request = RequestFactory().get(reverse('courseware', kwargs={'course_id': self.course_id}))
        self.request = RequestFactory().get('/courses/{}/courseware'.format(self.course_id))
        self.request.user = self.user
        self.response = Mock(spec=HttpResponse)
        # Patch the tracker module used by the middleware so assertions can
        # inspect the contexts it enters and exits.
        patcher = patch('openedx.core.djangoapps.user_api.middleware.tracker')
        self.tracker = patcher.start()
        self.addCleanup(patcher.stop)

    def process_request(self):
        """
        Execute process request using the request, and verify that it returns None
        so that the request continues.
        """
        # Middleware should pass request through
        self.assertEquals(self.middleware.process_request(self.request), None)

    def assertContextSetTo(self, context):
        """Asserts UserTagsEventContextMiddleware.CONTEXT_NAME matches ``context``"""
        self.tracker.get_tracker.return_value.enter_context.assert_called_with(  # pylint: disable=maybe-no-member
            UserTagsEventContextMiddleware.CONTEXT_NAME,
            context
        )

    def test_tag_context(self):
        """Tags of the requesting user/course appear in the context; tags of
        other users and other courses do not."""
        for key, value in (('int_value', 1), ('str_value', "two")):
            UserCourseTagFactory.create(
                course_id=self.course_id,
                user=self.user,
                key=key,
                value=value,
            )
        # Same course, different user — must be excluded.
        UserCourseTagFactory.create(
            course_id=self.course_id,
            user=self.other_user,
            key="other_user",
            value="other_user_value"
        )
        # Same user, different course — must be excluded.
        UserCourseTagFactory.create(
            course_id='other/course/id',
            user=self.user,
            key="other_course",
            value="other_course_value"
        )
        self.process_request()
        # Note: tag values are expected as strings ('1'), not ints.
        self.assertContextSetTo({
            'course_id': self.course_id,
            'course_user_tags': {
                'int_value': '1',
                'str_value': 'two',
            }
        })

    def test_no_tags(self):
        """A course URL with no tags still produces a course context."""
        self.process_request()
        self.assertContextSetTo({'course_id': self.course_id, 'course_user_tags': {}})

    def test_not_course_url(self):
        """Requests outside /courses/ produce an empty context."""
        self.request = self.request_factory.get('/not/a/course/url')
        self.request.user = self.user
        self.process_request()
        self.assertContextSetTo({})

    def test_invalid_course_id(self):
        """A malformed course id also produces an empty context."""
        self.request = self.request_factory.get('/courses/edX/101/')
        self.request.user = self.user
        self.process_request()
        self.assertContextSetTo({})

    def test_anonymous_user(self):
        """Anonymous users get the course context but no tags."""
        self.request.user = AnonymousUserFactory()
        self.process_request()
        self.assertContextSetTo({'course_id': self.course_id, 'course_user_tags': {}})

    def test_remove_context(self):
        """process_response exits the context and returns the response, even
        when the tracker itself raises."""
        get_tracker = self.tracker.get_tracker  # pylint: disable=maybe-no-member
        exit_context = get_tracker.return_value.exit_context
        # The middleware should clean up the context when the request is done
        self.assertEquals(
            self.middleware.process_response(self.request, self.response),
            self.response
        )
        exit_context.assert_called_with(UserTagsEventContextMiddleware.CONTEXT_NAME)
        exit_context.reset_mock()
        # Even if the tracker blows up, the middleware should still return the response
        get_tracker.side_effect = Exception
        self.assertEquals(
            self.middleware.process_response(self.request, self.response),
            self.response
        )
| agpl-3.0 |
siddharths067/HuHubaProject | lib/chardet/mbcsgroupprober.py | 343 | 2012 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober covering every supported multi-byte charset
    (UTF-8, Shift-JIS, EUC-JP, GB2312, EUC-KR, CP949, Big5, EUC-TW)."""

    def __init__(self, lang_filter=None):
        super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
        # One prober instance per supported multi-byte encoding.
        # NOTE(review): list order may act as a tie-breaker in the group
        # prober — keep it stable unless confirmed otherwise.
        self.probers = [
            UTF8Prober(),
            SJISProber(),
            EUCJPProber(),
            GB2312Prober(),
            EUCKRProber(),
            CP949Prober(),
            Big5Prober(),
            EUCTWProber()
        ]
        self.reset()
| mit |
diogommartins/ryu | ryu/hooks.py | 43 | 2575 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from setuptools.command import easy_install
from ryu import version
# Global variables in this module doesn't work as we expect
# because, during the setup procedure, this module seems to be
# copied (as a file) and can be loaded multiple times.
# We save them into __main__ module instead.
def _main_module():
    # Globals in this module don't survive setup: the module can be copied
    # (as a file) and loaded multiple times, so shared state is stashed on
    # the __main__ module instead.
    return sys.modules['__main__']
def save_orig():
    """Save original easy_install.get_script_args.

    This is necessary because pbr's setup_hook is sometimes called
    before ours; the saved reference lives on __main__ (see _main_module).
    """
    _main_module()._orig_get_script_args = easy_install.get_script_args
def setup_hook(config):
    """Filter config parsed from a setup.cfg to inject our defaults."""
    metadata = config['metadata']
    if sys.platform == 'win32':
        # Windows installs additionally depend on pywin32/wmi; append them
        # to the declared dependency list.
        requires = metadata.get('requires_dist', '').split('\n')
        requires.append('pywin32')
        requires.append('wmi')
        metadata['requires_dist'] = "\n".join(requires)
        config['metadata'] = metadata
    # Pin the package version to ryu's own version constant.
    metadata['version'] = str(version)

    # pbr's setup_hook replaces easy_install.get_script_args with
    # their own version, override_get_script_args, prefering simpler
    # scripts which are not aware of multi-version.
    # prevent that by doing the opposite. it's a horrible hack
    # but we are in patching wars already...
    from pbr import packaging

    def my_get_script_args(*args, **kwargs):
        # Delegate to the original implementation saved by save_orig().
        return _main_module()._orig_get_script_args(*args, **kwargs)

    packaging.override_get_script_args = my_get_script_args
    easy_install.get_script_args = my_get_script_args

    # another hack to allow setup from tarball.
    orig_get_version = packaging.get_version

    def my_get_version(package_name, pre_version=None):
        # For ryu itself, bypass pbr's git-based version detection.
        if package_name == 'ryu':
            return str(version)
        return orig_get_version(package_name, pre_version)

    packaging.get_version = my_get_version
| apache-2.0 |
zimmerman-zimmerman/OIPA | OIPA/iati/management/commands/set_searchable_activities.py | 2 | 1970 | from django.conf import settings
from django.core.management.base import BaseCommand
from iati.models import Activity
from iati.transaction.models import Transaction
class Command(BaseCommand):
    """Recompute the ``is_searchable`` flag on all activities.

    An activity is searchable when it is reported by one of the
    organisations listed in ``settings.ROOT_ORGANISATIONS``, or is reached
    (transitively) from such an activity via transactions whose provider
    organisation points at it as the provider activity.
    """

    def update_searchable_activities(self):
        """
        Set all activities to searchable if the reporting org is in the
        settings.ROOT_ORGANISATIONS list
        """
        # set all activities as non searchable
        Activity.objects.filter(
            is_searchable=True
        ).exclude(
            reporting_organisations__ref__in=settings.ROOT_ORGANISATIONS
        ).update(
            is_searchable=False
        )
        # set all root activities as searchable
        Activity.objects.filter(
            is_searchable=False,
            reporting_organisations__ref__in=settings.ROOT_ORGANISATIONS
        ).update(
            is_searchable=True
        )
        # loop through root activities and set children as searchable
        activities = Activity.objects.filter(
            reporting_organisations__ref__in=settings.ROOT_ORGANISATIONS)
        for activity in activities:
            self.set_children_searchable(activity)

    def set_children_searchable(self, orig_activity):
        """
        sets all the children to searchable
        recursively calls itself but keeps a list of already set activities
        """
        # all transactions where this id is given as provider activity
        provider_activity_transactions = Transaction.objects.filter(
            provider_organisation__provider_activity_id=orig_activity.id)
        for transaction in provider_activity_transactions:
            activity = transaction.activity
            if not activity.is_searchable:
                # The is_searchable flag doubles as a "visited" marker here,
                # which terminates recursion on cyclic funding chains.
                activity.is_searchable = True
                activity.save()
                self.set_children_searchable(activity)
        return

    def handle(self, *args, **options):
        """Entry point for ``manage.py set_searchable_activities``."""
        self.update_searchable_activities()
| agpl-3.0 |
jotes/ansible | v2/ansible/plugins/action/fail.py | 16 | 1140 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin that always fails, with an optional custom message."""

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=dict()):
        default_msg = 'Failed as requested from task'
        args = self._task.args
        # Honour an explicit 'msg' argument, even a falsy one, exactly as
        # provided by the task.
        if args and 'msg' in args:
            failure_msg = args['msg']
        else:
            failure_msg = default_msg
        return dict(failed=True, msg=failure_msg)
| gpl-3.0 |
webjunkie/python-social-auth | social/backends/github_enterprise.py | 53 | 1330 | """
Github Enterprise OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/github_enterprise.html
"""
from six.moves.urllib.parse import urljoin
from social.utils import append_slash
from social.backends.github import GithubOAuth2, GithubOrganizationOAuth2, \
GithubTeamOAuth2
class GithubEnterpriseMixin(object):
    """Redirects the stock Github OAuth2 endpoints to an on-premise
    Github Enterprise installation, using the backend's ``URL`` and
    ``API_URL`` settings."""

    def api_url(self):
        # Enterprise API root, normalised to end with a slash.
        return append_slash(self.setting('API_URL'))

    def authorization_url(self):
        return self._url('login/oauth/authorize')

    def access_token_url(self):
        return self._url('login/oauth/access_token')

    def _url(self, path):
        # Resolve *path* against the configured Enterprise base URL.
        return urljoin(append_slash(self.setting('URL')), path)
class GithubEnterpriseOAuth2(GithubEnterpriseMixin, GithubOAuth2):
    """Github Enterprise OAuth authentication backend"""
    name = 'github-enterprise'
class GithubEnterpriseOrganizationOAuth2(GithubEnterpriseMixin,
                                         GithubOrganizationOAuth2):
    """Github Enterprise OAuth2 authentication backend for
    organizations"""
    name = 'github-enterprise-org'
    # NOTE(review): 'read:org' scope presumably required by the parent
    # backend's organization-membership check — confirm against it.
    DEFAULT_SCOPE = ['read:org']
class GithubEnterpriseTeamOAuth2(GithubEnterpriseMixin, GithubTeamOAuth2):
    """Github Enterprise OAuth2 authentication backend for teams"""
    name = 'github-enterprise-team'
    DEFAULT_SCOPE = ['read:org']
| bsd-3-clause |
jdavisp3/TigerShark | tigershark/tools/surveyPyX12.py | 2 | 1592 | #!/usr/bin/env python
"""Survey PyX12 Segment Definitions.
Read ALL pyx12 message definitions, accumulate all Segment definitions.
"""
from __future__ import print_function
import xml.dom.minidom as DOM
import os, glob
# Accumulators shared by getDef()/survey():
#   segmentTypes: segment xid -> set of human-readable segment names
segmentTypes = {}
#   segmentUse:   segment xid -> set of message (transaction set) names using it
segmentUse = {}


def getDef(aFile):
    """Get the Segment Definitions from an XML file.

    Records every ``<segment>`` element's xid, display name and owning
    message in the module-level ``segmentTypes``/``segmentUse`` maps.

    :param aFile: path (or file-like object) accepted by xml.dom.minidom.parse
    """
    msgDef = DOM.parse(aFile)
    theDoc = msgDef.documentElement
    # The root element's xid identifies the message (e.g. "837").
    msgName = theDoc.attributes['xid'].value
    for segment in msgDef.getElementsByTagName("segment"):
        xid = segment.attributes['xid'].value
        for n in segment.childNodes:
            if n.nodeType == DOM.Node.ELEMENT_NODE and n.nodeName == "name":
                # Concatenate the <name> element's text children.
                segName = " ".join(c.nodeValue for c in n.childNodes)
                segmentUse.setdefault(xid, set()).add(msgName)
                segmentTypes.setdefault(xid, set()).add(segName)


def survey(pattern):
    """Survey an entire directory, using a wild-card pattern.

    Parses every matching file with getDef() and prints a reST-style grid
    table of segment xids and names.
    """
    for f in glob.glob(pattern):
        getDef(f)
    # BUG FIX: dict.keys() returns a view in Python 3, which has no .sort()
    # method; sorted() over the dict works on both Python 2 and 3.
    segs = sorted(segmentTypes)
    format1 = "+%-8s+%-128s+"
    format2 = "|%-8s|%-128s|"
    print(format1 % (8 * '-', 128 * '-'))
    for k in segs:
        v = list(segmentTypes[k])
        print(format2 % ("``%s``" % k, v[0]))
        for m in v[1:]:
            print(format2 % ('', ", " + m))
        # NOTE(review): indentation was lost in transit; a grid table needs a
        # separator after every row, so this print is placed inside the loop.
        print(format1 % (8 * '-', 128 * '-'))
if __name__ == "__main__":
    # Default pyx12 map location on a Windows Python 2.5 install; adjust
    # for your environment.
    baseDir= r"C:\Python25\share\pyx12\map"
    # Survey only the 837 (claim) maps; the commented pattern covers all maps.
    #survey( os.path.join( baseDir, "[0-9]*.xml" ) )
    survey( os.path.join( baseDir, "837*.xml" ) )
| bsd-3-clause |
ThiefMaster/indico | indico/util/json.py | 4 | 1603 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import UserDict
from datetime import date, datetime
from speaklater import _LazyString
try:
import simplejson as _json
except ImportError:
import json as _json
class IndicoJSONEncoder(_json.JSONEncoder):
    """Custom JSON encoder that supports more types.

    * lazy strings (speaklater) -> their evaluated value
    * UserDict -> plain dict
    * datetime -> {'date': ..., 'time': ..., 'tz': ...}
    * date -> ISO string
    """
    def __init__(self, *args, **kwargs):
        # Default to compact separators unless the caller chose their own.
        if kwargs.get('separators') is None:
            kwargs['separators'] = (',', ':')
        super().__init__(*args, **kwargs)

    def default(self, o):
        # NOTE: isinstance order matters — datetime is a subclass of date,
        # so it must be tested before the date branch.
        if isinstance(o, _LazyString):
            return o.value
        elif isinstance(o, UserDict):
            return dict(o)
        elif isinstance(o, datetime):
            return {'date': str(o.date()), 'time': str(o.time()), 'tz': str(o.tzinfo)}
        elif isinstance(o, date):
            return str(o)
        return _json.JSONEncoder.default(self, o)
def dumps(obj, **kwargs):
    """Serialize *obj* to JSON with the Indico-aware encoder.

    ``pretty=True`` indents the output with four spaces; ``textarea=True``
    wraps the result in an HTML <textarea>.  Forward slashes are escaped
    in the output.
    """
    pretty = kwargs.pop('pretty', False)
    wrap_in_textarea = kwargs.pop('textarea', False)
    if pretty:
        kwargs['indent'] = '    '
    encoded = _json.dumps(obj, cls=IndicoJSONEncoder, **kwargs)
    encoded = encoded.replace('/', '\\/')
    if not wrap_in_textarea:
        return encoded
    return '<html><head></head><body><textarea>%s</textarea></body></html>' % encoded
def loads(string):
    """Simple wrapper around json.loads()."""
    return _json.loads(string)
| mit |
h3llrais3r/SickRage | lib/sqlalchemy/dialects/drizzle/mysqldb.py | 154 | 1270 | """
.. dialect:: drizzle+mysqldb
:name: MySQL-Python
:dbapi: mysqldb
:connectstring: drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://sourceforge.net/projects/mysql-python
"""
from sqlalchemy.dialects.drizzle.base import (
DrizzleDialect,
DrizzleExecutionContext,
DrizzleCompiler,
DrizzleIdentifierPreparer)
from sqlalchemy.connectors.mysqldb import (
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBConnector)
class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext,
                                      DrizzleExecutionContext):
    # Combines MySQLdb DBAPI behaviour with the Drizzle execution context;
    # no overrides needed.
    pass
class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler):
    # MySQLdb-flavoured statement compiler for the Drizzle dialect.
    pass
class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer,
                                        DrizzleIdentifierPreparer):
    # MySQLdb-flavoured identifier quoting for the Drizzle dialect.
    pass
class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect):
    # Wire the MySQLdb-specific subclasses into the dialect.
    execution_ctx_cls = DrizzleExecutionContext_mysqldb
    statement_compiler = DrizzleCompiler_mysqldb
    preparer = DrizzleIdentifierPreparer_mysqldb

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Drizzle results are always UTF-8; no server round-trip needed.
        return 'utf8'


# Entry point used by SQLAlchemy's dialect loader.
dialect = DrizzleDialect_mysqldb
| gpl-3.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.3/django/core/management/commands/runserver.py | 158 | 5632 | from optparse import make_option
import os
import re
import sys
import socket
from django.core.management.base import BaseCommand, CommandError
from django.core.handlers.wsgi import WSGIHandler
from django.core.servers.basehttp import AdminMediaHandler, run, WSGIServerException
from django.utils import autoreload
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
DEFAULT_PORT = "8000"
class BaseRunserverCommand(BaseCommand):
    """Base class for the ``runserver`` management command.

    Parses the optional ``[addr:]port`` argument, then serves the WSGI
    application with the development server, optionally restarting on code
    changes via the autoreloader.  (Python 2 codebase — note the old-style
    ``except X, e`` syntax below.)
    """
    option_list = BaseCommand.option_list + (
        make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
            help='Tells Django to use a IPv6 address.'),
        make_option('--noreload', action='store_false', dest='use_reloader', default=True,
            help='Tells Django to NOT use the auto-reloader.'),
    )
    help = "Starts a lightweight Web server for development."
    args = '[optional port number, or ipaddr:port]'

    # Validation is called explicitly each time the server is reloaded.
    requires_model_validation = False

    def get_handler(self, *args, **options):
        """
        Returns the default WSGI handler for the runner.
        """
        return WSGIHandler()

    def handle(self, addrport='', *args, **options):
        """Validate the ``addrport`` argument and start the server.

        :raises CommandError: on unsupported IPv6, extra positional args,
            or an unparseable/invalid address:port value.
        """
        self.use_ipv6 = options.get('use_ipv6')
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        if args:
            raise CommandError('Usage is runserver %s' % self.args)
        # _raw_ipv6 records whether the address should be displayed in
        # bracketed [::1] form.
        self._raw_ipv6 = False
        if not addrport:
            self.addr = ''
            self.port = DEFAULT_PORT
        else:
            m = re.match(naiveip_re, addrport)
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % addrport)
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." % self.port)
            if self.addr:
                if _ipv6:
                    # Strip the enclosing brackets from a [::1]-style address.
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
        if not self.addr:
            # Default to the loopback address of the selected IP family.
            self.addr = self.use_ipv6 and '::1' or '127.0.0.1'
            self._raw_ipv6 = bool(self.use_ipv6)
        self.run(*args, **options)

    def run(self, *args, **options):
        """
        Runs the server, using the autoreloader if needed
        """
        use_reloader = options.get('use_reloader', True)
        if use_reloader:
            autoreload.main(self.inner_run, args, options)
        else:
            self.inner_run(*args, **options)

    def inner_run(self, *args, **options):
        """Validate models, print the startup banner and serve requests."""
        from django.conf import settings
        from django.utils import translation
        shutdown_message = options.get('shutdown_message', '')
        quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
        self.stdout.write("Validating models...\n\n")
        self.validate(display_num_errors=True)
        self.stdout.write((
            "Django version %(version)s, using settings %(settings)r\n"
            "Development server is running at http://%(addr)s:%(port)s/\n"
            "Quit the server with %(quit_command)s.\n"
        ) % {
            "version": self.get_version(),
            "settings": settings.SETTINGS_MODULE,
            "addr": self._raw_ipv6 and '[%s]' % self.addr or self.addr,
            "port": self.port,
            "quit_command": quit_command,
        })
        # django.core.management.base forces the locale to en-us. We should
        # set it up correctly for the first request (particularly important
        # in the "--noreload" case).
        translation.activate(settings.LANGUAGE_CODE)
        try:
            handler = self.get_handler(*args, **options)
            run(self.addr, int(self.port), handler, ipv6=self.use_ipv6)
        except WSGIServerException, e:
            # Use helpful error messages instead of ugly tracebacks.
            ERRORS = {
                13: "You don't have permission to access that port.",
                98: "That port is already in use.",
                99: "That IP address can't be assigned-to.",
            }
            try:
                error_text = ERRORS[e.args[0].args[0]]
            except (AttributeError, KeyError):
                error_text = str(e)
            sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n')
            # Need to use an OS exit because sys.exit doesn't work in a thread
            os._exit(1)
        except KeyboardInterrupt:
            if shutdown_message:
                self.stdout.write("%s\n" % shutdown_message)
            sys.exit(0)
class Command(BaseRunserverCommand):
    """``runserver`` variant that additionally serves admin media files."""
    option_list = BaseRunserverCommand.option_list + (
        make_option('--adminmedia', dest='admin_media_path', default='',
            help='Specifies the directory from which to serve admin media.'),
    )

    def get_handler(self, *args, **options):
        """
        Serves admin media like old-school (deprecation pending).
        """
        handler = super(Command, self).get_handler(*args, **options)
        # Wrap the default handler with the admin-media handler.
        return AdminMediaHandler(handler, options.get('admin_media_path', ''))
| apache-2.0 |
WarmongeR1/pyvideo.ru | proposal/parsers.py | 2 | 6473 | # coding: utf-8
import re
from django.core.serializers import SerializerDoesNotExist
from django.conf import settings
from rest_framework.settings import api_settings
from richard.videos.models import Category
from .serializers import CategorySerializer, VideoSerializer
from .utils import force_path
from .exceptions import ProposalError, TemplateError, ObjectError
class BaseParser(object):
    """Maps serialization formats to DRF parser classes and deserializes
    streams with them."""

    def __init__(self):
        # build a format -> media type map
        self.formats = {
            renderer.format: renderer.media_type for renderer
            in api_settings.DEFAULT_RENDERER_CLASSES
        }
        # associate media types with their parser classes
        self.parsers = {
            parser.media_type: parser for parser
            in api_settings.DEFAULT_PARSER_CLASSES
        }

    def deserialize(self, format, stream):
        """
        Attempt to read and deserialize data from ``stream``.
        Return a deserialized object.

        :param format: Serialization format (json, yaml, xml, etc)
        :param stream: file-like obj
        :raises django.core.serializers.SerializerDoesNotExist:
            If provided ``format`` value is not registered with a parser class.
        """
        try:
            media_type = self.formats[format]
            parser_class = self.parsers[media_type]
        except KeyError:
            raise SerializerDoesNotExist(format)
        else:
            return parser_class().parse(stream)
class Videos(BaseParser):
    """
    Parser class that takes a file path as an argument.

    An instance of the class is an iterator that walks the file tree
    seeking for serialized video and category objects, deserializes the objects
    with either ``proposal.serializers.VideoSerializer`` or
    ``proposal.serializers.CategorySerializer`` serializers and
    yields "unsaved" instances of the video serializer class.

    :param path: Path to a proposal root (may as well be a pathlib.Path instance)
    :raises proposal.exceptions.ProposalError:
        If proposal root does not follow the correct file structure or contains unserializable files.
    """

    def __init__(self, path):
        super(Videos, self).__init__()
        self.path = path

    def __iter__(self):
        """
        Return and iterator that walks the file tree seeking for serialized objects
        and yielding unsaved instances of ``proposal.serializers.VideoSerializer``.
        """
        return self._iter_proposal_root(self.path)

    def _iter_proposal_root(self, path):
        """Yield video serializers from every category directory under *path*,
        accumulating per-category errors so one bad directory doesn't abort
        the whole walk."""
        # list of caught exceptions
        exc_list = []
        try:
            for category_path in force_path(path).iterdir():
                # skip directories/files that begin with a non-alphanumeric character
                if not re.match(r'^[a-z0-9]', category_path.stem, flags=re.I):
                    continue
                try:
                    yield from self._iter_category(category_path)
                except ProposalError as e:
                    exc_list.append((type(e), str(e)))
        # catch all uncaught OSError's
        except OSError as e:
            raise TemplateError(str(e))
        if exc_list:
            # provide the caller with a full list of occurred errors
            raise ProposalError(exc_list)

    def _iter_category(self, path):
        """Yield VideoSerializer instances for every video object found in a
        single category directory."""
        # a proposal root must contain directories only
        if not path.is_dir():
            raise TemplateError('%s is not a directory' % path)
        # list of video objects (dictionaries) found in the directory
        objects = []
        # category description object
        category_obj = None
        # attempt to deserialize objects using associated serializers
        for video_path in path.iterdir():
            # skip files that begin with a dot
            if video_path.is_file() and video_path.stem.startswith('.'):
                continue
            obj = self._get_object(video_path)
            # this is the category metafile
            if video_path.stem == settings.PROPOSAL_CATEGORY_META:
                # a metafile (probably with another extension) has already been found
                if category_obj is not None:
                    raise TemplateError('duplicate metafiles are found in %s' % path)
                category_obj = obj
            # this is a video object
            else:
                objects.append(obj)
        # acquire a Category instance either from non-empty description object
        # or directory name as a category slug
        category = self._get_category(category_obj, path.stem)
        for video_obj in objects:
            # assign the saved category id to all associated video objects
            video_obj.update({'category': category.pk})
            serializer = VideoSerializer(data=video_obj)
            if not serializer.is_valid():
                raise ObjectError('%s is not a valid video object' % video_obj)
            yield serializer

    def _get_object(self, path):
        """Open *path* and deserialize it by file extension, wrapping all
        failures in ProposalError subclasses."""
        path = force_path(path)
        try:
            with open(str(path), 'rb') as f:
                try:
                    # path.suffix[1:] strips the leading dot: '.json' -> 'json'
                    obj = self.deserialize(path.suffix[1:], f)
                except SerializerDoesNotExist:
                    raise TemplateError('%s is not a registered serializer format (%s)' % (path.suffix[1:], path))
                except Exception as e:
                    raise ObjectError('failed to deserialize %s (%s)' % (path, e))
                if not isinstance(obj, dict):
                    raise ObjectError('%s does not yield a dictionary' % path)
                return obj
        except OSError:
            raise TemplateError('failed to open %s' % path)

    def _get_category(self, obj, slug=''):
        """Return a saved Category: built from the metafile object when one
        exists, otherwise get-or-created from the directory slug."""
        if not obj:
            # attempt to deslugify title
            title = re.sub(r'\W+', ' ', slug).strip().capitalize()
            # category title must not be empty
            if not title:
                raise ProposalError('failed to deslugify "%s"' % slug)
            return Category.objects.get_or_create(slug=slug, defaults={'title': title, 'slug': None})[0]
        else:
            # attempt to map the deserialized object to a django model
            serializer = CategorySerializer(data=obj)
            if not serializer.is_valid():
                raise ObjectError('failed to map %s to a model' % obj)
            return serializer.save() or serializer.object
# backward compat: keep the historical lower-case alias importable
videos = Videos
| bsd-3-clause |
cloudbase/nova-virtualbox | nova/virt/configdrive.py | 2 | 6568 | # Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config Drive v2 helper."""
import os
import shutil
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from nova import exception
from nova.i18n import _LW
from nova.openstack.common import fileutils
from nova import utils
from nova import version
LOG = logging.getLogger(__name__)

# Configuration options controlling how config drives are generated.
configdrive_opts = [
    cfg.StrOpt('config_drive_format',
               default='iso9660',
               help='Config drive format. One of iso9660 (default) or vfat'),
    # force_config_drive is a string option, to allow for future behaviors
    # (e.g. use config_drive based on image properties)
    cfg.StrOpt('force_config_drive',
               help='Set to force injection to take place on a config drive '
                    '(if set, valid options are: always)'),
    cfg.StrOpt('mkisofs_cmd',
               default='genisoimage',
               help='Name and optionally path of the tool used for '
                    'ISO image creation')
]

CONF = cfg.CONF
CONF.register_opts(configdrive_opts)

# Config drives are 64mb, if we can't size to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * units.Mi
class ConfigDriveBuilder(object):
    """Build config drives, optionally as a context manager."""

    def __init__(self, instance_md=None):
        # Path of a generated image to remove on cleanup().  NOTE(review):
        # never assigned elsewhere in this class — presumably set by
        # callers; confirm before relying on it.
        self.imagefile = None
        # Queued (relative path, bytes) pairs to place on the drive.
        self.mdfiles = []
        if instance_md is not None:
            self.add_instance_metadata(instance_md)

    def __enter__(self):
        return self

    def __exit__(self, exctype, excval, exctb):
        if exctype is not None:
            # NOTE(mikal): this means we're being cleaned up because an
            # exception was thrown. All bets are off now, and we should not
            # swallow the exception
            return False
        self.cleanup()

    def _add_file(self, basedir, path, data):
        """Write *data* to the relative *path* under *basedir*, creating
        intermediate directories as needed."""
        filepath = os.path.join(basedir, path)
        dirname = os.path.dirname(filepath)
        fileutils.ensure_tree(dirname)
        with open(filepath, 'wb') as f:
            f.write(data)

    def add_instance_metadata(self, instance_md):
        """Queue every (path, data) pair produced by the instance metadata."""
        for (path, data) in instance_md.metadata_for_config_drive():
            self.mdfiles.append((path, data))

    def _write_md_files(self, basedir):
        # Materialise all queued metadata files under *basedir*.
        for data in self.mdfiles:
            self._add_file(basedir, data[0], data[1])

    def _make_iso9660(self, path, tmpdir):
        """Pack *tmpdir* into an ISO9660 image at *path* using mkisofs_cmd."""
        publisher = "%(product)s %(version)s" % {
            'product': version.product_string(),
            'version': version.version_string_with_package()
        }
        utils.execute(CONF.mkisofs_cmd,
                      '-o', path,
                      '-ldots',
                      '-allow-lowercase',
                      '-allow-multidot',
                      '-l',
                      '-publisher',
                      publisher,
                      '-quiet',
                      '-J',
                      '-r',
                      '-V', 'config-2',
                      tmpdir,
                      attempts=1,
                      run_as_root=False)

    def _make_vfat(self, path, tmpdir):
        """Create a fixed-size vfat image at *path* and copy *tmpdir*'s
        contents into it through a loop mount."""
        # NOTE(mikal): This is a little horrible, but I couldn't find an
        # equivalent to genisoimage for vfat filesystems.
        with open(path, 'wb') as f:
            f.truncate(CONFIGDRIVESIZE_BYTES)
        utils.mkfs('vfat', path, label='config-2')
        with utils.tempdir() as mountdir:
            mounted = False
            try:
                _, err = utils.trycmd(
                    'mount', '-o', 'loop,uid=%d,gid=%d' % (os.getuid(),
                                                           os.getgid()),
                    path,
                    mountdir,
                    run_as_root=True)
                if err:
                    raise exception.ConfigDriveMountFailed(operation='mount',
                                                           error=err)
                mounted = True
                # NOTE(mikal): I can't just use shutils.copytree here,
                # because the destination directory already
                # exists. This is annoying.
                for ent in os.listdir(tmpdir):
                    shutil.copytree(os.path.join(tmpdir, ent),
                                    os.path.join(mountdir, ent))
            finally:
                # Always unmount if the mount succeeded, even on error.
                if mounted:
                    utils.execute('umount', mountdir, run_as_root=True)

    def make_drive(self, path):
        """Make the config drive.

        :param path: the path to place the config drive image at

        :raises ProcessExecuteError if a helper process has failed.
        """
        with utils.tempdir() as tmpdir:
            self._write_md_files(tmpdir)
            if CONF.config_drive_format == 'iso9660':
                self._make_iso9660(path, tmpdir)
            elif CONF.config_drive_format == 'vfat':
                self._make_vfat(path, tmpdir)
            else:
                raise exception.ConfigDriveUnknownFormat(
                    format=CONF.config_drive_format)

    def cleanup(self):
        # Remove the recorded image file, if any.
        if self.imagefile:
            fileutils.delete_if_exists(self.imagefile)

    def __repr__(self):
        return "<ConfigDriveBuilder: " + str(self.mdfiles) + ">"
def required_by(instance):
    """Return True if *instance* must be given a config drive.

    True when the instance requested one, the deployment forces it via
    ``force_config_drive``, or the image declares
    ``img_config_drive=mandatory``.  An unrecognised image property value
    is logged and ignored.
    """
    image_prop = utils.instance_sys_meta(instance).get(
        utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive', 'optional')
    if image_prop not in ['optional', 'mandatory']:
        LOG.warning(_LW('Image config drive option %(image_prop)s is invalid '
                        'and will be ignored'),
                    {'image_prop': image_prop},
                    instance=instance)

    return (instance.get('config_drive') or
            'always' == CONF.force_config_drive or
            strutils.bool_from_string(CONF.force_config_drive) or
            image_prop == 'mandatory'
            )
| apache-2.0 |
YarnSeemannsgarn/pyshorteners | pyshorteners/shorteners/readability.py | 1 | 1691 | # encoding: utf-8
"""
Readbility url shortner api implementation
Located at: https://readability.com/developers/api/shortener
Doesnt' need anything from the app
"""
from .base import BaseShortener
from ..exceptions import ShorteningErrorException, ExpandingErrorException
class ReadabilityShortener(BaseShortener):
    """Shortener backed by the Readability URL-shortener API.

    No application credentials are required.
    """
    api_url = 'http://www.readability.com/api/shortener/v1/urls/'

    def short(self, url):
        """Shorten *url*, returning the rdd.me short link."""
        response = self._post(self.api_url, data={'url': url})
        if not response.ok:
            raise ShorteningErrorException('There was an error shortening this '
                                           'url - {0}'.format(response.content))
        try:
            payload = response.json()
        except ValueError:
            raise ShorteningErrorException('There was an error shortening'
                                           ' this url - {0}'.format(
                                               response.content))
        return payload['meta']['rdd_url']

    def expand(self, url):
        """Resolve a short rdd.me *url* back to its full form."""
        url_id = url.split('/')[-1]
        response = self._get('{0}{1}'.format(self.api_url, url_id))
        if not response.ok:
            raise ExpandingErrorException('There was an error expanding'
                                          ' this url - {0}'.format(
                                              response.content))
        try:
            payload = response.json()
        except ValueError as err:
            raise ExpandingErrorException('There was an error expanding'
                                          ' this url - {0}'.format(err))
        return payload['meta']['full_url']
| mit |
jumpojoy/neutron | neutron/tests/functional/cmd/test_netns_cleanup.py | 52 | 2634 | # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.linux import dhcp
from neutron.agent.linux import ip_lib
from neutron.cmd import netns_cleanup
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
GET_NAMESPACES = 'neutron.agent.linux.ip_lib.IPWrapper.get_namespaces'
TEST_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver'
class NetnsCleanupTest(base.BaseSudoTestCase):
    """Functional test for the netns-cleanup command."""

    def setUp(self):
        super(NetnsCleanupTest, self).setUp()
        # Patch namespace discovery so cleanup only ever sees the
        # namespaces created by this test (scoped in the test body).
        self.get_namespaces_p = mock.patch(GET_NAMESPACES)
        self.get_namespaces = self.get_namespaces_p.start()

    def setup_config(self, args=None):
        """Build the netns-cleanup config; always runs with --force.

        NOTE(review): this appears to be a hook invoked by the base test
        class before ``self.conf`` is used -- confirm the framework calls
        setup_config() during setUp.
        """
        if args is None:
            args = []
        # force option enabled to make sure non-empty namespaces are
        # cleaned up and deleted
        args.append('--force')
        self.conf = netns_cleanup.setup_conf()
        self.conf.set_override('interface_driver', TEST_INTERFACE_DRIVER)
        self.config_parse(conf=self.conf, args=args)

    def test_cleanup_network_namespaces_cleans_dhcp_and_l3_namespaces(self):
        # Create one DHCP-prefixed and one L3-prefixed namespace, each
        # holding a veth port so they are non-empty.
        dhcp_namespace = self.useFixture(
            net_helpers.NamespaceFixture(dhcp.NS_PREFIX)).name
        l3_namespace = self.useFixture(
            net_helpers.NamespaceFixture(l3_agent.NS_PREFIX)).name
        bridge = self.useFixture(
            net_helpers.VethPortFixture(namespace=dhcp_namespace)).bridge
        self.useFixture(
            net_helpers.VethPortFixture(bridge, l3_namespace))
        # we scope the get_namespaces to our own ones not to affect other
        # tests, as otherwise cleanup will kill them all
        self.get_namespaces.return_value = [l3_namespace, dhcp_namespace]
        netns_cleanup.cleanup_network_namespaces(self.conf)
        # Stop the patch so the real namespace listing is consulted below.
        self.get_namespaces_p.stop()
        namespaces_now = ip_lib.IPWrapper.get_namespaces()
        self.assertNotIn(l3_namespace, namespaces_now)
        self.assertNotIn(dhcp_namespace, namespaces_now)
| apache-2.0 |
prometheanfire/portage | pym/portage/util/_eventloop/PollSelectAdapter.py | 4 | 1908 | # Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from .PollConstants import PollConstants
import select
class PollSelectAdapter(object):
    """Emulate the ``poll()`` interface on top of ``select.select()``,
    for systems that don't support poll().

    Only POLLIN events are ever reported.
    """

    def __init__(self):
        self._registered = {}
        self._select_args = [[], [], []]

    def register(self, fd, *args):
        """Register *fd* with an optional event mask.

        Only POLLIN is currently supported!
        """
        if len(args) > 1:
            raise TypeError(
                "register expected at most 2 arguments, got " + \
                repr(1 + len(args)))
        if args:
            eventmask = args[0]
        else:
            eventmask = PollConstants.POLLIN | \
                PollConstants.POLLPRI | PollConstants.POLLOUT
        self._registered[fd] = eventmask
        # Invalidate the cached select() argument list.
        self._select_args = None

    def unregister(self, fd):
        """Forget *fd*; raises KeyError if it was never registered."""
        self._select_args = None
        del self._registered[fd]

    def poll(self, *args):
        """Wait for events; optional timeout is in milliseconds."""
        if len(args) > 1:
            raise TypeError(
                "poll expected at most 2 arguments, got " + \
                repr(1 + len(args)))
        timeout = args[0] if args else None

        select_args = self._select_args
        if select_args is None:
            select_args = [list(self._registered), [], []]

        if timeout is not None:
            # Copy before appending so the cached list is not mutated.
            select_args = select_args[:]
            # Translate poll() timeout args to select() timeout args:
            #
            #          | units        | value(s) for indefinite block
            # ---------|--------------|------------------------------
            #  poll    | milliseconds | omitted, negative, or None
            # ---------|--------------|------------------------------
            #  select  | seconds      | omitted
            # ---------|--------------|------------------------------
            if timeout < 0:
                timeout = None
            if timeout is not None:
                select_args.append(float(timeout) / 1000)

        readable, _writable, _errored = select.select(*select_args)
        return [(fd, PollConstants.POLLIN) for fd in readable]
| gpl-2.0 |
sdecoder/CMDS-HDFS | common/contrib/hod/hodlib/GridServices/mapred.py | 182 | 8167 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""define MapReduce as subclass of Service"""
# -*- python -*-
import os, copy, time
from service import *
from hodlib.Hod.nodePool import *
from hodlib.Common.desc import CommandDesc
from hodlib.Common.util import get_exception_string, parseEquals
class MapReduceExternal(MasterSlave):
    """dummy proxy to external MapReduce instance"""

    def __init__(self, serviceDesc, workDirs, version):
        MasterSlave.__init__(self, serviceDesc, workDirs,None)
        # The external cluster is already up; nothing for HOD to launch.
        self.launchedMaster = True
        self.masterInitialized = True
        self.version = version

    def getMasterRequest(self):
        # External cluster: no master node needs to be requested.
        return None

    def getMasterCommands(self, serviceDict):
        return []

    def getAdminCommands(self, serviceDict):
        return []

    def getWorkerCommands(self, serviceDict):
        return []

    def getMasterAddrs(self):
        # Job tracker address taken verbatim from the service description.
        attrs = self.serviceDesc.getfinalAttrs()
        addr = attrs['mapred.job.tracker']
        return [addr]

    def needsMore(self):
        return 0

    def needsLess(self):
        return 0

    def setMasterParams(self, dict):
        # NOTE(review): the pre-16 branch goes through serviceDesc.dict
        # while the other assignments index serviceDesc directly --
        # confirm both paths reach the same underlying mapping.
        self.serviceDesc['final-attrs']['mapred.job.tracker'] = "%s:%s" % (dict['host'],
            dict['tracker_port'])
        if self.version < 16:
            self.serviceDesc.dict['final-attrs']['mapred.job.tracker.info.port'] = \
                str(self.serviceDesc.dict['info_port'])
        else:
            # After Hadoop-2185
            self.serviceDesc['final-attrs']['mapred.job.tracker.http.address'] = \
                "%s:%s" %(dict['host'], dict['info_port'])

    def getInfoAddrs(self):
        """Return the job tracker's web UI address."""
        attrs = self.serviceDesc.getfinalAttrs()
        if self.version < 16:
            # Info UI lives on the tracker host at a separate port.
            addr = attrs['mapred.job.tracker']
            k,v = addr.split( ":")
            infoaddr = k + ':' + attrs['mapred.job.tracker.info.port']
        else:
            # After Hadoop-2185
            # Note: earlier,we never respected mapred.job.tracker.http.address
            infoaddr = attrs['mapred.job.tracker.http.address']
        return [infoaddr]
class MapReduce(MasterSlave):
    """HOD-managed MapReduce service: builds jobtracker/tasktracker
    command descriptions and tracks master/worker addresses (Python 2)."""

    def __init__(self, serviceDesc, workDirs,required_node, version,
                 workers_per_ring = 1):
        MasterSlave.__init__(self, serviceDesc, workDirs,required_node)
        self.masterNode = None
        self.masterAddr = None
        self.infoAddr = None
        self.workers = []
        self.required_node = required_node
        self.version = version
        # Number of tasktrackers started per hodring.
        self.workers_per_ring = workers_per_ring

    def isLaunchable(self, serviceDict):
        # MapReduce can only start once HDFS has an initialized master.
        hdfs = serviceDict['hdfs']
        if (hdfs.isMasterInitialized()):
            return True
        return False

    def getMasterRequest(self):
        # One node, no attribute constraints, not preemptable.
        req = NodeRequest(1, [], False)
        return req

    def getMasterCommands(self, serviceDict):
        hdfs = serviceDict['hdfs']
        cmdDesc = self._getJobTrackerCommand(hdfs)
        return [cmdDesc]

    def getAdminCommands(self, serviceDict):
        return []

    def getWorkerCommands(self, serviceDict):
        # One tasktracker command per worker slot on this ring.
        hdfs = serviceDict['hdfs']
        workerCmds = []
        for id in range(1, self.workers_per_ring + 1):
            workerCmds.append(self._getTaskTrackerCommand(str(id), hdfs))
        return workerCmds

    def setMasterNodes(self, list):
        node = list[0]
        self.masterNode = node

    def getMasterAddrs(self):
        return [self.masterAddr]

    def getInfoAddrs(self):
        return [self.infoAddr]

    def getWorkers(self):
        return self.workers

    def requiredNode(self):
        # NOTE(review): __init__ stores the value as self.required_node,
        # but this reads self.required_host -- unless the base class sets
        # required_host, this raises AttributeError. Confirm against
        # MasterSlave before fixing.
        return self.required_host

    def setMasterParams(self, list):
        """Record the jobtracker address (and web UI address) reported
        back by the launched master, from key=value strings."""
        dict = self._parseEquals(list)
        self.masterAddr = dict['mapred.job.tracker']
        k,v = self.masterAddr.split(":")
        self.masterNode = k
        if self.version < 16:
            self.infoAddr = self.masterNode + ':' + dict['mapred.job.tracker.info.port']
        else:
            # After Hadoop-2185
            self.infoAddr = dict['mapred.job.tracker.http.address']

    def _parseEquals(self, list):
        return parseEquals(list)

    def _setWorkDirs(self, workDirs, envs, attrs, parentDirs, subDir):
        """Populate per-node work directories and the matching Hadoop
        attrs/envs for a jobtracker or tasktracker instance."""
        local = []
        system = None
        temp = None
        hadooptmpdir = None
        dfsclient = []
        for p in parentDirs:
            workDirs.append(p)
            workDirs.append(os.path.join(p, subDir))
            dir = os.path.join(p, subDir, 'mapred-local')
            local.append(dir)
            if not system:
                system = os.path.join(p, subDir, 'mapred-system')
            if not temp:
                temp = os.path.join(p, subDir, 'mapred-temp')
            if not hadooptmpdir:
                # Not used currently, generating hadooptmpdir just in case
                hadooptmpdir = os.path.join(p, subDir, 'hadoop-tmp')
            dfsclientdir = os.path.join(p, subDir, 'dfs-client')
            dfsclient.append(dfsclientdir)
            workDirs.append(dfsclientdir)
        # FIXME!! use csv
        attrs['mapred.local.dir'] = ','.join(local)
        attrs['mapred.system.dir'] = 'fillindir'
        attrs['mapred.temp.dir'] = temp
        attrs['hadoop.tmp.dir'] = hadooptmpdir
        envs['HADOOP_ROOT_LOGGER'] = "INFO,DRFA"

    def _getJobTrackerCommand(self, hdfs):
        """Build the CommandDesc used to launch the jobtracker.

        'fillinhostport'/'fillinport' placeholders are substituted by the
        launcher once actual host/ports are known.
        """
        sd = self.serviceDesc
        parentDirs = self.workDirs
        workDirs = []
        attrs = sd.getfinalAttrs().copy()
        envs = sd.getEnvs().copy()
        if 'mapred.job.tracker' not in attrs:
            attrs['mapred.job.tracker'] = 'fillinhostport'
        if self.version < 16:
            if 'mapred.job.tracker.info.port' not in attrs:
                attrs['mapred.job.tracker.info.port'] = 'fillinport'
        else:
            # Addressing Hadoop-2185,
            if 'mapred.job.tracker.http.address' not in attrs:
                attrs['mapred.job.tracker.http.address'] = 'fillinhostport'
        attrs['fs.default.name'] = hdfs.getMasterAddrs()[0]
        self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'mapred-jt')
        dict = { 'name' : 'jobtracker' }
        dict['version'] = self.version
        dict['program'] = os.path.join('bin', 'hadoop')
        dict['argv'] = ['jobtracker']
        dict['envs'] = envs
        dict['pkgdirs'] = sd.getPkgDirs()
        dict['workdirs'] = workDirs
        dict['final-attrs'] = attrs
        dict['attrs'] = sd.getAttrs()
        cmd = CommandDesc(dict)
        return cmd

    def _getTaskTrackerCommand(self, id, hdfs):
        """Build the CommandDesc for tasktracker number *id* on a ring."""
        sd = self.serviceDesc
        parentDirs = self.workDirs
        workDirs = []
        attrs = sd.getfinalAttrs().copy()
        envs = sd.getEnvs().copy()
        jt = self.masterAddr
        if jt == None:
            raise ValueError, "Can't get job tracker address"
        attrs['mapred.job.tracker'] = jt
        attrs['fs.default.name'] = hdfs.getMasterAddrs()[0]
        if self.version < 16:
            if 'tasktracker.http.port' not in attrs:
                attrs['tasktracker.http.port'] = 'fillinport'
            # earlier to 16, tasktrackers always took ephemeral port 0 for
            # tasktracker.report.bindAddress
        else:
            # Adding the following. Hadoop-2185
            if 'mapred.task.tracker.report.address' not in attrs:
                attrs['mapred.task.tracker.report.address'] = 'fillinhostport'
            if 'mapred.task.tracker.http.address' not in attrs:
                attrs['mapred.task.tracker.http.address'] = 'fillinhostport'
        # unique parentDirs in case of multiple tasktrackers per hodring
        pd = []
        for dir in parentDirs:
            dir = dir + "-" + id
            pd.append(dir)
        parentDirs = pd
        # end of unique workdirs
        self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'mapred-tt')
        dict = { 'name' : 'tasktracker' }
        dict['program'] = os.path.join('bin', 'hadoop')
        dict['argv'] = ['tasktracker']
        dict['envs'] = envs
        dict['pkgdirs'] = sd.getPkgDirs()
        dict['workdirs'] = workDirs
        dict['final-attrs'] = attrs
        dict['attrs'] = sd.getAttrs()
        cmd = CommandDesc(dict)
        return cmd
| apache-2.0 |
pedropva/AIprojects | sources/Main.py | 1 | 6236 | import os
from Node import Node
from Search import Search
from Data import Data
from Utils import Utils
cities = Data.cities()
situations = Data.cfg()
def _read_menu_choice(prompt):
    """Read an integer menu choice from stdin.

    Returns -1 on non-numeric input so callers fall through to their
    'Invalid input!' branch instead of crashing with ValueError.
    """
    try:
        return int(input(prompt))
    except ValueError:
        return -1


def Main():
    """Interactive console menu for the search demos (city path finding,
    the chicken/fox/grains river crossing, and the 8-puzzle).

    Loops until the user picks option 5 (Exit); each submenu resets
    ``option`` to 0 so the main menu is redrawn afterwards.

    Bug fixes vs. the previous version: an out-of-range main-menu choice
    now resets ``option`` (previously it looped on 'Invalid input!'
    forever), and non-numeric menu input no longer raises ValueError.
    """
    option = 0
    while True:
        os.system('cls')
        if option == 0:
            print('{:=^15}'.format(' Menu '))
            print('What do you want to do?')
            print('1-List Data from nodes')
            print('2-Path to City')
            print('3-Chicken, Fox and Grains')
            print('4-Puzzle with numbers')
            print("5-Exit")
            option = _read_menu_choice("Option:")
        elif option == 1:
            option = 0
            while True:
                os.system('cls')
                print('Choose the data to be shown:')
                print('0-return to menu')
                print('1-Cities')
                print('2-Chicken, Fox and Wheat')
                option1 = _read_menu_choice("Option: ")
                if option1 == 0:
                    break
                elif option1 == 1:
                    Node.printNodes(cities)
                    input()
                elif option1 == 2:
                    Node.printNodes(situations)
                    input()
                else:
                    print('Invalid input!')
        elif option == 2:
            option = 0
            while True:
                os.system('cls')
                print('Choose algorithm:')
                print('0-return to menu')
                print('1-BFS')
                print('2-Djikstra')
                print('3-A*')
                option1 = _read_menu_choice("Option:")
                if option1 == 0:
                    break
                elif option1 == 1:
                    # NOTE(review): an unknown city name raises KeyError on
                    # cities[start] -- consider validating the names.
                    start = input("Departure:").title()
                    finish = input("Destination:").title()
                    result = Search.bfs(cities[start], cities[finish])
                    if result != False:
                        print()
                        print("Result:", result[1])
                    else:
                        print('Found no path')
                    input()
                elif option1 == 2:
                    start = input("Departure:").title()
                    finish = input("Destination:").title()
                    result = Search.djikstra(cities[start], cities[finish])
                    if result != False:
                        print()
                        print("Result:", result[1], ' with cost: ', result[2])
                    else:
                        print('Found no path')
                    input()
                elif option1 == 3:
                    start = input("Departure:").title()
                    print("Only destination is avaliable bucharest, no heuristics data for the other ones")
                    result = Search.aStar(cities[start], cities['Bucareste'])
                    if result != False:
                        print()
                        print("Result:", result[1], ' with cost: ', result[2])
                    else:
                        print('Found no path')
                    input()
                else:
                    print('Invalid input!')
        elif option == 3:
            option = 0
            os.system('cls')
            print('Insert the desired starting and final states,use the first caracther of each word, and put them separated by a comma and separating left and right side of the river by a |')
            print("boatman = 'b', chicken = 'c', fox = 'f' and grains = 'g'")
            print('Example of state: b,c,f,g|')
            print()
            start = input("Starting state:").lower()
            finish = input("Final state:").lower()
            start = Data.cfgCode(start)
            finish = Data.cfgCode(finish)
            # Both states must encode successfully and be known situations
            # before searching for a path between them.
            if (start != False and finish != False
                    and Utils.isInList(situations.keys(), start)
                    and Utils.isInList(situations.keys(), finish)):
                result = Search.bfs(situations[start], situations[finish])
                if result != False:
                    print()
                    print("Result:")
                    Data.cfgDecode(result[1])
                    input()
                else:
                    print('Found no path')
            else:
                print('Invalid State!')
                input()
        elif option == 4:
            option = 0
            os.system('cls')
            print('Fill the intial matrix:')
            # NOTE(review): non-numeric input still raises ValueError in
            # these nine reads; kept as-is to preserve the prompts.
            start = [[int(input("[Line 1 column 1]: ")),
                      int(input("[Line 1 column 2]: ")),
                      int(input("[Line 1 column 3]: "))],
                     [int(input("[Line 2 column 1]: ")),
                      int(input("[Line 2 column 2]: ")),
                      int(input("[Line 2 column 3]: "))],
                     [int(input("[Line 3 column 1]: ")),
                      int(input("[Line 3 column 2]: ")),
                      int(input("[Line 3 column 3]: "))]]
            print('Initial matrix:')
            for l in start:
                print(l)
            if not Data.isAValidMAtrix(start):
                print('Invalid input!')
                input()
            elif not Data.isSolvable(start, True):
                # second argument is True if the number of inversions on
                # the final matrix is even, else False
                print('Unsolvable!')
                input()
            else:
                finish = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
                # Wrap both matrices in puzzle nodes (board + goal).
                start = Data.puzzle(start, finish)
                finish = Data.puzzle(finish, finish)
                if start and finish:
                    result = Search.aStar(start, finish)
                    if result != False:
                        print()
                        print("Result:")
                        Data.puzzleDecode(result[1], result[2])
                    else:
                        print('Found no path')
                    input()
        elif option == 5:
            break
        else:
            # Reset so the main menu is redrawn; previously `option` kept
            # its invalid value and this branch looped forever.
            print('Invalid input!')
            input()
            option = 0


Main()
joomel1/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/examples/add_person.py | 432 | 1656 | #! /usr/bin/python
# See README.txt for information and build instructions.
import addressbook_pb2
import sys
# This function fills in a Person message based on user input.
def PromptForAddress(person):
person.id = int(raw_input("Enter person ID number: "))
person.name = raw_input("Enter name: ")
email = raw_input("Enter email address (blank for none): ")
if email != "":
person.email = email
while True:
number = raw_input("Enter a phone number (or leave blank to finish): ")
if number == "":
break
phone_number = person.phone.add()
phone_number.number = number
type = raw_input("Is this a mobile, home, or work phone? ")
if type == "mobile":
phone_number.type = addressbook_pb2.Person.MOBILE
elif type == "home":
phone_number.type = addressbook_pb2.Person.HOME
elif type == "work":
phone_number.type = addressbook_pb2.Person.WORK
else:
print "Unknown phone type; leaving as default value."
# Main procedure: Reads the entire address book from a file,
# adds one person based on user input, then writes it back out to the same
# file.
if len(sys.argv) != 2:
  print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
  sys.exit(-1)

address_book = addressbook_pb2.AddressBook()

# Read the existing address book.
# NOTE(review): IOError also covers permission errors etc., not just a
# missing file -- the message below may mislead in those cases.
try:
  f = open(sys.argv[1], "rb")
  address_book.ParseFromString(f.read())
  f.close()
except IOError:
  print sys.argv[1] + ": File not found.  Creating a new file."

# Add an address.
PromptForAddress(address_book.person.add())

# Write the new address book back to disk.
f = open(sys.argv[1], "wb")
f.write(address_book.SerializeToString())
f.close()
| bsd-3-clause |
mscelnik/exercises | exercises/random_list.py | 1 | 2865 | """ Python training exercise
Random list
Below is a list of random numbers between 0 and 100. Using built-in functions
and the list sub-functions, can you?:
(a) Sort the list?
HINT: Use the sorted() built-in function.
(b) What are the smallest and largest numbers?
HINT: There are two ways to do this; get directly from the ordered list, or
use the built-in min/max functions. Use a negative index to get the
largest number.
(c) How many numbers are greater than or equal to 50?
HINT: Use the index sub-function to locate 50 in the ordered data.
(d) What is the sum of the last 4 numbers?
HINT: Use negative indices, and a colon (:) to take a list slice.
"""
data = [
    41, 26, 45, 8, 67, 98, 1, 43, 19, 62, 11, 47, 46, 1, 29, 36, 69, 59, 65, 96,
    91, 76, 53, 100, 17, 89, 38, 50, 82, 99, 63, 16, 64, 65, 75, 68, 99, 27, 8,
    92, 17, 91, 3, 41, 40, 32, 29, 49, 54, 90, 55, 7, 38, 11, 33, 56, 87, 27,
    31, 82, 96, 26, 66, 61, 65, 66, 4, 43, 10, 80, 13, 22, 12, 62, 24, 61, 67,
    88, 73, 24, 14, 35, 38, 71, 87, 96, 42, 95, 91, 83, 46, 38, 26, 22, 86, 79,
    86, 98, 58, 28, 44, 30, 60, 17, 48, 56, 77, 32, 44, 26, 50, 62, 49, 55, 6,
    18, 60, 22, 45, 97, 51, 53, 59, 93, 35, 29, 52, 91, 33, 56, 95, 23, 69, 54,
    4, 83, 17, 73, 46, 79, 99, 75, 53, 20, 96, 91, 72, 93, 32, 9, 54, 45, 80,
    88, 10, 93, 24, 15, 84, 67, 68, 13, 60, 9, 31, 87, 80, 87, 29, 24, 27, 81,
    28, 22, 11, 0, 79, 47, 60, 17, 98, 84, 43, 62, 46, 64, 41, 27, 41, 0, 73,
    89, 1, 92, 12, 1, 49, 46, 54, 99
]

# (a) Sort the list with the sorted() built-in.
# <<<<<<< SOLUTION
ordered_data = sorted(data)
print(ordered_data)
# =======

# (b) Smallest and largest numbers: the min/max built-ins give these
# directly (equivalently, the first and last entries of the sorted list).
# <<<<<<< SOLUTION
smallest = min(data)
print('Smallest number is {}.'.format(smallest))
largest = max(data)
print('Largest number is {}.'.format(largest))
# =======

# (c) How many numbers are >= 50?  Everything from the first 50 in the
# sorted list through to the end.
# <<<<<<< SOLUTION
i = ordered_data.index(50)
n = len(ordered_data) - i
print('There are {} numbers >= 50.'.format(n))
# =======

# (d) Sum of the last 4 numbers, using a negative slice.
# Hint: The sum of the first 4 numbers is:
total = sum(data[:4])
print('First 4 numbers total is {}.'.format(total))
# <<<<<<< SOLUTION
total = sum(data[-4:])
print('Last 4 numbers total is {}.'.format(total))
# =======
| gpl-3.0 |
ridfrustum/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/custom_columns_regress/tests.py | 91 | 2919 | from django.test import TestCase
from django.core.exceptions import FieldError
from models import Author, Article
def pks(objects):
    """Return the primary keys of *objects*, so lists can be compared."""
    return [obj.pk for obj in objects]
class CustomColumnRegression(TestCase):
def assertRaisesMessage(self, exc, msg, func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception, e:
self.assertEqual(msg, str(e))
self.assertTrue(isinstance(e, exc), "Expected %s, got %s" % (exc, type(e)))
def setUp(self):
self.a1 = Author.objects.create(first_name='John', last_name='Smith')
self.a2 = Author.objects.create(first_name='Peter', last_name='Jones')
self.authors = [self.a1, self.a2]
def test_basic_creation(self):
art = Article(headline='Django lets you build Web apps easily', primary_author=self.a1)
art.save()
art.authors = [self.a1, self.a2]
def test_author_querying(self):
self.assertQuerysetEqual(
Author.objects.all().order_by('last_name'),
['<Author: Peter Jones>', '<Author: John Smith>']
)
def test_author_filtering(self):
self.assertQuerysetEqual(
Author.objects.filter(first_name__exact='John'),
['<Author: John Smith>']
)
def test_author_get(self):
self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))
def test_filter_on_nonexistant_field(self):
self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'firstname' into field. Choices are: Author_ID, article, first_name, last_name, primary_set",
Author.objects.filter,
firstname__exact='John'
)
def test_author_get_attributes(self):
a = Author.objects.get(last_name__exact='Smith')
self.assertEqual('John', a.first_name)
self.assertEqual('Smith', a.last_name)
self.assertRaisesMessage(
AttributeError,
"'Author' object has no attribute 'firstname'",
getattr,
a, 'firstname'
)
self.assertRaisesMessage(
AttributeError,
"'Author' object has no attribute 'last'",
getattr,
a, 'last'
)
def test_m2m_table(self):
art = Article.objects.create(headline='Django lets you build Web apps easily', primary_author=self.a1)
art.authors = self.authors
self.assertQuerysetEqual(
art.authors.all().order_by('last_name'),
['<Author: Peter Jones>', '<Author: John Smith>']
)
self.assertQuerysetEqual(
self.a1.article_set.all(),
['<Article: Django lets you build Web apps easily>']
)
self.assertQuerysetEqual(
art.authors.filter(last_name='Jones'),
['<Author: Peter Jones>']
)
| gpl-3.0 |
lowiki-org/localwiki-backend-server | localwiki/pages/fields.py | 3 | 3448 | from django import forms
from django.utils.translation import ugettext_lazy as _
from ckeditor.models import HTML5FragmentField
class WikiHTMLField(HTML5FragmentField):
    """HTML fragment field carrying the wiki's sanitization whitelist:
    which elements, attributes, and inline styles survive cleaning,
    plus legacy-tag renames (b -> strong, i -> em)."""

    # Elements allowed through the sanitizer.
    allowed_elements = [
        'p', 'br', 'a', 'em', 'strong', 'u', 'img', 'h1', 'h2', 'h3',
        'h4', 'h5', 'h6', 'hr', 'ul', 'ol', 'li', 'pre', 'table',
        'thead', 'tbody', 'tr', 'th', 'td', 'span', 'strike', 'sub',
        'sup', 'tt', 'input']

    # Per-element attribute whitelist.
    allowed_attributes_map = {
        'p': ['class', 'style'],
        'h1': ['style'],
        'h2': ['style'],
        'h3': ['style'],
        'h4': ['style'],
        'h5': ['style'],
        'h6': ['style'],
        'ul': ['class'],
        'a': ['class', 'name', 'href', 'style'],
        'img': ['class', 'src', 'alt', 'title', 'style'],
        'span': ['class', 'style'],
        'table': ['class', 'style'],
        'th': ['class', 'colspan', 'rowspan', 'style'],
        'td': ['class', 'colspan', 'rowspan', 'style'],
        'input': ['class', 'type', 'value']
    }

    # Per-element whitelist of inline CSS properties.
    allowed_styles_map = {
        'p': ['text-align'],
        'h1': ['text-align'],
        'h2': ['text-align'],
        'h3': ['text-align'],
        'h4': ['text-align'],
        'h5': ['text-align'],
        'h6': ['text-align'],
        'img': ['width', 'height'],
        'span': ['width', 'height'],
        'table': ['width', 'height'],
        'th': ['text-align', 'background-color'],
        'td': ['text-align', 'background-color', 'width',
               'height', 'vertical-align'],
        'a': ['width']
    }

    rename_elements = {'b': 'strong', 'i': 'em'}

    def __init__(self, *args, **kwargs):
        super(WikiHTMLField, self).__init__(*args, **kwargs)
        # Copy the class-level whitelists onto the instance.
        # NOTE(review): presumably HTML5FragmentField reads these as
        # instance attributes (e.g. during deconstruction/cleaning) --
        # confirm whether these assignments are actually required.
        self.allowed_elements = self.__class__.allowed_elements
        self.allowed_attributes_map = self.__class__.allowed_attributes_map
        self.allowed_styles_map = self.__class__.allowed_styles_map
        self.rename_elements = self.__class__.rename_elements
class PageChoiceField(forms.ModelChoiceField):
    """
    Use this in ModelForms when you've got a ForeignKey to a Page.

    Values are rendered and entered as page *names* (through a plain
    TextInput) rather than primary keys.
    """
    def __init__(self, *args, **kwargs):
        from .models import Page

        kwargs['widget'] = kwargs.pop('widget', forms.widgets.TextInput)
        # Limit to the specified region
        region = kwargs.pop('region', None)
        kwargs['queryset'] = Page.objects.filter(region=region)
        super(PageChoiceField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Turn an entered page name into a Page instance (or None)."""
        from .models import slugify

        if not value and not self.required:
            return None
        try:
            # Look up by slug so casing/whitespace variants still match.
            return self.queryset.filter(slug=slugify(value)).get()
        except self.queryset.model.DoesNotExist:
            # NOTE(review): the %s placeholder is filled with the model's
            # verbose_name, not the entered value -- confirm the intended
            # message.
            raise forms.ValidationError(
                _("Page %s does not exist! Please enter a valid page name.") % (
                    self.queryset.model._meta.verbose_name,))

    def prepare_value(self, value):
        """Convert a pk (or model instance) into a page name for display."""
        from .models import Page

        if isinstance(value, basestring):
            # Already a page name.  Bug fix: return it unchanged --
            # previously this fell through with a bare `return`, yielding
            # None and blanking the field whenever a name was supplied.
            return value
        value = super(PageChoiceField, self).prepare_value(value)
        # Turn it into a page name rather than a pk integer.
        if value:
            return Page.objects.get(pk=value).name
        return ''
# Register this module's custom fields with South's migration
# introspection, when South is installed; otherwise do nothing.
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ["^pages\.fields"])
except ImportError:
    pass
| gpl-2.0 |
Jusedawg/SickRage | lib/tmdb_api/test_tmdb_api.py | 52 | 5468 | """
test.py contains unit tests for tmdbsimple.py
Fill in Global Variables below before running tests.
Created by Celia Oakley on 2013-11-05
"""
import unittest
import sys
from tmdb_api import TMDB
#
# Global Variables (fill in or put in keys.py)
#
TMDB_API_KEY = 'edc5f123313769de83a71e157758030b'
try:
from keys import *
except ImportError:
pass
class TVCheck(unittest.TestCase):
    """Live-API tests for the TMDB TV endpoints (network access needed)."""

    def testTVInfo(self):
        name = u'Game of Thrones'
        tmdb = TMDB(TMDB_API_KEY)
        # Look up by external TVDB id 121361.
        find = tmdb.Find(121361)
        response = find.info({'external_source': 'tvdb_id'})
        self.assertEqual(response['tv_results'][0]['name'], name)

    def testTVSearch(self):
        id = 1396
        name = 'UFC'
        tmdb = TMDB(TMDB_API_KEY)
        # get TMDB configuration info
        config = tmdb.Configuration()
        response = config.info()
        base_url = response['images']['base_url']
        sizes = response['images']['poster_sizes']
        # Sizes look like 'w500'; 'original' sorts as largest.
        def size_str_to_int(x):
            return float("inf") if x == 'original' else int(x[1:])
        max_size = max(sizes, key=size_str_to_int)
        # get show ID on TMDB
        search = tmdb.Search()
        response = search.collection({'query': name})
        for result in response['results']:
            id = result['id']
        # get show images
        collection = tmdb.Collections(id)
        response = collection.images()
        rel_path = response['posters'][0]['file_path']
        url = "{0}{1}{2}".format(base_url, max_size, rel_path)
        # NOTE(review): this asserts that the images dict has an
        # attribute named 'UFC', which can never hold, and `url` is
        # built but unused -- the intended assertion is unclear; confirm
        # and fix (perhaps hasattr(collection, 'posters')).
        self.assertTrue(hasattr(response, name))

    def testTVCredits(self):
        id = 1396
        tmdb = TMDB(TMDB_API_KEY)
        tv = tmdb.TV(id)
        response = tv.credits()
        # The wrapper stores response keys as attributes on the TV object.
        self.assertTrue(hasattr(tv, 'cast'))

    def testTVExternalIds(self):
        id = 1396
        imdb_id = 'tt0903747'
        tmdb = TMDB(TMDB_API_KEY)
        tv = tmdb.TV(id)
        response = tv.external_ids()
        self.assertEqual(tv.imdb_id, imdb_id)

    def testTVImages(self):
        id = 1396
        tmdb = TMDB(TMDB_API_KEY)
        tv = tmdb.TV(id)
        response = tv.images()
        self.assertTrue(hasattr(tv, 'backdrops'))

    def testTVTranslations(self):
        id = 1396
        tmdb = TMDB(TMDB_API_KEY)
        tv = tmdb.TV(id)
        response = tv.translations()
        self.assertTrue(hasattr(tv, 'translations'))

    def testTVTopRated(self):
        tmdb = TMDB(TMDB_API_KEY)
        tv = tmdb.TV()
        response = tv.top_rated()
        self.assertTrue(hasattr(tv, 'results'))

    def testTVPopular(self):
        tmdb = TMDB(TMDB_API_KEY)
        tv = tmdb.TV()
        response = tv.popular()
        self.assertTrue(hasattr(tv, 'results'))
class TVSeasonsCheck(unittest.TestCase):
    """Live-API tests for the TMDB TV_Seasons endpoints."""

    def testTVSeasonsInfo(self):
        id = 3572
        season_number = 1
        name = 'Season 1'
        tmdb = TMDB(TMDB_API_KEY)
        tv_seasons = tmdb.TV_Seasons(id, season_number)
        response = tv_seasons.info()
        self.assertEqual(tv_seasons.name, name)

    def testTVSeasonsCredits(self):
        id = 3572
        season_number = 1
        tmdb = TMDB(TMDB_API_KEY)
        tv_seasons = tmdb.TV_Seasons(id, season_number)
        response = tv_seasons.credits()
        self.assertTrue(hasattr(tv_seasons, 'crew'))

    def testTVSeasonsExternalIds(self):
        id = 3572
        season_number = 1
        tvdb_id = 2547
        tmdb = TMDB(TMDB_API_KEY)
        tv_seasons = tmdb.TV_Seasons(id, season_number)
        response = tv_seasons.external_ids()
        self.assertEqual(tv_seasons.tvdb_id, tvdb_id)

    def testTVSeasonsImages(self):
        id = 3572
        season_number = 1
        tmdb = TMDB(TMDB_API_KEY)
        tv_seasons = tmdb.TV_Seasons(id, season_number)
        response = tv_seasons.images()
        self.assertTrue(hasattr(tv_seasons, 'posters'))
class TVEpisodesCheck(unittest.TestCase):
    """Live-API tests for the TMDB TV_Episodes endpoints."""

    def testTVEpisodesInfo(self):
        id = 1396
        season_number = 1
        episode_number = 1
        name = 'Pilot'
        tmdb = TMDB(TMDB_API_KEY)
        tv_episodes = tmdb.TV_Episodes(id, season_number, episode_number)
        response = tv_episodes.info()
        self.assertEqual(tv_episodes.name, name)

    def testTVEpisodesCredits(self):
        id = 1396
        season_number = 1
        episode_number = 1
        tmdb = TMDB(TMDB_API_KEY)
        tv_episodes = tmdb.TV_Episodes(id, season_number, episode_number)
        response = tv_episodes.credits()
        self.assertTrue(hasattr(tv_episodes, 'guest_stars'))

    def testTVEpisodesExternalIds(self):
        id = 1396
        season_number = 1
        episode_number = 1
        imdb_id = 'tt0959621'
        tmdb = TMDB(TMDB_API_KEY)
        tv_episodes = tmdb.TV_Episodes(id, season_number, episode_number)
        response = tv_episodes.external_ids()
        self.assertEqual(tv_episodes.imdb_id, imdb_id)

    def testTVEpisodesImages(self):
        id = 1396
        season_number = 1
        episode_number = 1
        tmdb = TMDB(TMDB_API_KEY)
        tv_episodes = tmdb.TV_Episodes(id, season_number, episode_number)
        response = tv_episodes.images()
        self.assertTrue(hasattr(tv_episodes, 'stills'))
# Entry point: run the whole suite, or a single Check class (see below).
if __name__ == "__main__":
    unittest.main()

# Run with:
#  python3 test_tmdbsimple.py ConfigurationCheck -v
#  python3 test_tmdbsimple.py ConfigurationCheck
#  ... or other Check classes
#  python3 test_tmdbsimple.py -v
#  python3 test_tmdbsimple.py
| gpl-3.0 |
angelman/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/filters/webkit_extras.py | 121 | 2830 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from django.template.defaultfilters import stringfilter
from google.appengine.ext import webapp
# Template-filter registry for App Engine's bundled Django templating engine;
# filters registered on it become available inside status-page templates.
register = webapp.template.create_template_register()
# Patterns matching free-text references like "bug 12345" / "patch 67890";
# the named groups capture the numeric id for link substitution below.
bug_regexp = re.compile(r"bug (?P<bug_id>\d+)")
patch_regexp = re.compile(r"patch (?P<patch_id>\d+)")
@register.filter
@stringfilter
def webkit_linkify(value):
    """Turn textual "bug N" / "patch N" references into HTML anchors."""
    linked = bug_regexp.sub(
        r'<a href="http://webkit.org/b/\g<bug_id>">bug \g<bug_id></a>',
        value)
    linked = patch_regexp.sub(
        r'<a href="https://bugs.webkit.org/attachment.cgi?id=\g<patch_id>&action=prettypatch">patch \g<patch_id></a>',
        linked)
    return linked
@register.filter
@stringfilter
def webkit_bug_id(value):
    """Render a bug id as a link to its webkit.org bug page."""
    return '<a href="http://webkit.org/b/{0}">{0}</a>'.format(value)
@register.filter
@stringfilter
def webkit_attachment_id(value):
    """Render an attachment id as a pretty-patch link on bugs.webkit.org."""
    return '<a href="https://bugs.webkit.org/attachment.cgi?id={0}&action=prettypatch">{0}</a>'.format(value)
@register.filter
@stringfilter
def results_link(status_id):
    """Render a "results" link pointing at /results/<status_id>."""
    return '<a href="/results/{0}">results</a>'.format(status_id)
@register.filter
@stringfilter
def queue_status_link(queue_name, text):
    """Render *text* as a link to the named queue's status page."""
    return '<a href="/queue-status/{0}">{1}</a>'.format(queue_name, text)
@register.filter
@stringfilter
def queue_charts_link(queue_name, text):
    """Render *text* as a link to the named queue's charts page."""
    return '<a href="/queue-charts/{0}">{1}</a>'.format(queue_name, text)
| bsd-3-clause |
UITools/saleor | saleor/order/migrations/0045_auto_20180329_0142.py | 1 | 1425 | # Generated by Django 2.0.3 on 2018-03-29 06:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_prices.models
class Migration(migrations.Migration):
    # Replaces OrderLine's stock/product links with a single, nullable
    # reference to the purchased ProductVariant and pins the per-unit price
    # columns to a fixed 12/2 money precision.
    dependencies = [
        ('product', '0054_merge_20180320_1108'),
        ('order', '0044_auto_20180326_1055'),
    ]
    operations = [
        # Stock is no longer tracked on the order line itself.
        migrations.RemoveField(
            model_name='orderline',
            name='stock',
        ),
        migrations.RemoveField(
            model_name='orderline',
            name='stock_location',
        ),
        # Link the line straight to the variant; SET_NULL keeps historical
        # order lines intact if the variant is later deleted.
        migrations.AddField(
            model_name='orderline',
            name='variant',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='product.ProductVariant'),
        ),
        migrations.RemoveField(
            model_name='orderline',
            name='product',
        ),
        # Money fields priced in the project's default currency.
        migrations.AlterField(
            model_name='orderline',
            name='unit_price_gross',
            field=django_prices.models.MoneyField(currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12),
        ),
        migrations.AlterField(
            model_name='orderline',
            name='unit_price_net',
            field=django_prices.models.MoneyField(currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12),
        ),
    ]
| bsd-3-clause |
maxsocl/django | django/db/backends/mysql/operations.py | 26 | 7908 | from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import six, timezone
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific SQL generation and Python<->database value conversion."""

    compiler_module = "django.db.backends.mysql.compiler"

    # MySQL stores positive fields as UNSIGNED ints.
    # Per the MySQL integer-type docs, SMALLINT UNSIGNED spans 0-65535 and
    # INT UNSIGNED spans 0-4294967295.  The previous bounds (4294967295 and
    # 18446744073709551615) were the INT/BIGINT unsigned maxima and would
    # accept values that overflow the actual column types.
    integer_field_ranges = dict(BaseDatabaseOperations.integer_field_ranges,
        PositiveSmallIntegerField=(0, 65535),
        PositiveIntegerField=(0, 4294967295),
    )

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting the `lookup_type` component of a date column."""
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a date/datetime column down to `lookup_type`."""
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: pass the column through untruncated.
            sql = field_name
        else:
            # Keep the requested leading components; pad the rest with zeros.
            format_str = ''.join(format[:i] + format_def[i:])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Return (sql, params) extracting `lookup_type` from a tz-aware datetime."""
        if settings.USE_TZ:
            # Values are stored in UTC; convert to the active timezone first.
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            sql = "DAYOFWEEK(%s)" % field_name
        else:
            sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
        return sql, params

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Return (sql, params) truncating a tz-aware datetime to `lookup_type`."""
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            sql = field_name
        else:
            format_str = ''.join(format[:i] + format_def[i:])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql, params

    def date_interval_sql(self, timedelta):
        """Return (sql, params) expressing a Python timedelta as a MySQL interval."""
        return "INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND" % (
            timedelta.days, timedelta.seconds, timedelta.microseconds), []

    def format_for_duration_arithmetic(self, sql):
        """Wrap a microsecond count so it can be added to/subtracted from dates."""
        return 'INTERVAL %s MICROSECOND' % sql

    def drop_foreignkey_sql(self):
        return "DROP FOREIGN KEY"

    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return [(None, ("NULL", [], False))]

    def fulltext_search_sql(self, field_name):
        """Return a MATCH ... AGAINST clause for MySQL full-text search."""
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name

    def last_executed_query(self, cursor, sql, params):
        """Return the exact query last sent to the database, if available."""
        # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
        # attribute where the exact query sent to the database is saved.
        # See MySQLdb/cursors.py in the source distribution.
        return force_text(getattr(cursor, '_last_executed', None), errors='replace')

    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615

    def quote_name(self, name):
        """Backtick-quote an identifier, avoiding double quoting."""
        if name.startswith("`") and name.endswith("`"):
            return name  # Quoting once is enough.
        return "`%s`" % name

    def random_function_sql(self):
        return 'RAND()'

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return statements that empty `tables` and reset their sequences."""
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            # Foreign key checks must be disabled so TRUNCATE succeeds
            # regardless of inter-table references.
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def validate_autopk_value(self, value):
        """Reject 0 as a primary-key value; MySQL treats it as "auto-assign"."""
        # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
        if value == 0:
            raise ValueError('The database backend does not accept 0 as a '
                             'value for AutoField.')
        return value

    def value_to_db_datetime(self, value):
        """Convert a Python datetime to the string MySQL expects."""
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes; store as naive UTC.
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = value.astimezone(timezone.utc).replace(tzinfo=None)
            else:
                raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
        return six.text_type(value)

    def value_to_db_time(self, value):
        """Convert a Python time to the string MySQL expects."""
        if value is None:
            return None
        # MySQL doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")
        return six.text_type(value)

    def max_name_length(self):
        # MySQL limits identifiers to 64 characters.
        return 64

    def bulk_insert_sql(self, fields, num_values):
        """Return the VALUES clause for a multi-row INSERT."""
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)

    def combine_expression(self, connector, sub_expressions):
        """
        MySQL requires special cases for ^ operators in query expressions
        """
        if connector == '^':
            # Python's ^ is exponentiation in query expressions; MySQL's ^ is XOR.
            return 'POW(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)

    def get_db_converters(self, expression):
        """Return converters that fix up values MySQL returns for some fields."""
        converters = super(DatabaseOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type in ['BooleanField', 'NullBooleanField']:
            converters.append(self.convert_booleanfield_value)
        if internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        if internal_type == 'TextField':
            converters.append(self.convert_textfield_value)
        return converters

    def convert_booleanfield_value(self, value, expression, connection, context):
        # MySQL returns booleans as 0/1 integers.
        if value in (0, 1):
            value = bool(value)
        return value

    def convert_uuidfield_value(self, value, expression, connection, context):
        # UUIDs are stored as char(32); rebuild the uuid.UUID object.
        if value is not None:
            value = uuid.UUID(value)
        return value

    def convert_textfield_value(self, value, expression, connection, context):
        # Some drivers return bytes for text columns; normalize to text.
        if value is not None:
            value = force_text(value)
        return value
| bsd-3-clause |
apache/incubator-airflow | tests/providers/amazon/aws/operators/test_emr_terminate_job_flow.py | 10 | 1847 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import MagicMock, patch
from airflow.providers.amazon.aws.operators.emr_terminate_job_flow import EmrTerminateJobFlowOperator
# Minimal successful response shape for EMR's TerminateJobFlows API call.
TERMINATE_SUCCESS_RETURN = {'ResponseMetadata': {'HTTPStatusCode': 200}}
class TestEmrTerminateJobFlowOperator(unittest.TestCase):
    """Tests EmrTerminateJobFlowOperator against a fully mocked EMR client."""

    def setUp(self):
        # Mock out the emr_client (moto has incorrect response)
        emr_client = MagicMock()
        emr_client.terminate_job_flows.return_value = TERMINATE_SUCCESS_RETURN
        emr_session = MagicMock()
        emr_session.client.return_value = emr_client
        # Mock out the emr_client creator
        self.boto3_session_mock = MagicMock(return_value=emr_session)

    def test_execute_terminates_the_job_flow_and_does_not_error(self):
        with patch('boto3.session.Session', self.boto3_session_mock):
            operator = EmrTerminateJobFlowOperator(
                task_id='test_task',
                job_flow_id='j-8989898989',
                aws_conn_id='aws_default',
            )
            operator.execute(None)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.