repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
Batterfii/django | django/utils/deprecation.py | 34 | 2556 | from __future__ import absolute_import
import inspect
import warnings
# Warning category for APIs scheduled for removal in Django 2.0.
# Subclasses PendingDeprecationWarning so it is silent by default.
class RemovedInDjango20Warning(PendingDeprecationWarning):
    pass


# Warnings for the *next* release map onto the builtin DeprecationWarning
# category (shown by default in Python's warning filters).
RemovedInNextVersionWarning = DeprecationWarning
class warn_about_renamed_method(object):
    """Method decorator that emits a deprecation warning every time a
    renamed method is invoked under its old name."""

    def __init__(self, class_name, old_method_name, new_method_name,
                 deprecation_warning):
        self.class_name = class_name
        self.old_method_name = old_method_name
        self.new_method_name = new_method_name
        self.deprecation_warning = deprecation_warning

    def __call__(self, f):
        def wrapped(*args, **kwargs):
            # Built at call time so later attribute changes are honored.
            description = (self.class_name, self.old_method_name,
                           self.new_method_name)
            # stacklevel=2 attributes the warning to the caller of the
            # deprecated name rather than to this wrapper.
            warnings.warn(
                "`%s.%s` is deprecated, use `%s` instead." % description,
                self.deprecation_warning, 2)
            return f(*args, **kwargs)
        return wrapped
class RenameMethodsBase(type):
    """
    Handles the deprecation paths when renaming a method.

    It does the following:
    1) Define the new method if missing and complain about it.
    2) Define the old method if missing.
    3) Complain whenever an old method is called.

    See #15363 for more details.
    """

    # Sequence of (old_method_name, new_method_name, warning_category)
    # triples, meant to be overridden by concrete metaclass subclasses.
    renamed_methods = ()

    def __new__(cls, name, bases, attrs):
        new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs)

        # Walk the whole MRO so renames declared on ancestors are also
        # patched; note this mutates the base classes themselves.
        for base in inspect.getmro(new_class):
            class_name = base.__name__
            for renamed_method in cls.renamed_methods:
                old_method_name = renamed_method[0]
                old_method = base.__dict__.get(old_method_name)
                new_method_name = renamed_method[1]
                new_method = base.__dict__.get(new_method_name)
                deprecation_warning = renamed_method[2]
                # Wrapper that warns with the deprecation message when the
                # old name is invoked.
                wrapper = warn_about_renamed_method(class_name, *renamed_method)

                # Define the new method if missing and complain about it
                if not new_method and old_method:
                    warnings.warn(
                        "`%s.%s` method should be renamed `%s`." %
                        (class_name, old_method_name, new_method_name),
                        deprecation_warning, 2)
                    setattr(base, new_method_name, old_method)
                    setattr(base, old_method_name, wrapper(old_method))

                # Define the old method as a wrapped call to the new method.
                if not old_method and new_method:
                    setattr(base, old_method_name, wrapper(new_method))

        return new_class
| bsd-3-clause |
elkingtonmcb/sympy | sympy/plotting/pygletplot/plot_camera.py | 120 | 3888 | from __future__ import print_function, division
from pyglet.gl import *
from plot_rotation import get_spherical_rotatation
from util import get_model_matrix
from util import screen_to_model, model_to_screen
from util import vec_subs
class PlotCamera(object):
    """Orbiting camera for the pyglet plot window.

    State consists of a rotation matrix (``_rot``), a pan offset
    (``_x``, ``_y``) and a distance from the origin (``_dist``).
    ``ortho`` mode fakes an orthographic view with a very narrow
    perspective frustum viewed from far away.
    """

    min_dist = 0.05
    max_dist = 500.0

    min_ortho_dist = 100.0
    max_ortho_dist = 10000.0

    _default_dist = 6.0
    _default_ortho_dist = 600.0

    # Euler-angle presets in degrees, applied about x, then y, then z.
    rot_presets = {
        'xy': (0, 0, 0),
        'xz': (-90, 0, 0),
        'yz': (0, 90, 0),
        'perspective': (-45, 0, -45)
    }

    def __init__(self, window, ortho=False):
        self.window = window
        self.axes = self.window.plot.axes
        self.ortho = ortho
        self.reset()

    def init_rot_matrix(self):
        """Reset the stored rotation matrix to the identity."""
        glPushMatrix()
        glLoadIdentity()
        self._rot = get_model_matrix()
        glPopMatrix()

    def set_rot_preset(self, preset_name):
        """Orient the camera to one of the named ``rot_presets``.

        Raises ValueError if ``preset_name`` is unknown.
        """
        self.init_rot_matrix()
        try:
            r = self.rot_presets[preset_name]
        except KeyError:
            # BUG FIX: a failed dict subscription raises KeyError, not
            # AttributeError. The original `except AttributeError` never
            # matched, so the KeyError escaped and the ValueError below
            # was unreachable.
            raise ValueError(
                "%s is not a valid rotation preset." % preset_name)
        try:
            self.euler_rotate(r[0], 1, 0, 0)
            self.euler_rotate(r[1], 0, 1, 0)
            self.euler_rotate(r[2], 0, 0, 1)
        except AttributeError:
            # Preserved from the original; presumably guards against a
            # partially-initialized camera — TODO confirm it still fires.
            pass

    def reset(self):
        """Clear pan/rotation and restore the mode's default distance."""
        self._dist = 0.0
        self._x, self._y = 0.0, 0.0
        self._rot = None
        if self.ortho:
            self._dist = self._default_ortho_dist
        else:
            self._dist = self._default_dist
        self.init_rot_matrix()

    def mult_rot_matrix(self, rot):
        """Compose ``rot`` with the stored rotation (rot * _rot)."""
        glPushMatrix()
        glLoadMatrixf(rot)
        glMultMatrixf(self._rot)
        self._rot = get_model_matrix()
        glPopMatrix()

    def setup_projection(self):
        """Load the projection matrix for the current window size/mode."""
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        if self.ortho:
            # yep, this is pseudo ortho (don't tell anyone)
            gluPerspective(
                0.3, float(self.window.width)/float(self.window.height),
                self.min_ortho_dist - 0.01, self.max_ortho_dist + 0.01)
        else:
            gluPerspective(
                30.0, float(self.window.width)/float(self.window.height),
                self.min_dist - 0.01, self.max_dist + 0.01)
        glMatrixMode(GL_MODELVIEW)

    def _get_scale(self):
        # Hook for subclasses; identity scale by default.
        return 1.0, 1.0, 1.0

    def apply_transformation(self):
        """Apply pan, distance and rotation to the modelview matrix."""
        glLoadIdentity()
        glTranslatef(self._x, self._y, -self._dist)
        if self._rot is not None:
            glMultMatrixf(self._rot)
        glScalef(*self._get_scale())

    def spherical_rotate(self, p1, p2, sensitivity=1.0):
        """Arcball-style rotation dragging from screen point p1 to p2."""
        mat = get_spherical_rotatation(p1, p2, self.window.width,
                                       self.window.height, sensitivity)
        if mat is not None:
            self.mult_rot_matrix(mat)

    def euler_rotate(self, angle, x, y, z):
        """Rotate the camera ``angle`` degrees about axis (x, y, z)."""
        glPushMatrix()
        glLoadMatrixf(self._rot)
        glRotatef(angle, x, y, z)
        self._rot = get_model_matrix()
        glPopMatrix()

    def zoom_relative(self, clicks, sensitivity):
        """Move along the view axis by scroll ``clicks``, clamped to the
        mode's distance limits."""
        if self.ortho:
            dist_d = clicks * sensitivity * 50.0
            min_dist = self.min_ortho_dist
            max_dist = self.max_ortho_dist
        else:
            dist_d = clicks * sensitivity
            min_dist = self.min_dist
            max_dist = self.max_dist

        new_dist = (self._dist - dist_d)
        if (clicks < 0 and new_dist < max_dist) or new_dist > min_dist:
            self._dist = new_dist

    def mouse_translate(self, x, y, dx, dy):
        """Pan so the point under the cursor follows the mouse drag."""
        glPushMatrix()
        glLoadIdentity()
        glTranslatef(0, 0, -self._dist)
        z = model_to_screen(0, 0, 0)[2]
        d = vec_subs(screen_to_model(x, y, z), screen_to_model(x - dx, y - dy, z))
        glPopMatrix()
        self._x += d[0]
        self._y += d[1]
| bsd-3-clause |
dkodnik/Ant | addons/account_check_writing/account_voucher.py | 33 | 6716 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from openerp.tools.translate import _
from openerp.tools.amount_to_text_en import amount_to_text
from lxml import etree
class account_voucher(osv.osv):
    """Extension of ``account.voucher`` adding check-writing support:
    an amount-in-words field, journal filtering for check-enabled
    journals, and a check-layout print action."""

    _inherit = 'account.voucher'

    def _make_journal_search(self, cr, uid, ttype, context=None):
        # In the "write check" flow, restrict the default journal lookup
        # to journals flagged for check writing; otherwise fall back to
        # the standard search by journal type.
        if context is None:
            context = {}
        journal_pool = self.pool.get('account.journal')
        if context.get('write_check',False) :
            return journal_pool.search(cr, uid, [('allow_check_writing', '=', True)], limit=1)
        return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)

    _columns = {
        # amount_in_word is auto-computed from the amount (see
        # _amount_to_text) and only editable while the voucher is draft.
        'amount_in_word' : fields.char("Amount in Word" , size=128, readonly=True, states={'draft':[('readonly',False)]}),
        # Mirrors the journal's allow_check_writing flag for view logic.
        'allow_check' : fields.related('journal_id', 'allow_check_writing', type='boolean', string='Allow Check Writing'),
        'number': fields.char('Number', size=32),
    }

    def _amount_to_text(self, cr, uid, amount, currency_id, context=None):
        """Render ``amount`` as English words with a currency name."""
        # Currency complete name is not available in res.currency model
        # Exceptions done here (EUR, USD, BRL) cover 75% of cases
        # For other currencies, display the currency code
        currency = self.pool['res.currency'].browse(cr, uid, currency_id, context=context)
        if currency.name.upper() == 'EUR':
            currency_name = 'Euro'
        elif currency.name.upper() == 'USD':
            currency_name = 'Dollars'
        elif currency.name.upper() == 'BRL':
            currency_name = 'reais'
        else:
            currency_name = currency.name
        #TODO : generic amount_to_text is not ready yet, otherwise language (and country) and currency can be passed
        #amount_in_word = amount_to_text(amount, context=context)
        return amount_to_text(amount, currency=currency_name)

    def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):
        """Inherited - add amount_in_word and allow_check_writing to the
        returned value dictionary."""
        if not context:
            context = {}
        default = super(account_voucher, self).onchange_amount(cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=context)
        if 'value' in default:
            # Prefer the amount recomputed by the parent onchange, if any.
            amount = 'amount' in default['value'] and default['value']['amount'] or amount
            amount_in_word = self._amount_to_text(cr, uid, amount, currency_id, context=context)
            default['value'].update({'amount_in_word':amount_in_word})
            if journal_id:
                allow_check_writing = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context).allow_check_writing
                default['value'].update({'allow_check':allow_check_writing})
        return default

    def print_check(self, cr, uid, ids, context=None):
        """Return the report action for the company's check layout.

        Only the first id is printed (single-record action).
        """
        if not ids:
            return {}

        # Maps the company's configured layout to its report service name.
        check_layout_report = {
            'top' : 'account.print.check.top',
            'middle' : 'account.print.check.middle',
            'bottom' : 'account.print.check.bottom',
        }

        check_layout = self.browse(cr, uid, ids[0], context=context).company_id.check_layout
        return {
            'type': 'ir.actions.report.xml',
            'report_name':check_layout_report[check_layout],
            'datas': {
                'model':'account.voucher',
                'id': ids and ids[0] or False,
                'ids': ids and ids or [],
                'report_type': 'pdf'
            },
            'nodestroy': True
        }

    def create(self, cr, uid, vals, context=None):
        # Backfill amount_in_word when the client did not send it.
        # Currency precedence: explicit currency_id, then the journal's
        # currency, then the company currency.
        # NOTE(review): assumes vals contains company_id when the journal
        # has no currency — confirm against callers.
        if vals.get('amount') and vals.get('journal_id') and 'amount_in_word' not in vals:
            vals['amount_in_word'] = self._amount_to_text(cr, uid, vals['amount'], vals.get('currency_id') or \
                self.pool['account.journal'].browse(cr, uid, vals['journal_id'], context=context).currency.id or \
                self.pool['res.company'].browse(cr, uid, vals['company_id']).currency_id.id, context=context)
        return super(account_voucher, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        # Same backfill as create() when amount/journal change together.
        if vals.get('amount') and vals.get('journal_id') and 'amount_in_word' not in vals:
            vals['amount_in_word'] = self._amount_to_text(cr, uid, vals['amount'], vals.get('currency_id') or \
                self.pool['account.journal'].browse(cr, uid, vals['journal_id'], context=context).currency.id or \
                self.pool['res.company'].browse(cr, uid, vals['company_id']).currency_id.id, context=context)
        return super(account_voucher, self).write(cr, uid, ids, vals, context=context)

    def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
        """
        Add domain 'allow_check_writing = True' on journal_id field and
        remove 'widget = selection' on the same field because the dynamic
        domain is not allowed on such widget.
        """
        if not context: context = {}
        res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
        doc = etree.XML(res['arch'])
        nodes = doc.xpath("//field[@name='journal_id']")
        if context.get('write_check', False) :
            for node in nodes:
                node.set('domain', "[('type', '=', 'bank'), ('allow_check_writing','=',True)]")
                node.set('widget', '')
            res['arch'] = etree.tostring(doc)
        return res

# Legacy OpenERP idiom: instantiating the class registers the model.
account_voucher()
| agpl-3.0 |
Ali-aqrabawi/ezclinic | lib/django/db/migrations/operations/base.py | 127 | 4888 | from __future__ import unicode_literals
from django.db import router
class Operation(object):
    """
    Base class for migration operations.

    An operation mutates the in-memory project state
    (see db/migrations/state.py) to mirror what it does, and also applies
    the equivalent change against a live database. Some operations leave
    memory state untouched (e.g. data copies) and some carry
    user-supplied behaviour (e.g. custom Python code snippets).

    Deconstruction reuses the captured constructor arguments, so
    instances should be considered immutable.
    """

    # Whether this operation can be run in reverse.
    # Some operations are impossible to reverse, like deleting data.
    reversible = True

    # Whether this operation can be represented as SQL (RunPython cannot).
    reduces_to_sql = True

    # Force atomic execution even on backends without DDL transaction
    # support (i.e., it has no DDL, like RunPython).
    atomic = False

    # Whether the optimizer may elide this operation or optimize across it.
    elidable = False

    serialization_expand_args = []

    def __new__(cls, *args, **kwargs):
        # Record the constructor arguments so deconstruct() is trivial.
        instance = object.__new__(cls)
        instance._constructor_args = (args, kwargs)
        return instance

    def deconstruct(self):
        """
        Return a 3-tuple of class import path (or just the name if it
        lives under django.db.migrations), positional arguments, and
        keyword arguments.
        """
        args, kwargs = self._constructor_args
        return (self.__class__.__name__, args, kwargs)

    def state_forwards(self, app_label, state):
        """
        Mutate ``state`` (taken from the previous migration) so that it
        matches what this operation performs.
        """
        raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """
        Apply the schema mutation in the normal (forwards) direction.
        """
        raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """
        Apply the schema mutation in reverse - e.g. if this were
        CreateModel, it would in fact drop the model's table.
        """
        raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')

    def describe(self):
        """
        Return a brief summary of what the action does.
        """
        return "%s: %s" % (self.__class__.__name__, self._constructor_args)

    def references_model(self, name, app_label=None):
        """
        Return True if there is a chance this operation references the
        given model name (as a string), optionally app-label qualified.

        Used for optimization only, so when in doubt return True: a
        false positive merely makes the optimizer a little less
        efficient, while a false negative may result in an unusable
        optimized migration.
        """
        return True

    def references_field(self, model_name, name, app_label=None):
        """
        Return True if there is a chance this operation references the
        given field name. Used for optimization; if in doubt, return True.
        """
        return self.references_model(model_name, app_label)

    def allow_migrate_model(self, connection_alias, model):
        """
        Return whether we're allowed to migrate the model.

        A thin wrapper around router.allow_migrate_model() that
        preemptively rejects any proxy, swapped out, or unmanaged model.
        """
        if not model._meta.can_migrate(connection_alias):
            return False
        return router.allow_migrate_model(connection_alias, model)

    def reduce(self, operation, in_between, app_label=None):
        """
        Return either a list of operations this one should be replaced
        with, or a boolean indicating whether ``operation`` can be
        optimized across.
        """
        if self.elidable:
            return [operation]
        if operation.elidable:
            return [self]
        return False

    def __repr__(self):
        positional = ", ".join(map(repr, self._constructor_args[0]))
        keywords = ",".join(" %s=%r" % x for x in self._constructor_args[1].items())
        return "<%s %s%s>" % (self.__class__.__name__, positional, keywords)
| mit |
Orav/kbengine | kbe/res/scripts/common/Lib/test/test_unpack_ex.py | 5 | 4311 | # Tests for extended unpacking, starred expressions.
# NOTE(review): this module-level doctest string appears to have lost its
# original blank lines / indentation in transit; the content is kept
# byte-for-byte as found. Verify against upstream before running doctest.
doctests = """
Unpack tuple
>>> t = (1, 2, 3)
>>> a, *b, c = t
>>> a == 1 and b == [2] and c == 3
True
Unpack list
>>> l = [4, 5, 6]
>>> a, *b = l
>>> a == 4 and b == [5, 6]
True
Unpack implied tuple
>>> *a, = 7, 8, 9
>>> a == [7, 8, 9]
True
Unpack string... fun!
>>> a, *b = 'one'
>>> a == 'o' and b == ['n', 'e']
True
Unpack long sequence
>>> a, b, c, *d, e, f, g = range(10)
>>> (a, b, c, d, e, f, g) == (0, 1, 2, [3, 4, 5, 6], 7, 8, 9)
True
Unpack short sequence
>>> a, *b, c = (1, 2)
>>> a == 1 and c == 2 and b == []
True
Unpack generic sequence
>>> class Seq:
... def __getitem__(self, i):
... if i >= 0 and i < 3: return i
... raise IndexError
...
>>> a, *b = Seq()
>>> a == 0 and b == [1, 2]
True
Unpack in for statement
>>> for a, *b, c in [(1,2,3), (4,5,6,7)]:
... print(a, b, c)
...
1 [2] 3
4 [5, 6] 7
Unpack in list
>>> [a, *b, c] = range(5)
>>> a == 0 and b == [1, 2, 3] and c == 4
True
Multiple targets
>>> a, *b, c = *d, e = range(5)
>>> a == 0 and b == [1, 2, 3] and c == 4 and d == [0, 1, 2, 3] and e == 4
True
Now for some failures
Unpacking non-sequence
>>> a, *b = 7
Traceback (most recent call last):
...
TypeError: 'int' object is not iterable
Unpacking sequence too short
>>> a, *b, c, d, e = Seq()
Traceback (most recent call last):
...
ValueError: need more than 3 values to unpack
Unpacking a sequence where the test for too long raises a different kind of
error
>>> class BozoError(Exception):
... pass
...
>>> class BadSeq:
... def __getitem__(self, i):
... if i >= 0 and i < 3:
... return i
... elif i == 3:
... raise BozoError
... else:
... raise IndexError
...
Trigger code while not expecting an IndexError (unpack sequence too long, wrong
error)
>>> a, *b, c, d, e = BadSeq()
Traceback (most recent call last):
...
test.test_unpack_ex.BozoError
Now some general starred expressions (all fail).
>>> a, *b, c, *d, e = range(10) # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SyntaxError: two starred expressions in assignment
>>> [*b, *c] = range(10) # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SyntaxError: two starred expressions in assignment
>>> *a = range(10) # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SyntaxError: starred assignment target must be in a list or tuple
>>> *a # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SyntaxError: can use starred expression only as assignment target
>>> *1 # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SyntaxError: can use starred expression only as assignment target
>>> x = *a # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SyntaxError: can use starred expression only as assignment target
Some size constraints (all fail.)
>>> s = ", ".join("a%d" % i for i in range(1<<8)) + ", *rest = range(1<<8 + 1)"
>>> compile(s, 'test', 'exec') # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SyntaxError: too many expressions in star-unpacking assignment
>>> s = ", ".join("a%d" % i for i in range(1<<8 + 1)) + ", *rest = range(1<<8 + 2)"
>>> compile(s, 'test', 'exec') # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SyntaxError: too many expressions in star-unpacking assignment
(there is an additional limit, on the number of expressions after the
'*rest', but it's 1<<24 and testing it takes too much memory.)
"""

# doctest/regrtest collect extra tests from the __test__ mapping.
__test__ = {'doctests' : doctests}
def test_main(verbose=False):
    """Run this module's doctest suite.

    Args:
        verbose: forwarded to doctest for per-example reporting.
    """
    # Removed an unused `import sys` left over in the original body.
    from test import support
    from test import test_unpack_ex
    support.run_doctest(test_unpack_ex, verbose)


if __name__ == "__main__":
    test_main(verbose=True)
| lgpl-3.0 |
cmjatai/cmj | cmj/cerimonial/models.py | 1 | 27858 |
from django.contrib.auth.models import Group
from django.db import models
from django.db.models.deletion import SET_NULL, PROTECT, CASCADE
from django.utils.translation import ugettext_lazy as _
from cmj.core.models import CmjModelMixin, Trecho, Distrito, RegiaoMunicipal,\
CmjAuditoriaModelMixin, CmjSearchMixin, AreaTrabalho, Bairro, Municipio
from cmj.utils import YES_NO_CHOICES, NONE_YES_NO_CHOICES,\
get_settings_auth_user_model
from sapl.parlamentares.models import Parlamentar, Partido
from sapl.utils import LISTA_DE_UFS
# Biological-sex choices shared by Contato and Dependente fields.
FEMININO = 'F'
MASCULINO = 'M'
SEXO_CHOICE = ((FEMININO, _('Feminino')),
               (MASCULINO, _('Masculino')))

# Importance levels: low / medium / high / critical.
IMP_BAIXA = 'B'
IMP_MEDIA = 'M'
IMP_ALTA = 'A'
IMP_CRITICA = 'C'
IMPORTANCIA_CHOICE = (
    (IMP_BAIXA, _('Baixa')),
    (IMP_MEDIA, _('Média')),
    (IMP_ALTA, _('Alta')),
    (IMP_CRITICA, _('Crítica')),
)
class DescricaoAbstractModel(models.Model):
    """Abstract base for simple lookup models carrying only a
    name/description field; ordered by it and used as str()."""

    descricao = models.CharField(
        default='', max_length=254, verbose_name=_('Nome / Descrição'))

    class Meta:
        abstract = True
        ordering = ('descricao',)

    def __str__(self):
        return self.descricao
class TipoTelefone(DescricaoAbstractModel):
    """Lookup table: telephone types."""

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Tipo de Telefone')
        verbose_name_plural = _('Tipos de Telefone')
class TipoEndereco(DescricaoAbstractModel):
    """Lookup table: address types."""

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Tipo de Endereço')
        verbose_name_plural = _('Tipos de Endereço')
class TipoEmail(DescricaoAbstractModel):
    """Lookup table: email types."""

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Tipo de Email')
        verbose_name_plural = _('Tipos de Email')
class Parentesco(DescricaoAbstractModel):
    """Lookup table: kinship/parentage relations."""

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Parentesco')
        verbose_name_plural = _('Parentescos')
class EstadoCivil(DescricaoAbstractModel):
    """Lookup table: civil/marital status."""

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Estado Civil')
        verbose_name_plural = _('Estados Civis')
class PronomeTratamento(models.Model):
    """Form of address (pronome de tratamento).

    Stores every combination of grammatical number (singular/plural) and
    gender (m/f) for: abbreviation, direct and indirect vocative,
    envelope addressing, and name prefix — plus the full written form.
    """

    nome_por_extenso = models.CharField(
        default='', max_length=254, verbose_name=_('Nome Por Extenso'))

    abreviatura_singular_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Abreviatura Singular Masculino'))
    abreviatura_singular_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Abreviatura Singular Feminino'))
    abreviatura_plural_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Abreviatura Plural Masculino'))
    abreviatura_plural_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Abreviatura Plural Feminino'))

    vocativo_direto_singular_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Vocativo Direto Singular Masculino'))
    vocativo_direto_singular_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Vocativo Direto Singular Feminino'))
    vocativo_direto_plural_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Vocativo Direto Plural Masculino'))
    vocativo_direto_plural_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Vocativo Direto Plural Feminino'))

    vocativo_indireto_singular_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Vocativo Indireto Singular Masculino'))
    vocativo_indireto_singular_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Vocativo Indireto Singular Feminino'))
    vocativo_indireto_plural_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Vocativo Indireto Plural Masculino'))
    vocativo_indireto_plural_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Vocativo Indireto Plural Feminino'))

    enderecamento_singular_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Endereçamento Singular Masculino'))
    enderecamento_singular_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Endereçamento Singular Feminino'))
    enderecamento_plural_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Endereçamento Plural Masculino'))
    enderecamento_plural_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Endereçamento Plural Feminino'))

    prefixo_nome_singular_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Prefixo Singular Masculino'))
    prefixo_nome_singular_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Prefixo Singular Feminino'))
    prefixo_nome_plural_m = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Prefixo Plural Masculino'))
    prefixo_nome_plural_f = models.CharField(
        default='', max_length=254, verbose_name=_(
            'Prefixo Plural Feminino'))

    class Meta:
        verbose_name = _('Pronome de Tratamento')
        verbose_name_plural = _('Pronomes de tratamento')

    def __str__(self):
        return self.nome_por_extenso
class TipoAutoridade(DescricaoAbstractModel):
    """Lookup table: authority types, each linked to the treatment
    pronouns applicable to it."""

    pronomes = models.ManyToManyField(
        PronomeTratamento,
        related_name='tipoautoridade_set')

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Tipo de Autoridade')
        verbose_name_plural = _('Tipos de Autoridade')
class TipoLocalTrabalho(DescricaoAbstractModel):
    """Lookup table: workplace types."""

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Tipo do Local de Trabalho')
        verbose_name_plural = _('Tipos de Local de Trabalho')
class NivelInstrucao(DescricaoAbstractModel):
    """Lookup table: education levels."""

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Nível de Instrução')
        verbose_name_plural = _('Níveis de Instrução')
class OperadoraTelefonia(DescricaoAbstractModel):
    """Lookup table: telephone carriers."""

    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Operadora de Telefonia')
        verbose_name_plural = _('Operadoras de Telefonia')
class Contato(CmjSearchMixin, CmjAuditoriaModelMixin):
    """Person/contact record scoped to a workspace (AreaTrabalho).

    Carries personal, civil-status and document data, an optional link
    to an auth user (perfil_user) and optional authority data
    (tipo_autoridade / pronome_tratamento).
    """

    nome = models.CharField(max_length=100, verbose_name=_('Nome'))

    nome_social = models.CharField(
        blank=True, default='', max_length=100, verbose_name=_('Nome Social'))

    apelido = models.CharField(
        blank=True, default='', max_length=100, verbose_name=_('Apelido'))

    data_nascimento = models.DateField(
        blank=True, null=True, verbose_name=_('Data de Nascimento'))

    sexo = models.CharField(
        max_length=1, blank=True,
        verbose_name=_('Sexo Biológico'), choices=SEXO_CHOICE)

    identidade_genero = models.CharField(
        blank=True, default='',
        max_length=100, verbose_name=_('Como se reconhece?'))

    # Tri-state: None = not informed, True/False otherwise.
    tem_filhos = models.NullBooleanField(
        choices=NONE_YES_NO_CHOICES,
        default=None, verbose_name=_('Tem Filhos?'))

    quantos_filhos = models.PositiveSmallIntegerField(
        default=0, blank=True, verbose_name=_('Quantos Filhos?'))

    estado_civil = models.ForeignKey(
        EstadoCivil,
        related_name='contato_set',
        blank=True, null=True, on_delete=SET_NULL,
        verbose_name=_('Estado Civil'))

    nivel_instrucao = models.ForeignKey(
        NivelInstrucao,
        related_name='contato_set',
        blank=True, null=True, on_delete=SET_NULL,
        verbose_name=_('Nivel de Instrução'))

    naturalidade = models.CharField(
        max_length=50, blank=True, verbose_name=_('Naturalidade'))

    nome_pai = models.CharField(
        max_length=100, blank=True, verbose_name=_('Nome do Pai'))

    nome_mae = models.CharField(
        max_length=100, blank=True, verbose_name=_('Nome da Mãe'))

    numero_sus = models.CharField(
        max_length=100, blank=True, verbose_name=_('Número do SUS'))

    # Document numbers are stored as free-form strings (no validation here).
    cpf = models.CharField(max_length=15, blank=True, verbose_name=_('CPF'))

    titulo_eleitor = models.CharField(
        max_length=15,
        blank=True,
        verbose_name=_('Título de Eleitor'))

    rg = models.CharField(max_length=30, blank=True, verbose_name=_('RG'))

    rg_orgao_expedidor = models.CharField(
        max_length=20, blank=True, verbose_name=_('Órgão Expedidor'))

    rg_data_expedicao = models.DateField(
        blank=True, null=True, verbose_name=_('Data de Expedição'))

    ativo = models.BooleanField(choices=YES_NO_CHOICES,
                                default=True, verbose_name=_('Ativo?'))

    # Workspace that owns this contact; PROTECT keeps contacts from being
    # orphaned by a workspace delete.
    workspace = models.ForeignKey(
        AreaTrabalho,
        verbose_name=_('Área de Trabalho'),
        related_name='contato_set',
        blank=True, null=True, on_delete=PROTECT)

    # Optional link to the auth user this contact represents; contact is
    # removed together with the user (CASCADE).
    perfil_user = models.ForeignKey(
        get_settings_auth_user_model(),
        verbose_name=_('Perfil do Usuário'),
        related_name='contato_set',
        blank=True, null=True, on_delete=CASCADE)

    profissao = models.CharField(
        max_length=254, blank=True, verbose_name=_('Profissão'))

    tipo_autoridade = models.ForeignKey(
        TipoAutoridade,
        verbose_name=TipoAutoridade._meta.verbose_name,
        related_name='contato_set',
        blank=True, null=True, on_delete=SET_NULL)

    cargo = models.CharField(max_length=254, blank=True, default='',
                             verbose_name=_('Cargo/Função'))

    pronome_tratamento = models.ForeignKey(
        PronomeTratamento,
        verbose_name=PronomeTratamento._meta.verbose_name,
        related_name='contato_set',
        blank=True, null=True, on_delete=SET_NULL,
        help_text=_('O pronome de tratamento é opcional, mas será \
obrigatório caso seja selecionado um tipo de autoridade.'))

    observacoes = models.TextField(
        blank=True, default='',
        verbose_name=_('Outros observações sobre o Contato'))

    @property
    def fields_search(self):
        # Field names consumed by CmjSearchMixin — presumably the
        # full-text search index; confirm in cmj.core.models.
        return ['nome',
                'nome_social',
                'apelido']

    class Meta:
        verbose_name = _('Contato')
        verbose_name_plural = _('Contatos')
        ordering = ['nome']
        permissions = (
            ('print_impressoenderecamento',
             _('Pode Imprimir Impressos de Endereçamento')),
            ('print_rel_contato_agrupado_por_processo',
             _('Pode Imprimir Relatório de Contatos Agrupados por Processo')),
            ('print_rel_contato_agrupado_por_grupo',
             _('Pode Imprimir Relatório de Contatos Agrupados '
               'Grupos de Contato')),
        )
        unique_together = (
            ('nome', 'data_nascimento', 'workspace', 'perfil_user'),)

    def __str__(self):
        return self.nome
class PerfilManager(models.Manager):
    """Manager resolving the Contato that acts as a user's profile."""

    def for_user(self, user):
        # .get() assumes exactly one matching Contato per user and
        # raises otherwise (DoesNotExist / MultipleObjectsReturned).
        queryset = super(PerfilManager, self).get_queryset()
        return queryset.get(perfil_user=user)
class Perfil(Contato):
    """Proxy over Contato whose manager looks records up by the linked
    auth user (see PerfilManager.for_user)."""

    objects = PerfilManager()

    class Meta:
        # Proxy: same table as Contato, different default manager.
        proxy = True
class Telefone(CmjAuditoriaModelMixin):
    """Telephone number attached to a Contato, with carrier, type,
    ownership and contact-permission flags."""

    contato = models.ForeignKey(
        Contato, on_delete=CASCADE,
        verbose_name=_('Contato'),
        related_name="telefone_set")

    operadora = models.ForeignKey(
        OperadoraTelefonia, on_delete=SET_NULL,
        related_name='telefone_set',
        blank=True, null=True,
        verbose_name=OperadoraTelefonia._meta.verbose_name)

    tipo = models.ForeignKey(
        TipoTelefone,
        blank=True, null=True,
        on_delete=SET_NULL,
        related_name='telefone_set',
        verbose_name='Tipo')

    telefone = models.CharField(max_length=100,
                                verbose_name='Número do Telefone')

    # Tri-state: None = not informed.
    proprio = models.NullBooleanField(
        choices=NONE_YES_NO_CHOICES,
        blank=True, null=True, verbose_name=_('Próprio?'))

    de_quem_e = models.CharField(
        max_length=40, verbose_name='De quem é?', blank=True,
        help_text=_('Se não é próprio, de quem é?'))

    preferencial = models.BooleanField(
        choices=YES_NO_CHOICES,
        default=True, verbose_name=_('Preferêncial?'))

    # Consent flag: may the institution call this number?
    permissao = models.BooleanField(
        choices=YES_NO_CHOICES,
        default=True, verbose_name=_('Permissão:'),
        help_text=_("Permite que nossa instituição entre em contato \
com você neste telefone?"))

    @property
    def numero_nome_contato(self):
        # NOTE(review): despite the name, this returns only str(self)
        # (the number); the contact's name is not included — confirm intent.
        return str(self)

    class Meta:
        verbose_name = _('Telefone')
        verbose_name_plural = _('Telefones')

    def __str__(self):
        return self.telefone
class TelefonePerfil(Telefone):
    """Proxy over Telefone used in the user-profile context (same
    table, profile-specific verbose names)."""

    class Meta:
        proxy = True
        verbose_name = _('Telefone do Perfil')
        verbose_name_plural = _('Telefones do Perfil')
class Email(CmjAuditoriaModelMixin):
    """Email address attached to a Contato, with type, preference and
    contact-permission flags."""

    contato = models.ForeignKey(
        Contato, on_delete=CASCADE,
        verbose_name=_('Contato'),
        related_name="email_set")

    tipo = models.ForeignKey(
        TipoEmail,
        blank=True, null=True,
        on_delete=SET_NULL,
        related_name='email_set',
        verbose_name='Tipo')

    email = models.EmailField(verbose_name='Email')

    preferencial = models.BooleanField(
        choices=YES_NO_CHOICES,
        default=True, verbose_name=_('Preferêncial?'))

    # Consent flag: may the institution send information to this address?
    permissao = models.BooleanField(
        choices=YES_NO_CHOICES,
        default=True, verbose_name=_('Permissão:'),
        help_text=_("Permite que nossa instituição envie informações \
para este email?"))

    class Meta:
        verbose_name = _('Email')
        verbose_name_plural = _("Email's")

    def __str__(self):
        return self.email
class EmailPerfil(Email):
    """Proxy over Email used in the user-profile context (same table,
    profile-specific verbose names)."""

    class Meta:
        proxy = True
        verbose_name = _('Email do Perfil')
        verbose_name_plural = _("Email's do Perfil")
class Dependente(CmjAuditoriaModelMixin):
    """Dependent (family member) of a Contato, with the kinship
    relation and basic personal data."""

    # PROTECT: a Parentesco in use cannot be deleted.
    parentesco = models.ForeignKey(Parentesco,
                                   on_delete=PROTECT,
                                   related_name='+',
                                   verbose_name=_('Parentesco'))

    contato = models.ForeignKey(Contato,
                                verbose_name=_('Contato'),
                                related_name='dependente_set',
                                on_delete=CASCADE)

    nome = models.CharField(max_length=100, verbose_name=_('Nome'))

    nome_social = models.CharField(
        blank=True, default='', max_length=100, verbose_name=_('Nome Social'))

    apelido = models.CharField(
        blank=True, default='', max_length=100, verbose_name=_('Apelido'))

    sexo = models.CharField(
        blank=True, max_length=1, verbose_name=_('Sexo Biológico'),
        choices=SEXO_CHOICE)

    data_nascimento = models.DateField(
        blank=True, null=True, verbose_name=_('Data Nascimento'))

    identidade_genero = models.CharField(
        blank=True, default='',
        max_length=100, verbose_name=_('Como se reconhece?'))

    nivel_instrucao = models.ForeignKey(
        NivelInstrucao,
        related_name='dependente_set',
        blank=True, null=True, on_delete=SET_NULL,
        verbose_name=_('Nivel de Instrução'))

    class Meta:
        verbose_name = _('Dependente')
        verbose_name_plural = _('Dependentes')

    def __str__(self):
        return self.nome
class DependentePerfil(Dependente):
    """Proxy over Dependente used in the user-profile context (same
    table, profile-specific verbose names)."""

    class Meta:
        proxy = True
        verbose_name = _('Dependente do Perfil')
        verbose_name_plural = _('Dependentes do Perfil')
class LocalTrabalho(CmjAuditoriaModelMixin):
    """Workplace record of a ``Contato``, including its full address.

    The address fields (uf/municipio/cep/endereco/numero/bairro/distrito/
    regiao_municipal/complemento) mirror the ones on ``Endereco``.
    """
    # CASCADE: workplaces are removed together with their contact.
    contato = models.ForeignKey(Contato,
                                verbose_name=_('Contato'),
                                related_name='localtrabalho_set',
                                on_delete=CASCADE)
    nome = models.CharField(
        max_length=254, verbose_name=_('Nome / Razão Social'))
    nome_social = models.CharField(
        blank=True, default='', max_length=254,
        verbose_name=_('Nome Fantasia'))
    tipo = models.ForeignKey(
        TipoLocalTrabalho,
        related_name='localtrabalho_set',
        blank=True, null=True, on_delete=SET_NULL,
        verbose_name=_('Tipo do Local de Trabalho'))
    trecho = models.ForeignKey(
        Trecho,
        verbose_name=_('Trecho'),
        related_name='localtrabalho_set',
        blank=True, null=True, on_delete=SET_NULL)
    uf = models.CharField(max_length=2, blank=True, choices=LISTA_DE_UFS,
                          verbose_name=_('Estado'))
    municipio = models.ForeignKey(
        Municipio,
        verbose_name=Municipio._meta.verbose_name,
        related_name='localtrabalho_set',
        blank=True, null=True, on_delete=SET_NULL)
    # max_length=9 fits the Brazilian CEP format 'NNNNN-NNN'.
    cep = models.CharField(max_length=9, blank=True, default='',
                           verbose_name=_('CEP'))
    endereco = models.CharField(
        max_length=254, blank=True, default='',
        verbose_name=_('Endereço'),
        help_text=_('O campo endereço também é um campo de busca. Nele '
                    'você pode digitar qualquer informação, inclusive '
                    'digitar o cep para localizar o endereço, e vice-versa!'))
    numero = models.CharField(max_length=50, blank=True, default='',
                              verbose_name=_('Número'))
    bairro = models.ForeignKey(
        Bairro,
        verbose_name=Bairro._meta.verbose_name,
        related_name='localtrabalho_set',
        blank=True, null=True, on_delete=SET_NULL)
    distrito = models.ForeignKey(
        Distrito,
        verbose_name=Distrito._meta.verbose_name,
        related_name='localtrabalho_set',
        blank=True, null=True, on_delete=SET_NULL)
    regiao_municipal = models.ForeignKey(
        RegiaoMunicipal,
        verbose_name=RegiaoMunicipal._meta.verbose_name,
        related_name='localtrabalho_set',
        blank=True, null=True, on_delete=SET_NULL)
    complemento = models.CharField(max_length=30, blank=True, default='',
                                   verbose_name=_('Complemento'))
    data_inicio = models.DateField(
        blank=True, null=True, verbose_name=_('Data de Início'))
    data_fim = models.DateField(
        blank=True, null=True, verbose_name=_('Data de Fim'))
    preferencial = models.BooleanField(
        choices=YES_NO_CHOICES,
        default=True, verbose_name=_('Preferencial?'))
    # Per the help text: when filled, this role overrides the contact's
    # basic-data role on printed output.
    cargo = models.CharField(
        max_length=254, blank=True, default='',
        verbose_name=_('Cargo/Função'),
        help_text=_('Ao definir um cargo e função aqui, o '
                    'Cargo/Função preenchido na aba "Dados Básicos", '
                    'será desconsiderado ao gerar impressos!'))
    class Meta:
        verbose_name = _('Local de Trabalho')
        verbose_name_plural = _('Locais de Trabalho')
    def __str__(self):
        return self.nome
class LocalTrabalhoPerfil(LocalTrabalho):
    """Proxy of ``LocalTrabalho`` used in the profile ("Perfil") context."""
    class Meta:
        proxy = True
        verbose_name = _('Local de Trabalho do Perfil')
        verbose_name_plural = _('Locais de Trabalho do Perfil')
class Endereco(CmjAuditoriaModelMixin):
    """Postal address of a ``Contato``.

    Mirrors the address fields of ``LocalTrabalho``; ``preferencial``
    marks the contact's preferred address.
    """
    # CASCADE: addresses are removed together with their contact.
    contato = models.ForeignKey(Contato,
                                verbose_name=_('Contato'),
                                related_name='endereco_set',
                                on_delete=CASCADE)
    tipo = models.ForeignKey(
        TipoEndereco,
        related_name='endereco_set',
        blank=True, null=True, on_delete=SET_NULL,
        verbose_name=_('Tipo do Endereço'))
    trecho = models.ForeignKey(
        Trecho,
        verbose_name=_('Trecho'),
        related_name='endereco_set',
        blank=True, null=True, on_delete=SET_NULL)
    uf = models.CharField(max_length=2, blank=True, choices=LISTA_DE_UFS,
                          verbose_name=_('Estado'))
    municipio = models.ForeignKey(
        Municipio,
        verbose_name=_('Município'),
        related_name='endereco_set',
        blank=True, null=True, on_delete=SET_NULL)
    # max_length=9 fits the Brazilian CEP format 'NNNNN-NNN'.
    cep = models.CharField(max_length=9, blank=True, default='',
                           verbose_name=_('CEP'))
    endereco = models.CharField(
        max_length=254, blank=True, default='',
        verbose_name=_('Endereço'),
        help_text=_('O campo endereço também é um campo de busca, nele '
                    'você pode digitar qualquer informação, inclusive '
                    'digitar o cep para localizar o endereço, e vice-versa!'))
    numero = models.CharField(max_length=50, blank=True, default='',
                              verbose_name=_('Número'))
    bairro = models.ForeignKey(
        Bairro,
        verbose_name=Bairro._meta.verbose_name,
        related_name='endereco_set',
        blank=True, null=True, on_delete=SET_NULL)
    distrito = models.ForeignKey(
        Distrito,
        verbose_name=Distrito._meta.verbose_name,
        related_name='endereco_set',
        blank=True, null=True, on_delete=SET_NULL)
    regiao_municipal = models.ForeignKey(
        RegiaoMunicipal,
        verbose_name=RegiaoMunicipal._meta.verbose_name,
        related_name='endereco_set',
        blank=True, null=True, on_delete=SET_NULL)
    complemento = models.CharField(max_length=254, blank=True, default='',
                                   verbose_name=_('Complemento'))
    ponto_referencia = models.CharField(max_length=254, blank=True, default='',
                                        verbose_name=_('Pontos de Referência'))
    observacoes = models.TextField(
        blank=True, default='',
        # Grammar fix: 'Outras' (was 'Outros') agrees with 'observações'.
        verbose_name=_('Outras observações sobre o Endereço'))
    preferencial = models.BooleanField(
        choices=YES_NO_CHOICES,
        default=True, verbose_name=_('Preferencial?'))
    # The original code carried a disabled help_text for `preferencial` as a
    # stray no-op string expression; kept here as a comment instead:
    # help_text=_('Correspondências automáticas serão geradas sempre '
    #             'para os endereços preferenciais.')
    class Meta:
        verbose_name = _('Endereço')
        verbose_name_plural = _('Endereços')
    def __str__(self):
        # e.g. 'Rua X - 123' when a number is present, otherwise 'Rua X'.
        numero = (' - ' + self.numero) if self.numero else ''
        return self.endereco + numero
class EnderecoPerfil(Endereco):
    """Proxy of ``Endereco`` used in the profile ("Perfil") context."""
    class Meta:
        proxy = True
        verbose_name = _('Endereço do Perfil')
        verbose_name_plural = _('Endereços do Perfil')
class FiliacaoPartidaria(CmjAuditoriaModelMixin):
    """Party-membership record of a ``Contato``.

    An open membership has ``data_desfiliacao`` unset; the ``Partido``
    row is protected against deletion while referenced.
    """
    # CASCADE: memberships are removed together with their contact.
    contato = models.ForeignKey(Contato,
                                verbose_name=_('Contato'),
                                related_name='filiacaopartidaria_set',
                                on_delete=CASCADE)
    data = models.DateField(verbose_name=_('Data de Filiação'))
    parti
do = models.ForeignKey(Partido,
                                related_name='filiacaopartidaria_set',
                                verbose_name=Partido._meta.verbose_name,
                                on_delete=PROTECT)
    data_desfiliacao = models.DateField(
        blank=True, null=True, verbose_name=_('Data de Desfiliação'))
    @property
    def contato_nome(self):
        """Display name of the related contact."""
        return str(self.contato)
    class Meta:
        verbose_name = _('Filiação Partidária')
        verbose_name_plural = _('Filiações Partidárias')
    def __str__(self):
        return str(self.partido)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# PROCESSOS
# -----------------------------------------------------------------
# -----------------------------------------------------------------
class StatusProcesso(DescricaoAbstractModel):
    """Lookup table: status values a ``Processo`` can be in."""
    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Status de Processo')
        verbose_name_plural = _('Status de Processos')
class ClassificacaoProcesso(DescricaoAbstractModel):
    """Lookup table: classifications applicable to a ``Processo``."""
    class Meta(DescricaoAbstractModel.Meta):
        # Diacritics fix: singular now matches the plural form below
        # ('Classificações de Processos'); was 'Classificacao de Processo'.
        verbose_name = _('Classificação de Processo')
        verbose_name_plural = _('Classificações de Processos')
class TopicoProcesso(DescricaoAbstractModel):
    """Lookup table: topics that can be attached to a ``Processo``."""
    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Tópico de Processo')
        verbose_name_plural = _('Tópicos de Processos')
class AssuntoProcesso(DescricaoAbstractModel, CmjAuditoriaModelMixin):
    """Subject of a ``Processo``, scoped to an ``AreaTrabalho`` (workspace).

    Unlike the other lookup tables above, subjects are per-workspace and
    audited (``CmjAuditoriaModelMixin``).
    """
    # PROTECT: a workspace with subjects cannot be deleted.
    workspace = models.ForeignKey(
        AreaTrabalho,
        verbose_name=_('Área de Trabalho'),
        related_name='assuntoprocesso_set',
        on_delete=PROTECT)
    class Meta(DescricaoAbstractModel.Meta):
        verbose_name = _('Assunto de Processo')
        verbose_name_plural = _('Assuntos de Processos')
class Processo(CmjSearchMixin, CmjAuditoriaModelMixin):
    """A case/process tracked inside a workspace.

    Searchable (``CmjSearchMixin`` via ``fields_search``) and audited.
    Contacts, topics, classifications and subjects are all optional
    many-to-many relations.
    """
    # NOTE(review): max_length=9999 looks like a de-facto "unbounded" title;
    # confirm before tightening.
    titulo = models.CharField(max_length=9999, verbose_name=_('Título'))
    data = models.DateField(verbose_name=_('Data de Abertura'))
    descricao = models.TextField(
        blank=True, default='',
        verbose_name=_('Descrição do Processo'))
    observacoes = models.TextField(
        blank=True, default='',
        verbose_name=_('Outras observações sobre o Processo'))
    solucao = models.TextField(
        blank=True, default='',
        verbose_name=_('Solução do Processo'))
    contatos = models.ManyToManyField(Contato,
                                      blank=True,
                                      verbose_name=_(
                                          'Contatos Interessados no Processo'),
                                      related_name='processo_set',)
    status = models.ForeignKey(StatusProcesso,
                               blank=True, null=True,
                               verbose_name=_('Status do Processo'),
                               related_name='processo_set',
                               on_delete=SET_NULL)
    importancia = models.CharField(
        max_length=1, blank=True,
        verbose_name=_('Importância'), choices=IMPORTANCIA_CHOICE)
    topicos = models.ManyToManyField(
        TopicoProcesso, blank=True,
        related_name='processo_set',
        verbose_name=_('Tópicos'))
    classificacoes = models.ManyToManyField(
        ClassificacaoProcesso, blank=True,
        related_name='processo_set',
        verbose_name=_('Classificações'),)
    assuntos = models.ManyToManyField(
        AssuntoProcesso, blank=True,
        related_name='processo_set',
        verbose_name=_('Assuntos'),)
    # PROTECT: a workspace with processes cannot be deleted.
    workspace = models.ForeignKey(
        AreaTrabalho,
        verbose_name=_('Área de Trabalho'),
        related_name='processo_set',
        on_delete=PROTECT)
    class Meta:
        verbose_name = _('Processo')
        verbose_name_plural = _('Processos')
        ordering = ('titulo', )
    def __str__(self):
        return str(self.titulo)
    @property
    def fields_search(self):
        """Fields indexed by ``CmjSearchMixin`` for full-text search."""
        return ['titulo',
                'observacoes',
                'descricao']
class ProcessoContato(Processo):
    """Proxy of ``Processo`` (same verbose names; used for a distinct
    admin/view registration of the same table)."""
    class Meta:
        proxy = True
        verbose_name = _('Processo')
        verbose_name_plural = _('Processos')
class GrupoDeContatos(CmjAuditoriaModelMixin):
    """Named group of contacts, scoped to an ``AreaTrabalho`` workspace."""
    nome = models.CharField(max_length=100,
                            verbose_name=_('Nome do Grupo'))
    contatos = models.ManyToManyField(Contato,
                                      blank=True,
                                      verbose_name=_(
                                          'Contatos do Grupo'),
                                      related_name='grupodecontatos_set',)
    # PROTECT: a workspace with groups cannot be deleted.
    workspace = models.ForeignKey(
        AreaTrabalho,
        verbose_name=_('Área de Trabalho'),
        related_name='grupodecontatos_set',
        on_delete=PROTECT)
    class Meta:
        verbose_name = _('Grupo de Contatos')
        verbose_name_plural = _('Grupos de Contatos')
        ordering = ('nome', )
    def __str__(self):
        return str(self.nome)
# (license of the file above: GPL-3.0)
# ---------------------------------------------------------------------------
# File: nova/tests/functional/notification_sample_tests/test_instance.py
# (from rajalokan/nova)
# ---------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from nova import context
from nova import exception
from nova.tests import fixtures
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
class TestInstanceNotificationSample(
notification_sample_base.NotificationSampleTestBase):
    def setUp(self):
        """Set up the Neutron and Cinder fixtures used by every test."""
        # use_neutron must be set before the base setUp consumes the config.
        self.flags(use_neutron=True)
        super(TestInstanceNotificationSample, self).setUp()
        self.neutron = fixtures.NeutronFixture(self)
        self.useFixture(self.neutron)
        self.cinder = fixtures.CinderFixture(self)
        self.useFixture(self.cinder)
def _wait_until_swap_volume(self, server, volume_id):
for i in range(50):
volume_attachments = self.api.get_server_volumes(server['id'])
if len(volume_attachments) > 0:
for volume_attachment in volume_attachments:
if volume_attachment['volumeId'] == volume_id:
return
time.sleep(0.5)
self.fail('Volume swap operation failed.')
def _wait_until_swap_volume_error(self):
for i in range(50):
if self.cinder.swap_error:
return
time.sleep(0.5)
self.fail("Timed out waiting for volume swap error to occur.")
    def test_instance_action(self):
        """Exercise most instance actions against one booted server.

        A single test case is used to test most of the instance action
        notifications to avoid booting up an instance for every action
        separately.

        Every instance action test function shall make sure that after the
        function the instance is in active state and usable by other
        actions. Therefore some action, especially delete, cannot be used
        here as recovering from that action would mean to recreate the
        instance and that would go against the whole purpose of this
        optimization.
        """
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        # NOTE(review): the order of actions appears significant (e.g.
        # _test_restore_server resets reclaim_instance_interval at its end)
        # — confirm before reordering.
        actions = [
            self._test_power_off_on_server,
            self._test_restore_server,
            self._test_suspend_resume_server,
            self._test_pause_unpause_server,
            self._test_shelve_server,
            self._test_shelve_offload_server,
            self._test_unshelve_server,
            self._test_resize_server,
            self._test_revert_server,
            self._test_resize_confirm_server,
            self._test_snapshot_server,
            self._test_rebuild_server,
            self._test_reboot_server,
            self._test_reboot_server_error,
            self._test_trigger_crash_dump,
            self._test_volume_attach_detach_server,
            self._test_rescue_server,
            self._test_unrescue_server,
            self._test_soft_delete_server,
            self._test_attach_volume_error,
        ]
        for action in actions:
            # Each sub-test asserts absolute notification indices, so the
            # notifier is reset before every action.
            fake_notifier.reset()
            action(server)
            # Ensure that instance is in active state after an action
            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
    def test_create_delete_server(self):
        """Create then delete a server; verify the six notifications in
        order (create start/end, delete start, shutdown start/end,
        delete end)."""
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)
        self.assertEqual(6, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        # This list needs to be in order.
        expected_notifications = [
            'instance-create-start',
            'instance-create-end',
            'instance-delete-start',
            'instance-shutdown-start',
            'instance-shutdown-end',
            'instance-delete-end'
        ]
        for idx, notification in enumerate(expected_notifications):
            self._verify_notification(
                notification,
                replacements={
                    'reservation_id': server['reservation_id'],
                    'uuid': server['id']},
                actual=fake_notifier.VERSIONED_NOTIFICATIONS[idx])
    @mock.patch('nova.compute.manager.ComputeManager._build_resources')
    def test_create_server_error(self, mock_build):
        """A failure while building resources emits create-start followed
        by create-error."""
        def _build_resources(*args, **kwargs):
            # Any exception works; FlavorDiskTooSmall forces the ERROR path.
            raise exception.FlavorDiskTooSmall()
        mock_build.side_effect = _build_resources
        server = self._boot_a_server(
            expected_status='ERROR',
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-create-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-create-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
def _verify_instance_update_steps(self, steps, notifications,
initial=None):
replacements = {}
if initial:
replacements = initial
for i, step in enumerate(steps):
replacements.update(step)
self._verify_notification(
'instance-update',
replacements=replacements,
actual=notifications[i])
return replacements
    def test_create_delete_server_with_instance_update(self):
        """Verify the full instance.update state-transition sequence for a
        create (7 updates) followed by a delete (2 updates)."""
        # This makes server network creation synchronous which is necessary
        # for notification samples that expect instance.info_cache.network_info
        # to be set.
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        self.flags(notify_on_state_change='vm_and_task_state',
                   group='notifications')
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        instance_updates = self._wait_for_notifications('instance.update', 7)
        # The first notification comes from the nova-conductor the
        # rest is from the nova-compute. To keep the test simpler
        # assert this fact and then modify the publisher_id of the
        # first notification to match the template
        self.assertEqual('conductor:fake-mini',
                         instance_updates[0]['publisher_id'])
        instance_updates[0]['publisher_id'] = 'nova-compute:fake-mini'
        # Each step lists only the replacement fields that changed since the
        # previous step; see _verify_instance_update_steps.
        create_steps = [
            # nothing -> scheduling
            {'reservation_id': server['reservation_id'],
             'uuid': server['id'],
             'host': None,
             'node': None,
             'state_update.new_task_state': 'scheduling',
             'state_update.old_task_state': 'scheduling',
             'state_update.state': 'building',
             'state_update.old_state': 'building',
             'state': 'building'},
            # scheduling -> building
            {
                'state_update.new_task_state': None,
                'state_update.old_task_state': 'scheduling',
                'task_state': None},
            # scheduled
            {'host': 'compute',
             'node': 'fake-mini',
             'state_update.old_task_state': None},
            # building -> networking
            {'state_update.new_task_state': 'networking',
             'state_update.old_task_state': 'networking',
             'task_state': 'networking'},
            # networking -> block_device_mapping
            {'state_update.new_task_state': 'block_device_mapping',
             'state_update.old_task_state': 'networking',
             'task_state': 'block_device_mapping',
             'ip_addresses': [{
                 "nova_object.name": "IpPayload",
                 "nova_object.namespace": "nova",
                 "nova_object.version": "1.0",
                 "nova_object.data": {
                     "mac": "fa:16:3e:4c:2c:30",
                     "address": "192.168.1.3",
                     "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
                     "meta": {},
                     "version": 4,
                     "label": "private-network",
                     "device_name": "tapce531f90-19"
                 }}]
             },
            # block_device_mapping -> spawning
            {'state_update.new_task_state': 'spawning',
             'state_update.old_task_state': 'block_device_mapping',
             'task_state': 'spawning',
             },
            # spawning -> active
            {'state_update.new_task_state': None,
             'state_update.old_task_state': 'spawning',
             'state_update.state': 'active',
             'launched_at': '2012-10-29T13:42:11Z',
             'state': 'active',
             'task_state': None,
             'power_state': 'running'},
        ]
        replacements = self._verify_instance_update_steps(
            create_steps, instance_updates)
        fake_notifier.reset()
        # Let's generate some bandwidth usage data.
        # Just call the periodic task directly for simplicity
        self.compute.manager._poll_bandwidth_usage(context.get_admin_context())
        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)
        instance_updates = self._get_notifications('instance.update')
        self.assertEqual(2, len(instance_updates))
        delete_steps = [
            # active -> deleting
            {'state_update.new_task_state': 'deleting',
             'state_update.old_task_state': 'deleting',
             'state_update.old_state': 'active',
             'state': 'active',
             'task_state': 'deleting',
             'bandwidth': [
                 {'nova_object.namespace': 'nova',
                  'nova_object.name': 'BandwidthPayload',
                  'nova_object.data':
                      {'network_name': 'private-network',
                       'out_bytes': 0,
                       'in_bytes': 0},
                  'nova_object.version': '1.0'}],
             'tags': ["tag1"]
             },
            # deleting -> deleted
            {'state_update.new_task_state': None,
             'state_update.old_task_state': 'deleting',
             'state_update.old_state': 'active',
             'state_update.state': 'deleted',
             'state': 'deleted',
             'task_state': None,
             'terminated_at': '2012-10-29T13:42:11Z',
             'ip_addresses': [],
             'power_state': 'pending',
             'bandwidth': [],
             'tags': ["tag1"]
             },
        ]
        # The accumulated create replacements seed the delete verification.
        self._verify_instance_update_steps(delete_steps, instance_updates,
                                           initial=replacements)
    def _test_power_off_on_server(self, server):
        """Stop then start the server; verify the four power notifications."""
        self.api.post_server_action(server['id'], {'os-stop': {}})
        self._wait_for_state_change(self.api, server,
                                    expected_status='SHUTOFF')
        self.api.post_server_action(server['id'], {'os-start': {}})
        self._wait_for_state_change(self.api, server,
                                    expected_status='ACTIVE')
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-power_off-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-power_off-end',
            replacements={
                'reservation_id': server['reservation_id'],
                # NOTE(review): the sample's power_state is overridden to
                # 'running' here — presumably the payload is captured before
                # the power state is updated; confirm against the sample.
                'power_state': 'running',
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
        self._verify_notification(
            'instance-power_on-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
        self._verify_notification(
            'instance-power_on-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
def _test_shelve_server(self, server):
self.flags(shelved_offload_time = -1)
self.api.post_server_action(server['id'], {'shelve': {}})
self._wait_for_state_change(self.api, server,
expected_status='SHELVED')
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'instance-shelve-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'instance-shelve-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
post = {'unshelve': None}
self.api.post_server_action(server['id'], post)
def _test_shelve_offload_server(self, server):
self.flags(shelved_offload_time=-1)
self.api.post_server_action(server['id'], {'shelve': {}})
self._wait_for_state_change(self.api, server,
expected_status='SHELVED')
self.api.post_server_action(server['id'], {'shelveOffload': {}})
self._wait_for_state_change(self.api, server,
expected_status='SHELVED_OFFLOADED')
self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'instance-shelve-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'instance-shelve-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
self._verify_notification(
'instance-shelve_offload-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
self._verify_notification(
'instance-shelve_offload-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
self.api.post_server_action(server['id'], {'unshelve': None})
    def _test_unshelve_server(self, server):
        """Unshelve an offloaded server; verify the unshelve notifications."""
        # setting the shelved_offload_time to 0 should set the
        # instance status to 'SHELVED_OFFLOADED'
        self.flags(shelved_offload_time = 0)
        self.api.post_server_action(server['id'], {'shelve': {}})
        self._wait_for_state_change(self.api, server,
                                    expected_status='SHELVED_OFFLOADED')
        post = {'unshelve': None}
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Six notifications total; indices 4 and 5 are the unshelve pair
        # (the preceding four are from the shelve/offload phase).
        self.assertEqual(6, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-unshelve-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[4])
        self._verify_notification(
            'instance-unshelve-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[5])
    def _test_suspend_resume_server(self, server):
        """Suspend then resume; verify the four versioned notifications."""
        post = {'suspend': {}}
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.admin_api, server, 'SUSPENDED')
        post = {'resume': None}
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Four versioned notification are generated.
        # 0. instance-suspend-start
        # 1. instance-suspend-end
        # 2. instance-resume-start
        # 3. instance-resume-end
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-suspend-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-suspend-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
        self._verify_notification(
            'instance-resume-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
        self._verify_notification(
            'instance-resume-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
        # NOTE(review): this flag is never set in this method — it looks
        # like a leftover from _test_restore_server (which sets it to 30
        # and resets it itself). Confirm whether it can be removed.
        self.flags(reclaim_instance_interval=0)
def _test_pause_unpause_server(self, server):
self.api.post_server_action(server['id'], {'pause': {}})
self._wait_for_state_change(self.api, server, 'PAUSED')
self.api.post_server_action(server['id'], {'unpause': {}})
self._wait_for_state_change(self.api, server, 'ACTIVE')
# Four versioned notifications are generated
# 0. instance-pause-start
# 1. instance-pause-end
# 2. instance-unpause-start
# 3. instance-unpause-end
self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'instance-pause-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'instance-pause-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
self._verify_notification(
'instance-unpause-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
self._verify_notification(
'instance-unpause-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
    def _test_resize_server(self, server):
        """Resize to a new flavor; verify the four resize notifications,
        then revert so the server returns to ACTIVE."""
        self.flags(allow_resize_to_same_host=True)
        other_flavor_body = {
            'flavor': {
                'name': 'other_flavor',
                'ram': 256,
                'vcpus': 1,
                'disk': 1,
                'id': 'd5a8bb54-365a-45ae-abdb-38d249df7845'
            }
        }
        other_flavor_id = self.api.post_flavor(other_flavor_body)['id']
        extra_specs = {
            "extra_specs": {
                "hw:watchdog_action": "reset"}}
        self.admin_api.post_extra_spec(other_flavor_id, extra_specs)
        # Ignore the create flavor notification
        fake_notifier.reset()
        post = {
            'resize': {
                'flavorRef': other_flavor_id
            }
        }
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        # This list needs to be in order.
        expected_notifications = [
            'instance-resize-start',
            'instance-resize-end',
            'instance-resize_finish-start',
            'instance-resize_finish-end'
        ]
        for idx, notification in enumerate(expected_notifications):
            self._verify_notification(
                notification,
                replacements={
                    'reservation_id': server['reservation_id'],
                    'uuid': server['id']},
                actual=fake_notifier.VERSIONED_NOTIFICATIONS[idx])
        # Revert the resize so the instance is ACTIVE for the next action.
        post = {'revertResize': None}
        self.api.post_server_action(server['id'], post)
def _test_snapshot_server(self, server):
post = {'createImage': {'name': 'test-snap'}}
self.api.post_server_action(server['id'], post)
self._wait_for_notification('instance.snapshot.end')
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'instance-snapshot-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'instance-snapshot-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
    def _test_rebuild_server(self, server):
        """Rebuild the server; verify the rebuild start/end notifications."""
        post = {
            'rebuild': {
                'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'metadata': {}
            }
        }
        self.api.post_server_action(server['id'], post)
        # Before going back to ACTIVE state
        # server state need to be changed to REBUILD state
        self._wait_for_state_change(self.api, server,
                                    expected_status='REBUILD')
        self._wait_for_state_change(self.api, server,
                                    expected_status='ACTIVE')
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-rebuild-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-rebuild-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
    @mock.patch('nova.compute.manager.ComputeManager.'
                '_do_rebuild_instance_with_claim')
    def test_rebuild_server_exc(self, mock_rebuild):
        """A failed rebuild claim puts the server in ERROR and emits an
        instance-rebuild-error notification."""
        def _compute_resources_unavailable(*args, **kwargs):
            raise exception.ComputeResourcesUnavailable(
                reason="fake-resource")
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        fake_notifier.reset()
        post = {
            'rebuild': {
                'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'metadata': {}
            }
        }
        self.api.post_server_action(server['id'], post)
        mock_rebuild.side_effect = _compute_resources_unavailable
        self._wait_for_state_change(self.api, server, expected_status='ERROR')
        # NOTE(review): two notifications are expected but only index 0 is
        # verified; the second one is left unasserted here — confirm what
        # it is (e.g. compute.exception) and whether it should be checked.
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-rebuild-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
    def _test_restore_server(self, server):
        """Soft-delete then restore; verify the restore notifications."""
        # A positive reclaim interval makes delete a soft-delete, so the
        # instance can be restored afterwards.
        self.flags(reclaim_instance_interval=30)
        self.api.delete_server(server['id'])
        self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
        self.api.post_server_action(server['id'], {'restore': {}})
        self._wait_for_state_change(self.api, server, 'ACTIVE')
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-restore-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-restore-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
        # Restore the default so later actions delete for real.
        self.flags(reclaim_instance_interval=0)
def _test_reboot_server(self, server):
post = {'reboot': {'type': 'HARD'}}
self.api.post_server_action(server['id'], post)
self._wait_for_notification('instance.reboot.start')
self._wait_for_notification('instance.reboot.end')
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'instance-reboot-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'instance-reboot-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
    @mock.patch('nova.virt.fake.SmallFakeDriver.reboot')
    def _test_reboot_server_error(self, server, mock_reboot):
        """A driver failure during hard reboot emits reboot-start followed
        by reboot-error."""
        def _hard_reboot(*args, **kwargs):
            # Any exception works; it just has to escape the driver call.
            raise exception.UnsupportedVirtType(virt="FakeVirt")
        mock_reboot.side_effect = _hard_reboot
        post = {'reboot': {'type': 'HARD'}}
        self.api.post_server_action(server['id'], post)
        self._wait_for_notification('instance.reboot.start')
        self._wait_for_notification('instance.reboot.error')
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-reboot-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-reboot-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
def _attach_volume_to_server(self, server, volume_id):
self.api.post_server_volume(
server['id'], {"volumeAttachment": {"volumeId": volume_id}})
self._wait_for_notification('instance.volume_attach.end')
def _detach_volume_from_server(self, server, volume_id):
self.api.delete_server_volume(server['id'], volume_id)
self._wait_for_notification('instance.volume_detach.end')
    def _volume_swap_server(self, server, attachement_id, volume_id):
        """Swap the attached volume for ``volume_id``.

        NOTE: 'attachement_id' (sic) is the id of the currently attached
        volume being replaced; the misspelled name is kept to preserve the
        method signature for keyword callers.
        """
        self.api.put_server_volume(server['id'], attachement_id, volume_id)
    def test_volume_swap_server(self):
        """Swap an attached volume; verify the volume_swap start/end
        notifications."""
        server = self._boot_a_server(
            extra_params={'networks':
                          [{'port': self.neutron.port_1['id']}]})
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
        self.cinder.swap_volume_instance_uuid = server['id']
        self._volume_swap_server(server, self.cinder.SWAP_OLD_VOL,
                                 self.cinder.SWAP_NEW_VOL)
        self._wait_until_swap_volume(server, self.cinder.SWAP_NEW_VOL)
        # Six notifications total; indices 4 and 5 are the swap pair (the
        # first four come from the create and volume-attach phases).
        self.assertEqual(6, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-volume_swap-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[4])
        self._verify_notification(
            'instance-volume_swap-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[5])
    def test_volume_swap_server_with_error(self):
        """Failing volume swap: use the error volume ids from the cinder
        fixture so the swap fails, and verify the volume_swap-start and
        volume_swap-error versioned notifications.
        """
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._attach_volume_to_server(server, self.cinder.SWAP_ERR_OLD_VOL)
        # Point the fake cinder's *error* uuid at this instance so the swap
        # is routed down the failing code path.
        self.cinder.swap_volume_instance_error_uuid = server['id']
        self._volume_swap_server(server, self.cinder.SWAP_ERR_OLD_VOL,
                                 self.cinder.SWAP_ERR_NEW_VOL)
        self._wait_until_swap_volume_error()
        # Seven versioned notifications are generated. We only rely on the
        # first six because _wait_until_swap_volume_error will return True
        # after volume_api.unreserve is called on the cinder fixture, and that
        # happens before the instance fault is handled in the compute manager
        # which generates the last notification (compute.exception).
        # 0. instance-create-start
        # 1. instance-create-end
        # 2. instance-volume_attach-start
        # 3. instance-volume_attach-end
        # 4. instance-volume_swap-start
        # 5. instance-volume_swap-error
        # 6. compute.exception
        self.assertTrue(len(fake_notifier.VERSIONED_NOTIFICATIONS) >= 6,
                        'Unexpected number of versioned notifications. '
                        'Expected at least 6, got: %s' %
                        len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-volume_swap-start',
            replacements={
                'new_volume_id': self.cinder.SWAP_ERR_NEW_VOL,
                'old_volume_id': self.cinder.SWAP_ERR_OLD_VOL,
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[4])
        self._verify_notification(
            'instance-volume_swap-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[5])
    def _test_revert_server(self, server):
        # Intentionally a no-op: no versioned notification assertions are
        # made for the revert-resize operation in this test class.
        pass
    def _test_resize_confirm_server(self, server):
        # Intentionally a no-op: no versioned notification assertions are
        # made for the resize-confirm operation in this test class.
        pass
    def _test_trigger_crash_dump(self, server):
        # Intentionally a no-op: no versioned notification assertions are
        # made for the trigger-crash-dump operation in this test class.
        pass
def _test_volume_attach_detach_server(self, server):
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
# 0. volume_attach-start
# 1. volume_attach-end
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'instance-volume_attach-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'instance-volume_attach-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
fake_notifier.reset()
self._detach_volume_from_server(server, self.cinder.SWAP_OLD_VOL)
# 0. volume_detach-start
# 1. volume_detach-end
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
self._verify_notification(
'instance-volume_detach-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
self._verify_notification(
'instance-volume_detach-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
    def _test_rescue_server(self, server):
        # Intentionally a no-op: no versioned notification assertions are
        # made for the rescue operation in this test class.
        pass
    def _test_unrescue_server(self, server):
        # Intentionally a no-op: no versioned notification assertions are
        # made for the unrescue operation in this test class.
        pass
    def _test_soft_delete_server(self, server):
        # Intentionally a no-op: no versioned notification assertions are
        # made for the soft-delete operation in this test class.
        pass
    @mock.patch('nova.volume.cinder.API.attach')
    def _test_attach_volume_error(self, server, mock_attach):
        """Force the cinder attach call to fail and verify that the
        volume_attach-start and volume_attach-error versioned notifications
        are emitted.
        """
        def attach_volume(*args, **kwargs):
            # Simulate cinder being unreachable during the attach.
            raise exception.CinderConnectionFailed(
                reason="Connection timed out")
        mock_attach.side_effect = attach_volume
        post = {"volumeAttachment": {"volumeId": self.cinder.SWAP_OLD_VOL}}
        self.api.post_server_volume(server['id'], post)
        self._wait_for_notification('instance.volume_attach.error')
        # 0. volume_attach-start
        # 1. volume_attach-error
        # 2. compute.exception
        # We only rely on the first 2 notifications, in this case we don't
        # care about the exception notification.
        self.assertLessEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-volume_attach-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-volume_attach-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
| apache-2.0 |
TiVo/kafka | tests/kafkatest/services/security/minikdc.py | 8 | 6343 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import uuid
from io import open
from os import remove, close
from shutil import move
from tempfile import mkstemp
from ducktape.services.service import Service
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin, CORE_LIBS_JAR_NAME, CORE_DEPENDANT_TEST_LIBS_JAR_NAME
from kafkatest.version import DEV_BRANCH
class MiniKdc(KafkaPathResolverMixin, Service):
    """Ducktape service running Kafka's MiniKdc (an embedded Kerberos KDC)
    on a single node, for system tests that need SASL/GSSAPI authentication.

    The service starts the KDC remotely, then copies the generated keytab
    and krb5.conf back to the local test machine so other services can
    authenticate against it.
    """
    logs = {
        "minikdc_log": {
            "path": "/mnt/minikdc/minikdc.log",
            "collect_default": True}
    }
    # Remote (on-node) locations of the KDC's working files.
    WORK_DIR = "/mnt/minikdc"
    PROPS_FILE = "/mnt/minikdc/minikdc.properties"
    KEYTAB_FILE = "/mnt/minikdc/keytab"
    KRB5CONF_FILE = "/mnt/minikdc/krb5.conf"
    LOG_FILE = "/mnt/minikdc/minikdc.log"
    # Local copies of the keytab/krb5.conf, set lazily once per test
    # (see _set_local_keytab_file for why this cannot happen at class load).
    LOCAL_KEYTAB_FILE = None
    LOCAL_KRB5CONF_FILE = None
    @staticmethod
    def _set_local_keytab_file(local_scratch_dir):
        """Set MiniKdc.LOCAL_KEYTAB_FILE exactly once per test.
        LOCAL_KEYTAB_FILE is currently used like a global variable to provide a mechanism to share the
        location of the local keytab file among all services which might need it.
        Since individual ducktape tests are each run in a subprocess forked from the ducktape main process,
        class variables set at class load time are duplicated between test processes. This leads to collisions
        if test subprocesses are run in parallel, so we defer setting these class variables until after the test itself
        begins to run.
        """
        if MiniKdc.LOCAL_KEYTAB_FILE is None:
            MiniKdc.LOCAL_KEYTAB_FILE = os.path.join(local_scratch_dir, "keytab")
        return MiniKdc.LOCAL_KEYTAB_FILE
    @staticmethod
    def _set_local_krb5conf_file(local_scratch_dir):
        """Set MiniKdc.LOCAL_KRB5CONF_FILE exactly once per test.
        See _set_local_keytab_file for details why we do this.
        """
        if MiniKdc.LOCAL_KRB5CONF_FILE is None:
            MiniKdc.LOCAL_KRB5CONF_FILE = os.path.join(local_scratch_dir, "krb5conf")
        return MiniKdc.LOCAL_KRB5CONF_FILE
    def __init__(self, context, kafka_nodes, extra_principals=""):
        """Create the service on one node.

        :param context: ducktape test context.
        :param kafka_nodes: broker nodes; a 'kafka/<hostname>' principal is
            created for each of them at startup.
        :param extra_principals: space-separated additional principals to
            create besides 'client' and the per-broker principals.
        """
        super(MiniKdc, self).__init__(context, 1)
        self.kafka_nodes = kafka_nodes
        self.extra_principals = extra_principals
        # context.local_scratch_dir uses a ducktape feature:
        # each test_context object has a unique local scratch directory which is available for the duration of the test
        # which is automatically garbage collected after the test finishes
        MiniKdc._set_local_keytab_file(context.local_scratch_dir)
        MiniKdc._set_local_krb5conf_file(context.local_scratch_dir)
    def replace_in_file(self, file_path, pattern, subst):
        """Replace every occurrence of ``pattern`` with ``subst`` in the
        local file at ``file_path`` (rewrite via a temp file, then move the
        temp file into place).
        """
        fh, abs_path = mkstemp()
        with open(abs_path, 'w') as new_file:
            with open(file_path) as old_file:
                for line in old_file:
                    new_file.write(line.replace(pattern, subst))
        # Close the raw descriptor returned by mkstemp before replacing the
        # original file with the rewritten copy.
        close(fh)
        remove(file_path)
        move(abs_path, file_path)
    def start_node(self, node):
        """Render config, launch MiniKdc on the node, wait for it to come
        up, and copy the keytab and krb5.conf back to the local machine.
        """
        node.account.ssh("mkdir -p %s" % MiniKdc.WORK_DIR, allow_fail=False)
        props_file = self.render('minikdc.properties', node=node)
        node.account.create_file(MiniKdc.PROPS_FILE, props_file)
        self.logger.info("minikdc.properties")
        self.logger.info(props_file)
        # One 'kafka/<hostname>' principal per broker, plus the shared
        # 'client' principal and any caller-supplied extras.
        kafka_principals = ' '.join(['kafka/' + kafka_node.account.hostname for kafka_node in self.kafka_nodes])
        principals = 'client ' + kafka_principals + ' ' + self.extra_principals
        self.logger.info("Starting MiniKdc with principals " + principals)
        core_libs_jar = self.path.jar(CORE_LIBS_JAR_NAME, DEV_BRANCH)
        core_dependant_test_libs_jar = self.path.jar(CORE_DEPENDANT_TEST_LIBS_JAR_NAME, DEV_BRANCH)
        # Build the classpath from both jar globs, then launch MiniKdc in the
        # background with stdout/stderr appended to the service log.
        cmd = "for file in %s; do CLASSPATH=$CLASSPATH:$file; done;" % core_libs_jar
        cmd += " for file in %s; do CLASSPATH=$CLASSPATH:$file; done;" % core_dependant_test_libs_jar
        cmd += " export CLASSPATH;"
        cmd += " %s kafka.security.minikdc.MiniKdc %s %s %s %s 1>> %s 2>> %s &" % (self.path.script("kafka-run-class.sh", node), MiniKdc.WORK_DIR, MiniKdc.PROPS_FILE, MiniKdc.KEYTAB_FILE, principals, MiniKdc.LOG_FILE, MiniKdc.LOG_FILE)
        self.logger.debug("Attempting to start MiniKdc on %s with command: %s" % (str(node.account), cmd))
        with node.account.monitor_log(MiniKdc.LOG_FILE) as monitor:
            node.account.ssh(cmd)
            monitor.wait_until("MiniKdc Running", timeout_sec=60, backoff_sec=1, err_msg="MiniKdc didn't finish startup")
        node.account.copy_from(MiniKdc.KEYTAB_FILE, MiniKdc.LOCAL_KEYTAB_FILE)
        node.account.copy_from(MiniKdc.KRB5CONF_FILE, MiniKdc.LOCAL_KRB5CONF_FILE)
        # KDC is set to bind openly (via 0.0.0.0). Change krb5.conf to hold the specific KDC address
        self.replace_in_file(MiniKdc.LOCAL_KRB5CONF_FILE, '0.0.0.0', node.account.hostname)
    def stop_node(self, node):
        """Gracefully stop the MiniKdc JVM on the node."""
        self.logger.info("Stopping %s on %s" % (type(self).__name__, node.account.hostname))
        node.account.kill_java_processes("MiniKdc", clean_shutdown=True, allow_fail=False)
    def clean_node(self, node):
        """Force-kill the KDC and remove its remote and local files.

        NOTE(review): if clean_node runs before start_node ever set the
        LOCAL_* class attributes, os.path.exists(None) would raise on
        Python 3 — confirm the ducktape lifecycle guarantees __init__ ran.
        """
        node.account.kill_java_processes("MiniKdc", clean_shutdown=False, allow_fail=True)
        node.account.ssh("rm -rf " + MiniKdc.WORK_DIR, allow_fail=False)
        if os.path.exists(MiniKdc.LOCAL_KEYTAB_FILE):
            os.remove(MiniKdc.LOCAL_KEYTAB_FILE)
        if os.path.exists(MiniKdc.LOCAL_KRB5CONF_FILE):
            os.remove(MiniKdc.LOCAL_KRB5CONF_FILE)
| apache-2.0 |
ghandiosm/Test | addons/sale/res_config.py | 3 | 6335 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class sale_configuration(osv.TransientModel):
    """Sales settings wizard (extends sale.config.settings).

    Declares the configuration fields shown on the Sales settings screen and
    the helpers that persist some of them as ir.values defaults. Uses the
    legacy osv/cr-uid API.
    """
    _inherit = 'sale.config.settings'
    # Each selection with (0, ...)/(1, ...) options maps to a boolean-like
    # setting; 'implied_group' toggles membership of the named res.groups
    # record, and 'module_*' fields trigger module (un)installation.
    _columns = {
        'group_product_variant': fields.selection([
            (0, "No variants on products"),
            (1, 'Products can have several attributes, defining variants (Example: size, color,...)')
            ], "Product Variants",
            help='Work with product variant allows you to define some variant of the same products, an ease the product management in the ecommerce for example',
            implied_group='product.group_product_variant'),
        'group_sale_pricelist':fields.boolean("Use pricelists to adapt your price per customers",
            implied_group='product.group_sale_pricelist',
            help="""Allows to manage different prices based on rules per category of customers.
                Example: 10% for retailers, promotion of 5 EUR on this product, etc."""),
        'group_pricelist_item':fields.boolean("Show pricelists to customers",
            implied_group='product.group_pricelist_item'),
        'group_product_pricelist':fields.boolean("Show pricelists On Products",
            implied_group='product.group_product_pricelist'),
        'group_uom':fields.selection([
            (0, 'Products have only one unit of measure (easier)'),
            (1, 'Some products may be sold/purchased in different unit of measures (advanced)')
            ], "Unit of Measures",
            implied_group='product.group_uom',
            help="""Allows you to select and maintain different units of measure for products."""),
        'group_discount_per_so_line': fields.selection([
            (0, 'No discount on sales order lines, global discount only'),
            (1, 'Allow discounts on sales order lines')
            ], "Discount",
            implied_group='sale.group_discount_per_so_line'),
        'group_display_incoterm':fields.selection([
            (0, 'No incoterm on reports'),
            (1, 'Show incoterms on sale orders and invoices')
            ], "Incoterms",
            implied_group='sale.group_display_incoterm',
            help="The printed reports will display the incoterms for the sale orders and the related invoices"),
        'module_sale_margin': fields.selection([
            (0, 'Salespeople do not need to view margins when quoting'),
            (1, 'Display margins on quotations and sales orders')
            ], "Margins"),
        'module_website_quote': fields.selection([
            (0, 'Print quotes or send by email'),
            (1, 'Send online quotations based on templates (advanced)')
            ], "Online Quotations"),
        'group_sale_delivery_address': fields.selection([
            (0, "Invoicing and shipping addresses are always the same (Example: services companies)"),
            (1, 'Display 3 fields on sales orders: customer, invoice address, delivery address')
            ], "Addresses", implied_group='sale.group_delivery_invoice_address'),
        'sale_pricelist_setting': fields.selection([('fixed', 'A single sale price per product'), ('percentage', 'Different prices per customer segment'), ('formula', 'Advanced pricing based on formula')], required=True,
            help='Fix Price: all price manage from products sale price.\n'
                 'Different prices per Customer: you can assign price on buying of minimum quantity in products sale tab.\n'
                 'Advanced pricing based on formula: You can have all the rights on pricelist'),
        'default_invoice_policy': fields.selection([
            ('order', 'Invoice ordered quantities'),
            ('delivery', 'Invoice delivered quantities'),
            ('cost', 'Invoice based on costs (time and material, expenses)')
            ], 'Default Invoicing', default_model='product.template'),
        'deposit_product_id_setting': fields.many2one('product.product', 'Deposit Product',\
            domain="[('type', '=', 'service')]",\
            help='Default product used for payment advances'),
        'auto_done_setting': fields.selection([
            (0, "Allow to edit sales order from the 'Sales Order' menu (not from the Quotation menu)"),
            (1, "Never allow to modify a confirmed sale order")
            ], "Sale Order Modification"),
        'module_sale_contract': fields.boolean("Manage subscriptions and recurring invoicing"),
        'module_website_sale_digital': fields.boolean("Sell digital products - provide downloadable content on your customer portal"),
        'module_website_portal': fields.boolean("Enable customer portal to track orders, delivery and invoices"),
    }
    _defaults = {
        'sale_pricelist_setting': 'fixed',
        'default_invoice_policy': 'order',
    }
    # Persist the pricelist mode as the module-wide default.
    def set_sale_defaults(self, cr, uid, ids, context=None):
        sale_price = self.browse(cr, uid, ids, context=context).sale_pricelist_setting
        res = self.pool.get('ir.values').set_default(cr, uid, 'sale.config.settings', 'sale_pricelist_setting', sale_price)
        return res
    # Persist the chosen deposit product (by id) as the module-wide default.
    def set_deposit_product_id_defaults(self, cr, uid, ids, context=None):
        deposit_product_id = self.browse(cr, uid, ids, context=context).deposit_product_id_setting
        res = self.pool.get('ir.values').set_default(cr, uid, 'sale.config.settings', 'deposit_product_id_setting', deposit_product_id.id)
        return res
    # Persist the "lock confirmed sale orders" flag as the default.
    def set_auto_done_defaults(self, cr, uid, ids, context=None):
        auto_done = self.browse(cr, uid, ids, context=context).auto_done_setting
        res = self.pool.get('ir.values').set_default(cr, uid, 'sale.config.settings', 'auto_done_setting', auto_done)
        return res
    # Keep the three pricelist group checkboxes consistent with the selected
    # pricing mode: 'percentage' and 'formula' enable different group
    # combinations; 'fixed' (the fallthrough) disables them all.
    def onchange_sale_price(self, cr, uid, ids, sale_pricelist_setting, context=None):
        if sale_pricelist_setting == 'percentage':
            return {'value': {'group_product_pricelist': True, 'group_sale_pricelist': True, 'group_pricelist_item': False}}
        if sale_pricelist_setting == 'formula':
            return {'value': {'group_pricelist_item': True, 'group_sale_pricelist': True, 'group_product_pricelist': False}}
        return {'value': {'group_pricelist_item': False, 'group_sale_pricelist': False, 'group_product_pricelist': False}}
mcking49/apache-flask | Python/Lib/bsddb/test/test_sequence.py | 68 | 5274 | import unittest
import os
from test_all import db, test_support, get_new_environment_path, get_new_database_path
class DBSequenceTest(unittest.TestCase):
    """Tests for bsddb's DBSequence wrapper (Python 2 era code: note the
    long-integer literals such as 1L below).

    Each test gets a fresh DBEnv/DB pair in a scratch home directory; the
    sequence under test, if created, is stored on self.seq so tearDown can
    close it.
    """
    def setUp(self):
        # 2**32, used to build sequence start values beyond 32-bit range.
        self.int_32_max = 0x100000000
        self.homeDir = get_new_environment_path()
        self.filename = "test"
        self.dbenv = db.DBEnv()
        self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL, 0666)
        self.d = db.DB(self.dbenv)
        self.d.open(self.filename, db.DB_BTREE, db.DB_CREATE, 0666)
    def tearDown(self):
        # Close in dependency order: sequence, then DB, then environment.
        if hasattr(self, 'seq'):
            self.seq.close()
            del self.seq
        if hasattr(self, 'd'):
            self.d.close()
            del self.d
        if hasattr(self, 'dbenv'):
            self.dbenv.close()
            del self.dbenv
        test_support.rmtree(self.homeDir)
    def test_get(self):
        # get(n) returns the current value and advances the sequence by n.
        self.seq = db.DBSequence(self.d, flags=0)
        start_value = 10 * self.int_32_max
        self.assertEqual(0xA00000000, start_value)
        self.assertEqual(None, self.seq.initial_value(start_value))
        self.assertEqual(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE))
        self.assertEqual(start_value, self.seq.get(5))
        self.assertEqual(start_value + 5, self.seq.get())
    def test_remove(self):
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(None, self.seq.remove(txn=None, flags=0))
        # The sequence is gone; drop it so tearDown does not close it again.
        del self.seq
    def test_get_key(self):
        self.seq = db.DBSequence(self.d, flags=0)
        key = 'foo'
        self.assertEqual(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE))
        self.assertEqual(key, self.seq.get_key())
    def test_get_dbp(self):
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(self.d, self.seq.get_dbp())
    def test_cachesize(self):
        self.seq = db.DBSequence(self.d, flags=0)
        cashe_size = 10
        self.assertEqual(None, self.seq.set_cachesize(cashe_size))
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(cashe_size, self.seq.get_cachesize())
    def test_flags(self):
        self.seq = db.DBSequence(self.d, flags=0)
        flag = db.DB_SEQ_WRAP;
        self.assertEqual(None, self.seq.set_flags(flag))
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(flag, self.seq.get_flags() & flag)
    def test_range(self):
        self.seq = db.DBSequence(self.d, flags=0)
        seq_range = (10 * self.int_32_max, 11 * self.int_32_max - 1)
        self.assertEqual(None, self.seq.set_range(seq_range))
        self.seq.initial_value(seq_range[0])
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(seq_range, self.seq.get_range())
    def test_stat(self):
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        stat = self.seq.stat()
        for param in ('nowait', 'min', 'max', 'value', 'current',
                      'flags', 'cache_size', 'last_value', 'wait'):
            self.assertTrue(param in stat, "parameter %s isn't in stat info" % param)
    if db.version() >= (4,7) :
        # This code checks a crash solved in Berkeley DB 4.7
        def test_stat_crash(self) :
            d=db.DB()
            d.open(None,dbtype=db.DB_HASH,flags=db.DB_CREATE)  # In RAM
            seq = db.DBSequence(d, flags=0)
            self.assertRaises(db.DBNotFoundError, seq.open,
                              key='id', txn=None, flags=0)
            self.assertRaises(db.DBInvalidArgError, seq.stat)
            d.close()
    def test_64bits(self) :
        # We don't use both extremes because they are problematic
        value_plus=(1L<<63)-2
        self.assertEqual(9223372036854775806L,value_plus)
        value_minus=(-1L<<63)+1  # Two complement
        self.assertEqual(-9223372036854775807L,value_minus)
        # Count upwards from just below the signed 64-bit maximum...
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.initial_value(value_plus-1))
        self.assertEqual(None, self.seq.open(key='id', txn=None,
                                             flags=db.DB_CREATE))
        self.assertEqual(value_plus-1, self.seq.get(1))
        self.assertEqual(value_plus, self.seq.get(1))
        self.seq.remove(txn=None, flags=0)
        # ...then count upwards from just above the signed 64-bit minimum.
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.initial_value(value_minus))
        self.assertEqual(None, self.seq.open(key='id', txn=None,
                                             flags=db.DB_CREATE))
        self.assertEqual(value_minus, self.seq.get(1))
        self.assertEqual(value_minus+1, self.seq.get(1))
    def test_multiple_close(self):
        self.seq = db.DBSequence(self.d)
        self.seq.close()  # You can close a Sequence multiple times
        self.seq.close()
        self.seq.close()
def test_suite():
    """Return a TestSuite aggregating every DBSequence test case."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(unittest.makeSuite(DBSequenceTest))
    return all_tests
if __name__ == '__main__':
    # Run the full DBSequence suite when this module is executed directly.
    unittest.main(defaultTest='test_suite')
| mit |
Aorjoa/aiyara-ceph-dash | .tox/py27/lib/python2.7/site-packages/requests/packages/urllib3/util/url.py | 713 | 5879 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']


class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Fixed: the original read ``slots = ()``, a typo for ``__slots__ = ()``.
    # Without real __slots__ every Url instance carries a useless per-instance
    # __dict__, defeating the namedtuple's lightweight memory layout.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize relative paths so request_uri/url are always well-formed.
        if path and not path.startswith('/'):
            path = '/' + path
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'
        if self.query is not None:
            uri += '?' + self.query
        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ...     '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ''
        # We use "is not None" we want things to happen with empty strings (or 0 port)
        if scheme is not None:
            url += scheme + '://'
        if auth is not None:
            url += auth + '@'
        if host is not None:
            url += host
        if port is not None:
            url += ':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += '?' + query
        if fragment is not None:
            url += '#' + fragment
        return url

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    Split ``s`` on whichever delimiter from ``delims`` occurs earliest.

    Returns a ``(head, tail, delimiter)`` triple. When no delimiter is
    present, the head is the whole input, the tail is empty and the
    delimiter is None.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    earliest_idx = None
    earliest_delim = None
    for candidate in delims:
        found = s.find(candidate)
        if found < 0:
            continue
        if earliest_idx is None or found < earliest_idx:
            earliest_idx = found
            earliest_delim = candidate
    if earliest_idx is None:
        return s, '', None
    return s[:earliest_idx], s[earliest_idx + 1:], earliest_delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Raises :class:`.LocationParseError` when a non-blank port is not a
    plain integer.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.
    if not url:
        # Empty
        return Url()
    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None
    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)
    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])
    if delim:
        # Reassemble the path
        path = delim + path_
    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)
    # IPv6: a bracketed literal; keep the brackets as part of the host so
    # the port split below only sees what follows the closing bracket.
    if url and url[0] == '[':
        host, url = url.split(']', 1)
        host += ']'
    # Port
    if ':' in url:
        _host, port = url.split(':', 1)
        if not host:
            host = _host
        if port:
            # If given, ports must be integers.
            if not port.isdigit():
                raise LocationParseError(url)
            port = int(port)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None
    elif not host and url:
        host = url
    if not path:
        # Nothing after the authority: fragment/query cannot exist.
        return Url(scheme, auth, host, port, path, query, fragment)
    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)
    # Query
    if '?' in path:
        path, query = path.split('?', 1)
    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
| bsd-2-clause |
yufengg/tensorflow | tensorflow/contrib/metrics/python/ops/histogram_ops.py | 159 | 10459 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Metrics that use histograms.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
def auc_using_histogram(boolean_labels,
                        scores,
                        score_range,
                        nbins=100,
                        collections=None,
                        check_shape=True,
                        name=None):
  """AUC computed by maintaining histograms.

  Rather than computing AUC directly, this Op maintains Variables containing
  histograms of the scores associated with `True` and `False` labels.  By
  comparing these the AUC is generated, with some discretization error.
  See: "Efficient AUC Learning Curve Calculation" by Bouckaert.

  This AUC Op updates in `O(batch_size + nbins)` time and works well even with
  large class imbalance.  The accuracy is limited by discretization error due
  to finite number of bins.  If scores are concentrated in a fewer bins,
  accuracy is lower.  If this is a concern, we recommend trying different
  numbers of bins and comparing results.

  Args:
    boolean_labels:  1-D boolean `Tensor`.  Entry is `True` if the corresponding
      record is in class.
    scores:  1-D numeric `Tensor`, same shape as boolean_labels.
    score_range:  `Tensor` of shape `[2]`, same dtype as `scores`.  The min/max
      values of score that we expect.  Scores outside range will be clipped.
    nbins:  Integer number of bins to use.  Accuracy strictly increases as the
      number of bins increases.
    collections: List of graph collections keys. Internal histogram Variables
      are added to these collections. Defaults to `[GraphKeys.LOCAL_VARIABLES]`.
    check_shape:  Boolean.  If `True`, do a runtime shape check on the scores
      and labels.
    name:  A name for this Op.  Defaults to "auc_using_histogram".

  Returns:
    auc: `float32` scalar `Tensor`.  Fetching this converts internal histograms
      to auc value.
    update_op: `Op`, when run, updates internal histograms.
  """
  if collections is None:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  with variable_scope.variable_scope(
      name, 'auc_using_histogram', [boolean_labels, scores, score_range]):
    # Squeeze out any size-1 dimensions so labels/scores line up as rank 1.
    scores, boolean_labels = tensor_util.remove_squeezable_dimensions(
        scores, boolean_labels)
    score_range = ops.convert_to_tensor(score_range, name='score_range')
    boolean_labels, scores = _check_labels_and_scores(
        boolean_labels, scores, check_shape)
    # Per-batch histograms of scores for the True and False populations.
    hist_true, hist_false = _make_auc_histograms(boolean_labels, scores,
                                                 score_range, nbins)
    # Running (accumulated) histograms plus the op that folds a batch in.
    hist_true_acc, hist_false_acc, update_op = _auc_hist_accumulate(hist_true,
                                                                    hist_false,
                                                                    nbins,
                                                                    collections)
    # Fetching `auc` converts the accumulated histograms into the AUC value.
    auc = _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins)
    return auc, update_op
def _check_labels_and_scores(boolean_labels, scores, check_shape):
  """Convert labels/scores to tensors, optionally asserting they are rank 1.

  Args:
    boolean_labels:  Boolean `Tensor` (or convertible).
    scores:  Numeric `Tensor` (or convertible).
    check_shape:  Boolean.  If `True`, attach runtime assertions that both
      tensors have rank 1.

  Returns:
    Tuple `(boolean_labels, scores)` as tensors.  When `check_shape` is
    `True`, the returned tensors carry the rank-1 assertions as control
    dependencies.

  Raises:
    ValueError: If `boolean_labels` does not have dtype bool.
  """
  with ops.name_scope('_check_labels_and_scores',
                      values=[boolean_labels, scores]):
    boolean_labels = ops.convert_to_tensor(boolean_labels,
                                           name='boolean_labels')
    scores = ops.convert_to_tensor(scores, name='scores')
    if boolean_labels.dtype != dtypes.bool:
      # Bug fix: the original passed the dtype as a *second argument* to
      # ValueError (logging-style), leaving the %s unformatted.  Interpolate
      # it into the message explicitly instead.
      raise ValueError(
          'Argument boolean_labels should have dtype bool. Found: %s'
          % boolean_labels.dtype)
    if check_shape:
      labels_rank_1 = control_flow_ops.Assert(
          math_ops.equal(1, array_ops.rank(boolean_labels)),
          ['Argument boolean_labels should have rank 1.  Found: ',
           boolean_labels.name, array_ops.shape(boolean_labels)])
      scores_rank_1 = control_flow_ops.Assert(
          math_ops.equal(1, array_ops.rank(scores)),
          ['Argument scores should have rank 1.  Found: ', scores.name,
           array_ops.shape(scores)])
      with ops.control_dependencies([labels_rank_1, scores_rank_1]):
        return boolean_labels, scores
    else:
      return boolean_labels, scores
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
  """Create histogram tensors from one batch of labels/scores.

  Args:
    boolean_labels:  1-D boolean `Tensor` used to partition `scores`.
    scores:  1-D numeric `Tensor`, same shape as `boolean_labels`.
    score_range:  `Tensor` of shape `[2]`; scores outside it are clipped by
      `histogram_fixed_width`.
    nbins:  Integer number of histogram bins.

  Returns:
    Pair `(hist_true, hist_false)` of int64 `Tensor`s of shape `[nbins]`,
    counting the scores of records labeled True and False respectively.
  """
  with variable_scope.variable_scope(
      None, 'make_auc_histograms', [boolean_labels, scores, nbins]):
    # Histogram of scores for records in this batch with True label.
    hist_true = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, boolean_labels),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_true')
    # Histogram of scores for records in this batch with False label.
    hist_false = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_false')
    return hist_true, hist_false
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables.

  Args:
    hist_true:  Per-batch histogram `Tensor` for True-labeled scores.
    hist_false:  Per-batch histogram `Tensor` for False-labeled scores.
    nbins:  Integer number of histogram bins.
    collections:  Graph collection keys the accumulator variables join.

  Returns:
    Tuple `(hist_true_acc, hist_false_acc, update_op)`: the two running
    total variables, and the op that adds the per-batch histograms to them.
  """
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        shape=[nbins],
        dtype=hist_true.dtype,
        initializer=init_ops.zeros_initializer(),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        shape=[nbins],
        dtype=hist_true.dtype,
        initializer=init_ops.zeros_initializer(),
        collections=collections,
        trainable=False)
    # Single op that folds both per-batch histograms into the accumulators.
    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')
    return hist_true_acc, hist_false_acc, update_op
def _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins):
  """Convert histograms to auc.

  Args:
    hist_true_acc: `Tensor` holding accumulated histogram of scores for records
      that were `True`.
    hist_false_acc: `Tensor` holding accumulated histogram of scores for
      records that were `False`.
    nbins: Integer number of bins in the histograms.

  Returns:
    Scalar `Tensor` estimating AUC.
  """
  # Note that this follows the "Approximating AUC" section in:
  # Efficient AUC learning curve calculation, R. R. Bouckaert,
  # AI'06 Proceedings of the 19th Australian joint conference on Artificial
  # Intelligence: advances in Artificial Intelligence
  # Pages 181-191.
  # Note that the above paper has an error, and we need to re-order our bins to
  # go from high to low score.

  # Normalize histogram so we get fraction in each bin.
  normed_hist_true = math_ops.truediv(hist_true_acc,
                                      math_ops.reduce_sum(hist_true_acc))
  normed_hist_false = math_ops.truediv(hist_false_acc,
                                       math_ops.reduce_sum(hist_false_acc))

  # These become delta x, delta y from the paper.
  # The reversal implements the high-to-low bin ordering mentioned above.
  delta_y_t = array_ops.reverse_v2(normed_hist_true, [0], name='delta_y_t')
  delta_x_t = array_ops.reverse_v2(normed_hist_false, [0], name='delta_x_t')

  # strict_1d_cumsum requires float32 args.
  delta_y_t = math_ops.cast(delta_y_t, dtypes.float32)
  delta_x_t = math_ops.cast(delta_x_t, dtypes.float32)

  # Trapezoidal integration, \int_0^1 0.5 * (y_t + y_{t-1}) dx_t
  # first_trap handles the t=0 term (y_{-1} is implicitly 0).
  y_t = _strict_1d_cumsum(delta_y_t, nbins)
  first_trap = delta_x_t[0] * y_t[0] / 2.0
  other_traps = delta_x_t[1:] * (y_t[1:] + y_t[:nbins - 1]) / 2.0
  return math_ops.add(first_trap, math_ops.reduce_sum(other_traps), name='auc')
# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# Also see if cast to float32 above can be removed with new cumsum.
# See:  https://github.com/tensorflow/tensorflow/issues/813
def _strict_1d_cumsum(tensor, len_tensor):
  """Cumulative sum of a 1D tensor of known length, via pad-and-convolve."""
  with ops.name_scope('strict_1d_cumsum', values=[tensor]):
    # Nothing to accumulate for an empty tensor.
    if len_tensor == 0:
      return constant_op.constant([])
    # Left-pad with len_tensor - 1 zeros and convolve against an all-ones
    # kernel; output position k then holds sum(tensor[:k + 1]).
    padded = array_ops.pad(tensor, [[len_tensor - 1, 0]])
    ones_kernel = array_ops.ones_like(padded)
    return _strict_conv1d(padded, ones_kernel)[:len_tensor]
# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# See:  https://github.com/tensorflow/tensorflow/issues/813
def _strict_conv1d(x, h):
  """Return x * h (1D convolution) for rank 1 tensors x and h."""
  with ops.name_scope('strict_conv1d', values=[x, h]):
    # Embed both vectors into the 4-D layout conv2d expects:
    # input as [batch=1, width, 1, channels=1], filter as [width, 1, 1, 1].
    x_4d = array_ops.reshape(x, (1, -1, 1, 1))
    h_4d = array_ops.reshape(h, (-1, 1, 1, 1))
    convolved = nn_ops.conv2d(x_4d, h_4d, [1, 1, 1, 1], 'SAME')
    # Collapse back down to rank 1.
    return array_ops.reshape(convolved, [-1])
| apache-2.0 |
lixiangning888/whole_project | modules/signatures/antivm_generic_diskreg.py | 3 | 1470 | # -*- coding: utf-8 -*-
# Copyright (C) 2012,2015 Claudio "nex" Guarnieri (@botherder), Accuvant, Inc. (bspengler@accuvant.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class AntiVMDiskReg(Signature):
    """Flags registry reads of disk-drive enumeration keys.

    Malware commonly inspects these keys because virtual machines expose
    telltale vendor strings (VBOX, VMware, QEMU, ...) in the disk entries.
    """

    name = "antivm_generic_diskreg"
    description = "检查注册表中的磁盘驱动器,可能被用来实现反虚拟机"
    severity = 3
    confidence = 50
    categories = ["anti-vm"]
    authors = ["nex"]
    minimum = "0.5"

    def run(self):
        # Registry paths that enumerate physical/IDE disks.
        patterns = (
            ".*\\\\SYSTEM\\\\(CurrentControlSet|ControlSet001)\\\\Enum\\\\IDE$",
            ".*\\\\SYSTEM\\\\(CurrentControlSet|ControlSet001)\\\\Services\\\\Disk\\\\Enum\\\\.*",
        )
        # Trigger as soon as any pattern matches an accessed key.
        return any(self.check_key(pattern=pattern, regex=True)
                   for pattern in patterns)
| lgpl-3.0 |
t0in4/django | tests/template_tests/test_smartif.py | 580 | 2178 | import unittest
from django.template.smartif import IfParser
class SmartIfTests(unittest.TestCase):
    """Unit tests for the smart-if parser's parse tree and evaluation."""

    def assertCalcEqual(self, expected, tokens):
        """Assert that parsing *tokens* and evaluating against an empty
        context yields *expected*."""
        self.assertEqual(expected, IfParser(tokens).parse().eval({}))

    # We only test things here that are difficult to test elsewhere
    # Many other tests are found in the main tests for builtin template tags
    # Test parsing via the printed parse tree
    def test_not(self):
        var = IfParser(["not", False]).parse()
        self.assertEqual("(not (literal False))", repr(var))
        self.assertTrue(var.eval({}))

        self.assertFalse(IfParser(["not", True]).parse().eval({}))

    def test_or(self):
        var = IfParser([True, "or", False]).parse()
        self.assertEqual("(or (literal True) (literal False))", repr(var))
        self.assertTrue(var.eval({}))

    def test_in(self):
        # 'in' against a non-container (None) evaluates to False, not error.
        list_ = [1, 2, 3]
        self.assertCalcEqual(True, [1, 'in', list_])
        self.assertCalcEqual(False, [1, 'in', None])
        self.assertCalcEqual(False, [None, 'in', list_])

    def test_not_in(self):
        list_ = [1, 2, 3]
        self.assertCalcEqual(False, [1, 'not', 'in', list_])
        self.assertCalcEqual(True, [4, 'not', 'in', list_])
        self.assertCalcEqual(False, [1, 'not', 'in', None])
        self.assertCalcEqual(True, [None, 'not', 'in', list_])

    def test_precedence(self):
        # Operator precedence should mirror Python's:
        # (False and False) or True == True  <- we want this one, like Python
        # False and (False or True) == False
        self.assertCalcEqual(True, [False, 'and', False, 'or', True])
        # True or (False and False) == True  <- we want this one, like Python
        # (True or False) and False == False
        self.assertCalcEqual(True, [True, 'or', False, 'and', False])
        # (1 or 1) == 2  -> False
        # 1 or (1 == 2)  -> True   <- we want this one
        self.assertCalcEqual(True, [1, 'or', 1, '==', 2])
        self.assertCalcEqual(True, [True, '==', True, 'or', True, '==', False])
        self.assertEqual("(or (and (== (literal 1) (literal 2)) (literal 3)) (literal 4))",
                         repr(IfParser([1, '==', 2, 'and', 3, 'or', 4]).parse()))
| bsd-3-clause |
nirmeshk/oh-mainline | vendor/packages/south/south/hacks/django_1_0.py | 118 | 3378 | """
Hacks for the Django 1.0/1.0.2 releases.
"""
import django
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
from django.db.models.loading import cache
from django.core import management
from django.core.management.commands.flush import Command as FlushCommand
from django.utils.datastructures import SortedDict
from south.utils.py3 import string_types
class SkipFlushCommand(FlushCommand):
    """Drop-in replacement for Django's ``flush`` management command that
    does nothing, used to suppress the flush normally triggered while the
    test database is being created."""

    def handle_noargs(self, **options):
        # no-op to avoid calling flush
        return
class Hacks:
    """Monkeypatch helpers that South needs for Django 1.0/1.0.2 support,
    mostly around swapping INSTALLED_APPS and resetting the model AppCache."""

    def set_installed_apps(self, apps):
        """
        Sets Django's INSTALLED_APPS setting to be effectively the list passed in.
        """
        # Make sure it's a list.
        apps = list(apps)
        # Make sure it contains strings
        if apps:
            assert isinstance(apps[0], string_types), "The argument to set_installed_apps must be a list of strings."
        # Monkeypatch in!  The previous value is parked on the settings
        # object so reset_installed_apps() can restore it.
        settings.INSTALLED_APPS, settings.OLD_INSTALLED_APPS = (
            apps,
            settings.INSTALLED_APPS,
        )
        self._redo_app_cache()

    def reset_installed_apps(self):
        """
        Undoes the effect of set_installed_apps.
        """
        settings.INSTALLED_APPS = settings.OLD_INSTALLED_APPS
        self._redo_app_cache()

    def _redo_app_cache(self):
        """
        Used to repopulate AppCache after fiddling with INSTALLED_APPS.
        """
        cache.loaded = False
        # Django 1.6 changed AppCache.handled from a dict to a set.
        cache.handled = set() if django.VERSION >= (1, 6) else {}
        cache.postponed = []
        cache.app_store = SortedDict()
        cache.app_models = SortedDict()
        cache.app_errors = {}
        cache._populate()

    def clear_app_cache(self):
        """
        Clears the contents of AppCache to a blank state, so new models
        from the ORM can be added.
        """
        # Keep the displaced models so unclear_app_cache() can put them back.
        self.old_app_models, cache.app_models = cache.app_models, {}

    def unclear_app_cache(self):
        """
        Reversed the effects of clear_app_cache.
        """
        cache.app_models = self.old_app_models
        # Drop memoized get_models() results, which may reference stale models.
        cache._get_models_cache = {}

    def repopulate_app_cache(self):
        """
        Rebuilds AppCache with the real model definitions.
        """
        cache._populate()

    def store_app_cache_state(self):
        # Snapshot the whole AppCache.__dict__ so it can be restored verbatim.
        self.stored_app_cache_state = dict(**cache.__dict__)

    def restore_app_cache_state(self):
        # Restore the snapshot taken by store_app_cache_state().
        cache.__dict__ = self.stored_app_cache_state

    def patch_flush_during_test_db_creation(self):
        """
        Patches BaseDatabaseCreation.create_test_db to not flush database
        """
        def patch(f):
            def wrapper(*args, **kwargs):
                # hold onto the original and replace flush command with a no-op
                original_flush_command = management._commands['flush']
                try:
                    management._commands['flush'] = SkipFlushCommand()
                    # run create_test_db
                    return f(*args, **kwargs)
                finally:
                    # unpatch flush back to the original
                    management._commands['flush'] = original_flush_command
            return wrapper

        BaseDatabaseCreation.create_test_db = patch(BaseDatabaseCreation.create_test_db)
| agpl-3.0 |
theonewolf/siegvswolf | lib/flask/config.py | 781 | 6234 | # -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
    """Descriptor that proxies attribute access to ``obj.config[name]``,
    optionally passing the value through a converter on read."""

    def __init__(self, name, get_converter=None):
        self.__name__ = name
        self.get_converter = get_converter

    def __get__(self, obj, type=None):
        # Accessed on the class itself: hand back the descriptor object.
        if obj is None:
            return self
        value = obj.config[self.__name__]
        if self.get_converter is None:
            return value
        return self.get_converter(value)

    def __set__(self, obj, value):
        obj.config[self.__name__] = value
class Config(dict):
    """Works exactly like a dict but provides ways to fill it from files
    or special dictionaries.  There are two common patterns to populate the
    config.

    Either you can fill the config from a config file::

        app.config.from_pyfile('yourconfig.cfg')

    Or alternatively you can define the configuration options in the
    module that calls :meth:`from_object` or provide an import path to
    a module that should be loaded.  It is also possible to tell it to
    use the same module and with that provide the configuration values
    just before the call::

        DEBUG = True
        SECRET_KEY = 'development key'
        app.config.from_object(__name__)

    In both cases (loading from any Python file or loading from modules),
    only uppercase keys are added to the config.  This makes it possible to
    use lowercase values in the config file for temporary values that are not
    added to the config or to define the config keys in the same file that
    implements the application.

    Probably the most interesting way to load configurations is from an
    environment variable pointing to a file::

        app.config.from_envvar('YOURAPPLICATION_SETTINGS')

    In this case before launching the application you have to set this
    environment variable to the file you want to use.  On Linux and OS X
    use the export statement::

        export YOURAPPLICATION_SETTINGS='/path/to/config/file'

    On windows use `set` instead.

    :param root_path: path to which files are read relative from.  When the
                      config object is created by the application, this is
                      the application's :attr:`~flask.Flask.root_path`.
    :param defaults: an optional dictionary of default values
    """

    def __init__(self, root_path, defaults=None):
        dict.__init__(self, defaults or {})
        self.root_path = root_path

    def from_envvar(self, variable_name, silent=False):
        """Loads a configuration from an environment variable pointing to
        a configuration file.  This is basically just a shortcut with nicer
        error messages for this line of code::

            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])

        :param variable_name: name of the environment variable
        :param silent: set to `True` if you want silent failure for missing
                       files.
        :return: bool. `True` if able to load config, `False` otherwise.
        """
        rv = os.environ.get(variable_name)
        if not rv:
            if silent:
                return False
            raise RuntimeError('The environment variable %r is not set '
                               'and as such configuration could not be '
                               'loaded. Set this variable and make it '
                               'point to a configuration file' %
                               variable_name)
        return self.from_pyfile(rv, silent=silent)

    def from_pyfile(self, filename, silent=False):
        """Updates the values in the config from a Python file.  This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.

        :param filename: the filename of the config.  This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to `True` if you want silent failure for missing
                       files.

        .. versionadded:: 0.7
           `silent` parameter.
        """
        filename = os.path.join(self.root_path, filename)
        # BUGFIX: this previously used `imp.new_module`, but the `imp`
        # module was deprecated for years and removed in Python 3.12.
        # `types.ModuleType(name)` is the long-supported exact equivalent.
        import types
        d = types.ModuleType('config')
        d.__file__ = filename
        try:
            with open(filename) as config_file:
                exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
        except IOError as e:
            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
        return True

    def from_object(self, obj):
        """Updates the values from the given object.  An object can be of one
        of the following two types:

        -   a string: in this case the object with that name will be imported
        -   an actual object reference: that object is used directly

        Objects are usually either modules or classes.

        Just the uppercase variables in that object are stored in the config.
        Example usage::

            app.config.from_object('yourapplication.default_config')
            from yourapplication import default_config
            app.config.from_object(default_config)

        You should not use this function to load the actual configuration but
        rather configuration defaults.  The actual config should be loaded
        with :meth:`from_pyfile` and ideally from a location not within the
        package because the package might be installed system wide.

        :param obj: an import name or object
        """
        if isinstance(obj, string_types):
            obj = import_string(obj)
        # Only SCREAMING_CASE names are treated as configuration keys.
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
| mit |
monarch-initiative/dipper | dipper/sources/OMIM.py | 2 | 47719 | import logging
import re
import json
import urllib
import time
from urllib.error import HTTPError
from datetime import date, datetime, timedelta
from dipper.sources.OMIMSource import OMIMSource
from dipper.sources.Source import USER_AGENT
from dipper.models.Model import Model
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.BiolinkVocabulary import BioLinkVocabulary as blv
from dipper.models.Genotype import Genotype
from dipper.models.GenomicFeature import Feature, makeChromID
from dipper.models.Reference import Reference
from dipper import config
from dipper.utils.romanplus import romanNumeralPattern, fromRoman, toRoman
LOG = logging.getLogger(__name__)
# omimftp key EXPIRES
# get a new one here: https://omim.org/help/api
OMIMURL = 'https://data.omim.org/downloads/'
OMIMFTP = OMIMURL + config.get_config()['keys']['omim']
OMIMAPI = 'https://api.omim.org/api/entry?format=json&apiKey=' + \
config.get_config()['keys']['omim'] + '&'
class OMIM(OMIMSource):
"""
The only anonymously obtainable data from the ftp site is mim2gene.
However, more detailed information is available behind API key.
We pull the omim identifiers from their protected http mimTitles file
then query their API in batchs of 20.
Their prescribed rate limits have been mecurial
one per two seconds or four per second,
in 2017 November all mention of api rate limits have vanished
(save 20 IDs per call if any include is used)
Note this ingest requires an api Key which is not stored in the repo,
but in a separate conf.yaml file.
Processing this source serves two purposes:
1. the creation of the OMIM classes for merging into the disease ontology
2. add annotations such as disease-gene associations
When creating the disease classes, we pull from their REST-api
id/label/definition information.
Additionally we pull the Orphanet and UMLS mappings
(to make equivalent ids).
We also pull the phenotypic series annotations as grouping classes.
"""
files = {
'morbidmap': {
'file': 'morbidmap.txt',
'url': OMIMFTP + '/morbidmap.txt',
'clean': OMIMURL + 'morbidmap.txt',
'columns': [ # expected
'# Phenotype',
'Gene Symbols',
'MIM Number',
'Cyto Location',
],
},
'phenotypicSeries': {
'file': 'phenotypic_series_title_all.txt',
'url': 'https://omim.org/phenotypicSeriesTitles/all?format=tsv',
'headers': {'User-Agent': USER_AGENT},
'clean': OMIMURL + 'phenotypic_series_title_all.txt',
'columns': [ # expected
"Phenotypic Series Title",
"Phenotypic Series number",
],
},
}
def __init__(self,
graph_type,
are_bnodes_skolemized,
data_release_version=None):
super().__init__(
graph_type=graph_type,
are_bnodes_skolemized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='omim',
ingest_title='Online Mendelian Inheritance in Man',
ingest_url='http://www.omim.org',
ingest_logo='source-omim.png',
# ingest_desc=None,
license_url=None,
data_rights='http://omim.org/help/agreement',
# file_handle=None
)
self.omim_ncbigene_idmap = {}
# check if config exists; if it doesn't, error out and let user know
if 'keys' not in config.get_config() and \
'omim' not in config.get_config()['keys']:
LOG.error("not configured with API key.")
if 'disease' in self.all_test_ids:
# local_id (numeric) portion of omim identifier
self.test_ids = [
x[5:] for x in self.all_test_ids['disease'] if x[:5] == 'OMIM:']
else:
LOG.warning("not configured with gene test ids.")
self.test_ids = []
self.disorder_regex = re.compile(r'(.*), (\d{6})\s*(?:\((\d+)\))?')
self.nogene_regex = re.compile(r'(.*)\s+\((\d+)\)')
    def fetch(self, is_dl_forced=True):
        """
        Get the preconfigured static files.
        This DOES NOT fetch the individual records via REST...that is handled
        in the parsing function.  (To be refactored.)

        over riding Source.fetch() calling Source.get_files()

        :param is_dl_forced: boolean, force a re-download of the files
        """
        self.get_files(is_dl_forced)
def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
self._process_all(limit)
self._process_morbidmap(limit)
self._process_phenotypicseries(limit)
LOG.info("Done parsing.")
def process_entries(
self, omimids, transform, included_fields=None, graph=None, limit=None):
"""
Given a list of omim ids,
this will use the omim API to fetch the entries, according to the
```included_fields``` passed as a parameter.
If a transformation function is supplied,
this will iterate over each entry,
and either add the results to the supplied ```graph```
or will return a set of processed entries that the calling function
can further iterate.
If no ```included_fields``` are provided, this will simply fetch
the basic entry from omim, that is ALL fields,
which includes an entry's: prefix, mimNumber, status, and titles.
:param omimids: the set of omim entry ids to fetch using their API
:param transform: Function to transform each omim entry when looping
:param included_fields: A set of what fields are required to retrieve
from the API
:param graph: the graph to add the transformed data into
"""
omimparams = {}
reponse_batches = []
# add the included_fields as parameters
if included_fields is not None and included_fields:
omimparams['include'] = ','.join(included_fields)
# not expecting any, but keeping just in case
cleanomimids = [o.split(':')[-1] for o in omimids]
diff = set(omimids) - set(cleanomimids)
if diff:
LOG.warning('OMIM has %i dirty bits see"\n %s', len(diff), str(diff))
omimids = cleanomimids
cleanomimids = []
# WIP: check if we can use a cached copy of the json records
# maybe if exists raw/omim/_<iso-date>.json use that
# in the meanwhile, to bypass (in case of emergencies)
# cache_date = '2019-06-27'
# with open('raw/omim/_' + cache_date + '.json', 'r') as cachefile:
# reponse_batches = json.load(cachefile)
if True: # False:
acc = 0 # for counting
# note that you can only do request batches of 20
# see info about "Limits" at http://omim.org/help/api
# TODO 2017 May seems a majority of many groups of 20
# are producing python None for RDF triple Objects
groupsize = 20
if not self.test_mode and limit is not None:
# just in case the limit is larger than the number of records,
maxit = limit
if limit > len(omimids):
maxit = len(omimids)
else:
maxit = len(omimids)
then = datetime.now()
while acc < maxit:
end = min((maxit, acc + groupsize))
# iterate through the omim ids list,
# and fetch from the OMIM api in batches of 20
if self.test_mode:
intersect = list(
set([str(i) for i in self.test_ids]) & set(omimids[acc:end]))
# some of the test ids are in the omimids
if intersect:
LOG.info("found test ids: %s", intersect)
omimparams.update({'mimNumber': ','.join(intersect)})
else:
acc += groupsize
continue
else:
omimparams.update({'mimNumber': ','.join(omimids[acc:end])})
url = OMIMAPI + urllib.parse.urlencode(omimparams)
# slow down api calls
then += timedelta(seconds=4)
pause = then - datetime.now()
LOG.info("Naptime! %i", pause.seconds)
time.sleep(pause.seconds if pause.seconds > 0 else 0)
then = datetime.now()
try:
req = urllib.request.urlopen(url)
except HTTPError as err: # URLError?
LOG.warning('fetching: %s', url)
error_msg = err.read()
if re.search(r'The API key: .* is invalid', str(error_msg)):
msg = "API Key not valid"
raise HTTPError(url, err.code, msg, err.hdrs, err.fp)
LOG.error("Failed with: %s", str(error_msg))
# dump what we have to see how far we got.
with open(
'./raw/omim/_' + date.today().isoformat() + '.json_partial',
'w') as writer:
json.dump(reponse_batches, writer)
break
resp = req.read().decode()
acc += groupsize
# gather all batches
reponse_batches.append(json.loads(resp))
# snag a copy of all the batches
with open(
'./raw/omim/_' + date.today().isoformat() + '.json', 'w') as writer:
json.dump(reponse_batches, writer)
LOG.info(
"begin transforming the %i blocks of (20) records", len(reponse_batches))
for myjson in reponse_batches:
for entery in myjson['omim']['entryList']:
# apply the data transformation, and save it to the graph
transform(entery, graph)
    def _process_all(self, limit):
        """
        This takes the list of omim identifiers from the omimTitles file,
        excludes those designated as obsolete and iteratively queries the
        omim api in batches of 20 for the json-formatted data.

        This will create OMIM classes, with the label & definition.
        If an entry is "removed", it is added as a deprecated class.
        If an entry is "moved", it is deprecated and consider annotations
        are added.

        Additionally, we extract:
        *phenotypicSeries ids as superclasses
        *equivalent ids for Orphanet and UMLS

        If set to testMode, it will write only those items in the test_ids
        to the testgraph.

        :param limit: maximum number of entries to fetch (None for all)
        """
        # ids from the titles file minus those that were replaced/moved
        omimids = list(self.omim_type.keys() - self.omim_replaced.keys())

        LOG.info('Have %i omim numbers to fetch records from their API', len(omimids))
        LOG.info('Have %i omim types ', len(self.omim_type))

        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph

        geno = Genotype(graph)
        model = Model(graph)
        tax_label = 'Homo sapiens'
        tax_id = self.globaltt[tax_label]

        # add genome and taxon
        geno.addGenome(tax_id, tax_label)
        model.addClassToGraph(tax_id, tax_label)

        # 'all' pulls every field the API offers for each entry
        includes = set()
        includes.add('all')

        self.process_entries(omimids, self._transform_entry, includes, graph, limit)

        # since we are not fetching obsolete records any more add them all in here
        for omim_id in self.omim_replaced:
            model.addDeprecatedClass(
                'OMIM:' + omim_id, ['OMIM:' + o for o in self.omim_replaced[omim_id]])
    def _transform_entry(self, ent, graph):
        """
        Transform a single OMIM API entry (parsed json) into triples:
        the OMIM class itself (typed according to mimTitles), its synonyms,
        any gene-map / chromosomal-location features, and the mapped ids,
        publications and allelic variants pulled in by the helper methods.

        :param ent: dict, one element of the API response's 'entryList'
        :param graph: graph object of type dipper.graph.Graph
        """
        self.graph = graph
        model = Model(graph)
        geno = Genotype(graph)
        tax_label = 'Homo sapiens'
        tax_id = self.globaltt[tax_label]
        build_num = "GRCh38"
        asm_curie = ':'.join(('NCBIAssembly', build_num))

        # get the numbers, labels, and descriptions
        omim_num = str(ent['entry']['mimNumber'])
        titles = ent['entry']['titles']
        label = titles['preferredTitle']

        other_labels = []
        if 'alternativeTitles' in titles:
            other_labels += self._get_alt_labels(titles['alternativeTitles'])
        if 'includedTitles' in titles:
            other_labels += self._get_alt_labels(titles['includedTitles'])

        # remove the abbreviation (comes after the ;) from the preferredTitle,
        abbrev = None
        lab_lst = label.split(';')
        if len(lab_lst) > 1:
            abbrev = lab_lst[1].strip()
        newlabel = self._cleanup_label(label)

        omim_curie = 'OMIM:' + omim_num
        omimtype = self.omim_type[omim_num]
        nodelabel = newlabel
        # this uses our cleaned-up label
        if omimtype == self.globaltt['heritable_phenotypic_marker']:
            # if abbrev is not None:
            #     nodelabel = abbrev
            # in this special case,
            # make it a disease by not declaring it as a gene/marker
            # ??? and if abbrev is None?
            model.addClassToGraph(
                omim_curie,
                nodelabel,
                description=newlabel,
                class_category=blv.terms['Disease']
            )
        elif omimtype in [self.globaltt['gene'], self.globaltt['has_affected_feature']]:
            omimtype = self.globaltt['gene']
            if abbrev is not None:
                nodelabel = abbrev
            # omim is subclass_of gene (provide type term)
            model.addClassToGraph(
                omim_curie,
                nodelabel,
                self.globaltt['gene'],
                newlabel,
                class_category=blv.terms['Gene']
            )
        elif omimtype == self.globaltt['phenotype']:
            model.addClassToGraph(
                omim_curie,
                nodelabel,
                description=newlabel,
                class_category=blv.terms['Disease']
            )
        else:
            # omim is NOT subclass_of D|P|or ?...
            model.addClassToGraph(omim_curie, newlabel)

        model.addSynonym(omim_curie, label)

        # add the alternate labels and includes as synonyms
        for label in other_labels:
            # NOTE(review): the same synonym is added twice below; this looks
            # like accidental duplication -- confirm before removing either.
            model.addSynonym(omim_curie, label, model.globaltt['has_related_synonym'])
            model.addSynonym(
                omim_curie, label, model.globaltt['has_related_synonym'])

        # KS: commenting out, we will get disease descriptions
        # from MONDO, and gene descriptions from the mygene API

        # if this is a genetic locus (not sequenced) then
        # add the chrom loc info to the ncbi gene identifier,
        # not to the omim id (we reserve the omim id to be the phenotype)
        #################################################################
        # the above makes no sense to me. (TEC)
        # For Monarch, OMIM is authoritative for disease / phenotype
        # if they say a phenotype is associated with a locus
        # that is what dipper should report.
        # OMIM is not authoritative for NCBI gene locations, locus or otherwise.
        # and dipper should not be reporting gene locations via OMIM.

        feature_id = None
        feature_label = None
        if 'geneMapExists' in ent['entry'] and ent['entry']['geneMapExists']:
            genemap = ent['entry']['geneMap']
            is_gene = False
            if omimtype == self.globaltt['heritable_phenotypic_marker']:
                # get the ncbigene ids
                ncbifeature = self._get_mapped_gene_ids(ent['entry'], graph)
                if len(ncbifeature) == 1:
                    feature_id = 'NCBIGene:' + str(ncbifeature[0])
                    # add this feature as a cause for the omim disease
                    # TODO SHOULD I EVEN DO THIS HERE?
                    assoc = G2PAssoc(graph, self.name, feature_id, omim_curie)
                    assoc.add_association_to_graph()
                else:
                    LOG.info(
                        "Its ambiguous when %s maps to not one gene id: %s",
                        omim_curie, str(ncbifeature))
            elif omimtype in [
                    self.globaltt['gene'], self.globaltt['has_affected_feature']]:
                feature_id = omim_curie
                is_gene = True
                omimtype = self.globaltt['gene']
            else:
                # 158900 falls into this category
                feature_id = self._make_anonymous_feature(omim_num)
                if abbrev is not None:
                    feature_label = abbrev
                omimtype = self.globaltt['heritable_phenotypic_marker']

            if feature_id is not None:
                if 'comments' in genemap:
                    # add a comment to this feature
                    comment = genemap['comments']
                    if comment.strip() != '':
                        model.addDescription(feature_id, comment)
                if 'cytoLocation' in genemap:
                    cytoloc = genemap['cytoLocation']
                    # parse the cytoloc.
                    # add this omim thing as
                    # a subsequence of the cytofeature
                    # 18p11.3-p11.2
                    # FIXME
                    # add the other end of the range,
                    # but not sure how to do that
                    # not sure if saying subsequence of feature
                    # is the right relationship
                    feat = Feature(graph, feature_id, feature_label, omimtype)
                    if 'chromosomeSymbol' in genemap:
                        chrom_num = str(genemap['chromosomeSymbol'])
                        chrom = makeChromID(chrom_num, tax_id, 'CHR')
                        geno.addChromosomeClass(
                            chrom_num, self.globaltt['Homo sapiens'], tax_label)

                        # add the positional information, if available
                        fstart = fend = -1
                        if 'chromosomeLocationStart' in genemap:
                            fstart = genemap['chromosomeLocationStart']
                        if 'chromosomeLocationEnd' in genemap:
                            fend = genemap['chromosomeLocationEnd']
                        if fstart >= 0:
                            # make the build-specific chromosome
                            chrom_in_build = makeChromID(
                                chrom_num, build_num, 'MONARCH')
                            # then, add the chromosome instance
                            # (from the given build)
                            geno.addChromosomeInstance(
                                chrom_num, asm_curie, build_num, chrom)
                            if omimtype == self.globaltt[
                                    'heritable_phenotypic_marker']:
                                postypes = [self.globaltt['FuzzyPosition']]
                            else:
                                postypes = None
                            # NOTE that no strand information
                            # is available in the API
                            feat.addFeatureStartLocation(
                                fstart, chrom_in_build, None, postypes)
                            if fend >= 0:
                                feat.addFeatureEndLocation(
                                    fend, chrom_in_build, None, postypes)
                            if fstart > fend:
                                LOG.info(
                                    "start>end (%d>%d) for %s",
                                    fstart, fend, omim_curie)
                    # add the cytogenic location too
                    # for now, just take the first one
                    cytoloc = cytoloc.split('-')[0]
                    loc = makeChromID(cytoloc, tax_id, 'CHR')
                    model.addClassToGraph(loc, None)
                    feat.addSubsequenceOfFeature(loc)
                    feat.addFeatureToGraph(True, None, is_gene)

        # end adding causative genes/features

        if ent['entry']['status'] in ['moved', 'removed']:
            LOG.warning('UNEXPECTED! not expecting obsolete record %s', omim_curie)

        self._get_phenotypicseries_parents(ent['entry'], graph)
        self._get_mappedids(ent['entry'], graph)
        self._get_mapped_gene_ids(ent['entry'], graph)
        self._get_pubs(ent['entry'], graph)
        self._get_process_allelic_variants(ent['entry'], graph)
    def _process_morbidmap(self, limit):
        """
        This will process the morbidmap file to get the links between
        omim genes and diseases. Here, we create anonymous nodes for some
        variant loci that are variants of the gene that causes the disease.
        Triples created:
        <some_anonymous_variant_locus>
            is_allele_of
        <omim_gene_id>
        <some_anonymous_variant_locus> causes condition <omim_disease_id>
        <assoc> hasSubject <some_anonymous_variant_locus>
        <assoc> hasObject <omim_disease_id>
        <assoc> hasPredicate <causes condition>
        <assoc> dc:evidence <eco_id>
        :param limit:
        :return:
        """
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        line_counter = 0
        assoc_count = 0
        src_key = 'morbidmap'
        col = self.files[src_key]['columns']
        raw = '/'.join((self.rawdir, self.files[src_key]['file']))
        with open(raw) as reader:
            line = reader.readline()  # Copyright
            line = reader.readline()  # Generated: 2016-04-11
            line = reader.readline()  # EOF for field spec
            line = reader.readline().strip()  # columns header
            line_counter = 4
            row = line.split('\t')  # includes funky leading octothorpe
            # NOTE(review): a failed header check is deliberately ignored
            # here (check_fileheader logs on mismatch) -- confirm intent.
            if not self.check_fileheader(col, row):
                pass
            for line in reader:
                line_counter += 1
                line = line.strip()
                # since there are comments at the end of the file as well,
                if line[0] == '#':
                    continue
                row = line.split('\t')
                if len(row) != len(col):
                    LOG.warning(
                        'Unexpected input on line: %i  got: %s', line_counter, row)
                    continue

                disorder = row[col.index('# Phenotype')]
                gene_symbols = row[col.index('Gene Symbols')]
                gene_num = row[col.index('MIM Number')]
                # loc = row[col.index('Cyto Location')]

                # LOG.info("morbidmap disorder:  %s", disorder)  # too verbose

                # disorder = disorder label , number (mapping key)
                # 3-M syndrome 1, 273750 (3)|CUL7, 3M1|609577|6p21.1

                # but note that for those diseases where they are genomic loci
                # (not genes though), the omim id is only listed as the gene
                # Alopecia areata 1 (2)|AA1|104000|18p11.3-p11.2
                # when there's a gene and disease
                disorder_match = self.disorder_regex.match(disorder)
                nogene_match = self.nogene_regex.match(disorder)

                if disorder_match is not None:
                    disorder_parts = disorder_match.groups()
                    (disorder_label, disorder_num, phene_key) = disorder_parts

                    if self.test_mode and (
                            int(disorder_num) not in self.test_ids or
                            int(gene_num) not in self.test_ids):
                        continue
                    assoc_count += 1
                    gene_symbols = gene_symbols.split(', ')
                    gene_id = 'OMIM:' + str(gene_num)
                    self._make_pheno_assoc(
                        graph, gene_id, disorder_num, disorder_label, phene_key)
                elif nogene_match is not None:
                    # this is a case where the disorder
                    # a blended gene/phenotype
                    # we lookup the NCBIGene feature and make the association
                    (disorder_label, phene_key) = nogene_match.groups()
                    disorder_num = gene_num
                    # make what's in the gene column the disease
                    disorder_id = 'OMIM:' + str(disorder_num)
                    if self.test_mode and int(disorder_num) not in self.test_ids:
                        continue
                    if disorder_id in self.omim_ncbigene_idmap:
                        # get the gene ids
                        gene_ids = self.omim_ncbigene_idmap[disorder_id]
                        if gene_ids is None:
                            continue
                        for gene_num in gene_ids:
                            # TODO add gene filter for testMode and NCBIGenes
                            gene_id = 'NCBIGene:' + str(gene_num).strip()
                            assoc_count += 1
                            self._make_pheno_assoc(
                                graph, gene_id, disorder_num, disorder_label,
                                phene_key)
                    else:
                        # we can create an anonymous feature
                        # to house this thing for example, 158900
                        feature_id = self._make_anonymous_feature(gene_num)
                        assoc_count += 1
                        self._make_pheno_assoc(
                            graph, feature_id, disorder_num, disorder_label,
                            phene_key)
                        LOG.info(
                            "We don't have an NCBIGene feature id to link %s with %s",
                            disorder_id, disorder_label)

                    if self.test_mode and gene_num not in self.test_ids:
                        continue
                else:
                    LOG.warning(
                        "There are misformatted rows %i:%s", line_counter, line)

                if not self.test_mode and limit is not None and line_counter > limit:
                    break
        LOG.info("Added %d G2P associations", assoc_count)
@staticmethod
def _make_anonymous_feature(omim_num):
''' more blank nodes '''
return '_:feature' + omim_num
def _make_pheno_assoc(
        self, graph, gene_id, disorder_num, disorder_label, phene_key
):
    """
    Emit a gene-to-phenotype (G2P) association into the graph, choosing
    the relationship predicate from punctuation conventions in the
    disorder label.

    From the OMIM docs:
    Brackets, "[ ]", indicate "nondiseases," mainly genetic variations
    that lead to apparently abnormal laboratory test values
    (e.g., dysalbuminemic euthyroidal hyperthyroxinemia).

    Braces, "{ }", indicate mutations that contribute to susceptibility
    to multifactorial disorders (e.g., diabetes, asthma) or to
    susceptibility to infection (e.g., malaria).

    A question mark, "?", before the phenotype name indicates that the
    relationship between the phenotype and gene is provisional.
    More details about this relationship are provided in the comment
    field of the map and in the gene and phenotype OMIM entries.

    Phene key:
    The number in parentheses after the name of each disorder indicates
    the following:
      (1) the disorder was positioned by mapping of the wildtype gene;
      (2) the disease phenotype itself was mapped;
      (3) the molecular basis of the disorder is known;
      (4) the disorder is a chromosome deletion or duplication syndrome.
    reference: https://omim.org/help/faq#1_6

    :param graph: graph object of type dipper.graph.Graph
    :param gene_id: str, gene id as curie
    :param disorder_num: str, disorder id (bare OMIM number, no prefix)
    :param disorder_label: str, disorder label
    :param phene_key: int or str, 1-4, see docstring
    :return:
    """
    disorder_id = ':'.join(('OMIM', disorder_num))
    # default relationship when the label carries no special marker
    rel_label = 'causes condition'
    rel_id = self.globaltt[rel_label]

    if disorder_label.startswith('['):
        # "nondisease" (lab-value variation): weaker predicate
        rel_id = self.globaltt['is marker for']
        # rel_label = 'is a marker for'
    elif disorder_label.startswith('{'):
        # susceptibility to multifactorial disorder/infection
        rel_id = self.globaltt['contributes to']
        # rel_label = 'contributes to'
    elif disorder_label.startswith('?'):
        # this is a questionable mapping!  skip?
        rel_id = self.globaltt['contributes to']

    # Note: this is actually a G2D association;
    # see https://github.com/monarch-initiative/dipper/issues/748
    assoc = G2PAssoc(
        graph,
        self.name,
        gene_id,
        disorder_id,
        rel_id
    )
    if phene_key is not None:
        # map the numeric phene key to an evidence term, when one exists
        evidence = self.resolve(phene_key, False)
        if evidence != phene_key:
            assoc.add_evidence(evidence)  # evidence is Found

    assoc.add_association_to_graph()
@staticmethod
def _get_description(entry):
"""
Get the description of the omim entity
from the textSection called 'description'.
Note that some of these descriptions have linebreaks.
If printed in turtle syntax, they will appear to be triple-quoted.
:param entry:
:return:
"""
description = None
if entry is not None and 'textSectionList' in entry:
textsectionlist = entry['textSectionList']
for txts in textsectionlist:
if txts['textSection']['textSectionName'] == 'description':
description = txts['textSection']['textSectionContent']
# there are internal references to OMIM identifiers in
# the description, I am formatting them in our style.
description = re.sub(r'{(\d+)}', r'OMIM:\1', description)
# TODO
# reformat the citations in the description with PMIDs
break
return description
def _get_process_allelic_variants(self, entry, graph):
    """
    Process the allelic variant list of an OMIM entry: create each 'live'
    variant as an allele of the gene, link its mentioned publications,
    dbSNP equivalents and ClinVar xrefs; deprecate 'moved'/'removed' ones.

    :param entry: dict, one OMIM API entry
    :param graph: graph object of type dipper.graph.Graph
    :return: None (writes into graph)
    """
    model = Model(graph)
    reference = Reference(graph)
    geno = Genotype(graph)
    if entry is not None:
        # to hold the entry-specific publication mentions
        # for the allelic variants
        publist = {}
        entry_num = entry['mimNumber']

        # process the ref list just to get the pmids
        ref_to_pmid = self._get_pubs(entry, graph)

        if 'allelicVariantList' in entry:
            for alv in entry['allelicVariantList']:
                al_num = alv['allelicVariant']['number']
                # variant ids look like OMIM:100100.0001
                al_id = 'OMIM:'+str(entry_num)+'.'+str(al_num).zfill(4)
                al_label = None
                al_description = None
                if alv['allelicVariant']['status'] == 'live':
                    publist[al_id] = set()
                    if 'mutations' in alv['allelicVariant']:
                        al_label = alv['allelicVariant']['mutations']
                    if 'text' in alv['allelicVariant']:
                        al_description = alv['allelicVariant']['text']
                        # {NN: is an internal reference-number mention
                        mch = re.findall(r'\{(\d+)\:', al_description)
                        publist[al_id] = set(mch)
                    geno.addAllele(
                        al_id, al_label, self.globaltt['variant_locus'],
                        al_description)
                    geno.addAlleleOfGene(
                        al_id, 'OMIM:' + str(entry_num),
                        self.globaltt['is_allele_of'])
                    # look up the pubmed id in the list of references
                    for ref in publist[al_id]:
                        pmid = ref_to_pmid[int(ref)]
                        graph.addTriple(pmid, self.globaltt['is_about'], al_id)
                    if 'dbSnps' in alv['allelicVariant']:
                        dbsnp_ids = re.split(r',', alv['allelicVariant']['dbSnps'])
                        for dnum in dbsnp_ids:
                            did = 'dbSNP:'+dnum.strip()
                            model.addIndividualToGraph(did, None)
                            model.addSameIndividual(al_id, did)
                    # Note that RCVs are variant to disease associations
                    # in ClinVar, rather than variant entries
                    # so we make these xrefs instead of equivalents
                    if 'clinvarAccessions' in alv['allelicVariant']:
                        # clinvarAccessions triple semicolon delimited
                        # each >1 like RCV000020059;;;
                        rcv_ids = \
                            alv['allelicVariant']['clinvarAccessions'].split(';;;')
                        rcv_ids = [rcv[:12] for rcv in rcv_ids]  # incase more cruft
                        for rnum in rcv_ids:
                            rid = 'ClinVar:' + rnum
                            model.addXref(al_id, rid)
                    reference.addPage(
                        al_id, "http://omim.org/entry/" +
                        '#'.join((str(entry_num), str(al_num).zfill(4))))
                elif re.search(
                        r'moved', alv['allelicVariant']['status']):
                    # for both 'moved' and 'removed'
                    moved_ids = None
                    if 'movedTo' in alv['allelicVariant']:
                        moved_id = 'OMIM:' + alv['allelicVariant']['movedTo']
                        moved_ids = [moved_id]
                    model.addDeprecatedIndividual(
                        al_id,
                        moved_ids,
                        old_id_category=blv.terms['SequenceVariant']
                    )
                else:
                    LOG.error(
                        'Uncaught alleleic variant status %s',
                        alv['allelicVariant']['status'])
            # end loop allelicVariantList
@staticmethod
def _cleanup_label(label):
"""
Reformat the ALL CAPS OMIM labels to something more pleasant to read.
This will:
1. remove the abbreviation suffixes
2. convert the roman numerals to integer numbers
3. make the text title case,
except for suplied conjunctions/prepositions/articles
:param label:
:return:
"""
conjunctions = ['and', 'but', 'yet', 'for', 'nor', 'so']
little_preps = [
'at', 'by', 'in', 'of', 'on', 'to', 'up', 'as', 'it', 'or']
articles = ['a', 'an', 'the']
# remove the abbreviation
lbl = label.split(r';')[0]
fixedwords = []
i = 0
for wrd in lbl.split():
i += 1
# convert the roman numerals to numbers,
# but assume that the first word is not
# a roman numeral (this permits things like "X inactivation"
if i > 1 and re.match(romanNumeralPattern, wrd):
num = fromRoman(wrd)
# make the assumption that the number of syndromes are <100
# this allows me to retain "SYNDROME C"
# and not convert it to "SYNDROME 100"
if 0 < num < 100:
# get the non-roman suffix, if present.
# for example, IIIB or IVA
suffix = wrd.replace(toRoman(num), '', 1)
fixed = ''.join((str(num), suffix))
wrd = fixed
# capitalize first letter
wrd = wrd.title()
# replace interior conjunctions, prepositions,
# and articles with lowercase
if wrd.lower() in (conjunctions+little_preps+articles) and i != 1:
wrd = wrd.lower()
fixedwords.append(wrd)
lbl = ' '.join(fixedwords)
# print (label, '-->', lbl)
return lbl
def _process_phenotypicseries(self, limit):
    """
    Creates classes from the OMIM phenotypic series list.
    These are grouping classes to hook the more granular OMIM diseases.
    # TEC what does 'hook' mean here?

    :param limit: int or None, max number of data rows to process
    :return:
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    LOG.info("getting phenotypic series titles")
    model = Model(graph)
    line_counter = 0
    src_key = 'phenotypicSeries'
    col = self.files[src_key]['columns']
    raw = '/'.join((self.rawdir, self.files[src_key]['file']))
    with open(raw) as reader:
        # the file carries a 4-line preamble before the header row
        line = reader.readline()          # title
        line = reader.readline()          # date downloaded
        line = reader.readline()          # copyright
        line = reader.readline()          # <blank>
        line = reader.readline().strip()  # column headers
        line_counter = 5
        row = line.split('\t')
        # NOTE(review): a header mismatch is deliberately ignored here
        # (check_fileheader presumably logs); confirm that is intended
        if not self.check_fileheader(col, row):
            pass
        for line in reader:
            line_counter += 1
            row = line.strip().split('\t')
            if row and len(row) != len(col):
                LOG.warning(
                    'Unexpected input on line: %i  got: %s', line_counter, row)
                continue
            ps_label = row[col.index('Phenotypic Series Title')].strip()
            ps_num = row[col.index('Phenotypic Series number')].strip()
            omimps_curie = 'OMIMPS:' + ps_num
            model.addClassToGraph(
                omimps_curie, ps_label, class_category=blv.terms['Disease']
            )
            if not self.test_mode and limit is not None and line_counter > limit:
                break
def _get_phenotypicseries_parents(self, entry, graph):
    """
    Extract the phenotypic series parent relationship out of the entry,
    and link this OMIM entry to each of its OMIMPS grouping classes.
    Genes 'contribute to' the series condition; phenotypes/markers are
    made subclasses of the series.

    :param entry: dict, one OMIM API entry
    :param graph: graph object of type dipper.graph.Graph
    :return:
    """
    model = Model(graph)
    omim_num = str(entry['mimNumber'])
    omim_curie = 'OMIM:' + omim_num
    omimtype = self.omim_type[omim_num]
    # the phenotypic series mappings
    serieslist = []
    if 'phenotypeMapList' in entry:
        phenolist = entry['phenotypeMapList']
        for phl in phenolist:
            if 'phenotypicSeriesNumber' in phl['phenotypeMap']:
                pns_lst = phl['phenotypeMap']['phenotypicSeriesNumber']
                # a map entry may belong to several comma-separated series
                for pns in pns_lst.split(','):
                    serieslist.append(pns)
    if 'geneMap' in entry and 'phenotypeMapList' in entry['geneMap']:
        phenolist = entry['geneMap']['phenotypeMapList']
        for phl in phenolist:
            if 'phenotypicSeriesNumber' in phl['phenotypeMap']:
                pns_lst = phl['phenotypeMap']['phenotypicSeriesNumber']
                for pns in pns_lst.split(','):
                    serieslist.append(pns)

    # add this omim entry as a subclass of the series entry
    if serieslist:
        LOG.info(
            '%s is awarded %i optional PS superclasses!',
            omim_curie, len(serieslist))
        for phser in set(serieslist):
            series_curie = 'OMIMPS:' + phser
            model.addClassToGraph(
                series_curie, None, class_category=blv.terms['Disease']
            )
            if omimtype in [
                    self.globaltt['gene'], self.globaltt['has_affected_feature']]:
                model.addTriple(
                    omim_curie,
                    self.globaltt['contributes to condition'],
                    series_curie)
            elif omimtype in [
                    self.globaltt['phenotype'],
                    self.globaltt['heritable_phenotypic_marker']
            ]:
                model.addSubClass(
                    omim_curie,
                    series_curie,
                    child_category=blv.terms['Disease'],
                    parent_category=blv.terms['Disease']
                )
            else:
                LOG.info('Unable to map type %s to phenotypic series', omimtype)
@staticmethod
def _get_mappedids(entry, graph):
    """
    Extract the Orphanet and UMLS ids as equivalences from the entry.
    Orphanet matches become xrefs on the OMIM class; UMLS ids become
    plain xrefs.

    :param entry: dict, one OMIM API entry
    :param graph: graph object of type dipper.graph.Graph
    :return:
    """
    model = Model(graph)
    omim_num = str(entry['mimNumber'])
    omim_curie = 'OMIM:' + omim_num
    orpha_mappings = []
    if 'externalLinks' in entry:
        links = entry['externalLinks']
        if 'orphanetDiseases' in links:
            # triple semi-colon delimited list of
            # double semi-colon delimited orphanet ID/disease pairs
            # 2970;;566;;Prune belly syndrome
            items = links['orphanetDiseases'].strip().split(';;;')
            for item in items:
                orphdis = item.strip().split(';;')
                orpha_num = orphdis[0].strip()
                # orpha_label is currently unused pending issue #969
                orpha_label = orphdis[2].strip()
                if orpha_num != 'None':
                    orpha_curie = 'ORPHA:' + orpha_num
                    orpha_mappings.append(orpha_curie)
                    model.addClassToGraph(
                        orpha_curie,
                        # orpha_label,  # TODO  till #969 is resolved
                        class_category=blv.terms['Disease']
                    )
                    model.addXref(
                        omim_curie,
                        orpha_curie,
                        class_category=blv.terms['Disease'],
                        xref_category=blv.terms['Disease']
                    )

        if 'umlsIDs' in links:
            umls_mappings = links['umlsIDs'].split(',')
            for umls in umls_mappings:
                umls_curie = 'UMLS:' + umls
                model.addClassToGraph(umls_curie, None)
                model.addXref(omim_curie, umls_curie)
def _get_mapped_gene_ids(self, entry, graph):
    """
    Extract NCBIGene mappings from the entry's external links; record
    them in self.omim_ncbigene_idmap and, for gene-typed entries, add
    equivalent-class axioms to the graph.

    :param entry: dict, one OMIM API entry
    :param graph: graph object of type dipper.graph.Graph
    :return: list of str, NCBI gene numbers (empty when no mapping)
    """
    gene_ids = []
    model = Model(graph)
    omim_num = str(entry['mimNumber'])
    omim_curie = 'OMIM:' + omim_num
    if 'externalLinks' in entry:
        links = entry['externalLinks']
        omimtype = self.omim_type[omim_num]
        if 'geneIDs' in links:
            # comma-separated list of entrez/NCBI gene numbers
            entrez_mappings = links['geneIDs']
            gene_ids = entrez_mappings.split(',')
            # cache for later disorder->gene resolution
            self.omim_ncbigene_idmap[omim_curie] = gene_ids
            # only genes (not phenotypes) are asserted equivalent
            if omimtype in [
                    self.globaltt['gene'], self.globaltt['has_affected_feature']]:
                for ncbi in gene_ids:
                    model.addEquivalentClass(omim_curie, 'NCBIGene:' + str(ncbi))
    return gene_ids
def _get_alt_labels(self, titles):
    """
    From a string of delimited titles, make an array.
    This assumes that the titles are double-semicolon (';;') delimited.
    This will additionally pass each through the _cleanup_label method to
    convert the screaming ALL CAPS to something more pleasant to read.

    :param titles: str, ';;'-delimited title list
    :return: an array of cleaned-up labels
    """
    labels = []
    # "alternativeTitles": "
    #   ACROCEPHALOSYNDACTYLY, TYPE V; ACS5;;\nACS V;;\nNOACK SYNDROME",
    # "includedTitles":
    #   "CRANIOFACIAL-SKELETAL-DERMATOLOGIC DYSPLASIA, INCLUDED"
    for title in titles.split(';;'):
        # remove the ', included' suffix, if present.
        # BUGFIX: re.IGNORECASE was previously passed as the positional
        # `count` argument of re.sub() (its value, 2, silently capped the
        # replacements and left matching case-SENSITIVE); it must be
        # passed as `flags=` to actually ignore case.
        label = re.sub(r',\s*INCLUDED', '', title.strip(), flags=re.IGNORECASE)
        label = self._cleanup_label(label)
        labels.append(label)

    return labels
def _get_pubs(self, entry, graph):
    """
    Extract mentioned publications from the reference list; each gets a
    Reference node (PMID curie when available, otherwise an entry-scoped
    blank node) plus an OMIM 'mentions' triple.

    :param entry: dict, one OMIM API entry
    :param graph: graph object of type dipper.graph.Graph
    :return: dict mapping the entry-internal referenceNumber -> pub id
    """
    ref_to_pmid = {}
    entry_num = entry['mimNumber']
    if 'referenceList' in entry:
        reflist = entry['referenceList']
        for rlst in reflist:
            if 'pubmedID' in rlst['reference']:
                pub_id = 'PMID:' + str(rlst['reference']['pubmedID'])
                ref = Reference(
                    graph, pub_id, self.globaltt['journal article'])
            else:
                # make blank node for internal reference
                pub_id = '_:OMIM' + str(entry_num) + 'ref' + str(
                    rlst['reference']['referenceNumber'])
                ref = Reference(graph, pub_id)

            title = author_list = source = citation = None
            if 'title' in rlst['reference']:
                title = rlst['reference']['title']
                ref.setTitle(title)
            if 'authors' in rlst['reference']:
                author_list = rlst['reference']['authors']
                ref.setAuthorList(author_list)
                # short citation: first author + 'et al'
                citation = re.split(r'\.\,', author_list)[0] + ' et al'
            if 'source' in rlst['reference']:
                source = rlst['reference']['source']
            # join whichever of citation/title/source are present
            citation = '; '.join(
                [tok for tok in [citation, title, source] if tok is not None])
            ref.setShortCitation(citation)
            ref.addRefToGraph()
            ref_to_pmid[rlst['reference']['referenceNumber']] = pub_id

            # add is_about for the pub
            omim_id = 'OMIM:' + str(entry_num)
            graph.addTriple(omim_id, self.globaltt['mentions'], pub_id)

    return ref_to_pmid
def _omim_type_2_biolink_category(self, entry_num):
    """
    Map the curated OMIM entry type onto a biolink category term.

    :param entry_num: str, OMIM number used as key into self.omim_type
    :return: biolink category term (Gene/Disease/GenomicEntity),
             or None when unknown or unmapped
    """
    if entry_num not in self.omim_type:
        return None

    entry_type = self.omim_type[entry_num]
    if entry_type in (
            self.globaltt['gene'], self.globaltt['has_affected_feature']):
        return blv.terms['Gene']
    if entry_type == self.globaltt['phenotype']:
        return blv.terms['Disease']
    if entry_type == self.globaltt['heritable_phenotypic_marker']:
        return blv.terms['GenomicEntity']
    return None
def getTestSuite(self):
    """Return a unittest TestSuite built from OMIMTestCase."""
    # ''' this should find a home under /test , if it is needed'''
    import unittest
    # # TODO PYLINT  Unable to import 'tests.test_omim'
    from tests.test_omim import OMIMTestCase
    #
    test_suite = unittest.TestLoader().loadTestsFromTestCase(OMIMTestCase)
    return test_suite
def get_omim_id_from_entry(entry):
    """
    Return the OMIM curie ('OMIM:<number>') for an API entry dict,
    or None when the entry is missing or carries no mimNumber.
    """
    if entry is None or 'mimNumber' not in entry:
        return None
    return 'OMIM:' + str(entry['mimNumber'])
| bsd-3-clause |
stchepanhagn/domain-learning | plan_learning.py | 1 | 2767 | """ plan_learning.py
- This module contain the procedure used for learning plans from experience.
Copyright (C) 2016 Stephan Chang
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program, located in the root of this repository.
If not, see <http://www.gnu.org/licenses/>.
"""
import pdb
import planning
import sys
import random
def main(args):
    # Parse CLI flags: -v prints the learned domain instead of writing a
    # file; -n limits the number of examples consumed.
    verbose = '-v' in args
    n_arg = '-n' in args

    try:
        # when -v is present the positional args shift right by one
        i = 1 + int(verbose)
        examples_file = args[i]
        domain_name = args[i+1]
    except:
        print "usage: {cmd} [-v] examples_file"\
            " domain_name".format(cmd=args[0])
        return

    print """
PDDL Domain Learning  Copyright (C) 2016  Stephan Chang
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
"""

    examples = []
    print "Parsing examples..."
    # each example line is 'preconditions|operators|effects'
    with open(examples_file) as f:
        line = f.readline().replace('\n', '')
        while line:
            triple = line.split('|')
            example = (triple[0], triple[1], triple[2])
            examples.append(example)
            line = f.readline().replace('\n', '')
    print "Done reading {n_examples} training examples!".format(n_examples=len(examples))
    if not f.closed:
        print "Warning: file stream is still open."

    if n_arg:
        # NOTE(review): this reads args[i+3], i.e. assumes '-n N' follows
        # immediately after domain_name -- confirm against intended usage
        n_examples = int(args[i+3])
    else:
        n_examples = len(examples)

    print "Creating domain..."
    domain = planning.Domain(domain_name)
    # random.shuffle(examples)
    for i in range(n_examples):
        preconditions = examples[i][0].split(',')
        operators = examples[i][1].split(',')
        effects = examples[i][2].split(',')
        # predicates come from both sides of the transition
        domain.add_all_predicates(preconditions)
        domain.add_all_predicates(effects)
        domain.add_actions(operators, preconditions, effects)
    print "Done!"

    if verbose:
        print str(domain)
    else:
        print "Outputting to file..."
        output_file_name = "{domain_name}.pddl".format(domain_name=domain_name)
        with open(output_file_name, 'w') as f:
            f.write(str(domain))
        print "Done!"


if __name__ == '__main__':
    main(sys.argv)
| gpl-3.0 |
CVML/pybrain | pybrain/tools/customxml/networkwriter.py | 25 | 4150 | __author__ = 'Tom Schaul, tom@idsia.ch'
from inspect import isclass
from .handling import XMLHandling
from pybrain.structure.connections.shared import SharedConnection
from pybrain.structure.networks.network import Network
from pybrain.structure.networks.recurrent import RecurrentNetwork
from pybrain.utilities import canonicClassString
# TODO: higher precision on writing parameters
class NetworkWriter(XMLHandling):
    """ A class that can take a network and write it to an XML file """

    @staticmethod
    def appendToFile(net, filename):
        """ append the network to an existing xml file """
        w = NetworkWriter(filename, newfile = False)
        netroot = w.newRootNode('Network')
        w.writeNetwork(net, netroot)
        w.save()

    @staticmethod
    def writeToFile(net, filename):
        """ write the network as a new xml file """
        w = NetworkWriter(filename, newfile = True)
        netroot = w.newRootNode('Network')
        w.writeNetwork(net, netroot)
        w.save()

    def writeNetwork(self, net, netroot):
        """ write a Network into a new XML node """
        netroot.setAttribute('name', net.name)
        netroot.setAttribute('class', canonicClassString(net))
        if net.argdict:
            self.writeArgs(netroot, net.argdict)

        # the modules
        mods = self.newChild(netroot, 'Modules')
        # first write the input modules (in order)
        for im in net.inmodules:
            # a module can be both an input and an output module
            self.writeModule(mods, im, True, im in net.outmodules)
        # now the output modules (in order)
        for om in net.outmodules:
            if om not in net.inmodules:
                self.writeModule(mods, om, False, True)
        # now the rest
        for m in net.modulesSorted:
            if m not in net.inmodules and m not in net.outmodules:
                self.writeModule(mods, m, False, False)

        # the motherconnections
        if len(net.motherconnections) > 0:
            mothers = self.newChild(netroot, 'MotherConnections')
            for m in net.motherconnections:
                self.writeBuildable(mothers, m)

        # the connections
        conns = self.newChild(netroot, 'Connections')
        for m in net.modulesSorted:
            for c in net.connections[m]:
                self.writeConnection(conns, c, False)
        # only RecurrentNetwork instances carry recurrentConns
        if hasattr(net, "recurrentConns"):
            for c in net.recurrentConns:
                self.writeConnection(conns, c, True)

    def writeModule(self, rootnode, m, inmodule, outmodule):
        """ write a module node, recursing when the module is itself a network """
        if isinstance(m, Network):
            mnode = self.newChild(rootnode, 'Network')
            self.writeNetwork(m, mnode)
        else:
            mnode = self.writeBuildable(rootnode, m)
        if inmodule:
            mnode.setAttribute('inmodule', 'True')
        elif outmodule:
            mnode.setAttribute('outmodule', 'True')

    def writeConnection(self, rootnode, c, recurrent):
        """ write a connection node, flagging recurrent connections """
        mnode = self.writeBuildable(rootnode, c)
        if recurrent:
            mnode.setAttribute('recurrent', 'True')

    def writeBuildable(self, rootnode, m):
        """ store the class (with path) and name in a new child. """
        mname = m.__class__.__name__
        mnode = self.newChild(rootnode, mname)
        mnode.setAttribute('name', m.name)
        mnode.setAttribute('class', canonicClassString(m))
        if m.argdict:
            self.writeArgs(mnode, m.argdict)
        # shared connections hold no parameters of their own
        # (their mother connection does)
        if m.paramdim > 0 and not isinstance(m, SharedConnection):
            self.writeParams(mnode, m.params)
        return mnode

    def writeArgs(self, node, argdict):
        """ write a dictionnary of arguments """
        for name, val in list(argdict.items()):
            if val != None:
                tmp = self.newChild(node, name)
                if isclass(val):
                    s = canonicClassString(val)
                else:
                    # prefer a .name attribute, else the repr
                    s = getattr(val, 'name', repr(val))
                tmp.setAttribute('val', s)

    def writeParams(self, node, params):
        """ write the parameter vector as a text node """
        # TODO: might be insufficient precision
        pnode = self.newChild(node, 'Parameters')
        self.addTextNode(pnode, str(list(params)))
| bsd-3-clause |
Rawk/xbmc | tools/EventClients/Clients/PS3 Sixaxis Controller/ps3d.py | 168 | 12019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import traceback
import time
import struct
import threading
import os
if os.path.exists("../../lib/python"):
sys.path.append("../PS3 BD Remote")
sys.path.append("../../lib/python")
from bt.hid import HID
from bt.bt import bt_lookup_name
from xbmcclient import XBMCClient
from ps3 import sixaxis
from ps3_remote import process_keys as process_remote
try:
from ps3 import sixwatch
except Exception, e:
print "Failed to import sixwatch now disabled: " + str(e)
sixwatch = None
try:
import zeroconf
except:
zeroconf = None
ICON_PATH = "../../icons/"
else:
# fallback to system wide modules
from kodi.bt.hid import HID
from kodi.bt.bt import bt_lookup_name
from kodi.xbmcclient import XBMCClient
from kodi.ps3 import sixaxis
from kodi.ps3_remote import process_keys as process_remote
from kodi.defs import *
try:
from kodi.ps3 import sixwatch
except Exception, e:
print "Failed to import sixwatch now disabled: " + str(e)
sixwatch = None
try:
import kodi.zeroconf as zeroconf
except:
zeroconf = None
event_threads = []
def printerr():
    """Print the current exception and traceback (Python 2 idiom)."""
    trace = ""
    exception = ""
    # sys.exc_type/exc_value are the deprecated py2 globals for the
    # in-flight exception
    exc_list = traceback.format_exception_only (sys.exc_type, sys.exc_value)
    for entry in exc_list:
        exception += entry
    tb_list = traceback.format_tb(sys.exc_info()[2])
    for entry in tb_list:
        trace += entry
    print("%s\n%s" % (exception, trace), "Script Error")
class StoppableThread ( threading.Thread ):
    """Thread base class with a cooperative stop flag, socket cleanup,
    and an inactivity timeout used by the PS3 device threads."""

    def __init__(self):
        threading.Thread.__init__(self)
        self._stop = False
        self.set_timeout(0)

    def stop_thread(self):
        # request cooperative shutdown; run() loops poll stop()
        self._stop = True

    def stop(self):
        return self._stop

    def close_sockets(self):
        # close interrupt and control sockets, ignoring errors;
        # subclasses set self.isock/self.csock in their __init__
        if self.isock:
            try:
                self.isock.close()
            except:
                pass
        self.isock = None
        if self.csock:
            try:
                self.csock.close()
            except:
                pass
        self.csock = None
        self.last_action = 0

    def set_timeout(self, seconds):
        self.timeout = seconds

    def reset_timeout(self):
        # record activity now; timed_out() measures from this point
        self.last_action = time.time()

    def idle_time(self):
        return time.time() - self.last_action

    def timed_out(self):
        if (time.time() - self.last_action) > self.timeout:
            return True
        else:
            return False
class PS3SixaxisThread ( StoppableThread ):
    """Services one connected Sixaxis controller, forwarding its events
    to XBMC until stopped or idle for 10 minutes."""

    def __init__(self, csock, isock, ipaddr="127.0.0.1"):
        StoppableThread.__init__(self)
        self.csock = csock
        self.isock = isock
        self.xbmc = XBMCClient(name="PS3 Sixaxis", icon_file=ICON_PATH + "/bluetooth.png", ip=ipaddr)
        # power off the controller after 600s without input
        self.set_timeout(600)

    def run(self):
        six = sixaxis.sixaxis(self.xbmc, self.csock, self.isock)
        self.xbmc.connect()
        self.reset_timeout()
        try:
            while not self.stop():
                if self.timed_out():
                    raise Exception("PS3 Sixaxis powering off, timed out")
                # keep the (UDP) client registration alive when idle
                if self.idle_time() > 50:
                    self.xbmc.connect()
                try:
                    if six.process_socket(self.isock):
                        self.reset_timeout()
                except Exception, e:
                    print e
                    break
        except Exception, e:
            printerr()
        six.close()
        self.close_sockets()
class PS3RemoteThread ( StoppableThread ):
    """Services one PS3 Blu-Ray remote; can hop between multiple XBMC
    instances discovered via zeroconf (PS+skip chords switch target)."""

    def __init__(self, csock, isock, ipaddr="127.0.0.1"):
        StoppableThread.__init__(self)
        self.csock = csock
        self.isock = isock
        self.xbmc = XBMCClient(name="PS3 Blu-Ray Remote", icon_file=ICON_PATH + "/bluetooth.png", ip=ipaddr)
        # power off the remote after 600s without input
        self.set_timeout(600)
        self.services = []        # zeroconf-discovered XBMC instances
        self.current_xbmc = 0     # index into self.services

    def run(self):
        self.xbmc.connect()
        try:
            # start the zeroconf thread if possible
            try:
                self.zeroconf_thread = ZeroconfThread()
                self.zeroconf_thread.add_service('_xbmc-events._udp',
                                                 self.zeroconf_service_handler)
                self.zeroconf_thread.start()
            except Exception, e:
                print str(e)

            # main thread loop
            while not self.stop():
                status = process_remote(self.isock, self.xbmc)

                if status == 2:    # 2 = socket read timeout
                    if self.timed_out():
                        raise Exception("PS3 Blu-Ray Remote powering off, "\
                                            "timed out")
                elif status == 3:  # 3 = ps and skip +
                    self.next_xbmc()

                elif status == 4:  # 4 = ps and skip -
                    self.previous_xbmc()

                elif not status:   # 0 = keys are normally processed
                    self.reset_timeout()

        # process_remote() will raise an exception on read errors
        except Exception, e:
            print str(e)

        self.zeroconf_thread.stop()
        self.close_sockets()

    def next_xbmc(self):
        """
        Connect to the next XBMC instance
        """
        self.current_xbmc = (self.current_xbmc + 1) % len( self.services )
        self.reconnect()
        return

    def previous_xbmc(self):
        """
        Connect to the previous XBMC instance
        """
        self.current_xbmc -= 1
        if self.current_xbmc < 0 :
            # wrap around to the last discovered instance
            self.current_xbmc = len( self.services ) - 1
        self.reconnect()
        return

    def reconnect(self):
        """
        Reconnect to an XBMC instance based on self.current_xbmc
        """
        try:
            service = self.services[ self.current_xbmc ]
            print "Connecting to %s" % service['name']
            self.xbmc.connect( service['address'], service['port'] )
            self.xbmc.send_notification("PS3 Blu-Ray Remote", "New Connection", None)
        except Exception, e:
            print str(e)

    def zeroconf_service_handler(self, event, service):
        """
        Zeroconf event handler
        """
        if event == zeroconf.SERVICE_FOUND:    # new xbmc service detected
            self.services.append( service )

        elif event == zeroconf.SERVICE_LOST:   # xbmc service lost
            try:
                # search for the service by name, since IP+port isn't available
                for s in self.services:
                    # nuke it, if found
                    if service['name'] == s['name']:
                        self.services.remove(s)
                        break
            except:
                pass
        return
class SixWatch(threading.Thread):
    """Daemon thread that keeps the USB sixwatch helper running,
    restarting it whenever it raises."""

    def __init__(self, mac):
        threading.Thread.__init__(self)
        self.mac = mac
        # daemon: do not block interpreter shutdown
        self.daemon = True
        self.start()

    def run(self):
        while True:
            try:
                sixwatch.main(self.mac)
            except Exception, e:
                print "Exception caught in sixwatch, restarting: " + str(e)
class ZeroconfThread ( threading.Thread ):
    """
    Runs a zeroconf service browser in its own thread, dispatching
    found/lost events to the registered handlers.
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self._zbrowser = None
        self._services = []

    def run(self):
        # zeroconf is optional; the module-level import may have failed
        if zeroconf:
            # create zeroconf service browser
            self._zbrowser = zeroconf.Browser()

            # add the requested services
            for service in self._services:
                self._zbrowser.add_service( service[0], service[1] )

            # run the event loop
            self._zbrowser.run()
        return

    def stop(self):
        """
        Stop the zeroconf browser
        """
        try:
            self._zbrowser.stop()
        except:
            pass
        return

    def add_service(self, type, handler):
        """
        Add a new service to search for.
        NOTE: Services must be added before thread starts.
        """
        self._services.append( [ type, handler ] )
def usage():
    """Print command-line usage for the HID server."""
    print """
PS3 Sixaxis / Blu-Ray Remote HID Server v0.1

Usage: ps3.py [bdaddress] [XBMC host]

  bdaddress => address of local bluetooth device to use (default: auto)
               (e.g. aa:bb:cc:dd:ee:ff)
  ip address => IP address or hostname of the XBMC instance (default: localhost)
               (e.g. 192.168.1.110)
"""
def start_hidd(bdaddr=None, ipaddr="127.0.0.1"):
    """Accept HID connections forever, dispatching each to the matching
    device handler by bluetooth device name."""
    devices = [ 'PLAYSTATION(R)3 Controller',
                'BD Remote Control' ]
    hid = HID(bdaddr)

    watch = None
    if sixwatch:
        try:
            print "Starting USB sixwatch"
            watch = SixWatch(hid.get_local_address())
        except Exception, e:
            print "Failed to initialize sixwatch" + str(e)
            pass

    while True:
        if hid.listen():
            (csock, addr) = hid.get_control_socket()
            # identify the device by its bluetooth friendly name
            device_name = bt_lookup_name(addr[0])
            if device_name == devices[0]:
                # handle PS3 controller
                handle_ps3_controller(hid, ipaddr)
            elif device_name == devices[1]:
                # handle the PS3 remote
                handle_ps3_remote(hid, ipaddr)
            else:
                print "Unknown Device: %s" % (device_name)
def handle_ps3_controller(hid, ipaddr):
    """Spawn a PS3SixaxisThread for a freshly accepted controller."""
    print "Received connection from a Sixaxis PS3 Controller"
    csock = hid.get_control_socket()[0]
    isock = hid.get_interrupt_socket()[0]
    sixaxis = PS3SixaxisThread(csock, isock, ipaddr)
    add_thread(sixaxis)
    sixaxis.start()
    return


def handle_ps3_remote(hid, ipaddr):
    """Spawn a PS3RemoteThread for a freshly accepted remote."""
    print "Received connection from a PS3 Blu-Ray Remote"
    csock = hid.get_control_socket()[0]
    isock = hid.get_interrupt_socket()[0]
    # the remote thread polls; give reads a 1s timeout
    isock.settimeout(1)
    remote = PS3RemoteThread(csock, isock, ipaddr)
    add_thread(remote)
    remote.start()
    return
def add_thread(thread):
    """Register a device thread so the __main__ block can join it on exit."""
    global event_threads
    event_threads.append(thread)
def main():
    """Parse [bdaddress] and/or [XBMC host] arguments, then run the daemon."""
    if len(sys.argv)>3:
        return usage()
    bdaddr = ""
    ipaddr = "127.0.0.1"
    try:
        for addr in sys.argv[1:]:
            try:
                # ensure that the addr is of the format 'aa:bb:cc:dd:ee:ff'
                if "".join([ str(len(a)) for a in addr.split(":") ]) != "222222":
                    raise Exception("Invalid format")
                bdaddr = addr
                print "Connecting to Bluetooth device: %s" % bdaddr
            except Exception, e:
                # not a bluetooth address: treat the argument as a host
                try:
                    ipaddr = addr
                    print "Connecting to : %s" % ipaddr
                except:
                    print str(e)
                    return usage()
    except Exception, e:
        pass

    print "Starting HID daemon"
    start_hidd(bdaddr, ipaddr)


if __name__=="__main__":
    try:
        main()
    finally:
        # ask every device thread to stop, then wait for it to exit
        for t in event_threads:
            try:
                print "Waiting for thread "+str(t)+" to terminate"
                t.stop_thread()
                if t.isAlive():
                    t.join()
                print "Thread "+str(t)+" terminated"
            except Exception, e:
                print str(e)
                pass
| gpl-2.0 |
mikewiebe-ansible/ansible | lib/ansible/modules/network/nso/nso_query.py | 69 | 3295 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'
}
DOCUMENTATION = '''
---
module: nso_query
extends_documentation_fragment: nso
short_description: Query data from Cisco NSO.
description:
- This module provides support for querying data from Cisco NSO using XPath.
requirements:
- Cisco NSO version 3.4 or higher.
author: "Claes Nästén (@cnasten)"
options:
xpath:
description: XPath selection relative to the root.
required: true
fields:
description: >
List of fields to select from matching nodes.
required: true
version_added: "2.5"
'''
EXAMPLES = '''
- name: Select device name and description
nso_query:
url: http://localhost:8080/jsonrpc
username: username
password: password
xpath: /ncs:devices/device
fields:
- name
- description
'''
RETURN = '''
output:
description: Value of matching nodes
returned: success
type: list
'''
from ansible.module_utils.network.nso.nso import connect, verify_version, nso_argument_spec
from ansible.module_utils.network.nso.nso import ModuleFailException, NsoException
from ansible.module_utils.basic import AnsibleModule
class NsoQuery(object):
    """Thin wrapper that runs one XPath query against Cisco NSO.

    In Ansible check mode no query is issued and an empty result
    set is reported.
    """

    # minimum NSO versions this module supports
    REQUIRED_VERSIONS = [
        (3, 4)
    ]

    def __init__(self, check_mode, client, xpath, fields):
        self._check_mode = check_mode
        self._client = client
        self._xpath = xpath
        self._fields = fields

    def main(self):
        """Execute the query; return a list of matching field values."""
        if self._check_mode:
            return []
        return self._client.query(self._xpath, self._fields)
def main():
    """Ansible entry point: query NSO over JSON-RPC and return the result."""
    argument_spec = dict(
        xpath=dict(required=True, type='str'),
        fields=dict(required=True, type='list')
    )
    argument_spec.update(nso_argument_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    p = module.params

    client = connect(p)
    nso_query = NsoQuery(
        module.check_mode, client,
        p['xpath'], p['fields'])
    try:
        verify_version(client, NsoQuery.REQUIRED_VERSIONS)
        output = nso_query.main()
    except (NsoException, ModuleFailException) as ex:
        # Both exception types expose a .message attribute with the reason.
        # fail_json() raises SystemExit, so the finally clause below still
        # logs the client out.
        module.fail_json(msg=ex.message)
    finally:
        # Always release the NSO session — previously logout() was
        # copy-pasted into every branch and leaked on unexpected errors.
        client.logout()
    module.exit_json(changed=False, output=output)


if __name__ == '__main__':
    main()
| gpl-3.0 |
tedder/ansible | test/units/parsing/test_unquote.py | 298 | 1602 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.quoting import unquote
import pytest
# (input, expected) pairs for unquote(): each case pairs a possibly-quoted
# string with the text that should remain after the outer quotes are removed.
UNQUOTE_DATA = (
    (u'1', u'1'),
    (u'\'1\'', u'1'),
    (u'"1"', u'1'),
    # Quotes of the other kind nested inside are preserved.
    (u'"1 \'2\'"', u'1 \'2\''),
    (u'\'1 "2"\'', u'1 "2"'),
    (u'\'1 \'2\'\'', u'1 \'2\''),
    # A trailing backslash before the closing quote prevents unquoting.
    (u'"1\\"', u'"1\\"'),
    (u'\'1\\\'', u'\'1\\\''),
    # Escaped quotes inside the string stay escaped after unquoting.
    (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
    (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
    # A lone quote character is returned unchanged.
    (u'"', u'"'),
    (u'\'', u'\''),
    # Not entirely sure these are good but they match the current
    # behaviour
    (u'"1""2"', u'1""2'),
    (u'\'1\'\'2\'', u'1\'\'2'),
    (u'"1" 2 "3"', u'1" 2 "3'),
    (u'"1"\'2\'"3"', u'1"\'2\'"3'),
)
@pytest.mark.parametrize("quoted, expected", UNQUOTE_DATA)
def test_unquote(quoted, expected):
    """unquote() strips one level of matching outer quotes."""
    assert unquote(quoted) == expected
| gpl-3.0 |
dcy/epush | examples/rabbitmq/xiaomi.py | 1 | 1181 | #!/usr/bin/env python
#coding:utf-8
import pika
import json
# RabbitMQ broker connection settings (example credentials).
HOST = 'localhost'
USERNAME = 'hisir'
PASSWORD = 'hisir123'
class Xiaomi():
    """Publish Xiaomi push-notification jobs to a RabbitMQ queue.

    Each job is a JSON-encoded dict published to the 'xiaomi_c' queue;
    a separate consumer is expected to perform the actual push.
    """

    def __init__(self):
        # Open a blocking connection and channel to the local broker.
        credentials = pika.PlainCredentials(USERNAME, PASSWORD)
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST, credentials=credentials))
        self.channel = self.connection.channel()

    def notification_send(self):
        # Queue a notification for one device, addressed by registration_id.
        data = {'push_method': 'notification_send',
                'title': 'Test 中文',
                'description': 'Content',
                'registration_id': 'go6VssZlTDDypm+hxYdaxycXtqM7M9NsTPbCjzyIyh0='}
        self.in_mq(data)

    def all(self):
        # Queue a broadcast notification to all registered devices.
        data = {'push_method': 'all',
                'title': 'Test中文',
                'description': 'Test'}
        self.in_mq(data)

    def end(self):
        # Tear down the channel and the underlying connection.
        self.channel.close()
        self.connection.close()

    def in_mq(self, data):
        # Publish via the default exchange straight to the 'xiaomi_c' queue.
        self.channel.basic_publish(exchange='',
                                   routing_key='xiaomi_c',
                                   body=json.dumps(data))
if __name__ == "__main__":
    # Demo: enqueue one single-device notification, then clean up.
    xiaomi = Xiaomi()
    xiaomi.notification_send()
    #xiaomi.all()
    xiaomi.end()
| bsd-3-clause |
yeming233/horizon | openstack_dashboard/test/integration_tests/config.py | 4 | 6346 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
# Options for reaching the dashboard under test.
DashboardGroup = [
    cfg.StrOpt('dashboard_url',
               default='http://localhost/',
               help="Where the dashboard can be found"),
    cfg.StrOpt('help_url',
               default='http://docs.openstack.org/',
               help="Dashboard help page url"),
]

# Credentials/projects for both the regular and the admin test user.
IdentityGroup = [
    cfg.StrOpt('username',
               default='demo',
               help="Username to use for non-admin API requests."),
    cfg.StrOpt('password',
               default='secretadmin',
               help="API key to use when authenticating.",
               secret=True),
    cfg.StrOpt('home_project',
               default='demo',
               help="Project to keep all objects belonging to a regular user."
               ),
    cfg.StrOpt('admin_username',
               default='admin',
               help="Administrative Username to use for admin API "
                    "requests."),
    cfg.StrOpt('admin_password',
               default='secretadmin',
               help="API key to use when authenticating as admin.",
               secret=True),
    cfg.StrOpt('admin_home_project',
               default='admin',
               help="Project to keep all objects belonging to an admin user."),
    cfg.StrOpt('default_keystone_role',
               default='Member',
               help="Name of default role every user gets in his new project"),
    cfg.StrOpt('default_keystone_admin_role',
               default='admin',
               help="Name of the role that grants admin rights to a user in "
                    "his project"),
]

# Test images (one remote archive plus the default local image names).
ImageGroup = [
    cfg.StrOpt('panel_type',
               default='legacy',
               help='type/version of images panel'),
    cfg.StrOpt('http_image',
               default='http://download.cirros-cloud.net/0.3.1/'
                       'cirros-0.3.1-x86_64-uec.tar.gz',
               help='http accessible image'),
    cfg.ListOpt('images_list',
                default=['cirros-0.3.4-x86_64-uec',
                         'cirros-0.3.4-x86_64-uec-kernel',
                         'cirros-0.3.4-x86_64-uec-ramdisk'],
                help='default list of images')
]

NetworkGroup = [
    cfg.StrOpt('network_cidr',
               default='10.100.0.0/16',
               help='The cidr block to allocate tenant ipv4 subnets from'),
]

# Which optional OpenStack services the test run may rely on.
AvailableServiceGroup = [
    cfg.BoolOpt('neutron',
                default=True),
    cfg.BoolOpt('heat',
                default=True),
]

# Selenium/WebDriver timeouts and output settings.
SeleniumGroup = [
    cfg.IntOpt('implicit_wait',
               default=10,
               help="Implicit wait timeout in seconds"),
    cfg.IntOpt('explicit_wait',
               default=300,
               help="Explicit wait timeout in seconds"),
    cfg.IntOpt('page_timeout',
               default=30,
               help="Page load timeout in seconds"),
    cfg.StrOpt('screenshots_directory',
               default="integration_tests_screenshots",
               help="Output screenshot directory"),
    cfg.BoolOpt('maximize_browser',
                default=True,
                help="Is the browser size maximized for each test?"),
]

FlavorsGroup = [
    cfg.StrOpt('panel_type',
               default='legacy',
               help='type/version of flavors panel'),
]

ScenarioGroup = [
    cfg.StrOpt('ssh_user',
               default='cirros',
               help='ssh username for image file'),
]

# Defaults used by the "launch instance" workflow tests.
InstancesGroup = [
    cfg.StrOpt('available_zone',
               default='nova',
               help="Zone to be selected for launch Instances"),
    cfg.StrOpt('image_name',
               default='cirros-0.3.4-x86_64-uec (24.0 MB)',
               help="Boot Source to be selected for launch Instances"),
    cfg.StrOpt('flavor',
               default='m1.tiny',
               help="Flavor to be selected for launch Instances"),
]

VolumeGroup = [
    cfg.StrOpt('volume_type',
               default='lvmdriver-1',
               help='Default volume type'),
    cfg.StrOpt('volume_size',
               default='1',
               help='Default volume size ')
]

# Options for testing out-of-tree horizon plugins.
PluginGroup = [
    cfg.BoolOpt('is_plugin',
                default='False',
                help="Set to true if this is a plugin"),
    cfg.MultiStrOpt('plugin_page_path',
                    default='',
                    help='Additional path to look for plugin page content'),
    cfg.MultiStrOpt('plugin_page_structure',
                    default='')
]
def _get_config_files():
conf_dir = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
'integration_tests')
conf_file = os.environ.get('HORIZON_INTEGRATION_TESTS_CONFIG_FILE',
"%s/horizon.conf" % conf_dir)
config_files = [conf_file]
local_config = os.environ.get('HORIZON_INTEGRATION_TESTS_LOCAL_CONFIG',
"%s/local-horizon.conf" % conf_dir)
if os.path.isfile(local_config):
config_files.append(local_config)
return config_files
def get_config():
    """Load the test configuration and return the populated CONF object."""
    cfg.CONF([], project='horizon', default_config_files=_get_config_files())
    # (group name, option list) pairs registered on the global CONF object.
    opt_groups = (
        ('dashboard', DashboardGroup),
        ('identity', IdentityGroup),
        ('network', NetworkGroup),
        ('service_available', AvailableServiceGroup),
        ('selenium', SeleniumGroup),
        ('flavors', FlavorsGroup),
        ('image', ImageGroup),
        ('scenario', ScenarioGroup),
        ('launch_instances', InstancesGroup),
        ('plugin', PluginGroup),
        ('volume', VolumeGroup),
    )
    for group, opts in opt_groups:
        cfg.CONF.register_opts(opts, group=group)
    return cfg.CONF
| apache-2.0 |
dominikl/bioformats | components/xsd-fu/python/genshi/template/tests/plugin.py | 24 | 9862 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# Copyright (C) 2006 Matthew Good
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import os
import unittest
from genshi.core import Stream
from genshi.output import DocType
from genshi.template import MarkupTemplate, TextTemplate, NewTextTemplate
from genshi.template.plugin import ConfigurationError, \
MarkupTemplateEnginePlugin, \
TextTemplateEnginePlugin
PACKAGE = 'genshi.template.tests'
class MarkupTemplateEnginePluginTestCase(unittest.TestCase):
    """Tests for the XML/HTML markup template engine plugin: option
    parsing, template loading, and rendering in various output formats."""

    def test_init_no_options(self):
        plugin = MarkupTemplateEnginePlugin()
        self.assertEqual(None, plugin.default_encoding)
        self.assertEqual('html', plugin.default_format)
        self.assertEqual(None, plugin.default_doctype)
        self.assertEqual([], plugin.loader.search_path)
        self.assertEqual(True, plugin.loader.auto_reload)
        self.assertEqual(25, plugin.loader._cache.capacity)

    def test_init_with_loader_options(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.auto_reload': 'off',
            'genshi.max_cache_size': '100',
            'genshi.search_path': '/usr/share/tmpl:/usr/local/share/tmpl',
        })
        self.assertEqual(['/usr/share/tmpl', '/usr/local/share/tmpl'],
                         plugin.loader.search_path)
        self.assertEqual(False, plugin.loader.auto_reload)
        self.assertEqual(100, plugin.loader._cache.capacity)

    def test_init_with_invalid_cache_size(self):
        # Non-numeric cache size must be rejected at construction time.
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.max_cache_size': 'thirty'})

    def test_init_with_output_options(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_encoding': 'iso-8859-15',
            'genshi.default_format': 'xhtml',
            'genshi.default_doctype': 'xhtml-strict',
        })
        self.assertEqual('iso-8859-15', plugin.default_encoding)
        self.assertEqual('xhtml', plugin.default_format)
        self.assertEqual(DocType.XHTML, plugin.default_doctype)

    def test_init_with_invalid_output_format(self):
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.default_format': 'foobar'})

    def test_init_with_invalid_doctype(self):
        self.assertRaises(ConfigurationError, MarkupTemplateEnginePlugin,
                          options={'genshi.default_doctype': 'foobar'})

    def test_load_template_from_file(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        self.assertEqual('test.html', os.path.basename(tmpl.filename))
        assert isinstance(tmpl, MarkupTemplate)

    def test_load_template_from_string(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(None, template_string="""<p>
$message
</p>""")
        self.assertEqual(None, tmpl.filename)
        assert isinstance(tmpl, MarkupTemplate)

    def test_transform_with_load(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        stream = plugin.transform({'message': 'Hello'}, tmpl)
        assert isinstance(stream, Stream)

    def test_transform_without_load(self):
        # transform() may also be given a template name instead of an object.
        plugin = MarkupTemplateEnginePlugin()
        stream = plugin.transform({'message': 'Hello'},
                                  PACKAGE + '.templates.test')
        assert isinstance(stream, Stream)

    def test_render(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html lang="en">
<head>
<title>Test</title>
</head>
<body>
<h1>Test</h1>
<p>Hello</p>
</body>
</html>""", output)

    def test_render_with_format(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, format='xhtml',
                               template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
<head>
<title>Test</title>
</head>
<body>
<h1>Test</h1>
<p>Hello</p>
</body>
</html>""", output)

    def test_render_with_doctype(self):
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_doctype': 'html-strict',
        })
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
<head>
<title>Test</title>
</head>
<body>
<h1>Test</h1>
<p>Hello</p>
</body>
</html>""", output)

    def test_render_fragment_with_doctype(self):
        # fragment=True suppresses the configured doctype in the output.
        plugin = MarkupTemplateEnginePlugin(options={
            'genshi.default_doctype': 'html-strict',
        })
        tmpl = plugin.load_template(PACKAGE + '.templates.test_no_doctype')
        output = plugin.render({'message': 'Hello'}, template=tmpl,
                               fragment=True)
        self.assertEqual("""<html lang="en">
<head>
<title>Test</title>
</head>
<body>
<h1>Test</h1>
<p>Hello</p>
</body>
</html>""", output)

    def test_helper_functions(self):
        plugin = MarkupTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.functions')
        output = plugin.render({'snippet': u'<b>Foo</b>'}, template=tmpl)
        self.assertEqual("""<div>
False
bar
<b>Foo</b>
<b>Foo</b>
</div>""", output)
class TextTemplateEnginePluginTestCase(unittest.TestCase):
    """Tests for the plain-text template engine plugin, including the
    optional "new" text template syntax."""

    def test_init_no_options(self):
        plugin = TextTemplateEnginePlugin()
        self.assertEqual(None, plugin.default_encoding)
        self.assertEqual('text', plugin.default_format)
        self.assertEqual([], plugin.loader.search_path)
        self.assertEqual(True, plugin.loader.auto_reload)
        self.assertEqual(25, plugin.loader._cache.capacity)

    def test_init_with_loader_options(self):
        plugin = TextTemplateEnginePlugin(options={
            'genshi.auto_reload': 'off',
            'genshi.max_cache_size': '100',
            'genshi.search_path': '/usr/share/tmpl:/usr/local/share/tmpl',
        })
        self.assertEqual(['/usr/share/tmpl', '/usr/local/share/tmpl'],
                         plugin.loader.search_path)
        self.assertEqual(False, plugin.loader.auto_reload)
        self.assertEqual(100, plugin.loader._cache.capacity)

    def test_init_with_output_options(self):
        plugin = TextTemplateEnginePlugin(options={
            'genshi.default_encoding': 'iso-8859-15',
        })
        self.assertEqual('iso-8859-15', plugin.default_encoding)

    def test_init_with_new_syntax(self):
        # Opting in to the new syntax swaps the template class.
        plugin = TextTemplateEnginePlugin(options={
            'genshi.new_text_syntax': 'yes',
        })
        self.assertEqual(NewTextTemplate, plugin.template_class)
        tmpl = plugin.load_template(PACKAGE + '.templates.new_syntax')
        output = plugin.render({'foo': True}, template=tmpl)
        self.assertEqual('bar', output)

    def test_load_template_from_file(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        assert isinstance(tmpl, TextTemplate)
        self.assertEqual('test.txt', os.path.basename(tmpl.filename))

    def test_load_template_from_string(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(None, template_string="$message")
        self.assertEqual(None, tmpl.filename)
        assert isinstance(tmpl, TextTemplate)

    def test_transform_without_load(self):
        plugin = TextTemplateEnginePlugin()
        stream = plugin.transform({'message': 'Hello'},
                                  PACKAGE + '.templates.test')
        assert isinstance(stream, Stream)

    def test_transform_with_load(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        stream = plugin.transform({'message': 'Hello'}, tmpl)
        assert isinstance(stream, Stream)

    def test_render(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.test')
        output = plugin.render({'message': 'Hello'}, template=tmpl)
        self.assertEqual("""Test
====
Hello
""", output)

    def test_helper_functions(self):
        plugin = TextTemplateEnginePlugin()
        tmpl = plugin.load_template(PACKAGE + '.templates.functions')
        output = plugin.render({}, template=tmpl)
        self.assertEqual("""False
bar
""", output)
def suite():
    """Build the combined test suite for both plugin test cases."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    # unittest.makeSuite() is deprecated and removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase() is the supported equivalent and
    # uses the same default 'test' method-name prefix.
    suite.addTest(loader.loadTestsFromTestCase(MarkupTemplateEnginePluginTestCase))
    suite.addTest(loader.loadTestsFromTestCase(TextTemplateEnginePluginTestCase))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| gpl-2.0 |
czgu/opendataexperience | env/lib/python2.7/site-packages/django/contrib/sessions/backends/base.py | 61 | 11249 | from __future__ import unicode_literals
import base64
from datetime import datetime, timedelta
import logging
import string
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.crypto import constant_time_compare
from django.utils.crypto import get_random_string
from django.utils.crypto import salted_hmac
from django.utils import timezone
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
from django.contrib.sessions.exceptions import SuspiciousSession
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits


class CreateError(Exception):
    """
    Used internally as a consistent exception type to catch from save (see the
    docstring for SessionBase.save() for details).
    """
    pass
class SessionBase(object):
    """
    Base class for all Session classes.

    Provides a dict-like interface over lazily-loaded session data; concrete
    backends implement the storage methods at the bottom of the class.
    """
    TEST_COOKIE_NAME = 'testcookie'
    TEST_COOKIE_VALUE = 'worked'

    def __init__(self, session_key=None):
        # Sessions are lazy: nothing is read from the backend until the
        # ``_session`` property is first accessed.
        self._session_key = session_key
        self.accessed = False
        self.modified = False
        self.serializer = import_string(settings.SESSION_SERIALIZER)

    def __contains__(self, key):
        return key in self._session

    def __getitem__(self, key):
        return self._session[key]

    def __setitem__(self, key, value):
        self._session[key] = value
        self.modified = True

    def __delitem__(self, key):
        del self._session[key]
        self.modified = True

    def get(self, key, default=None):
        return self._session.get(key, default)

    def pop(self, key, *args):
        # Only mark the session modified if the key actually existed.
        self.modified = self.modified or key in self._session
        return self._session.pop(key, *args)

    def setdefault(self, key, value):
        if key in self._session:
            return self._session[key]
        else:
            self.modified = True
            self._session[key] = value
            return value

    def set_test_cookie(self):
        self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE

    def test_cookie_worked(self):
        return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE

    def delete_test_cookie(self):
        del self[self.TEST_COOKIE_NAME]

    def _hash(self, value):
        # Key the HMAC on the concrete backend class name so hashes are not
        # interchangeable between different session backends.
        key_salt = "django.contrib.sessions" + self.__class__.__name__
        return salted_hmac(key_salt, value).hexdigest()

    def encode(self, session_dict):
        "Returns the given session dictionary serialized and encoded as a string."
        serialized = self.serializer().dumps(session_dict)
        hash = self._hash(serialized)
        return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')

    def decode(self, session_data):
        # Inverse of encode(): verify the HMAC, then deserialize the payload.
        encoded_data = base64.b64decode(force_bytes(session_data))
        try:
            # could produce ValueError if there is no ':'
            hash, serialized = encoded_data.split(b':', 1)
            expected_hash = self._hash(serialized)
            if not constant_time_compare(hash.decode(), expected_hash):
                raise SuspiciousSession("Session data corrupted")
            else:
                return self.serializer().loads(serialized)
        except Exception as e:
            # ValueError, SuspiciousOperation, unpickling exceptions. If any of
            # these happen, just return an empty dictionary (an empty session).
            if isinstance(e, SuspiciousOperation):
                logger = logging.getLogger('django.security.%s' %
                                           e.__class__.__name__)
                logger.warning(force_text(e))
            return {}

    def update(self, dict_):
        self._session.update(dict_)
        self.modified = True

    def has_key(self, key):
        return key in self._session

    def keys(self):
        return self._session.keys()

    def values(self):
        return self._session.values()

    def items(self):
        return self._session.items()

    def iterkeys(self):
        return self._session.iterkeys()

    def itervalues(self):
        return self._session.itervalues()

    def iteritems(self):
        return self._session.iteritems()

    def clear(self):
        # To avoid unnecessary persistent storage accesses, we set up the
        # internals directly (loading data wastes time, since we are going to
        # set it to an empty dict anyway).
        self._session_cache = {}
        self.accessed = True
        self.modified = True

    def _get_new_session_key(self):
        "Returns session key that isn't being used."
        while True:
            session_key = get_random_string(32, VALID_KEY_CHARS)
            if not self.exists(session_key):
                break
        return session_key

    def _get_or_create_session_key(self):
        if self._session_key is None:
            self._session_key = self._get_new_session_key()
        return self._session_key

    def _get_session_key(self):
        return self._session_key

    # Read-only: new keys are only ever assigned internally.
    session_key = property(_get_session_key)

    def _get_session(self, no_load=False):
        """
        Lazily loads session from storage (unless "no_load" is True, when only
        an empty dict is stored) and stores it in the current instance.
        """
        self.accessed = True
        try:
            return self._session_cache
        except AttributeError:
            if self.session_key is None or no_load:
                self._session_cache = {}
            else:
                self._session_cache = self.load()
        return self._session_cache

    _session = property(_get_session)

    def get_expiry_age(self, **kwargs):
        """Get the number of seconds until the session expires.

        Optionally, this function accepts `modification` and `expiry` keyword
        arguments specifying the modification and expiry of the session.
        """
        try:
            modification = kwargs['modification']
        except KeyError:
            modification = timezone.now()
        # Make the difference between "expiry=None passed in kwargs" and
        # "expiry not passed in kwargs", in order to guarantee not to trigger
        # self.load() when expiry is provided.
        try:
            expiry = kwargs['expiry']
        except KeyError:
            expiry = self.get('_session_expiry')

        if not expiry:  # Checks both None and 0 cases
            return settings.SESSION_COOKIE_AGE
        if not isinstance(expiry, datetime):
            return expiry
        delta = expiry - modification
        return delta.days * 86400 + delta.seconds

    def get_expiry_date(self, **kwargs):
        """Get session the expiry date (as a datetime object).

        Optionally, this function accepts `modification` and `expiry` keyword
        arguments specifying the modification and expiry of the session.
        """
        try:
            modification = kwargs['modification']
        except KeyError:
            modification = timezone.now()
        # Same comment as in get_expiry_age
        try:
            expiry = kwargs['expiry']
        except KeyError:
            expiry = self.get('_session_expiry')

        if isinstance(expiry, datetime):
            return expiry
        if not expiry:  # Checks both None and 0 cases
            expiry = settings.SESSION_COOKIE_AGE
        return modification + timedelta(seconds=expiry)

    def set_expiry(self, value):
        """
        Sets a custom expiration for the session. ``value`` can be an integer,
        a Python ``datetime`` or ``timedelta`` object or ``None``.

        If ``value`` is an integer, the session will expire after that many
        seconds of inactivity. If set to ``0`` then the session will expire on
        browser close.

        If ``value`` is a ``datetime`` or ``timedelta`` object, the session
        will expire at that specific future time.

        If ``value`` is ``None``, the session uses the global session expiry
        policy.
        """
        if value is None:
            # Remove any custom expiration for this session.
            try:
                del self['_session_expiry']
            except KeyError:
                pass
            return
        if isinstance(value, timedelta):
            value = timezone.now() + value
        self['_session_expiry'] = value

    def get_expire_at_browser_close(self):
        """
        Returns ``True`` if the session is set to expire when the browser
        closes, and ``False`` if there's an expiry date. Use
        ``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
        date/age, if there is one.
        """
        if self.get('_session_expiry') is None:
            return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
        return self.get('_session_expiry') == 0

    def flush(self):
        """
        Removes the current session data from the database and regenerates the
        key.
        """
        self.clear()
        self.delete()
        self.create()

    def cycle_key(self):
        """
        Creates a new session key, whilst retaining the current session data.
        """
        data = self._session_cache
        key = self.session_key
        self.create()
        self._session_cache = data
        self.delete(key)

    # Methods that child classes must implement.

    def exists(self, session_key):
        """
        Returns True if the given session_key already exists.
        """
        raise NotImplementedError('subclasses of SessionBase must provide an exists() method')

    def create(self):
        """
        Creates a new session instance. Guaranteed to create a new object with
        a unique key and will have saved the result once (with empty data)
        before the method returns.
        """
        raise NotImplementedError('subclasses of SessionBase must provide a create() method')

    def save(self, must_create=False):
        """
        Saves the session data. If 'must_create' is True, a new session object
        is created (otherwise a CreateError exception is raised). Otherwise,
        save() can update an existing object with the same key.
        """
        raise NotImplementedError('subclasses of SessionBase must provide a save() method')

    def delete(self, session_key=None):
        """
        Deletes the session data under this key. If the key is None, the
        current session key value is used.
        """
        raise NotImplementedError('subclasses of SessionBase must provide a delete() method')

    def load(self):
        """
        Loads the session data and returns a dictionary.
        """
        raise NotImplementedError('subclasses of SessionBase must provide a load() method')

    @classmethod
    def clear_expired(cls):
        """
        Remove expired sessions from the session store.

        If this operation isn't possible on a given backend, it should raise
        NotImplementedError. If it isn't necessary, because the backend has
        a built-in expiration mechanism, it should be a no-op.
        """
        raise NotImplementedError('This backend does not support clear_expired().')
| apache-2.0 |
morphis/home-assistant | homeassistant/helpers/config_validation.py | 10 | 16175 | """Helpers for config validation using voluptuous."""
from collections import OrderedDict
from datetime import timedelta, datetime as datetime_sys
import os
import re
from urllib.parse import urlparse
from socket import _GLOBAL_DEFAULT_TIMEOUT
from typing import Any, Union, TypeVar, Callable, Sequence, Dict
import voluptuous as vol
from homeassistant.loader import get_platform
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, CONF_TIMEOUT, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE, CONF_UNIT_SYSTEM_IMPERIAL, CONF_UNIT_SYSTEM_METRIC)
from homeassistant.core import valid_entity_id
from homeassistant.exceptions import TemplateError
import homeassistant.util.dt as dt_util
from homeassistant.util import slugify as util_slugify
from homeassistant.helpers import template as template_helper
# pylint: disable=invalid-name
# Error template for malformed time offsets.
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"

# Home Assistant types
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
                   msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
                    msg='invalid longitude')
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
port = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))

# typing typevar
T = TypeVar('T')
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys: str) -> Callable:
    """Validator that at least one key exists."""
    def validate(obj: Dict) -> Dict:
        """Test keys exist in dict."""
        if not isinstance(obj, dict):
            raise vol.Invalid('expected dictionary')
        if any(key in keys for key in obj):
            return obj
        raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))

    return validate
def boolean(value: Any) -> bool:
    """Validate and coerce a boolean value."""
    if not isinstance(value, str):
        # Non-strings fall back to normal Python truthiness.
        return bool(value)
    normalized = value.lower()
    if normalized in ('1', 'true', 'yes', 'on', 'enable'):
        return True
    if normalized in ('0', 'false', 'no', 'off', 'disable'):
        return False
    raise vol.Invalid('invalid boolean value {}'.format(normalized))
def isdevice(value):
    """Validate that value is a real device."""
    try:
        os.stat(value)
    except OSError:
        raise vol.Invalid('No device at {} found'.format(value))
    return str(value)
def isfile(value: Any) -> str:
    """Validate that the value is an existing, readable file."""
    if value is None:
        raise vol.Invalid('None is not file')
    path = os.path.expanduser(str(value))
    if not os.path.isfile(path):
        raise vol.Invalid('not a file')
    if not os.access(path, os.R_OK):
        raise vol.Invalid('file not readable')
    return path
def ensure_list(value: Union[T, Sequence[T]]) -> Sequence[T]:
    """Wrap value in list if it is not one."""
    if value is None:
        # None is treated as "no values configured".
        return []
    if isinstance(value, list):
        return value
    return [value]
def entity_id(value: Any) -> str:
    """Validate Entity ID."""
    # Entity IDs are case-insensitive; normalize to lowercase before checking.
    value = string(value).lower()
    if valid_entity_id(value):
        return value
    raise vol.Invalid('Entity ID {} is an invalid entity id'.format(value))
def entity_ids(value: Union[str, Sequence]) -> Sequence[str]:
    """Validate Entity IDs."""
    if value is None:
        raise vol.Invalid('Entity IDs can not be None')
    if isinstance(value, str):
        # Accept a comma-separated string as well as a list.
        value = [ent_id.strip() for ent_id in value.split(',')]

    return [entity_id(ent_id) for ent_id in value]
def enum(enumClass):
    """Create validator for specified enum."""
    # Accept a member name and coerce it to the enum member itself.
    return vol.All(vol.In(enumClass.__members__), enumClass.__getitem__)
def icon(value):
    """Validate icon."""
    str_value = str(value)
    if not str_value.startswith('mdi:'):
        raise vol.Invalid('Icons should start with prefix "mdi:"')
    return str_value
# Validator for {'days': ..., 'hours': ..., ...} style offsets.  At least one
# unit key must be present; the validated dict is coerced to a timedelta.
time_period_dict = vol.All(
    dict, vol.Schema({
        'days': vol.Coerce(int),
        'hours': vol.Coerce(int),
        'minutes': vol.Coerce(int),
        'seconds': vol.Coerce(int),
        'milliseconds': vol.Coerce(int),
    }),
    has_at_least_one_key('days', 'hours', 'minutes',
                         'seconds', 'milliseconds'),
    lambda value: timedelta(**value))
def time_period_str(value: str) -> timedelta:
    """Validate and transform time offset."""
    if isinstance(value, int):
        raise vol.Invalid('Make sure you wrap time values in quotes')
    elif not isinstance(value, str):
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    # Optional leading sign; the sign multiplies the final offset.
    sign = 1
    if value.startswith('-'):
        sign = -1
        value = value[1:]
    elif value.startswith('+'):
        value = value[1:]

    try:
        fields = [int(part) for part in value.split(':')]
    except ValueError:
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    if len(fields) == 2:
        hours, minutes = fields
        seconds = 0
    elif len(fields) == 3:
        hours, minutes, seconds = fields
    else:
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))

    return sign * timedelta(hours=hours, minutes=minutes, seconds=seconds)
def time_period_seconds(value: Union[int, str]) -> timedelta:
    """Validate and transform seconds to a time offset."""
    try:
        seconds = int(value)
    except (ValueError, TypeError):
        raise vol.Invalid('Expected seconds, got {}'.format(value))
    return timedelta(seconds=seconds)
# Accept any of the supported time-period representations.
time_period = vol.Any(time_period_str, time_period_seconds, timedelta,
                      time_period_dict)
def match_all(value):
    """Validator that matches all values."""
    # Identity validator: accepts any value unchanged.
    return value
def platform_validator(domain):
    """Validate if platform exists for given domain."""
    def validator(value):
        """Test if platform exists."""
        if value is None:
            raise vol.Invalid('platform cannot be None')
        # get_platform() consults the component loader for <domain>.<value>.
        if get_platform(domain, str(value)):
            return value
        raise vol.Invalid(
            'platform {} does not exist for {}'.format(value, domain))
    return validator
def positive_timedelta(value: timedelta) -> timedelta:
    """Validate timedelta is positive."""
    if value >= timedelta(0):
        return value
    raise vol.Invalid('Time period should be positive')
def service(value):
    """Validate a service identifier.

    Services share the ``<domain>.<name>`` format with entity IDs, so the
    same helper is used.
    """
    if not valid_entity_id(value):
        raise vol.Invalid('Service {} does not match format <domain>.<name>'
                          .format(value))
    return value
def slug(value):
    """Validate that the value already is a valid slug."""
    if value is None:
        raise vol.Invalid('Slug should not be None')
    text = str(value)
    slugified = util_slugify(text)
    if text != slugified:
        raise vol.Invalid('invalid slug {} (try {})'.format(text, slugified))
    return text
def slugify(value):
    """Coerce a value to a slug."""
    if value is None:
        raise vol.Invalid('Slug should not be None')
    slugified = util_slugify(str(value))
    if not slugified:
        raise vol.Invalid('Unable to slugify {}'.format(value))
    return slugified
def string(value: Any) -> str:
    """Coerce value to string, rejecting None."""
    if value is None:
        raise vol.Invalid('string value is None')
    return str(value)
def temperature_unit(value) -> str:
    """Validate and transform a temperature unit letter ('C' or 'F')."""
    unit = str(value).upper()
    if unit == 'C':
        return TEMP_CELSIUS
    if unit == 'F':
        return TEMP_FAHRENHEIT
    raise vol.Invalid('invalid temperature unit (expected C or F)')
# Lower-case the input, then require one of the two known unit systems.
unit_system = vol.All(vol.Lower, vol.Any(CONF_UNIT_SYSTEM_METRIC,
                                         CONF_UNIT_SYSTEM_IMPERIAL))
def template(value):
    """Validate a jinja2 template string and return the compiled Template."""
    if value is None:
        raise vol.Invalid('template value is None')
    if isinstance(value, (list, dict, template_helper.Template)):
        raise vol.Invalid('template value should be a string')

    tpl = template_helper.Template(str(value))
    try:
        tpl.ensure_valid()
    except TemplateError as ex:
        raise vol.Invalid('invalid template ({})'.format(ex))
    return tpl
def template_complex(value):
    """Validate jinja2 templates inside an arbitrarily nested structure.

    Lists and dicts are validated in place (the same container object is
    returned); any other value is validated as a single template.
    """
    if isinstance(value, list):
        for index, item in enumerate(value):
            value[index] = template_complex(item)
        return value
    if isinstance(value, dict):
        for key, item in value.items():
            value[key] = template_complex(item)
        return value
    return template(value)
def time(value):
    """Validate a time value via dt_util parsing."""
    parsed = dt_util.parse_time(value)
    if parsed is None:
        raise vol.Invalid('Invalid time specified: {}'.format(value))
    return parsed
def datetime(value):
    """Validate a datetime, parsing non-datetime input via dt_util."""
    if isinstance(value, datetime_sys):
        return value

    try:
        parsed = dt_util.parse_datetime(value)
    except TypeError:
        parsed = None

    if parsed is None:
        raise vol.Invalid('Invalid datetime specified: {}'.format(value))
    return parsed
def time_zone(value):
    """Validate a time zone name against the tz database."""
    if dt_util.get_time_zone(value) is None:
        raise vol.Invalid(
            'Invalid time zone passed in. Valid options can be found here: '
            'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
    return value
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
def socket_timeout(value):
    """Validate a socket timeout: float > 0.0.

    None is coerced to the socket._GLOBAL_DEFAULT_TIMEOUT bare object.
    """
    if value is None:
        return _GLOBAL_DEFAULT_TIMEOUT
    try:
        float_value = float(value)
    except (ValueError, TypeError) as err:
        # Only wrap conversion failures. The previous broad
        # ``except Exception`` also caught the vol.Invalid raised below for
        # non-positive values, double-wrapping its message.
        raise vol.Invalid('Invalid socket timeout: {err}'.format(err=err))
    if float_value > 0.0:
        return float_value
    raise vol.Invalid('Invalid socket timeout value.'
                      ' float > 0.0 required.')
# pylint: disable=no-value-for-parameter
def url(value: Any) -> str:
    """Validate an URL with an http or https scheme."""
    candidate = str(value)
    if urlparse(candidate).scheme not in ['http', 'https']:
        raise vol.Invalid('invalid url')
    return vol.Schema(vol.Url())(candidate)
def x10_address(value):
    """Validate an x10 address (house code A-P, unit 1-16) and lowercase it."""
    if re.match(r'([A-Pa-p])(?:[2-9]|1[0-6]?)$', value) is None:
        raise vol.Invalid('Invalid X10 Address')
    return str(value).lower()
def ordered_dict(value_validator, key_validator=match_all):
    """Build a dict validator that preserves key ordering.

    ``value_validator`` is applied to every value of the dictionary and
    ``key_validator`` (optional) to every key.
    """
    item_validator = vol.Schema({key_validator: value_validator})

    def validator(value):
        """Validate ordered dict."""
        if not isinstance(value, dict):
            raise vol.Invalid('Value {} is not a dictionary'.format(value))
        result = OrderedDict()
        for key, val in value.items():
            result.update(item_validator({key: val}))
        return result

    return validator
def ensure_list_csv(value: Any) -> Sequence:
    """Ensure that input is a list or make one from comma-separated string."""
    if not isinstance(value, str):
        return ensure_list(value)
    return [part.strip() for part in value.split(',')]
# Validator helpers
def key_dependency(key, dependency):
    """Build a validator that requires *dependency* whenever *key* is set."""
    def validator(value):
        """Test dependencies."""
        if not isinstance(value, dict):
            raise vol.Invalid('key dependencies require a dict')
        if key in value and dependency not in value:
            raise vol.Invalid(
                'dependency violation - key "{}" requires '
                'key "{}" to exist'.format(key, dependency))
        return value
    return validator
# Schemas
# Base schema shared by all platform configurations; extra keys are kept
# (ALLOW_EXTRA) so each platform can define its own options.
PLATFORM_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): string,
    vol.Optional(CONF_SCAN_INTERVAL): time_period
}, extra=vol.ALLOW_EXTRA)

# Schema for firing an event from a script/automation.
EVENT_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required('event'): string,
    vol.Optional('event_data'): dict,
})

# Schema for a service call; 'service' and 'service_template' are mutually
# exclusive ('service name' group) and at least one must be present.
SERVICE_SCHEMA = vol.All(vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Exclusive('service', 'service name'): service,
    vol.Exclusive('service_template', 'service name'): template,
    vol.Optional('data'): dict,
    vol.Optional('data_template'): {match_all: template_complex},
    vol.Optional(CONF_ENTITY_ID): entity_ids,
}), has_at_least_one_key('service', 'service_template'))
# Condition schemas. Each vol.All pairs the per-key schema with a
# cross-field constraint (at-least-one-key / key dependency).
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'numeric_state',
    vol.Required(CONF_ENTITY_ID): entity_id,
    CONF_BELOW: vol.Coerce(float),
    CONF_ABOVE: vol.Coerce(float),
    vol.Optional(CONF_VALUE_TEMPLATE): template,
}), has_at_least_one_key(CONF_BELOW, CONF_ABOVE))

STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'state',
    vol.Required(CONF_ENTITY_ID): entity_id,
    vol.Required('state'): str,
    vol.Optional('for'): vol.All(time_period, positive_timedelta),
    # To support use_trigger_value in automation
    # Deprecated 2016/04/25
    vol.Optional('from'): str,
}), key_dependency('for', 'state'))

SUN_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'sun',
    vol.Optional('before'): sun_event,
    vol.Optional('before_offset'): time_period,
    vol.Optional('after'): vol.All(vol.Lower, vol.Any('sunset', 'sunrise')),
    vol.Optional('after_offset'): time_period,
}), has_at_least_one_key('before', 'after'))

TEMPLATE_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'template',
    vol.Required(CONF_VALUE_TEMPLATE): template,
})

TIME_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'time',
    'before': time,
    'after': time,
    'weekday': weekdays,
}), has_at_least_one_key('before', 'after', 'weekday'))

ZONE_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'zone',
    vol.Required(CONF_ENTITY_ID): entity_id,
    'zone': entity_id,
    # To support use_trigger_value in automation
    # Deprecated 2016/04/25
    vol.Optional('event'): vol.Any('enter', 'leave'),
})

# AND/OR recurse into CONDITION_SCHEMA; the lambdas defer the lookup
# because CONDITION_SCHEMA is only defined further down.
AND_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'and',
    vol.Required('conditions'): vol.All(
        ensure_list,
        # pylint: disable=unnecessary-lambda
        [lambda value: CONDITION_SCHEMA(value)],
    )
})

OR_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'or',
    vol.Required('conditions'): vol.All(
        ensure_list,
        # pylint: disable=unnecessary-lambda
        [lambda value: CONDITION_SCHEMA(value)],
    )
})

CONDITION_SCHEMA = vol.Any(
    NUMERIC_STATE_CONDITION_SCHEMA,
    STATE_CONDITION_SCHEMA,
    SUN_CONDITION_SCHEMA,
    TEMPLATE_CONDITION_SCHEMA,
    TIME_CONDITION_SCHEMA,
    ZONE_CONDITION_SCHEMA,
    AND_CONDITION_SCHEMA,
    OR_CONDITION_SCHEMA,
)
# Script step: fixed delay (time period) or a template producing one.
_SCRIPT_DELAY_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required("delay"): vol.Any(
        vol.All(time_period, positive_timedelta),
        template)
})

# Script step: wait until a template evaluates true, with optional timeout.
_SCRIPT_WAIT_TEMPLATE_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required("wait_template"): template,
    vol.Optional(CONF_TIMEOUT): vol.All(time_period, positive_timedelta),
})

# A script is a list whose entries may be service calls, delays,
# wait_templates, events or conditions.
SCRIPT_SCHEMA = vol.All(
    ensure_list,
    [vol.Any(SERVICE_SCHEMA, _SCRIPT_DELAY_SCHEMA,
             _SCRIPT_WAIT_TEMPLATE_SCHEMA, EVENT_SCHEMA, CONDITION_SCHEMA)],
)
| apache-2.0 |
zack3241/incubator-airflow | airflow/contrib/hooks/vertica_hook.py | 60 | 1381 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from vertica_python import connect
from airflow.hooks.dbapi_hook import DbApiHook
class VerticaHook(DbApiHook):
    """Interact with Vertica."""

    conn_name_attr = 'vertica_conn_id'
    default_conn_name = 'vertica_default'
    supports_autocommit = True

    def get_conn(self):
        """Return a Vertica connection built from the configured conn id.

        Host defaults to 'localhost' and port to 5433 when not configured.
        """
        conn = self.get_connection(self.vertica_conn_id)
        conn_config = {
            "user": conn.login,
            "password": conn.password or '',
            "database": conn.schema,
            "host": conn.host or 'localhost',
            "port": int(conn.port) if conn.port else 5433,
        }
        return connect(**conn_config)
| apache-2.0 |
jeremiahmarks/sl4a | python/src/Lib/test/test_fractions.py | 55 | 16373 | """Tests for Lib/fractions.py."""
from decimal import Decimal
from test.test_support import run_unittest
import math
import operator
import fractions
import unittest
from copy import copy, deepcopy
from cPickle import dumps, loads
F = fractions.Fraction
gcd = fractions.gcd
class GcdTest(unittest.TestCase):
    """Tests for the fractions.gcd helper."""

    def testMisc(self):
        # The asserted values show the result takes the sign of the second
        # argument when it is non-zero, otherwise the sign of the first.
        self.assertEquals(0, gcd(0, 0))
        self.assertEquals(1, gcd(1, 0))
        self.assertEquals(-1, gcd(-1, 0))
        self.assertEquals(1, gcd(0, 1))
        self.assertEquals(-1, gcd(0, -1))
        self.assertEquals(1, gcd(7, 1))
        self.assertEquals(-1, gcd(7, -1))
        self.assertEquals(1, gcd(-23, 15))
        self.assertEquals(12, gcd(120, 84))
        self.assertEquals(-12, gcd(84, -120))
def _components(r):
return (r.numerator, r.denominator)
class FractionTest(unittest.TestCase):
    """Tests for the fractions.Fraction type (aliased as F in this module).

    NOTE: this suite uses Python 2 syntax (``except exc_type, e``, ``long``,
    ``cPickle``) and the deprecated assertEquals aliases.
    """

    def assertTypedEquals(self, expected, actual):
        """Asserts that both the types and values are the same."""
        self.assertEquals(type(expected), type(actual))
        self.assertEquals(expected, actual)

    def assertRaisesMessage(self, exc_type, message,
                            callable, *args, **kwargs):
        """Asserts that callable(*args, **kwargs) raises exc_type(message)."""
        try:
            callable(*args, **kwargs)
        except exc_type, e:
            self.assertEquals(message, str(e))
        else:
            self.fail("%s not raised" % exc_type.__name__)

    def testInit(self):
        self.assertEquals((0, 1), _components(F()))
        self.assertEquals((7, 1), _components(F(7)))
        self.assertEquals((7, 3), _components(F(F(7, 3))))

        self.assertEquals((-1, 1), _components(F(-1, 1)))
        self.assertEquals((-1, 1), _components(F(1, -1)))
        self.assertEquals((1, 1), _components(F(-2, -2)))
        self.assertEquals((1, 2), _components(F(5, 10)))
        self.assertEquals((7, 15), _components(F(7, 15)))
        self.assertEquals((10**23, 1), _components(F(10**23)))

        self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)",
                                 F, 12, 0)
        self.assertRaises(TypeError, F, 1.5)
        self.assertRaises(TypeError, F, 1.5 + 3j)

        self.assertRaises(TypeError, F, F(1, 2), 3)
        self.assertRaises(TypeError, F, "3/2", 3)

    def testFromString(self):
        self.assertEquals((5, 1), _components(F("5")))
        self.assertEquals((3, 2), _components(F("3/2")))
        self.assertEquals((3, 2), _components(F(" \n +3/2")))
        self.assertEquals((-3, 2), _components(F("-3/2  ")))
        self.assertEquals((13, 2), _components(F("    013/02 \n  ")))
        self.assertEquals((13, 2), _components(F(u"    013/02 \n  ")))

        self.assertEquals((16, 5), _components(F(" 3.2 ")))
        self.assertEquals((-16, 5), _components(F(u" -3.2 ")))
        self.assertEquals((-3, 1), _components(F(u" -3. ")))
        self.assertEquals((3, 5), _components(F(u" .6 ")))

        self.assertRaisesMessage(
            ZeroDivisionError, "Fraction(3, 0)",
            F, "3/0")
        self.assertRaisesMessage(
            ValueError, "Invalid literal for Fraction: '3/'",
            F, "3/")
        self.assertRaisesMessage(
            ValueError, "Invalid literal for Fraction: '3 /2'",
            F, "3 /2")
        self.assertRaisesMessage(
            # Denominators don't need a sign.
            ValueError, "Invalid literal for Fraction: '3/+2'",
            F, "3/+2")
        self.assertRaisesMessage(
            # Imitate float's parsing.
            ValueError, "Invalid literal for Fraction: '+ 3/2'",
            F, "+ 3/2")
        self.assertRaisesMessage(
            # Avoid treating '.' as a regex special character.
            ValueError, "Invalid literal for Fraction: '3a2'",
            F, "3a2")
        self.assertRaisesMessage(
            # Only parse ordinary decimals, not scientific form.
            ValueError, "Invalid literal for Fraction: '3.2e4'",
            F, "3.2e4")
        self.assertRaisesMessage(
            # Don't accept combinations of decimals and fractions.
            ValueError, "Invalid literal for Fraction: '3/7.2'",
            F, "3/7.2")
        self.assertRaisesMessage(
            # Don't accept combinations of decimals and fractions.
            ValueError, "Invalid literal for Fraction: '3.2/7'",
            F, "3.2/7")
        self.assertRaisesMessage(
            # Allow 3. and .3, but not .
            ValueError, "Invalid literal for Fraction: '.'",
            F, ".")

    def testImmutable(self):
        r = F(7, 3)
        r.__init__(2, 15)
        self.assertEquals((7, 3), _components(r))

        self.assertRaises(AttributeError, setattr, r, 'numerator', 12)
        self.assertRaises(AttributeError, setattr, r, 'denominator', 6)
        self.assertEquals((7, 3), _components(r))

        # But if you _really_ need to:
        r._numerator = 4
        r._denominator = 2
        self.assertEquals((4, 2), _components(r))

        # Which breaks some important operations:
        self.assertNotEquals(F(4, 2), r)

    def testFromFloat(self):
        self.assertRaises(TypeError, F.from_float, 3+4j)
        self.assertEquals((10, 1), _components(F.from_float(10)))
        bigint = 1234567890123456789
        self.assertEquals((bigint, 1), _components(F.from_float(bigint)))
        self.assertEquals((0, 1), _components(F.from_float(-0.0)))
        self.assertEquals((10, 1), _components(F.from_float(10.0)))
        self.assertEquals((-5, 2), _components(F.from_float(-2.5)))
        self.assertEquals((99999999999999991611392, 1),
                          _components(F.from_float(1e23)))
        self.assertEquals(float(10**23), float(F.from_float(1e23)))
        self.assertEquals((3602879701896397, 1125899906842624),
                          _components(F.from_float(3.2)))
        self.assertEquals(3.2, float(F.from_float(3.2)))

        inf = 1e1000
        nan = inf - inf
        self.assertRaisesMessage(
            TypeError, "Cannot convert inf to Fraction.",
            F.from_float, inf)
        self.assertRaisesMessage(
            TypeError, "Cannot convert -inf to Fraction.",
            F.from_float, -inf)
        self.assertRaisesMessage(
            TypeError, "Cannot convert nan to Fraction.",
            F.from_float, nan)

    def testFromDecimal(self):
        self.assertRaises(TypeError, F.from_decimal, 3+4j)
        self.assertEquals(F(10, 1), F.from_decimal(10))
        self.assertEquals(F(0), F.from_decimal(Decimal("-0")))
        self.assertEquals(F(5, 10), F.from_decimal(Decimal("0.5")))
        self.assertEquals(F(5, 1000), F.from_decimal(Decimal("5e-3")))
        self.assertEquals(F(5000), F.from_decimal(Decimal("5e3")))
        self.assertEquals(1 - F(1, 10**30),
                          F.from_decimal(Decimal("0." + "9" * 30)))

        self.assertRaisesMessage(
            TypeError, "Cannot convert Infinity to Fraction.",
            F.from_decimal, Decimal("inf"))
        self.assertRaisesMessage(
            TypeError, "Cannot convert -Infinity to Fraction.",
            F.from_decimal, Decimal("-inf"))
        self.assertRaisesMessage(
            TypeError, "Cannot convert NaN to Fraction.",
            F.from_decimal, Decimal("nan"))
        self.assertRaisesMessage(
            TypeError, "Cannot convert sNaN to Fraction.",
            F.from_decimal, Decimal("snan"))

    def testLimitDenominator(self):
        rpi = F('3.1415926535897932')
        self.assertEqual(rpi.limit_denominator(10000), F(355, 113))
        self.assertEqual(-rpi.limit_denominator(10000), F(-355, 113))
        self.assertEqual(rpi.limit_denominator(113), F(355, 113))
        self.assertEqual(rpi.limit_denominator(112), F(333, 106))
        self.assertEqual(F(201, 200).limit_denominator(100), F(1))
        self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
        self.assertEqual(F(0).limit_denominator(10000), F(0))

    def testConversions(self):
        self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
        self.assertTypedEquals(-1, int(F(-11, 10)))

        self.assertEquals(False, bool(F(0, 1)))
        self.assertEquals(True, bool(F(3, 2)))
        self.assertTypedEquals(0.1, float(F(1, 10)))

        # Check that __float__ isn't implemented by converting the
        # numerator and denominator to float before dividing.
        self.assertRaises(OverflowError, float, long('2'*400+'7'))
        self.assertAlmostEquals(2.0/3,
                                float(F(long('2'*400+'7'), long('3'*400+'1'))))

        self.assertTypedEquals(0.1+0j, complex(F(1,10)))

    def testArithmetic(self):
        self.assertEquals(F(1, 2), F(1, 10) + F(2, 5))
        self.assertEquals(F(-3, 10), F(1, 10) - F(2, 5))
        self.assertEquals(F(1, 25), F(1, 10) * F(2, 5))
        self.assertEquals(F(1, 4), F(1, 10) / F(2, 5))
        self.assertTypedEquals(2, F(9, 10) // F(2, 5))
        self.assertTypedEquals(10**23, F(10**23, 1) // F(1))
        self.assertEquals(F(2, 3), F(-7, 3) % F(3, 2))
        self.assertEquals(F(8, 27), F(2, 3) ** F(3))
        self.assertEquals(F(27, 8), F(2, 3) ** F(-3))
        self.assertTypedEquals(2.0, F(4) ** F(1, 2))
        # Will return 1j in 3.0:
        self.assertRaises(ValueError, pow, F(-1), F(1, 2))

    def testMixedArithmetic(self):
        self.assertTypedEquals(F(11, 10), F(1, 10) + 1)
        self.assertTypedEquals(1.1, F(1, 10) + 1.0)
        self.assertTypedEquals(1.1 + 0j, F(1, 10) + (1.0 + 0j))
        self.assertTypedEquals(F(11, 10), 1 + F(1, 10))
        self.assertTypedEquals(1.1, 1.0 + F(1, 10))
        self.assertTypedEquals(1.1 + 0j, (1.0 + 0j) + F(1, 10))

        self.assertTypedEquals(F(-9, 10), F(1, 10) - 1)
        self.assertTypedEquals(-0.9, F(1, 10) - 1.0)
        self.assertTypedEquals(-0.9 + 0j, F(1, 10) - (1.0 + 0j))
        self.assertTypedEquals(F(9, 10), 1 - F(1, 10))
        self.assertTypedEquals(0.9, 1.0 - F(1, 10))
        self.assertTypedEquals(0.9 + 0j, (1.0 + 0j) - F(1, 10))

        self.assertTypedEquals(F(1, 10), F(1, 10) * 1)
        self.assertTypedEquals(0.1, F(1, 10) * 1.0)
        self.assertTypedEquals(0.1 + 0j, F(1, 10) * (1.0 + 0j))
        self.assertTypedEquals(F(1, 10), 1 * F(1, 10))
        self.assertTypedEquals(0.1, 1.0 * F(1, 10))
        self.assertTypedEquals(0.1 + 0j, (1.0 + 0j) * F(1, 10))

        self.assertTypedEquals(F(1, 10), F(1, 10) / 1)
        self.assertTypedEquals(0.1, F(1, 10) / 1.0)
        self.assertTypedEquals(0.1 + 0j, F(1, 10) / (1.0 + 0j))
        self.assertTypedEquals(F(10, 1), 1 / F(1, 10))
        self.assertTypedEquals(10.0, 1.0 / F(1, 10))
        self.assertTypedEquals(10.0 + 0j, (1.0 + 0j) / F(1, 10))

        self.assertTypedEquals(0, F(1, 10) // 1)
        self.assertTypedEquals(0.0, F(1, 10) // 1.0)
        self.assertTypedEquals(10, 1 // F(1, 10))
        self.assertTypedEquals(10**23, 10**22 // F(1, 10))
        self.assertTypedEquals(10.0, 1.0 // F(1, 10))

        self.assertTypedEquals(F(1, 10), F(1, 10) % 1)
        self.assertTypedEquals(0.1, F(1, 10) % 1.0)
        self.assertTypedEquals(F(0, 1), 1 % F(1, 10))
        self.assertTypedEquals(0.0, 1.0 % F(1, 10))

        # No need for divmod since we don't override it.

        # ** has more interesting conversion rules.
        self.assertTypedEquals(F(100, 1), F(1, 10) ** -2)
        self.assertTypedEquals(F(100, 1), F(10, 1) ** 2)
        self.assertTypedEquals(0.1, F(1, 10) ** 1.0)
        self.assertTypedEquals(0.1 + 0j, F(1, 10) ** (1.0 + 0j))
        self.assertTypedEquals(4 , 2 ** F(2, 1))
        # Will return 1j in 3.0:
        self.assertRaises(ValueError, pow, (-1), F(1, 2))
        self.assertTypedEquals(F(1, 4) , 2 ** F(-2, 1))
        self.assertTypedEquals(2.0 , 4 ** F(1, 2))
        self.assertTypedEquals(0.25, 2.0 ** F(-2, 1))
        self.assertTypedEquals(1.0 + 0j, (1.0 + 0j) ** F(1, 10))

    def testMixingWithDecimal(self):
        # Decimal refuses mixed comparisons.
        self.assertRaisesMessage(
            TypeError,
            "unsupported operand type(s) for +: 'Fraction' and 'Decimal'",
            operator.add, F(3,11), Decimal('3.1415926'))
        self.assertNotEquals(F(5, 2), Decimal('2.5'))

    def testComparisons(self):
        self.assertTrue(F(1, 2) < F(2, 3))
        self.assertFalse(F(1, 2) < F(1, 2))
        self.assertTrue(F(1, 2) <= F(2, 3))
        self.assertTrue(F(1, 2) <= F(1, 2))
        self.assertFalse(F(2, 3) <= F(1, 2))
        self.assertTrue(F(1, 2) == F(1, 2))
        self.assertFalse(F(1, 2) == F(1, 3))
        self.assertFalse(F(1, 2) != F(1, 2))
        self.assertTrue(F(1, 2) != F(1, 3))

    def testMixedLess(self):
        self.assertTrue(2 < F(5, 2))
        self.assertFalse(2 < F(4, 2))
        self.assertTrue(F(5, 2) < 3)
        self.assertFalse(F(4, 2) < 2)

        self.assertTrue(F(1, 2) < 0.6)
        self.assertFalse(F(1, 2) < 0.4)
        self.assertTrue(0.4 < F(1, 2))
        self.assertFalse(0.5 < F(1, 2))

    def testMixedLessEqual(self):
        self.assertTrue(0.5 <= F(1, 2))
        self.assertFalse(0.6 <= F(1, 2))
        self.assertTrue(F(1, 2) <= 0.5)
        self.assertFalse(F(1, 2) <= 0.4)
        self.assertTrue(2 <= F(4, 2))
        self.assertFalse(2 <= F(3, 2))
        self.assertTrue(F(4, 2) <= 2)
        self.assertFalse(F(5, 2) <= 2)

    def testBigFloatComparisons(self):
        # Because 10**23 can't be represented exactly as a float:
        self.assertFalse(F(10**23) == float(10**23))
        # The first test demonstrates why these are important.
        self.assertFalse(1e23 < float(F(math.trunc(1e23) + 1)))
        self.assertTrue(1e23 < F(math.trunc(1e23) + 1))
        self.assertFalse(1e23 <= F(math.trunc(1e23) - 1))
        self.assertTrue(1e23 > F(math.trunc(1e23) - 1))
        self.assertFalse(1e23 >= F(math.trunc(1e23) + 1))

    def testBigComplexComparisons(self):
        self.assertFalse(F(10**23) == complex(10**23))
        self.assertTrue(F(10**23) > complex(10**23))
        self.assertFalse(F(10**23) <= complex(10**23))

    def testMixedEqual(self):
        self.assertTrue(0.5 == F(1, 2))
        self.assertFalse(0.6 == F(1, 2))
        self.assertTrue(F(1, 2) == 0.5)
        self.assertFalse(F(1, 2) == 0.4)
        self.assertTrue(2 == F(4, 2))
        self.assertFalse(2 == F(3, 2))
        self.assertTrue(F(4, 2) == 2)
        self.assertFalse(F(5, 2) == 2)

    def testStringification(self):
        self.assertEquals("Fraction(7, 3)", repr(F(7, 3)))
        self.assertEquals("Fraction(6283185307, 2000000000)",
                          repr(F('3.1415926535')))
        self.assertEquals("Fraction(-1, 100000000000000000000)",
                          repr(F(1, -10**20)))
        self.assertEquals("7/3", str(F(7, 3)))
        self.assertEquals("7", str(F(7, 1)))

    def testHash(self):
        self.assertEquals(hash(2.5), hash(F(5, 2)))
        self.assertEquals(hash(10**50), hash(F(10**50)))
        self.assertNotEquals(hash(float(10**23)), hash(F(10**23)))

    def testApproximatePi(self):
        # Algorithm borrowed from
        # http://docs.python.org/lib/decimal-recipes.html
        three = F(3)
        lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
        while abs(s - lasts) > F(1, 10**9):
            lasts = s
            n, na = n+na, na+8
            d, da = d+da, da+32
            t = (t * n) / d
            s += t
        self.assertAlmostEquals(math.pi, s)

    def testApproximateCos1(self):
        # Algorithm borrowed from
        # http://docs.python.org/lib/decimal-recipes.html
        x = F(1)
        i, lasts, s, fact, num, sign = 0, 0, F(1), 1, 1, 1
        while abs(s - lasts) > F(1, 10**9):
            lasts = s
            i += 2
            fact *= i * (i-1)
            num *= x * x
            sign *= -1
            s += num / fact * sign
        self.assertAlmostEquals(math.cos(1), s)

    def test_copy_deepcopy_pickle(self):
        r = F(13, 7)
        self.assertEqual(r, loads(dumps(r)))
        self.assertEqual(id(r), id(copy(r)))
        self.assertEqual(id(r), id(deepcopy(r)))

    def test_slots(self):
        # Issue 4998
        r = F(13, 7)
        self.assertRaises(AttributeError, setattr, r, 'a', 10)
def test_main():
    """Run the Fraction and gcd test suites via the regression driver."""
    run_unittest(FractionTest, GcdTest)

if __name__ == '__main__':
    test_main()
| apache-2.0 |
pusateri/vectorformats | vectorformats/formats/spatialite.py | 2 | 2825 | '''
Created on Sep 14, 2012
@author: michel
'''
from pyspatialite import dbapi2 as db
from .format import Format
from .wkt import to_wkt
class SpatiaLite(Format):
    """Encode a list of features into a SpatiaLite database file."""

    _connection = None
    _cursor = None

    def encode(self, features, **kwargs):
        """Write *features* into the SpatiaLite DB at kwargs["tmpFile"].

        Returns the open connection, or None when there were no features.
        """
        tmp_file = kwargs["tmpFile"]
        if features:
            self._connection = db.connect(tmp_file)
            self._cursor = self._connection.cursor()
            self._cursor.execute('SELECT InitSpatialMetadata()')
            self.create_table(features[0])
            for feature in features:
                self.encode_feature(feature)
            self._connection.commit()
            self._cursor.close()
        return self._connection

    def _feature_srid(self, feature):
        """Resolve the SRID for *feature*; a datasource override wins."""
        if hasattr(self.datasource, 'srid_out') and \
                self.datasource.srid_out is not None:
            return self.datasource.srid_out
        if hasattr(feature, "geometry_attr"):
            srs = str(feature.srs)
            if 'EPSG' in srs:
                # Strip the "EPSG:" prefix to keep only the numeric code.
                srs = srs[5:]
            return srs
        return 4326

    def create_table(self, feature):
        """Create the featureserver table modelled on *feature*'s properties."""
        # NOTE(review): property names are interpolated into the DDL
        # unescaped -- they must come from a trusted source.
        columns = ["fid text"] + ["%s text" % key
                                  for key in feature.properties
                                  if key != "geometry"]
        self._cursor.execute(
            "CREATE TABLE featureserver (" + ", ".join(columns) + ")")
        srid = int(self._feature_srid(feature))
        self._cursor.execute(
            '''SELECT AddGeometryColumn('featureserver', 'geometry', %i, '%s', 2);'''
            % (srid, feature['geometry']['type'].upper()))

    def encode_feature(self, feature):
        """INSERT a single feature; geometry is passed as WKT."""
        srid = int(self._feature_srid(feature))
        geom = "GeomFromText('%s', %i)" % (to_wkt(feature.geometry), srid)

        # Fix: skip the "geometry" key here as well -- the column list in
        # create_table skips it, so including its value would misalign the
        # column/value counts.
        keys = [key for key in feature.properties if key != "geometry"]
        values = ["'%s'" % self.escapeSQL(str(feature.id).encode('utf-8'))]
        for key in keys:
            value = feature.properties[key]
            if value is None:
                values.append("null")
            else:
                values.append("'%s'" % self.escapeSQL(value.encode('utf-8')))
        values.append(geom)

        # NOTE(review): values are escaped via escapeSQL but still string
        # interpolated; parameterized queries would be safer if the driver
        # supports them.
        columns = ["fid"] + keys + ["geometry"]
        self._cursor.execute("INSERT INTO featureserver (%s) VALUES (%s);"
                             % (", ".join(columns), ", ".join(values)))
| mit |
BenHall/docker | vendor/src/github.com/ugorji/go/codec/test.py | 670 | 3808 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# pip install --user msgpack-python
# pip install --user cbor
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Return all primitive test values, the primitive list itself nested,
    and a few composite (dict) values."""
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464646464.0,
        False,
        True,
        None,
        u"someday",
        u"",
        u"bytestring",
        1328176922000002000,
        -2206187877999998000,
        270,
        -2013855847999995777,
        # -6795364578871345152,
    ]
    composites = [
        {"true": True,
         "false": False},
        {"true": "True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0,
                  {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": "1234567890"},
        {True: "true", 8: False, "false": 0},
    ]
    return primitives + [primitives] + composites
def build_test_data(destdir):
    # Serialize every golden value to <i>.msgpack.golden and <i>.cbor.golden
    # files in destdir, for a Go test to read back and compare against.
    l = get_test_data_list()
    for i in range(len(l)):
        # packer = msgpack.Packer()
        serialized = msgpack.dumps(l[i])
        f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
        f.write(serialized)
        f.close()
        serialized = cbor.dumps(l[i])
        f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
        f.write(serialized)
        f.close()
def doRpcServer(port, stopTimeSec):
    # Serve Echo123/EchoStruct over msgpack-rpc on localhost:port.
    # When stopTimeSec > 0, a timer thread stops the server after that delay.
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()
def doRpcClientToPythonSvc(port):
    # Call the Python msgpack-rpc service (doRpcServer) on localhost:port
    # and print both echo responses. NOTE: Python 2 print statements.
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
    # Call the Go-side RPC service on localhost:port; Go's net/rpc style
    # uses "Type.Method" names and a single args list/struct per call.
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
    # Dispatch on the first CLI argument to the matching helper; print
    # usage for anything unrecognized.
    if len(args) == 2 and args[0] == "testdata":
        build_test_data(args[1])
    elif len(args) == 3 and args[0] == "rpc-server":
        doRpcServer(int(args[1]), int(args[2]))
    elif len(args) == 2 and args[0] == "rpc-client-python-service":
        doRpcClientToPythonSvc(int(args[1]))
    elif len(args) == 2 and args[0] == "rpc-client-go-service":
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
doMain(sys.argv[1:])
| apache-2.0 |
franmolinaca/BitaCR | node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
    """Cache the decorated function's results keyed by its positional args."""

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        if args in self.cache:
            return self.cache[args]
        result = self.func(*args)
        self.cache[args] = result
        return result
class GypError(Exception):
    """Error class representing an error, which is to be presented
    to the user. The main entry point will catch and display this.
    """
def ExceptionAppend(e, msg):
    """Append a message to the given exception's message (first arg)."""
    if not e.args:
        e.args = (msg,)
    else:
        combined = str(e.args[0]) + ' ' + msg
        if len(e.args) == 1:
            e.args = (combined,)
        else:
            e.args = (combined,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
    """
    Given a list of qualified targets, return the qualified targets for the
    specified |target|.
    """
    return [qualified for qualified in qualified_list
            if ParseQualifiedTarget(qualified)[1] == target]
def ParseQualifiedTarget(target):
    """Split a qualified target into [build_file, target, toolset].

    Missing components come back as None. rsplit is used so a Windows
    drive-letter colon is not mistaken for the build-file separator.
    """
    build_file = None
    pieces = target.rsplit(':', 1)
    if len(pieces) == 2:
        build_file, target = pieces

    toolset = None
    pieces = target.rsplit('#', 1)
    if len(pieces) == 2:
        target, toolset = pieces

    return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
    # This function resolves a target into a canonical form:
    # - a fully defined build file, either absolute or relative to the current
    #   directory
    # - a target name
    # - a toolset
    #
    # build_file is the file relative to which 'target' is defined.
    # target is the qualified target.
    # toolset is the default toolset for that target.
    [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)

    if parsed_build_file:
        if build_file:
            # If a relative path, parsed_build_file is relative to the
            # directory containing build_file. If build_file is not in the
            # current directory, parsed_build_file is not a usable path as-is.
            # Resolve it by interpreting it as relative to build_file. If
            # parsed_build_file is absolute, it is usable as a path regardless
            # of the current directory, and os.path.join will return it as-is.
            build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
                                                       parsed_build_file))
            # Further (to handle cases like ../cwd), make it relative to cwd)
            if not os.path.isabs(build_file):
                build_file = RelativePath(build_file, '.')
        else:
            build_file = parsed_build_file

    if parsed_toolset:
        toolset = parsed_toolset

    return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
    """Extract the build file component from a fully qualified target."""
    build_file, _, _ = ParseQualifiedTarget(fully_qualified_target)
    return build_file
def GetEnvironFallback(var_list, default):
    """Look up a key in the environment, with fallback to secondary keys
    and finally falling back to a default value."""
    for name in var_list:
        value = os.environ.get(name)
        if value is not None:
            return value
    return default
def QualifiedTarget(build_file, target, toolset):
    """Build the fully qualified form /path/to/file.gyp:target#toolset.

    The '#toolset' suffix is omitted when toolset is empty or None.
    """
    qualified = '%s:%s' % (build_file, target)
    if toolset:
        qualified = '%s#%s' % (qualified, toolset)
    return qualified
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
    # Assuming both |path| and |relative_to| are relative to the current
    # directory, returns a relative path that identifies path relative to
    # relative_to.
    # If |follow_symlink_path| is true (default) and |path| is a symlink, then
    # this method returns a path to the real file represented by |path|. If it
    # is false, this method returns a path to the symlink. If |path| is not a
    # symlink, this option has no effect.

    # Convert to normalized (and therefore absolute paths).
    if follow_path_symlink:
        path = os.path.realpath(path)
    else:
        path = os.path.abspath(path)
    relative_to = os.path.realpath(relative_to)

    # On Windows, we can't create a relative path to a different drive, so just
    # use the absolute path.
    if sys.platform == 'win32':
        if (os.path.splitdrive(path)[0].lower() !=
            os.path.splitdrive(relative_to)[0].lower()):
            return path

    # Split the paths into components.
    path_split = path.split(os.path.sep)
    relative_to_split = relative_to.split(os.path.sep)

    # Determine how much of the prefix the two paths share.
    prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))

    # Put enough ".." components to back up out of relative_to to the common
    # prefix, and then append the part of path_split after the common prefix.
    relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
        path_split[prefix_len:]

    if len(relative_split) == 0:
        # The paths were the same.
        return ''

    # Turn it back into a string and we're done.
    return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
  """Given a path like foo/bar that is relative to toplevel_dir, return
  the inverse relative path back to the toplevel_dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string, unless the path contains symlinks.
  """
  if not path:
    return path
  toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
  # Walk from toplevel_dir/path back up to toplevel_dir itself.
  return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
  """Return |path| untouched when it is absolute; otherwise re-express it
  relative to |relative_to| (see RelativePath)."""
  if not os.path.isabs(path):
    return RelativePath(path, relative_to)
  return path
def UnrelativePath(path, relative_to):
  """Assuming |relative_to| is relative to the current directory and |path|
  is relative to the dirname of |relative_to|, return |path| expressed
  relative to the current directory."""
  return os.path.normpath(os.path.join(os.path.dirname(relative_to), path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
#
# Both patterns are compiled once at import time and reused by
# EncodePOSIXShellArgument below.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  The returned string is quoted and escaped as needed so that a POSIX shell
  treats it as a literal representing |argument|.  Parameter (variable)
  expansions beginning with $ are deliberately left intact so the shell may
  still expand them.
  """
  if not isinstance(argument, str):
    argument = str(argument)
  # Double-quote whenever the argument contains shell metacharacters (or is
  # empty), and backslash-escape ", \ and ` in either case.
  wrapper = '"' if _quote.search(argument) else ''
  return wrapper + _escape.sub(r'\\\1', argument) + wrapper
def EncodePOSIXShellList(list):
  """Encodes |list| suitably for consumption by POSIX shells.

  Each item is encoded with EncodePOSIXShellArgument and the results are
  joined with single spaces, matching POSIX word splitting.
  """
  return ' '.join(EncodePOSIXShellArgument(argument) for argument in list)
def DeepDependencyTargets(target_dicts, roots):
  """Returns the recursive list of target dependencies.

  Walks 'dependencies' and 'dependencies_original' transitively starting
  from |roots|; the roots themselves are excluded from the result.  Order
  of the returned list is unspecified.
  """
  visited = set()
  to_visit = set(roots)
  while to_visit:
    target = to_visit.pop()
    if target in visited:
      continue
    visited.add(target)
    # Queue this target's direct dependencies for the next iterations.
    spec = target_dicts[target]
    for key in ('dependencies', 'dependencies_original'):
      to_visit.update(spec.get(key, []))
  return list(visited - set(roots))
def BuildFileTargets(target_list, build_file):
  """From a target_list, returns the subset defined in |build_file|."""
  matches = []
  for qualified_target in target_list:
    if BuildFile(qualified_target) == build_file:
      matches.append(qualified_target)
  return matches
def AllTargets(target_list, target_dicts, build_file):
  """Returns all targets (direct and dependencies) for the specified
  build_file."""
  direct = BuildFileTargets(target_list, build_file)
  return direct + DeepDependencyTargets(target_dicts, direct)
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.

  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """
  class Writer(object):
    """Wrapper around file which only covers the target if it differs."""
    def __init__(self):
      # Pick temporary file.  It lives in the target's directory so the
      # eventual os.rename stays on one filesystem.
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
    def __getattr__(self, attrname):
      # Delegate everything else to self.tmp_file
      return getattr(self.tmp_file, attrname)
    def close(self):
      # Compare the temporary file with the target and only replace the
      # target when the contents changed, preserving its timestamp otherwise.
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:  # Python 2 except syntax; ENOENT == no old file.
          if e.errno != errno.ENOENT:
            raise
        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one? Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove. Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
  return Writer()
def EnsureDirExists(path):
  """Make sure the directory for |path| exists.

  Creates intermediate directories as needed.  An already-existing directory
  (including the EEXIST race between check and creation) is fine; any other
  failure, e.g. a permission error, is re-raised instead of being silently
  swallowed as the previous blanket 'except OSError: pass' did.
  """
  dirname = os.path.dirname(path)
  try:
    # A bare filename has no directory component; nothing to create then.
    if dirname:
      os.makedirs(dirname)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise
def GetFlavor(params):
  """Returns |params['flavor']| if it's set, the system's default flavor
  otherwise (derived from sys.platform)."""
  if 'flavor' in params:
    return params['flavor']
  # Exact platform names first, then prefix matches, then assume linux.
  exact = {
      'cygwin': 'win',
      'win32': 'win',
      'darwin': 'mac',
  }
  if sys.platform in exact:
    return exact[sys.platform]
  for prefix, flavor in (('sunos', 'solaris'),
                         ('freebsd', 'freebsd'),
                         ('openbsd', 'openbsd'),
                         ('netbsd', 'netbsd'),
                         ('aix', 'aix')):
    if sys.platform.startswith(prefix):
      return flavor
  return 'linux'
def CopyTool(flavor, out_path):
  """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
  to |out_path|.

  Flavors with no entry in the prefix map need no helper tool and are a
  no-op.  The copied script keeps its first line (the shebang) and gains a
  'Generated by gyp' marker right after it.
  """
  # aix and solaris just need flock emulation. mac and win use more complicated
  # support scripts.
  prefix = {
      'aix': 'flock',
      'solaris': 'flock',
      'mac': 'mac',
      'win': 'win'
  }.get(flavor, None)
  if not prefix:
    return
  # Slurp input file.
  source_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
  with open(source_path) as source_file:
    source = source_file.readlines()
  # Add header and write it out.
  tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
  with open(tool_path, 'w') as tool_file:
    tool_file.write(
        ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
  # Make file executable.  (0755 is Python 2 octal literal syntax.)
  os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Return the items of |seq| with duplicates removed, keeping the first
  occurrence of each.  |idfun| maps an item to the key used for duplicate
  detection (identity by default); keys must be hashable."""
  if idfun is None:
    idfun = lambda x: x
  seen = set()
  result = []
  for item in seq:
    marker = idfun(item)
    if marker not in seen:
      seen.add(marker)
      result.append(item)
  return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
  """A set that remembers insertion order, backed by a circular doubly
  linked list threaded through a dict.

  NOTE(review): collections.MutableSet is the Python 2 spelling; it moved to
  collections.abc in Python 3 and the alias was removed in 3.10 — confirm
  the target interpreter before reuse.
  """
  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end] # sentinel node for doubly linked list
    self.map = {} # key --> [key, prev, next]
    if iterable is not None:
      self |= iterable
  def __len__(self):
    return len(self.map)
  def __contains__(self, key):
    return key in self.map
  def add(self, key):
    # Append |key| at the tail of the linked list if not already present.
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]
  def discard(self, key):
    # Unlink |key|'s node; a no-op when |key| is absent.
    if key in self.map:
      key, prev_item, next_item = self.map.pop(key)
      prev_item[2] = next_item
      next_item[1] = prev_item
  def __iter__(self):
    # Walk forward from the sentinel, yielding in insertion order.
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]
  def __reversed__(self):
    # Walk backward from the sentinel.
    end = self.end
    curr = end[1]
    while curr is not end:
      yield curr[0]
      curr = curr[1]
  # The second argument is an addition that causes a pylint warning.
  def pop(self, last=True): # pylint: disable=W0221
    # Remove and return the newest (last=True) or oldest element.
    if not self:
      raise KeyError('set is empty')
    key = self.end[1][0] if last else self.end[2][0]
    self.discard(key)
    return key
  def __repr__(self):
    if not self:
      return '%s()' % (self.__class__.__name__,)
    return '%s(%r)' % (self.__class__.__name__, list(self))
  def __eq__(self, other):
    # Order matters only when comparing against another OrderedSet.
    if isinstance(other, OrderedSet):
      return len(self) == len(other) and list(self) == list(other)
    return set(self) == set(other)
  # Extensions to the recipe.
  def update(self, iterable):
    for i in iterable:
      if i not in self:
        self.add(i)
class CycleError(Exception):
  """An exception raised when an unexpected cycle is detected.

  |nodes| holds the collection of nodes involved in the cycle.
  """
  def __init__(self, nodes):
    self.nodes = nodes
  def __str__(self):
    return 'CycleError: cycle involving: %s' % (self.nodes,)
def TopologicallySorted(graph, get_edges):
  r"""Topologically sort based on a user provided edge definition.

  Args:
    graph: A list of node names.
    get_edges: A function mapping from node name to a hashable collection
               of node names which this node has outgoing edges to.
  Returns:
    A list containing all of the node in graph in topological order.
    It is assumed that calling get_edges once for each node and caching is
    cheaper than repeatedly calling get_edges.
  Raises:
    CycleError in the event of a cycle.
  Example:
    graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
    def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', 'b']
  """
  # Cache edge lookups so each node's edges are computed at most once.
  get_edges = memoize(get_edges)
  visited = set()
  visiting = set()
  ordered_nodes = []
  def Visit(node):
    # Depth-first traversal; |visiting| tracks the current DFS path.
    if node in visiting:
      # Re-entering a node already on the path means there is a cycle.
      raise CycleError(visiting)
    if node in visited:
      return
    visited.add(node)
    visiting.add(node)
    for neighbor in get_edges(node):
      Visit(neighbor)
    visiting.remove(node)
    # Prepending produces reverse post-order, i.e. topological order.
    ordered_nodes.insert(0, node)
  for node in sorted(graph):
    Visit(node)
  return ordered_nodes
def CrossCompileRequested():
  """Return a truthy value if the environment requests cross-compilation.

  Mirrors the original or-chain exactly: the first non-empty variable among
  the GYP/toolchain overrides is returned; otherwise the (falsy) result of
  the final lookup.
  """
  # TODO: figure out how to not build extra host objects in the
  # non-cross-compile case when this is enabled, and enable unconditionally.
  value = None
  for variable in ('GYP_CROSSCOMPILE',
                   'AR_host', 'CC_host', 'CXX_host',
                   'AR_target', 'CC_target', 'CXX_target'):
    value = os.environ.get(variable)
    if value:
      return value
  return value
| mit |
rdm-dev/uboot-curie | tools/buildman/cmdline.py | 3 | 5133 | #
# Copyright (c) 2014 Google, Inc
#
# SPDX-License-Identifier: GPL-2.0+
#
from optparse import OptionParser
def ParseArgs():
    """Parse command line arguments from sys.argv[]

    Returns:
        tuple containing:
            options: command line options
            args: command line arguments
    """
    parser = OptionParser()
    # Options are declared in (roughly) alphabetical flag order.
    parser.add_option('-b', '--branch', type='string',
          help='Branch name to build')
    parser.add_option('-B', '--bloat', dest='show_bloat',
          action='store_true', default=False,
          help='Show changes in function code size for each board')
    parser.add_option('-c', '--count', dest='count', type='int',
          default=-1, help='Run build on the top n commits')
    parser.add_option('-C', '--force-reconfig', dest='force_reconfig',
          action='store_true', default=False,
          help='Reconfigure for every commit (disable incremental build)')
    parser.add_option('-d', '--detail', dest='show_detail',
          action='store_true', default=False,
          help='Show detailed information for each board in summary')
    parser.add_option('-e', '--show_errors', action='store_true',
          default=False, help='Show errors and warnings')
    parser.add_option('-f', '--force-build', dest='force_build',
          action='store_true', default=False,
          help='Force build of boards even if already built')
    parser.add_option('-F', '--force-build-failures', dest='force_build_failures',
          action='store_true', default=False,
          help='Force build of previously-failed build')
    parser.add_option('--fetch-arch', type='string',
          help="Fetch a toolchain for architecture FETCH_ARCH ('list' to list)."
              ' You can also fetch several toolchains separate by comma, or'
              " 'all' to download all")
    parser.add_option('-g', '--git', type='string',
          help='Git repo containing branch to build', default='.')
    parser.add_option('-G', '--config-file', type='string',
          help='Path to buildman config file', default='')
    parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
          default=False, help='Display the README file')
    parser.add_option('-i', '--in-tree', dest='in_tree',
          action='store_true', default=False,
          help='Build in the source tree instead of a separate directory')
    parser.add_option('-j', '--jobs', dest='jobs', type='int',
          default=None, help='Number of jobs to run at once (passed to make)')
    parser.add_option('-k', '--keep-outputs', action='store_true',
          default=False, help='Keep all build output files (e.g. binaries)')
    parser.add_option('-l', '--list-error-boards', action='store_true',
          default=False, help='Show a list of boards next to each error/warning')
    parser.add_option('--list-tool-chains', action='store_true', default=False,
          help='List available tool chains')
    parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
          default=False, help="Do a dry run (describe actions, but do nothing)")
    parser.add_option('-N', '--no-subdirs', action='store_true', dest='no_subdirs',
          default=False, help="Don't create subdirectories when building current source for a single board")
    parser.add_option('-o', '--output-dir', type='string',
          dest='output_dir', default='..',
          help='Directory where all builds happen and buildman has its workspace (default is ../)')
    parser.add_option('-Q', '--quick', action='store_true',
          default=False, help='Do a rough build, with limited warning resolution')
    parser.add_option('-p', '--full-path', action='store_true',
          default=False, help="Use full toolchain path in CROSS_COMPILE")
    parser.add_option('-s', '--summary', action='store_true',
          default=False, help='Show a build summary')
    parser.add_option('-S', '--show-sizes', action='store_true',
          default=False, help='Show image size variation in summary')
    parser.add_option('--step', type='int',
          default=1, help='Only build every n commits (0=just first and last)')
    parser.add_option('-t', '--test', action='store_true', dest='test',
          default=False, help='run tests')
    parser.add_option('-T', '--threads', type='int',
          default=None, help='Number of builder threads to use')
    parser.add_option('-u', '--show_unknown', action='store_true',
          default=False, help='Show boards with unknown build result')
    parser.add_option('-v', '--verbose', action='store_true',
          default=False, help='Show build results while the build progresses')
    parser.add_option('-V', '--verbose-build', action='store_true',
          default=False, help='Run make with V=1, showing all output')
    parser.add_option('-x', '--exclude', dest='exclude',
          type='string', action='append',
          help='Specify a list of boards to exclude, separated by comma')
    # Extend the auto-generated usage text with a short description.
    parser.usage += """
Build U-Boot for all commits in a branch. Use -n to do a dry run"""
    return parser.parse_args()
| gpl-2.0 |
snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/bottle/test/test_formsdict.py | 49 | 1243 | # -*- coding: utf-8 -*-
# '瓶' means "Bottle"
import unittest
from bottle import FormsDict, touni, tob
class TestFormsDict(unittest.TestCase):
    def test_attr_access(self):
        """ FormsDict.attribute returns string values as unicode. """
        d = FormsDict(py2=tob('瓶'), py3=tob('瓶').decode('latin1'))
        self.assertEqual(touni('瓶'), d.py2)
        self.assertEqual(touni('瓶'), d.py3)
    def test_attr_missing(self):
        """ FormsDict.attribute returns u'' on missing keys. """
        d = FormsDict()
        self.assertEqual(touni(''), d.missing)
    def test_attr_unicode_error(self):
        """ FormsDict.attribute returns u'' on UnicodeError. """
        d = FormsDict(latin=touni('öäüß').encode('latin1'))
        # Default input encoding cannot decode latin-1 bytes -> empty string.
        self.assertEqual(touni(''), d.latin)
        # After switching the input encoding, decoding succeeds.
        d.input_encoding = 'latin1'
        self.assertEqual(touni('öäüß'), d.latin)
    def test_decode_method(self):
        d = FormsDict(py2=tob('瓶'), py3=tob('瓶').decode('latin1'))
        d = d.decode()
        # A decoded copy needs no further recoding and holds text keys/values.
        self.assertFalse(d.recode_unicode)
        self.assertTrue(hasattr(list(d.keys())[0], 'encode'))
        self.assertTrue(hasattr(list(d.values())[0], 'encode'))
# Allow running this test file directly; test runners import it instead.
if __name__ == '__main__': #pragma: no cover
    unittest.main()
| gpl-3.0 |
labkaxita/lakaxita | bootstrap.py | 40 | 10525 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os, shutil, sys, tempfile, urllib, urllib2, subprocess
from optparse import OptionParser
# os.spawn* on Windows does not quote arguments itself, so arguments that
# contain a space must be wrapped in double quotes; elsewhere they pass
# through unchanged.
if sys.platform == 'win32':
    def quote(c):
        if ' ' in c:
            return '"%s"' % c # work around spawn lamosity on windows
        else:
            return c
else:
    quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
# Probe a 'python -S' subprocess: if the stdlib ConfigParser module cannot be
# imported with site-packages disabled, this interpreter's -S handling is
# considered broken and -S is avoided below.
stdout, stderr = subprocess.Popen(
    [sys.executable, '-Sc',
     'try:\n'
     ' import ConfigParser\n'
     'except ImportError:\n'
     ' print 1\n'
     'else:\n'
     ' print 0\n'],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded. This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient. However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
    # We will restart with python -S.
    args = sys.argv[:]
    args[0:0] = [sys.executable, '-S']
    # Python 2 map() returns a list, which os.execv requires.
    args = map(quote, args)
    os.execv(sys.executable, args)
# Now we are running with -S. We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site # imported because of its side effects
sys.path[:] = clean_path
for k, v in sys.modules.items():
    if k in ('setuptools', 'pkg_resources') or (
        hasattr(v, '__path__') and
        len(v.__path__) == 1 and
        not os.path.exists(os.path.join(v.__path__[0], '__init__.py'))):
        # This is a namespace package. Remove it.
        sys.modules.pop(k)
is_jython = sys.platform.startswith('java')
# Upstream locations of the two supported setup scripts.
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
    """optparse callback: store *value* on parser.values as a URL.

    Bare filesystem paths become file:// URLs, a --download-base value
    always gains a trailing slash, and an empty value is stored as None.
    """
    if not value:
        normalized = None
    else:
        normalized = value
        if '://' not in normalized:
            # No scheme present, so treat it as a local path.
            local = os.path.abspath(os.path.expanduser(normalized))
            normalized = 'file://%s' % (urllib.pathname2url(local),)
        if opt_str == '--download-base' and not normalized.endswith('/'):
            # A download base must end with '/' for URL joining to work.
            normalized += '/'
    dest = opt_str[2:].replace('-', '_')
    setattr(parser.values, dest, normalized)
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
                  help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
                  action="store_true", dest="use_distribute", default=False,
                  help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
                  callback=normalize_to_url, nargs=1, type="string",
                  help=("Specify a URL or file location for the setup file. "
                        "If you use Setuptools, this will default to " +
                        setuptools_source + "; if you use Distribute, this "
                        "will default to " + distribute_source + "."))
parser.add_option("--download-base", action="callback", dest="download_base",
                  callback=normalize_to_url, nargs=1, type="string",
                  help=("Specify a URL or directory for downloading "
                        "zc.buildout and either Setuptools or Distribute. "
                        "Defaults to PyPI."))
parser.add_option("--eggs",
                  help=("Specify a directory for storing eggs. Defaults to "
                        "a temporary directory that is deleted when the "
                        "bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
options, args = parser.parse_args()
# Eggs live either where the user asked or in a throwaway temp directory
# (removed at the end of the script when --eggs was not given).
if options.eggs:
    eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
    eggs_dir = tempfile.mkdtemp()
if options.setup_source is None:
    if options.use_distribute:
        options.setup_source = distribute_source
    else:
        options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
    # Forward the flag to buildout itself via a command-line assignment.
    args.insert(0, 'buildout:accept-buildout-test-releases=true')
# Ensure a usable setuptools/distribute is importable; otherwise download and
# run the chosen setup script (ez_setup.py / distribute_setup.py) into
# eggs_dir.  (Python 2 only: note the 'exec ... in' statement and reload().)
try:
    import pkg_resources
    import setuptools # A flag. Sometimes pkg_resources is installed alone.
    if not hasattr(pkg_resources, '_distribute'):
        # Setuptools found but Distribute requested semantics: re-bootstrap.
        raise ImportError
except ImportError:
    ez_code = urllib2.urlopen(
        options.setup_source).read().replace('\r\n', '\n')
    ez = {}
    exec ez_code in ez
    setup_args = dict(to_dir=eggs_dir, download_delay=0)
    if options.download_base:
        setup_args['download_base'] = options.download_base
    if options.use_distribute:
        setup_args['no_fake'] = True
        if sys.version_info[:2] == (2, 4):
            setup_args['version'] = '0.6.32'
    ez['use_setuptools'](**setup_args)
    if 'pkg_resources' in sys.modules:
        reload(sys.modules['pkg_resources'])
    import pkg_resources
    # This does not (always?) update the default working set. We will
    # do it.
    for path in sys.path:
        if path not in pkg_resources.working_set.entries:
            pkg_resources.working_set.add_entry(path)
# Assemble the easy_install command line used below to install zc.buildout
# into eggs_dir (-m: don't add to easy-install.pth, -q: quiet, -N: no deps,
# -x: no scripts, -d: destination directory).
cmd = [quote(sys.executable),
       '-c',
       quote('from setuptools.command.easy_install import main; main()'),
       '-mqNxd',
       quote(eggs_dir)]
if not has_broken_dash_S:
    cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
    find_links = os.environ.get('bootstrap-testing-find-links')
if not find_links and options.accept_buildout_test_releases:
    find_links = 'http://downloads.buildout.org/'
if find_links:
    cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
    setup_requirement = 'distribute'
else:
    setup_requirement = 'setuptools'
ws = pkg_resources.working_set
# Run the child with setuptools/distribute on PYTHONPATH so easy_install
# itself is importable there.
setup_requirement_path = ws.find(
    pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
    os.environ,
    PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'
    def _final_version(parsed_version):
        # A version is "final" when no pre-release tag ('*a', '*b', ...)
        # other than the '*final' markers appears in its parsed form.
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setup_requirement_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            # Stay on the 1.x series; 2.x needs a newer bootstrap.
            if distv >= pkg_resources.parse_version('2dev'):
                continue
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement += '=='+version
else:
    requirement += '<2dev'
cmd.append(requirement)
# Install zc.buildout with the command assembled above, then invoke
# buildout's own main() with the collected arguments.
if is_jython:
    import subprocess
    exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
    exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
    sys.stdout.flush()
    sys.stderr.flush()
    print ("An error occurred when trying to install zc.buildout. "
           "Look above this message for any errors that "
           "were output by easy_install.")
    sys.exit(exitcode)
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
# If there isn't already a command in the args, add bootstrap
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
    shutil.rmtree(eggs_dir)
| agpl-3.0 |
justanr/flask | flask/json.py | 140 | 8458 | # -*- coding: utf-8 -*-
"""
flask.jsonimpl
~~~~~~~~~~~~~~
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import io
import uuid
from datetime import date
from .globals import current_app, request
from ._compat import text_type, PY2
from werkzeug.http import http_date
from jinja2 import Markup
# Use the same json implementation as itsdangerous on which we
# depend anyways.
try:
    from itsdangerous import simplejson as _json
except ImportError:
    from itsdangerous import json as _json
# Figure out if simplejson escapes slashes. This behavior was changed
# from one version to another without reason.
# NOTE(review): _slash_escape is presumably consumed by the htmlsafe_*
# helpers further down — confirm against the full module.
_slash_escape = '\\/' not in _json.dumps('/')
__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
           'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
           'jsonify']
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp
def _wrap_writer_for_text(fp, encoding):
try:
fp.write('')
except TypeError:
fp = io.TextIOWrapper(fp, encoding)
return fp
class JSONEncoder(_json.JSONEncoder):
    """The default Flask JSON encoder.  It extends the default simplejson
    encoder with support for additional types: ``date``/``datetime`` objects
    are serialized as RFC 822 datetime strings (the HTTP date format),
    ``UUID`` objects as plain strings, and objects providing ``__html__``
    (such as ``Markup``) as their HTML text.  In order to support more data
    types override the :meth:`default` method.
    """

    def default(self, o):
        """Implement this method in a subclass such that it returns a
        serializable object for ``o``, or calls the base implementation (to
        raise a :exc:`TypeError`).

        For example, to support arbitrary iterators, you could implement
        default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        # datetime subclasses date, so this branch also covers datetimes.
        if isinstance(o, date):
            return http_date(o.timetuple())
        if isinstance(o, uuid.UUID):
            return str(o)
        # Markup (and anything else HTML-representable) serializes as text.
        if hasattr(o, '__html__'):
            return text_type(o.__html__())
        # Defer to the base class, which raises TypeError for unknown types.
        return _json.JSONEncoder.default(self, o)
class JSONDecoder(_json.JSONDecoder):
    """The default JSON decoder.  It does not change any behavior of the
    default simplejson decoder; consult the :mod:`json` documentation for
    details.  This decoder is used both by the load functions of this module
    and by :attr:`~flask.Request`.
    """
def _dump_arg_defaults(kwargs):
    """Fill *kwargs* in place with application-aware defaults for the
    dump helpers (encoder class, ascii mode, key sorting)."""
    if not current_app:
        # No application context: fall back to module-level defaults.
        kwargs.setdefault('sort_keys', True)
        kwargs.setdefault('cls', JSONEncoder)
        return
    kwargs.setdefault('cls', current_app.json_encoder)
    if not current_app.config['JSON_AS_ASCII']:
        kwargs.setdefault('ensure_ascii', False)
    kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
def _load_arg_defaults(kwargs):
    """Fill *kwargs* in place with default arguments for load functions."""
    decoder = current_app.json_decoder if current_app else JSONDecoder
    kwargs.setdefault('cls', decoder)
def dumps(obj, **kwargs):
    """Serialize *obj* to a JSON formatted ``str``.

    Uses the application's configured encoder
    (:attr:`~flask.Flask.json_encoder`) when there is an application on
    the stack.  By default the result can be a ``unicode`` string or an
    ascii-only bytestring (which coerces into unicode automatically);
    that behavior is controlled by the ``JSON_AS_ASCII`` configuration
    variable and can be overridden by the simplejson ``ensure_ascii``
    parameter.
    """
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    serialized = _json.dumps(obj, **kwargs)
    # Only encode when an explicit encoding was requested and the result
    # is still a text string.
    if encoding is None or not isinstance(serialized, text_type):
        return serialized
    return serialized.encode(encoding)
def dump(obj, fp, **kwargs):
    """Serialize *obj* as JSON and write it to the file object *fp*.

    Like :func:`dumps`, but streaming into *fp*; binary file objects are
    wrapped for text output when an ``encoding`` is given.
    """
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    target = fp if encoding is None else _wrap_writer_for_text(fp, encoding)
    _json.dump(obj, target, **kwargs)
def loads(s, **kwargs):
    """Deserialize a JSON object from the string (or bytes) *s*.

    Uses the application's configured decoder
    (:attr:`~flask.Flask.json_decoder`) when there is an application on
    the stack.  Byte input is decoded first, defaulting to UTF-8.
    """
    _load_arg_defaults(kwargs)
    if isinstance(s, bytes):
        encoding = kwargs.pop('encoding', None) or 'utf-8'
        s = s.decode(encoding)
    return _json.loads(s, **kwargs)
def load(fp, **kwargs):
    """Like :func:`loads` but reads the JSON document from the file *fp*.

    On Python 3 a binary file object is wrapped for text reading,
    defaulting to UTF-8.
    """
    _load_arg_defaults(kwargs)
    if not PY2:
        encoding = kwargs.pop('encoding', None) or 'utf-8'
        fp = _wrap_reader_for_text(fp, encoding)
    return _json.load(fp, **kwargs)
def htmlsafe_dumps(obj, **kwargs):
    """Serialize like :func:`dumps`, but safe for use in ``<script>`` tags.

    Available in templates through the ``|tojson`` filter (which also
    marks the result as safe).  The characters ``<``, ``>``, ``&`` and
    ``'`` are escaped in strings, which makes the output safe to embed
    anywhere in HTML -- with the notable exception of *double quoted*
    attributes; single quote your attributes or HTML-escape in addition.

    .. versionchanged:: 0.10
        The return value is now always safe for HTML usage, even outside
        of script tags or in XHTML, except inside double quoted
        attributes (see above).  Use ``|tojson|forceescape`` as an
        alternative.
    """
    rv = dumps(obj, **kwargs)
    # Escape in the same order the characters are documented above.
    for char, escape in ((u'<', u'\\u003c'), (u'>', u'\\u003e'),
                         (u'&', u'\\u0026'), (u"'", u'\\u0027')):
        rv = rv.replace(char, escape)
    if not _slash_escape:
        rv = rv.replace('\\/', '/')
    return rv
def htmlsafe_dump(obj, fp, **kwargs):
    """Serialize *obj* with :func:`htmlsafe_dumps` and write it to *fp*."""
    markup = htmlsafe_dumps(obj, **kwargs)
    fp.write(unicode(markup))
def jsonify(*args, **kwargs):
    """Create a :class:`~flask.Response` carrying the JSON representation
    of the given arguments with an :mimetype:`application/json` mimetype.

    The arguments are the same as for the :class:`dict` constructor.

    Example usage::

        from flask import jsonify

        @app.route('/_get_current_user')
        def get_current_user():
            return jsonify(username=g.user.username,
                           email=g.user.email,
                           id=g.user.id)

    This sends a JSON response like::

        {
            "username": "admin",
            "email": "admin@localhost",
            "id": 42
        }

    For security reasons only objects are supported toplevel (see
    :ref:`json-security`).  The response is pretty printed when the
    request was not made with ``X-Requested-With: XMLHttpRequest`` and
    the ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is not false;
    compressed formatting means no indents and no spaces after
    separators.

    .. versionadded:: 0.2
    """
    pretty = (current_app.config['JSONIFY_PRETTYPRINT_REGULAR']
              and not request.is_xhr)
    if pretty:
        indent, separators = 2, (', ', ': ')
    else:
        indent, separators = None, (',', ':')
    # A trailing '\n' is added to the response body
    # (see https://github.com/mitsuhiko/flask/pull/1262).
    body = dumps(dict(*args, **kwargs), indent=indent, separators=separators)
    return current_app.response_class((body, '\n'),
                                      mimetype='application/json')
def tojson_filter(obj, **kwargs):
    # Jinja ``|tojson`` filter: HTML-safe JSON wrapped in Markup so the
    # template engine does not escape it a second time.
    return Markup(htmlsafe_dumps(obj, **kwargs))
| bsd-3-clause |
iShoto/testpy | codes/20200104_metric_learning_mnist/src/train_mnist_original_center.py | 1 | 5545 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd.function import Function
import torchvision
import os
import matplotlib.pyplot as plt
import argparse
from tqdm import trange
import numpy as np
from sklearn.metrics import classification_report
from losses import CenterLoss
from mnist_net import Net
import mnist_loader
# cf. https://cpp-learning.com/center-loss/
def main():
	"""Train a small MNIST network with softmax + center loss.

	Each epoch trains the model, evaluates it on the test split,
	visualizes the learned 2-D features and checkpoints the weights.
	"""
	args = parse_args()

	# Dataset
	train_loader, test_loader, classes = mnist_loader.load_dataset(args.dataset_dir, img_show=True)

	# Device
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	# Model
	model = Net().to(device)
	print(model)

	# Losses: CrossEntropyLoss = log_softmax + NLLLoss; the center loss
	# pulls same-class features together in the 2-D embedding space.
	nllloss = nn.NLLLoss().to(device)
	loss_weight = 1
	centerloss = CenterLoss(10, 2).to(device)

	# Optimizers: one for the network, one for the center-loss centers.
	dnn_optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)
	scheduler = lr_scheduler.StepLR(dnn_optimizer, 20, gamma=0.8)
	center_optimizer = optim.SGD(centerloss.parameters(), lr=0.5)

	print('Start training...')
	for epoch in range(1, 101):
		# Train and test the model.
		train_acc, train_loss, feat, labels = train(device, train_loader, model, nllloss, loss_weight, centerloss, dnn_optimizer, center_optimizer)
		test_acc, test_loss = test(device, test_loader, model, nllloss, loss_weight, centerloss)
		stdout_temp = 'Epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}'
		print(stdout_temp.format(epoch, train_acc, train_loss, test_acc, test_loss))

		# Step the LR scheduler *after* the optimizer updates.  The
		# original code stepped it at the top of the epoch, which since
		# PyTorch 1.1 skips the very first learning-rate value.
		scheduler.step()

		# Visualize features of each class.
		vis_img_path = args.vis_img_path_temp.format(str(epoch).zfill(3))
		visualize(feat.data.cpu().numpy(), labels.data.cpu().numpy(), epoch, vis_img_path)

		# Save a trained model.
		model_path = args.model_path_temp.format(str(epoch).zfill(3))
		torch.save(model.state_dict(), model_path)
def train(device, train_loader, model, nllloss, loss_weight, centerloss, dnn_optimizer, center_optimizer):
	"""Run one training epoch.

	Both the network optimizer and the center-loss optimizer are zeroed,
	back-propagated through, and stepped for every batch.

	Returns:
		(train_acc, train_loss, feat, labels) where ``feat`` and
		``labels`` are the concatenated 2-D embeddings and targets of
		the whole epoch, used for visualization.
	"""
	running_loss = 0.0
	pred_list = []
	label_list = []
	ip1_loader = []
	idx_loader = []
	model.train()
	for i,(imgs, labels) in enumerate(train_loader):
		# Set batch data.
		imgs, labels = imgs.to(device), labels.to(device)
		# Predict labels.
		ip1, pred = model(imgs)
		# Calculate loss: classification term plus weighted center loss.
		loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
		# Initialize gradients of both optimizers.
		dnn_optimizer.zero_grad()
		center_optimizer.zero_grad()
		# Calculate gradient.
		loss.backward()
		# Update parameters.
		dnn_optimizer.step()
		center_optimizer.step()
		# For metric calculation.
		running_loss += loss.item()
		pred_list += [int(p.argmax()) for p in pred]
		label_list += [int(l) for l in labels]
		# For visualization.
		ip1_loader.append(ip1)
		idx_loader.append((labels))
	# Calculate training accuracy (weighted F1) and mean loss.
	result = classification_report(pred_list, label_list, output_dict=True)
	train_acc = round(result['weighted avg']['f1-score'], 6)
	train_loss = round(running_loss / len(train_loader.dataset), 6)
	# Concatenate features and labels.
	feat = torch.cat(ip1_loader, 0)
	labels = torch.cat(idx_loader, 0)
	return train_acc, train_loss, feat, labels
def test(device, test_loader, model, nllloss, loss_weight, centerloss):
	"""Evaluate the model on the test split.

	Returns:
		(test_acc, test_loss): weighted F1 score and mean loss.
	"""
	model = model.eval()
	# Prediction
	running_loss = 0.0
	pred_list = []
	label_list = []
	for i,(imgs, labels) in enumerate(test_loader):
		with torch.no_grad():
			# Set batch data.
			imgs, labels = imgs.to(device), labels.to(device)
			# Predict labels.
			ip1, pred = model(imgs)
			# Calculate loss (same objective as in training).
			loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
			# Append predictions and labels.
			running_loss += loss.item()
			pred_list += [int(p.argmax()) for p in pred]
			label_list += [int(l) for l in labels]
	# Calculate accuracy.
	result = classification_report(pred_list, label_list, output_dict=True)
	test_acc = round(result['weighted avg']['f1-score'], 6)
	test_loss = round(running_loss / len(test_loader.dataset), 6)
	return test_acc, test_loss
def visualize(feat, labels, epoch, vis_img_path):
	"""Scatter-plot the 2-D features of each digit class and save as PNG."""
	colors = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
	          '#ff00ff', '#990000', '#999900', '#009900', '#009999']
	plt.figure()
	for digit, color in enumerate(colors):
		mask = labels == digit
		plt.plot(feat[mask, 0], feat[mask, 1], '.', color=color)
	plt.legend([str(d) for d in range(10)], loc='best')
	plt.xlim(left=-8, right=8)
	plt.ylim(bottom=-8, top=8)
	plt.text(-7.8, 7.3, "epoch=%d" % epoch)
	plt.savefig(vis_img_path)
	plt.clf()
def parse_args():
	"""Parse command line options and ensure the output directories exist."""
	parser = argparse.ArgumentParser(description="parser for focus one")
	parser.add_argument("--dataset_dir", type=str, default='../inputs/')
	parser.add_argument("--model_dir", type=str, default='../outputs/models/checkpoints/')
	parser.add_argument("--model_path_temp", type=str, default='../outputs/models/checkpoints/mnist_original_softmax_center_epoch_{}.pth')
	parser.add_argument("--vis_img_dir", type=str, default='../outputs/visual/')
	parser.add_argument("--vis_img_path_temp", type=str, default='../outputs/visual/epoch_{}.png')
	args = parser.parse_args()
	for directory in (args.dataset_dir, args.model_dir, args.vis_img_dir):
		os.makedirs(directory, exist_ok=True)
	return args
# Entry point when executed as a script.
if __name__ == "__main__":
	main()
| mit |
technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/jedi/evaluate/helpers.py | 5 | 6531 | import copy
import sys
import re
import os
from itertools import chain
from contextlib import contextmanager
from parso.python import tree
from jedi._compatibility import unicode
from jedi.parser_utils import get_parent_scope
from jedi.evaluate.compiled import CompiledObject
def is_stdlib_path(path):
    """Return True if *path* looks like a Python standard library path.

    Standard library paths look like ``/usr/lib/python3.5/...``; anything
    under ``dist-packages``/``site-packages`` is a third-party install.

    TODO The implementation below is probably incorrect and not complete.
    """
    if 'dist-packages' in path or 'site-packages' in path:
        return False

    base_path = os.path.join(sys.prefix, 'lib', 'python')
    # Raw string with an escaped dot: the original pattern '\d.\d' let
    # the bare dot match any character (e.g. 'python3x5' matched) and
    # relied on a deprecated escape sequence in a non-raw string.
    return bool(re.match(re.escape(base_path) + r'\d\.\d', path))
def deep_ast_copy(obj):
    """
    Much, much faster than copy.deepcopy, but just for parser tree nodes.
    """
    # Shallow-copy the node itself, then rebuild its children list so
    # that every copied child points back at the copied parent.
    duplicate = copy.copy(obj)
    copied_children = []
    for child in obj.children:
        if isinstance(child, tree.Leaf):
            child_copy = copy.copy(child)
        else:
            child_copy = deep_ast_copy(child)
        child_copy.parent = duplicate
        copied_children.append(child_copy)
    duplicate.children = copied_children
    return duplicate
def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
    """
    Creates a "call" node that consists of all ``trailer`` and ``power``
    objects.  E.g. if you call it with ``append``::

        list([]).append(3) or None

    You would get a node with the content ``list([]).append`` back.
    This generates a copy of the original ast node.
    If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.
    We use this function for two purposes. Given an expression ``bar.foo``,
    we may want to
    - infer the type of ``foo`` to offer completions after foo
    - infer the type of ``bar`` to be able to jump to the definition of foo
    The option ``cut_own_trailer`` must be set to true for the second purpose.
    """
    trailer = leaf.parent
    # The leaf may not be the last or first child, because there exist three
    # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
    # we should not match anything more than x.
    if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
        if trailer.type == 'atom':
            return context.eval_node(trailer)
        return context.eval_node(leaf)

    power = trailer.parent
    index = power.children.index(trailer)
    # Include this leaf's own trailer unless the caller asked to cut it.
    if cut_own_trailer:
        cut = index
    else:
        cut = index + 1

    if power.type == 'error_node':
        # Inside an error node, scan backwards to find the base node that
        # the chain of trailers hangs off.
        start = index
        while True:
            start -= 1
            base = power.children[start]
            if base.type != 'trailer':
                break
        trailers = power.children[start + 1: index + 1]
    else:
        base = power.children[0]
        trailers = power.children[1:cut]

    if base == 'await':
        # Skip the `await` keyword; evaluate what is being awaited.
        base = trailers[0]
        trailers = trailers[1:]

    values = context.eval_node(base)
    from jedi.evaluate.syntax_tree import eval_trailer
    for trailer in trailers:
        values = eval_trailer(context, values, trailer)
    return values
def call_of_leaf(leaf):
    """
    Creates a "call" node that consists of all ``trailer`` and ``power``
    objects.  E.g. if you call it with ``append``::

        list([]).append(3) or None

    You would get a node with the content ``list([]).append`` back.
    This generates a copy of the original ast node.
    If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.
    """
    # TODO this is the old version of this call. Try to remove it.
    trailer = leaf.parent
    # The leaf may not be the last or first child, because there exist three
    # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
    # we should not match anything more than x.
    if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
        if trailer.type == 'atom':
            return trailer
        return leaf

    power = trailer.parent
    index = power.children.index(trailer)

    # Copy the power node and drop everything after this leaf's trailer.
    new_power = copy.copy(power)
    new_power.children = list(new_power.children)
    new_power.children[index + 1:] = []

    if power.type == 'error_node':
        # Inside an error node, scan backwards for the base of the chain
        # and re-wrap the relevant slice in a synthetic 'power' node.
        start = index
        while True:
            start -= 1
            if power.children[start].type != 'trailer':
                break
        transformed = tree.Node('power', power.children[start:])
        transformed.parent = power.parent
        return transformed

    return power
def get_names_of_node(node):
    """Recursively collect every ``name`` leaf at or below *node*."""
    try:
        children = node.children
    except AttributeError:
        # A leaf: either it is a name itself or it contributes nothing.
        return [node] if node.type == 'name' else []
    result = []
    for child in children:
        result.extend(get_names_of_node(child))
    return result
def get_module_names(module, all_scopes):
    """Return the name leaves used in *module*.

    With ``all_scopes`` every usage is returned; otherwise only names
    whose parent scope is the module itself are kept.
    """
    names = chain.from_iterable(module.get_used_names().values())
    if all_scopes:
        return names
    # Filter out names that don't have the module as their parent scope.
    # ``None`` also appears as a parent, because nodes directly in the
    # module node have the module (not a suite) as their parent.
    return [n for n in names if get_parent_scope(n).parent in (module, None)]
@contextmanager
def predefine_names(context, flow_scope, dct):
    """Temporarily register *dct* as the predefined names of *flow_scope*.

    The entry is removed again when the ``with`` block exits, even if an
    exception is raised inside it.
    """
    context.predefined_names[flow_scope] = dct
    try:
        yield
    finally:
        del context.predefined_names[flow_scope]
def is_compiled(context):
    # True for contexts backed by a compiled (not source-parsed) object.
    return isinstance(context, CompiledObject)
def is_string(context):
    """Return True if *context* wraps a compiled string value."""
    # On a Python 2 environment both ``unicode`` and ``bytes`` count as
    # strings; otherwise only ``unicode`` (str in the compat layer).
    if context.evaluator.environment.version_info.major == 2:
        str_classes = (unicode, bytes)
    else:
        str_classes = (unicode,)
    return is_compiled(context) and isinstance(context.get_safe_value(default=None), str_classes)
def is_literal(context):
    # A literal is a compiled number or string value.
    return is_number(context) or is_string(context)
def _get_safe_value_or_none(context, accept):
    """Return the wrapped value if *context* is compiled and the value is
    an instance of *accept*; otherwise return None."""
    if not is_compiled(context):
        return None
    value = context.get_safe_value(default=None)
    return value if isinstance(value, accept) else None
def get_int_or_none(context):
    # The wrapped int value, or None if not a compiled int.
    return _get_safe_value_or_none(context, int)
def is_number(context):
    # True if the context wraps a compiled int or float value.
    return _get_safe_value_or_none(context, (int, float)) is not None
| gpl-3.0 |
moniqx4/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/apps/organization/data.py | 102 | 12186 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for the Organization Unit Provisioning API."""
__author__ = 'Gunjan Sharma <gunjansharma@google.com>'
import gdata.apps
import gdata.apps.apps_property_entry
import gdata.apps_property
import gdata.data
# This is required to work around a naming conflict between the Google
# Spreadsheets API and Python's built-in property function
pyproperty = property

# Names of the apps:property elements used by the Organization Unit
# Provisioning API.

# The apps:property name of an organization unit
ORG_UNIT_NAME = 'name'

# The apps:property orgUnitPath of an organization unit
ORG_UNIT_PATH = 'orgUnitPath'

# The apps:property parentOrgUnitPath of an organization unit
PARENT_ORG_UNIT_PATH = 'parentOrgUnitPath'

# The apps:property description of an organization unit
ORG_UNIT_DESCRIPTION = 'description'

# The apps:property blockInheritance of an organization unit
ORG_UNIT_BLOCK_INHERITANCE = 'blockInheritance'

# The apps:property userEmail of a user entry
USER_EMAIL = 'orgUserEmail'

# The apps:property list of users to move
USERS_TO_MOVE = 'usersToMove'

# The apps:property list of moved users
MOVED_USERS = 'usersMoved'

# The apps:property customerId for the domain
CUSTOMER_ID = 'customerId'

# The apps:property name of the customer org unit
CUSTOMER_ORG_UNIT_NAME = 'customerOrgUnitName'

# The apps:property description of the customer org unit
CUSTOMER_ORG_UNIT_DESCRIPTION = 'customerOrgUnitDescription'

# The apps:property old organization unit's path for a user
OLD_ORG_UNIT_PATH = 'oldOrgUnitPath'
class CustomerIdEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
  """Represents a customerId entry in object form.

  All properties are read-only values populated by the server.
  """

  def GetCustomerId(self):
    """Return the customer ID as a string, or None."""
    return self._GetProperty(CUSTOMER_ID)

  customer_id = pyproperty(GetCustomerId)

  def GetOrgUnitName(self):
    """Return the organization unit name as a string, or None."""
    return self._GetProperty(ORG_UNIT_NAME)

  org_unit_name = pyproperty(GetOrgUnitName)

  def GetCustomerOrgUnitName(self):
    """Return the customer organization unit name as a string, or None."""
    return self._GetProperty(CUSTOMER_ORG_UNIT_NAME)

  customer_org_unit_name = pyproperty(GetCustomerOrgUnitName)

  def GetOrgUnitDescription(self):
    """Return the organization unit description as a string, or None."""
    return self._GetProperty(ORG_UNIT_DESCRIPTION)

  org_unit_description = pyproperty(GetOrgUnitDescription)

  def GetCustomerOrgUnitDescription(self):
    """Return the customer organization unit description, or None."""
    return self._GetProperty(CUSTOMER_ORG_UNIT_DESCRIPTION)

  customer_org_unit_description = pyproperty(GetCustomerOrgUnitDescription)
class OrgUnitEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
  """Represents an OrganizationUnit in object form.

  Attributes are stored as apps:property elements on the underlying
  entry; the pyproperty wrappers below expose them as plain attributes.
  """

  def GetOrgUnitName(self):
    """Return the organization unit name as a string, or None."""
    return self._GetProperty(ORG_UNIT_NAME)

  def SetOrgUnitName(self, value):
    """Set the organization unit name.

    Args:
      value: [string] The new organization unit name.
    """
    self._SetProperty(ORG_UNIT_NAME, value)

  org_unit_name = pyproperty(GetOrgUnitName, SetOrgUnitName)

  def GetOrgUnitPath(self):
    """Return the organization unit path as a string, or None."""
    return self._GetProperty(ORG_UNIT_PATH)

  def SetOrgUnitPath(self, value):
    """Set the organization unit path.

    Args:
      value: [string] The new organization unit path.
    """
    self._SetProperty(ORG_UNIT_PATH, value)

  org_unit_path = pyproperty(GetOrgUnitPath, SetOrgUnitPath)

  def GetParentOrgUnitPath(self):
    """Return the parent organization unit path as a string, or None."""
    return self._GetProperty(PARENT_ORG_UNIT_PATH)

  def SetParentOrgUnitPath(self, value):
    """Set the parent organization unit path.

    Args:
      value: [string] The new parent organization unit path.
    """
    self._SetProperty(PARENT_ORG_UNIT_PATH, value)

  parent_org_unit_path = pyproperty(GetParentOrgUnitPath, SetParentOrgUnitPath)

  def GetOrgUnitDescription(self):
    """Return the organization unit description as a string, or None."""
    return self._GetProperty(ORG_UNIT_DESCRIPTION)

  def SetOrgUnitDescription(self, value):
    """Set the organization unit description.

    Args:
      value: [string] The new organization unit description.
    """
    self._SetProperty(ORG_UNIT_DESCRIPTION, value)

  org_unit_description = pyproperty(GetOrgUnitDescription,
                                    SetOrgUnitDescription)

  def GetOrgUnitBlockInheritance(self):
    """Return the blockInheritance flag as a string, or None."""
    return self._GetProperty(ORG_UNIT_BLOCK_INHERITANCE)

  def SetOrgUnitBlockInheritance(self, value):
    """Set the blockInheritance flag.

    Args:
      value: [string] The new blockInheritance flag.
    """
    self._SetProperty(ORG_UNIT_BLOCK_INHERITANCE, value)

  org_unit_block_inheritance = pyproperty(GetOrgUnitBlockInheritance,
                                          SetOrgUnitBlockInheritance)

  def GetMovedUsers(self):
    """Return the server-reported list of moved users, as a string or None."""
    return self._GetProperty(MOVED_USERS)

  def SetUsersToMove(self, value):
    """Set the users to move into this organization unit.

    Args:
      value: [string] Comma separated list of user emails to move.
    """
    self._SetProperty(USERS_TO_MOVE, value)

  # NOTE: asymmetric on purpose -- writes go to the 'usersToMove'
  # property while reads come back from the server-populated 'usersMoved'.
  move_users = pyproperty(GetMovedUsers, SetUsersToMove)

  def __init__(
      self, org_unit_name=None, org_unit_path=None,
      parent_org_unit_path=None, org_unit_description=None,
      org_unit_block_inheritance=None, move_users=None, *args, **kwargs):
    """Constructs a new OrganizationUnit object with the given arguments.

    Args:
      org_unit_name: string (optional) The organization unit name.
      org_unit_path: string (optional) The organization unit path.
      parent_org_unit_path: string (optional) The parent organization unit
          path.
      org_unit_description: string (optional) The organization unit
          description.
      org_unit_block_inheritance: boolean (optional) Whether or not
          inheritance from the organization unit is blocked.
      move_users: string (optional) Comma separated list of users to move.
      args: The other parameters to pass to gdata.entry.GDEntry constructor.
      kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
    """
    super(OrgUnitEntry, self).__init__(*args, **kwargs)
    if org_unit_name:
      self.org_unit_name = org_unit_name
    if org_unit_path:
      self.org_unit_path = org_unit_path
    if parent_org_unit_path:
      self.parent_org_unit_path = parent_org_unit_path
    if org_unit_description:
      self.org_unit_description = org_unit_description
    if org_unit_block_inheritance is not None:
      # Stored as its string representation, e.g. 'True'.
      self.org_unit_block_inheritance = str(org_unit_block_inheritance)
    if move_users:
      self.move_users = move_users
class OrgUnitFeed(gdata.data.GDFeed):
  """Represents a feed of OrgUnitEntry objects."""

  # Override entry so that this feed knows how to type its list of entries.
  entry = [OrgUnitEntry]
class OrgUserEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
  """Represents an OrgUser in object form.

  An OrgUser maps a user email address to the organization unit the user
  belongs to (and, after a move, the unit the user came from).
  """

  def GetUserEmail(self):
    """Return the user email address as a string, or None."""
    return self._GetProperty(USER_EMAIL)

  def SetUserEmail(self, value):
    """Set the user email address.

    Args:
      value: string The new user email address.
    """
    self._SetProperty(USER_EMAIL, value)

  user_email = pyproperty(GetUserEmail, SetUserEmail)

  def GetOrgUnitPath(self):
    """Return the organization unit path as a string, or None."""
    return self._GetProperty(ORG_UNIT_PATH)

  def SetOrgUnitPath(self, value):
    """Set the organization unit path.

    Args:
      value: [string] The new organization unit path.
    """
    self._SetProperty(ORG_UNIT_PATH, value)

  org_unit_path = pyproperty(GetOrgUnitPath, SetOrgUnitPath)

  def GetOldOrgUnitPath(self):
    """Return the previous organization unit path as a string, or None."""
    return self._GetProperty(OLD_ORG_UNIT_PATH)

  def SetOldOrgUnitPath(self, value):
    """Set the previous organization unit path.

    Args:
      value: [string] The new old organization unit path.
    """
    self._SetProperty(OLD_ORG_UNIT_PATH, value)

  old_org_unit_path = pyproperty(GetOldOrgUnitPath, SetOldOrgUnitPath)

  def __init__(
      self, user_email=None, org_unit_path=None,
      old_org_unit_path=None, *args, **kwargs):
    """Constructs a new OrgUser object with the given arguments.

    Args:
      user_email: string (optional) The user email address.
      org_unit_path: string (optional) The organization unit path.
      old_org_unit_path: string (optional) The old organization unit path.
      args: The other parameters to pass to gdata.entry.GDEntry constructor.
      kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
    """
    super(OrgUserEntry, self).__init__(*args, **kwargs)
    if user_email:
      self.user_email = user_email
    if org_unit_path:
      self.org_unit_path = org_unit_path
    if old_org_unit_path:
      self.old_org_unit_path = old_org_unit_path
class OrgUserFeed(gdata.data.GDFeed):
  """Represents a feed of OrgUserEntry objects."""

  # Override entry so that this feed knows how to type its list of entries.
  entry = [OrgUserEntry]
| apache-2.0 |
TeamMac/android_kernel_huawei_p6-u06 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events that had no dedicated handler.
unhandled = autodict()
def trace_begin():
	# Called once by perf before any events are processed.
	print "trace_begin"
	pass
def trace_end():
	# Called once after the last event; report events with no handler.
	print_unhandled()
# Handler for irq:softirq_entry; prints the vector as a symbol string.
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	vec):
	print_header(event_name, common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	print_uncommon(context)
	print "vec=%s\n" % \
	(symbol_str("irq__softirq_entry", "vec", vec)),
# Handler for kmem:kmalloc; prints allocation details with gfp_flags
# rendered as a flag string.
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
	print_header(event_name, common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	print_uncommon(context)
	print "call_site=%u, ptr=%u, bytes_req=%u, " \
	"bytes_alloc=%u, gfp_flags=%s\n" % \
	(call_site, ptr, bytes_req, bytes_alloc,
	flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
	# Count events without a dedicated handler.  NOTE(review): autodict
	# appears to raise TypeError (not KeyError) for missing keys, which
	# is why the increment is guarded by ``except TypeError``.
	try:
		unhandled[event_name] += 1
	except TypeError:
		unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	# Fixed-width prefix shared by all handlers; the trailing comma keeps
	# the caller's output on the same line (Python 2 print statement).
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
	print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
		% (common_pc(context), trace_flag_str(common_flags(context)), \
		common_lock_depth(context))
def print_unhandled():
	# Print a small table of events that had no dedicated handler.
	keys = unhandled.keys()
	if not keys:
		return

	print "\nunhandled events:\n\n",
	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
	"-----------"),
	for event_name in keys:
		print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
AlanZatarain/cortex-vfx | test/IECoreMaya/PlaybackFrameList.py | 17 | 2314 | ##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreMaya
class TestPlaybackFrameList( IECoreMaya.TestCase ) :

	def test( self ):
		""" Test PlaybackFrameList """

		# Range.Animation should expand to every integer frame between the
		# scene's animation start and end times, inclusive.
		# NOTE(review): comparing a list against range() only works on
		# Python 2, where range returns a list -- confirm before porting.
		r = IECoreMaya.PlaybackFrameList( IECoreMaya.PlaybackFrameList.Range.Animation )
		l = r.asList()
		self.assertEqual( l, range( int( maya.cmds.playbackOptions( query=True, animationStartTime=True ) ),
			int( maya.cmds.playbackOptions( query=True, animationEndTime=True ) + 1 ) ) )
# Run the tests when executed directly inside Maya's Python.
if __name__ == "__main__":
	IECoreMaya.TestProgram()
| bsd-3-clause |
Pluto-tv/chromium-crosswalk | third_party/closure_compiler/runner/build_runner_jar.py | 85 | 2327 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import subprocess
import sys
import tempfile
def rel_to_abs(rel_path):
    """Resolve *rel_path* against the directory containing this script."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, rel_path)
# Locate the JDK tools: JAVA_HOME/bin when set, otherwise rely on PATH
# (the empty prefix makes os.path.join a no-op below).
java_bin_path = os.getenv('JAVA_HOME', '')
if java_bin_path:
    java_bin_path = os.path.join(java_bin_path, 'bin')

main_class = 'org.chromium.closure.compiler.Runner'  # jar entry point
jar_name = 'runner.jar'
src_dir = 'src'
# Relative path recorded in the jar manifest's Class-Path.
closure_jar_relpath = os.path.join('..', 'compiler', 'compiler.jar')
src_path = rel_to_abs(src_dir)
def run_and_communicate(command, error_template):
    """Run *command* through the shell, echoing it to stderr first.

    On a non-zero exit status, prints ``error_template % returncode`` and
    terminates the whole script with that status.
    """
    print >> sys.stderr, command
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    proc.communicate()
    if proc.returncode:
        print >> sys.stderr, error_template % proc.returncode
        sys.exit(proc.returncode)
def build_artifacts():
    """Compile the Java sources under src/ and package them into runner.jar.

    Temporary class-output and manifest files are always cleaned up, even
    when javac or jar fails.
    """
    print 'Compiling...'
    # Collect every file under src/ (assumed to all be .java sources).
    java_files = []
    for root, dirs, files in sorted(os.walk(src_path)):
        for file_name in files:
            java_files.append(os.path.join(root, file_name))

    bin_path = tempfile.mkdtemp()
    # The manifest records the classpath so the jar can locate compiler.jar.
    manifest_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
    try:
        manifest_file.write('Class-Path: %s\n' % closure_jar_relpath)
        manifest_file.close()
        javac_path = os.path.join(java_bin_path, 'javac')
        javac_command = '%s -d %s -cp %s %s' % (
            javac_path, bin_path, rel_to_abs(closure_jar_relpath),
            ' '.join(java_files))
        run_and_communicate(javac_command, 'Error: javac returned %d')

        print 'Building jar...'
        artifact_path = rel_to_abs(jar_name)
        jar_path = os.path.join(java_bin_path, 'jar')
        # jar flags: c=create, v=verbose, f=file, m=manifest, e=entry point.
        jar_command = '%s cvfme %s %s %s -C %s .' % (
            jar_path, artifact_path, manifest_file.name, main_class, bin_path)
        run_and_communicate(jar_command, 'Error: jar returned %d')
    finally:
        # Always remove the temporary manifest and the class directory.
        os.remove(manifest_file.name)
        shutil.rmtree(bin_path, True)

    print 'Done.'
def show_usage_and_die():
    """Print usage information and exit with status 1."""
    print 'usage: %s' % os.path.basename(__file__)
    print 'Builds runner.jar from the %s directory contents' % src_dir
    sys.exit(1)
def main():
    # The script takes no arguments; anything extra is a usage error.
    if len(sys.argv) > 1:
        show_usage_and_die()
    build_artifacts()


if __name__ == '__main__':
    main()
| bsd-3-clause |
nishantjr/pjproject | tests/pjsua/scripts-sendto/171_timer_initiated_by_uas.py | 40 | 1090 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
# SDP answer body advertising a single PCMU audio stream with
# telephone-event (DTMF) support.
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=pjmedia
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
"""

# RFC 4028 Section 9:
# If the incoming request contains a Supported header field with a
# value 'timer' but does not contain a Session-Expires header, it means
# that the UAS is indicating support for timers but is not requesting
# one. The UAS may request a session timer in the 2XX response by
# including a Session-Expires header field. The value MUST NOT be set
# to a duration lower than the value in the Min-SE header field in the
# request, if it is present.
pjsua_args = "--null-audio --auto-answer 200 --use-timer 2 --timer-min-se 90 --timer-se 1800"
extra_headers = "Supported: timer\n"
# The 200 response is expected to carry Session-Expires with a refresher
# parameter; no header is forbidden.
include = ["Session-Expires: .*;refresher=.*"]
exclude = []

sendto_cfg = sip.SendtoCfg("Session Timer initiated by UAS", pjsua_args, sdp, 200,
                           extra_headers=extra_headers,
                           resp_inc=include, resp_exc=exclude)
| gpl-2.0 |
julianprabhakar/eden_car | controllers/tour.py | 36 | 1785 | # -*- coding: utf-8 -*-
"""
Guided Tour, Controllers
"""
# Controller/function names come from the current request so the module
# can be disabled centrally via deployment settings.
module = request.controller
resourcename = request.function

if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
    """
    Application Home page
    """

    module_name = settings.modules[module].name_nice
    response.title = module_name
    return dict(module_name=module_name)
# -----------------------------------------------------------------------------
def config():
    """ REST Controller for guided-tour configurations """

    tablename = "tour_config"
    # Ensure the table is defined before the REST request is handled.
    s3db.table(tablename)
    table = s3db.tour_config

    return s3_rest_controller("tour", "config",
                              rheader = s3db.tour_rheader)
def details():
    """ REST Controller for guided-tour step details """

    tablename = "tour_details"
    # Ensure the table is defined before the REST request is handled.
    s3db.table(tablename)
    table = s3db.tour_details

    return s3_rest_controller("tour", "details")
def user():
    """ REST Controller for per-user tour progress records """

    tablename = "tour_user"
    # Ensure the table is defined before the REST request is handled.
    s3db.table(tablename)
    table = s3db.tour_user

    return s3_rest_controller("tour", "user")
# -----------------------------------------------------------------------------
def guided_tour_finished():
    """ Update database when tour completed otherwise redirect to tour/config """

    if request.ajax == True:
        utable = s3db.tour_user
        person_id = auth.s3_logged_in_person()
        # Match this user's record for the tour that was just completed.
        query = (utable.person_id == person_id) & \
                (utable.tour_config_id == request.post_vars.tour_id)
        # Clear the resume point, flag completion, and bump the counter.
        db(query).update(resume = "",
                         completed = True,
                         trip_counter = utable.trip_counter+1)
        return json.dumps({})
    else:
        # Non-AJAX access: send the user to the tour configuration list.
        redirect(URL(f="config"))
| mit |
ZhaoCJ/django | django/core/serializers/json.py | 5 | 3694 | """
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import decimal
import json
import sys
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import six
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    internal_use_only = False

    def start_serialization(self):
        # Prepare the keyword arguments forwarded to json.dump() and emit
        # the opening bracket of the JSON array.
        if json.__version__.split('.') >= ['2', '1', '3']:
            # Use JS strings to represent Python Decimal instances (ticket #16850)
            self.options.update({'use_decimal': False})
        self._current = None
        self.json_kwargs = self.options.copy()
        # 'stream' and 'fields' are serializer options, not json.dump() ones.
        self.json_kwargs.pop('stream', None)
        self.json_kwargs.pop('fields', None)
        if self.options.get('indent'):
            # Prevent trailing spaces
            self.json_kwargs['separators'] = (',', ': ')
        self.stream.write("[")

    def end_serialization(self):
        # Close the JSON array, with newlines when indented output was asked.
        if self.options.get("indent"):
            self.stream.write("\n")
        self.stream.write("]")
        if self.options.get("indent"):
            self.stream.write("\n")

    def end_object(self, obj):
        # self._current has the field data
        indent = self.options.get("indent")
        if not self.first:
            self.stream.write(",")
            if not indent:
                self.stream.write(" ")
        if indent:
            self.stream.write("\n")
        json.dump(self.get_dump_object(obj), self.stream,
                  cls=DjangoJSONEncoder, **self.json_kwargs)
        self._current = None

    def getvalue(self):
        # Grand-parent super
        return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.

    Accepts a file-like object, bytes or text; yields deserialized
    objects. Any failure is re-raised as DeserializationError with the
    original traceback preserved.
    """
    if not isinstance(stream_or_string, (bytes, six.string_types)):
        stream_or_string = stream_or_string.read()
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode('utf-8')
    try:
        objects = json.loads(stream_or_string)
        for obj in PythonDeserializer(objects, **options):
            yield obj
    except GeneratorExit:
        # Generator close() must not be converted to DeserializationError.
        raise
    except Exception as e:
        # Map to deserializer error
        six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
class DjangoJSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    def default(self, o):
        # Date/time values follow the "Date Time String Format" of the
        # ECMA-262 specification; Decimals are emitted as plain strings.
        if isinstance(o, datetime.datetime):
            text = o.isoformat()
            if o.microsecond:
                # Truncate to millisecond precision.
                text = text[:23] + text[26:]
            if text.endswith('+00:00'):
                text = text[:-6] + 'Z'
            return text
        if isinstance(o, datetime.date):
            return o.isoformat()
        if isinstance(o, datetime.time):
            if is_aware(o):
                raise ValueError("JSON can't represent timezone-aware times.")
            text = o.isoformat()
            if o.microsecond:
                # Truncate to millisecond precision.
                text = text[:12]
            return text
        if isinstance(o, decimal.Decimal):
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| bsd-3-clause |
iw3hxn/LibrERP | account_payment_term_month/models/inherit_account_invoice.py | 1 | 3307 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Micronaet SRL (<http://www.micronaet.it>).
# Copyright (C) 2014 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import orm
from tools.translate import _
class account_invoice(orm.Model):
    _inherit = 'account.invoice'

    def action_move_create(self, cr, uid, ids, context=None):
        """Place the invoice's total tax amount into the context before
        delegating move creation to the parent class, one invoice at a
        time."""
        context = context or self.pool['res.users'].context_get(cr, uid)
        ait_obj = self.pool['account.invoice.tax']
        amount_tax = 0.0
        if isinstance(ids, (int, long)):
            ids = [ids]
        for inv in self.browse(cr, uid, ids, context=context):
            # Reuse a tax total already present in the context, otherwise
            # compute it from the invoice's tax lines.
            amount_tax = context.get('amount_tax', 0.0)
            if not amount_tax:
                compute_taxes = ait_obj.compute(cr, uid, inv.id, context=context)
                for tax in compute_taxes:
                    amount_tax += compute_taxes[tax]['amount']
            context.update({'amount_tax': amount_tax})
            super(account_invoice, self).action_move_create(cr, uid, [inv.id], context=context)
        return True

    def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
        """Recompute the due date when the payment term or invoice date
        changes: the latest instalment date becomes ``date_due``.

        Raises orm.except_orm when the payment term has no lines.
        """
        res = {'value': {}}
        if not ids:
            return res
        if not payment_term_id:
            return res
        context = self.pool['res.users'].context_get(cr, uid)
        pt_obj = self.pool['account.payment.term']
        ait_obj = self.pool['account.invoice.tax']
        if not date_invoice:
            date_invoice = time.strftime('%Y-%m-%d')
        # Pass the tax total in the context so term computation can use it.
        compute_taxes = ait_obj.compute(cr, uid, ids, context=context)
        amount_tax = 0
        for tax in compute_taxes:
            amount_tax += compute_taxes[tax]['amount']
        context.update({'amount_tax': amount_tax})
        pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice, context=context)
        if pterm_list:
            pterm_list = [line[0] for line in pterm_list]
            pterm_list.sort()
            # The last (latest) instalment date is the invoice due date.
            res = {'value': {'date_due': pterm_list[-1]}}
        else:
            payment = self.pool['account.payment.term'].browse(cr, uid, payment_term_id, context)
            raise orm.except_orm(_('Data Insufficient "{0}" !'.format(payment.name)),
                                 _('The payment term of supplier does not have a payment term line!'))
        return res
| agpl-3.0 |
DreamSourceLab/DSView | libsigrokdecode4DSL/decoders/sdq/pd.py | 4 | 4130 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2019-2020 Philip Åkesson <philip.akesson@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
from common.srdhelper import bitpack
import sigrokdecode as srd
class SamplerateError(Exception):
    """Raised when decode() is invoked before a samplerate is known."""
    pass
class Pin:
    # Channel index of the single SDQ data line.
    SDQ = 0
class Ann:
    # Annotation class indices, in registration order.
    BIT = 0
    BYTE = 1
    BREAK = 2
class Decoder(srd.Decoder):
    """Protocol decoder for the single-wire Texas Instruments SDQ bus."""
    api_version = 3
    id = 'sdq'
    name = 'SDQ'
    longname = 'Texas Instruments SDQ'
    desc = 'Texas Instruments SDQ. The SDQ protocol is also used by Apple.'
    license = 'gplv2+'
    inputs = ['logic']
    outputs = []
    tags = ['Embedded/industrial']
    channels = (
        {'id': 'sdq', 'name': 'SDQ', 'desc': 'Single wire SDQ data line.'},
    )
    options = (
        {'id': 'bitrate', 'desc': 'Bit rate', 'default': 98425},
    )
    annotations = (
        ('bit', 'Bit'),
        ('byte', 'Byte'),
        ('break', 'Break'),
    )
    annotation_rows = (
        ('bits', 'Bits', (Ann.BIT,)),
        ('bytes', 'Bytes', (Ann.BYTE,)),
        ('breaks', 'Breaks', (Ann.BREAK,)),
    )

    def puts(self, data):
        # Annotate from startsample up to the current sample.
        self.put(self.startsample, self.samplenum, self.out_ann, data)

    def putetu(self, data):
        # Annotate exactly one bit width starting at startsample.
        self.put(self.startsample, self.startsample + int(self.bit_width), self.out_ann, data)

    def putbetu(self, data):
        # Annotate from the start of the current byte to the end of this bit.
        self.put(self.bytepos, self.startsample + int(self.bit_width), self.out_ann, data)

    def __init__(self):
        self.reset()

    def reset(self):
        self.samplerate = None
        self.startsample = 0   # sample number where the current bit started
        self.bits = []         # decoded bits of the byte in progress
        self.bytepos = 0       # sample number where the current byte started

    def start(self):
        self.out_ann = self.register(srd.OUTPUT_ANN)

    def metadata(self, key, value):
        if key == srd.SRD_CONF_SAMPLERATE:
            self.samplerate = value

    def handle_bit(self, bit):
        """Record one decoded bit; emit a byte when eight have accumulated."""
        self.bits.append(bit)
        self.putetu([Ann.BIT, [
            'Bit: {:d}'.format(bit),
            '{:d}'.format(bit),
        ]])
        if len(self.bits) == 8:
            byte = bitpack(self.bits)
            self.putbetu([Ann.BYTE, [
                'Byte: 0x{:02x}'.format(byte),
                '0x{:02x}'.format(byte),
            ]])
            self.bits = []
            self.bytepos = 0

    def handle_break(self):
        # A BREAK resynchronizes the stream: discard any partial byte.
        self.puts([Ann.BREAK, ['Break', 'BR']])
        self.bits = []
        self.startsample = self.samplenum
        self.bytepos = 0

    def decode(self):
        if not self.samplerate:
            raise SamplerateError('Cannot decode without samplerate.')
        self.bit_width = float(self.samplerate) / float(self.options['bitrate'])
        self.half_bit_width = self.bit_width / 2.0
        # BREAK if the line is low for longer than this.
        break_threshold = self.bit_width * 1.2

        # Wait until the line is high before inspecting input data.
        sdq, = self.wait({Pin.SDQ: 'h'})
        while True:
            # Get the length of a low pulse (falling to rising edge).
            sdq, = self.wait({Pin.SDQ: 'f'})
            self.startsample = self.samplenum
            if self.bytepos == 0:
                self.bytepos = self.samplenum
            sdq, = self.wait({Pin.SDQ: 'r'})

            # Check for 0 or 1 data bits, or the BREAK symbol: short low
            # pulses are 1-bits, longer ones 0-bits, and anything beyond
            # break_threshold is a BREAK.
            delta = self.samplenum - self.startsample
            if delta > break_threshold:
                self.handle_break()
            elif delta > self.half_bit_width:
                self.handle_bit(0)
            else:
                self.handle_bit(1)
| gpl-3.0 |
lablup/sorna-agent | src/ai/backend/kernel/vendor/aws_polly/__init__.py | 1 | 3171 | import asyncio
import ctypes
import logging
import os
import threading
import janus
from ... import BaseRunner
from .inproc import PollyInprocRunner
log = logging.getLogger()
class Runner(BaseRunner):
    """Kernel runner that forwards user input to an in-process AWS Polly
    worker thread and relays its output back over the kernel socket."""

    log_prefix = 'vendor.aws_polly-kernel'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.inproc_runner = None
        # Pushed to the output queue to mark the end of one execution.
        self.sentinel = object()
        self.input_queue = None
        self.output_queue = None
        # NOTE: If credentials are missing,
        # boto3 will try to use the instance role.
        self.access_key = \
            self.child_env.get('AWS_ACCESS_KEY_ID', None)
        self.secret_key = \
            self.child_env.get('AWS_SECRET_ACCESS_KEY', None)
        os.environ['AWS_DEFAULT_REGION'] = \
            self.child_env.get('AWS_DEFAULT_REGION', 'ap-northeast-2')

    async def init_with_loop(self):
        # janus queues bridge the synchronous worker thread and this
        # asyncio event loop.
        self.input_queue = janus.Queue()
        self.output_queue = janus.Queue()

    async def build_heuristic(self) -> int:
        raise NotImplementedError

    async def execute_heuristic(self) -> int:
        raise NotImplementedError

    async def query(self, code_text) -> int:
        """Send *code_text* to the worker and relay output messages until
        the sentinel arrives. Always returns 0."""
        self.ensure_inproc_runner()
        await self.input_queue.async_q.put(code_text)
        # Read the generated outputs until done
        while True:
            try:
                msg = await self.output_queue.async_q.get()
            except asyncio.CancelledError:
                break
            self.output_queue.async_q.task_done()
            if msg is self.sentinel:
                break
            self.outsock.send_multipart(msg)
        return 0

    async def complete(self, data):
        # Code completion is not supported; reply with an empty result.
        self.outsock.send_multipart([
            b'completion',
            [],
        ])

    async def interrupt(self):
        if self.inproc_runner is None:
            log.error('No user code is running!')
            return
        # A dirty hack to raise an exception inside a running thread.
        target_tid = self.inproc_runner.ident
        if target_tid not in {t.ident for t in threading.enumerate()}:
            log.error('Interrupt failed due to missing thread.')
            return
        affected_count = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(target_tid),
            ctypes.py_object(KeyboardInterrupt))
        if affected_count == 0:
            log.error('Interrupt failed due to invalid thread identity.')
        elif affected_count > 1:
            # More than one thread got the async exception: undo it by
            # setting it back to NULL, then warn loudly.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(
                ctypes.c_long(target_tid),
                ctypes.c_long(0))
            log.error('Interrupt broke the interpreter state -- '
                      'recommended to reset the session.')

    async def start_service(self, service_info):
        # No long-running in-kernel services are provided.
        return None, {}

    def ensure_inproc_runner(self):
        # Lazily start the worker thread on first use.
        if self.inproc_runner is None:
            self.inproc_runner = PollyInprocRunner(
                self.input_queue.sync_q,
                self.output_queue.sync_q,
                self.sentinel,
                self.access_key,
                self.secret_key)
            self.inproc_runner.start()
| lgpl-3.0 |
reinaH/osf.io | scripts/analytics/email_invites.py | 55 | 1332 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from utils import plot_dates, mkdirp
# MongoDB collection holding user documents.
user_collection = database['user']

# Output directory for the generated figures; created if missing.
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_email_invites():
    """Plot registration dates of users who were invited by email.

    Users with a non-empty ``unclaimed_records`` field count as invited.
    Saves the figure under ``FIG_PATH``; does nothing when no invited
    users exist.
    """
    invited = user_collection.find({'unclaimed_records': {'$ne': {}}})
    dates_invited = [
        user['date_registered']
        for user in invited
    ]
    if not dates_invited:
        return
    plot_dates(dates_invited)
    # Fixed unbalanced parenthesis in the title: "({}) total)" -> "({} total)".
    plt.title('email invitations ({} total)'.format(len(dates_invited)))
    plt.savefig(os.path.join(FIG_PATH, 'email-invites.png'))
    plt.close()
def analyze_email_confirmations():
    """Plot confirmation dates of invited users who claimed their account.

    Saves the figure under ``FIG_PATH``; does nothing when no confirmed
    invitees exist.
    """
    confirmed = user_collection.find({
        'unclaimed_records': {'$ne': {}},
        'is_claimed': True,
    })
    dates_confirmed = [
        user['date_confirmed']
        for user in confirmed
    ]
    if not dates_confirmed:
        return
    plot_dates(dates_confirmed)
    # Fixed unbalanced parenthesis in the title: "({}) total)" -> "({} total)".
    plt.title('confirmed email invitations ({} total)'.format(len(dates_confirmed)))
    plt.savefig(os.path.join(FIG_PATH, 'email-invite-confirmations.png'))
    plt.close()
def main():
    """Generate both email-invitation figures."""
    analyze_email_invites()
    analyze_email_confirmations()


if __name__ == '__main__':
    main()
| apache-2.0 |
gabrielfalcao/lettuce | tests/integration/lib/Django-1.3/django/core/cache/backends/db.py | 227 | 6002 | "Database cache backend."
from django.core.cache.backends.base import BaseCache
from django.db import connections, router, transaction, DatabaseError
import base64, time
from datetime import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
class Options(object):
    """A class that will quack like a Django model _meta class.

    This allows cache operations to be controlled by the router
    """
    def __init__(self, table):
        self.db_table = table
        # Fixed metadata shared by every cache-entry pseudo-model.
        for attr, value in (
                ('app_label', 'django_cache'),
                ('module_name', 'cacheentry'),
                ('verbose_name', 'cache entry'),
                ('verbose_name_plural', 'cache entries'),
                ('object_name', 'CacheEntry'),
                ('abstract', False),
                ('managed', True),
                ('proxy', False)):
            setattr(self, attr, value)
class BaseDatabaseCache(BaseCache):
    def __init__(self, table, params):
        BaseCache.__init__(self, params)
        self._table = table

        class CacheEntry(object):
            # Fake model so database routers can make routing decisions
            # about the cache table.
            _meta = Options(table)
        self.cache_model_class = CacheEntry
class DatabaseCache(BaseDatabaseCache):
    """Cache backend storing entries as rows of a database table.

    Values are pickled and base64-encoded; expiry is enforced lazily on
    read and via culling on write.
    """

    def get(self, key, default=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT cache_key, value, expires FROM %s WHERE cache_key = %%s" % table, [key])
        row = cursor.fetchone()
        if row is None:
            return default
        now = datetime.now()
        if row[2] < now:
            # Expired entry: delete it on the write database, report a miss.
            db = router.db_for_write(self.cache_model_class)
            cursor = connections[db].cursor()
            cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
            transaction.commit_unless_managed(using=db)
            return default
        value = connections[db].ops.process_clob(row[1])
        return pickle.loads(base64.decodestring(value))

    def set(self, key, value, timeout=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        self._base_set('set', key, value, timeout)

    def add(self, key, value, timeout=None, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return self._base_set('add', key, value, timeout)

    def _base_set(self, mode, key, value, timeout=None):
        # Shared implementation of set() and add(): 'add' only overwrites
        # rows whose expiry has already passed.
        if timeout is None:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = datetime.now().replace(microsecond=0)
        exp = datetime.fromtimestamp(time.time() + timeout).replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        encoded = base64.encodestring(pickle.dumps(value, 2)).strip()
        cursor.execute("SELECT cache_key, expires FROM %s WHERE cache_key = %%s" % table, [key])
        try:
            result = cursor.fetchone()
            if result and (mode == 'set' or
                    (mode == 'add' and result[1] < now)):
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % table,
                               [encoded, connections[db].ops.value_to_db_datetime(exp), key])
            else:
                cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % table,
                               [key, encoded, connections[db].ops.value_to_db_datetime(exp)])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            transaction.rollback_unless_managed(using=db)
            return False
        else:
            transaction.commit_unless_managed(using=db)
            return True

    def delete(self, key, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
        transaction.commit_unless_managed(using=db)

    def has_key(self, key, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        # Only unexpired rows count as present.
        now = datetime.now().replace(microsecond=0)
        cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s and expires > %%s" % table,
                       [key, connections[db].ops.value_to_db_datetime(now)])
        return cursor.fetchone() is not None

    def _cull(self, db, cursor, now):
        # Drop expired rows, then if still over the limit delete everything
        # below a key chosen 1/cull_frequency of the way through the table.
        if self._cull_frequency == 0:
            self.clear()
        else:
            table = connections[db].ops.quote_name(self._table)
            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
                           [connections[db].ops.value_to_db_datetime(now)])
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            if num > self._max_entries:
                cursor.execute("SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" % table, [num / self._cull_frequency])
                cursor.execute("DELETE FROM %s WHERE cache_key < %%s" % table, [cursor.fetchone()[0]])

    def clear(self):
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()
        cursor.execute('DELETE FROM %s' % table)
# For backwards compatibility
class CacheClass(DatabaseCache):
    # Deprecated alias kept so legacy settings keep working.
    pass
| gpl-3.0 |
haeusser/tensorflow | tensorflow/contrib/training/python/training/sampling_ops_test.py | 130 | 14840 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class StratifiedSampleTest(test.TestCase):
  def testGraphBuildAssertionFailures(self):
    """Graph construction rejects ill-formed data/label/probability args."""
    val = [array_ops.zeros([1, 3]), array_ops.ones([1, 5])]
    label = constant_op.constant([1], shape=[1])  # must have batch dimension
    probs = [.2] * 5
    init_probs = [.1, .3, .1, .3, .2]
    batch_size = 16

    # Label must have only batch dimension if enqueue_many is True.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val,
          array_ops.zeros([]),
          probs,
          batch_size,
          init_probs,
          enqueue_many=True)
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val,
          array_ops.zeros([1, 1]),
          probs,
          batch_size,
          init_probs,
          enqueue_many=True)

    # Label must not be one-hot.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(val,
                                     constant_op.constant([0, 1, 0, 0, 0]),
                                     probs, batch_size, init_probs)

    # Data must be list, not singleton tensor.
    with self.assertRaises(TypeError):
      sampling_ops.stratified_sample(
          array_ops.zeros([1, 3]), label, probs, batch_size, init_probs)

    # Data must have batch dimension if enqueue_many is True.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val,
          constant_op.constant(1),
          probs,
          batch_size,
          init_probs,
          enqueue_many=True)

    # Batch dimensions on data and labels should be equal.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          [array_ops.zeros([2, 1])],
          label,
          probs,
          batch_size,
          init_probs,
          enqueue_many=True)

    # Probabilities must be numpy array, python list, or tensor.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(val, label, 1, batch_size, init_probs)

    # Probabilities shape must be fully defined.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val,
          label,
          array_ops.placeholder(
              dtypes.float32, shape=[None]),
          batch_size,
          init_probs)

    # In the rejection sampling case, make sure that probability lengths are
    # the same.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)

    # In the rejection sampling case, make sure that zero initial probability
    # classes also have zero target probability.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])
  def testRuntimeAssertionFailures(self):
    """Illegal label/probability values are rejected at session run time."""
    valid_probs = [.2] * 5
    valid_labels = [1, 2, 3]
    vals = [array_ops.zeros([3, 1])]

    illegal_labels = [
        [0, -1, 1],  # classes must be nonnegative
        [5, 1, 1],  # classes must be less than number of classes
        [2, 3],  # data and label batch size must be the same
    ]

    illegal_probs = [
        [.1] * 5,  # probabilities must sum to one
        [-.5, .5, .5, .4, .1],  # probabilities must be non-negative
    ]

    # Set up graph with illegal label vector.
    label_ph = array_ops.placeholder(dtypes.int32, shape=[None])
    probs_ph = array_ops.placeholder(
        dtypes.float32, shape=[5])  # shape must be defined
    val_tf, lbl_tf, prob_tf = sampling_ops._verify_input(  # pylint: disable=protected-access
        vals, label_ph, [probs_ph])

    for illegal_label in illegal_labels:
      # Run session that should fail.
      with self.test_session() as sess:
        with self.assertRaises(errors_impl.InvalidArgumentError):
          sess.run([val_tf, lbl_tf],
                   feed_dict={label_ph: illegal_label,
                              probs_ph: valid_probs})

    for illegal_prob in illegal_probs:
      # Run session that should fail.
      with self.test_session() as sess:
        with self.assertRaises(errors_impl.InvalidArgumentError):
          sess.run([prob_tf],
                   feed_dict={label_ph: valid_labels,
                              probs_ph: illegal_prob})
  def testCanBeCalledMultipleTimes(self):
    """Two stratified_sample ops can coexist in one graph and both run."""
    batch_size = 20
    val_input_batch = [array_ops.zeros([2, 3, 4])]
    lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
    probs = np.array([0, 1, 0, 0, 0])
    batches = sampling_ops.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    batches += sampling_ops.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    summary_op = logging_ops.merge_summary(
        ops.get_collection(ops.GraphKeys.SUMMARIES))

    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)

      # Both sample ops plus the merged summaries must evaluate cleanly.
      sess.run(batches + (summary_op,))

      coord.request_stop()
      coord.join(threads)
  def testRejectionBatchingBehavior(self):
    """Rejection sampling accepts multi-element input batches
    (enqueue_many=True) whose size differs from the output batch size."""
    batch_size = 20
    input_batch_size = 11
    val_input_batch = [array_ops.zeros([input_batch_size, 2, 3, 4])]
    # Each input batch is labeled entirely 1 or entirely 3, at random.
    lbl_input_batch = control_flow_ops.cond(
        math_ops.greater(.5, random_ops.random_uniform([])),
        lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 1,
        lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 3)
    probs = np.array([0, .2, 0, .8, 0])
    data_batch, labels = sampling_ops.stratified_sample(
        val_input_batch,
        lbl_input_batch,
        probs,
        batch_size,
        init_probs=[0, .3, 0, .7, 0],
        enqueue_many=True)
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)

      sess.run([data_batch, labels])

      coord.request_stop()
      coord.join(threads)
  def testBatchDimensionNotRequired(self):
    """_verify_input works with placeholders of completely unknown shape."""
    classes = 5
    # Probs must be a tensor, since we pass it directly to _verify_input.
    probs = constant_op.constant([1.0 / classes] * classes)

    # Make sure that these vals/labels pairs don't throw any runtime exceptions.
    legal_input_pairs = [
        (np.zeros([2, 3]), [x % classes for x in range(2)]),  # batch dim 2
        (np.zeros([4, 15]), [x % classes for x in range(4)]),  # batch dim 4
        (np.zeros([10, 1]), [x % classes for x in range(10)]),  # batch dim 10
    ]

    # Set up graph with placeholders.
    vals_ph = array_ops.placeholder(
        dtypes.float32)  # completely undefined shape
    labels_ph = array_ops.placeholder(
        dtypes.int32)  # completely undefined shape
    val_tf, labels_tf, _ = sampling_ops._verify_input(  # pylint: disable=protected-access
        [vals_ph], labels_ph, [probs])

    # Run graph to make sure there are no shape-related runtime errors.
    for vals, labels in legal_input_pairs:
      with self.test_session() as sess:
        sess.run([val_tf, labels_tf],
                 feed_dict={vals_ph: vals,
                            labels_ph: labels})
  def testRejectionDataListInput(self):
    """A list of data tensors is sampled and returned as a list of the
    same length, with a single labels tensor."""
    batch_size = 20
    val_input_batch = [
        array_ops.zeros([2, 3, 4]), array_ops.ones([2, 4]), array_ops.ones(2) *
        3
    ]
    lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
    probs = np.array([0, 1, 0, 0, 0])
    val_list, lbls = sampling_ops.stratified_sample(
        val_input_batch,
        lbl_input_batch,
        probs,
        batch_size,
        init_probs=[0, 1, 0, 0, 0])

    # Check output shapes.
    self.assertTrue(isinstance(val_list, list))
    self.assertEqual(len(val_list), len(val_input_batch))
    self.assertTrue(isinstance(lbls, ops.Tensor))

    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)

      out = sess.run(val_list + [lbls])

      coord.request_stop()
      coord.join(threads)

    # Check output shapes.
    self.assertEqual(len(out), len(val_input_batch) + 1)
def normalBehaviorHelper(self, sampler):
  """Feeds a two-class stream into `sampler` and checks its output statistics.

  Args:
    sampler: callable taking (vals, label, probs, batch_size) and returning
      a (data_batch, labels) pair, e.g. a curried
      `sampling_ops.stratified_sample`.
  """
  # Set up graph.
  random_seed.set_random_seed(1234)
  lbl1 = 0
  lbl2 = 3
  # This cond allows the necessary class queues to be populated.
  label = control_flow_ops.cond(
      math_ops.greater(.5, random_ops.random_uniform([])),
      lambda: constant_op.constant(lbl1), lambda: constant_op.constant(lbl2))
  # Values are tied to the label so data/label consistency can be checked.
  val = [np.array([1, 4]) * label]
  probs = np.array([.8, 0, 0, .2, 0])
  batch_size = 16
  data_batch, labels = sampler(val, label, probs, batch_size)
  # Run session and keep track of how frequently the labels and values appear.
  data_l = []
  label_l = []
  with self.test_session() as sess:
    # Need to initialize variables that keep running total of classes seen.
    variables.global_variables_initializer().run()
    coord = coordinator.Coordinator()
    threads = queue_runner_impl.start_queue_runners(coord=coord)
    for _ in range(20):
      [data], lbls = sess.run([data_batch, labels])
      data_l.append(data)
      label_l.append(lbls)
    coord.request_stop()
    coord.join(threads)
  # First check that the data matches the labels.
  for lbl, data in zip(label_l, data_l):
    for i in range(batch_size):
      self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))
  # Check that the labels are approximately correct.
  expected_label = probs[0] * lbl1 + probs[3] * lbl2
  lbl_list = range(len(probs))
  lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
  lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l))  # CLT
  actual_lbl = np.mean(label_l)
  # Tolerance is 3 standard deviations of the mean. According to the central
  # limit theorem, this should cover 99.7% of cases. Note that since the seed
  # is fixed, for a given implementation, this test will pass or fail 100% of
  # the time. This use of assertNear is to cover cases where someone changes
  # an implementation detail, which would cause the random behavior to differ.
  self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)
def testRejectionNormalBehavior(self):
  """Rejection sampling with explicitly supplied initial probabilities."""
  initial_p = [.7, 0, 0, .3, 0]

  def sample_with_fixed_init(val, lbls, probs, batch, enqueue_many=False):
    # Bind init_probs so the helper can call the sampler with four args.
    return sampling_ops.stratified_sample(
        val, lbls, probs, batch,
        init_probs=initial_p,
        enqueue_many=enqueue_many)

  self.normalBehaviorHelper(sample_with_fixed_init)
def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
  """Rejection sampling while estimating initial probabilities online."""

  def sample_with_estimated_init(val, lbls, probs, batch, enqueue_many=False):
    # init_probs=None makes stratified_sample estimate them from the data.
    return sampling_ops.stratified_sample(
        val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)

  self.normalBehaviorHelper(sample_with_estimated_init)
class RejectionSampleTest(test.TestCase):
  """Tests for `sampling_ops.rejection_sample`."""

  def testGraphConstructionFailures(self):
    """Invalid shape combinations must fail at graph-construction time."""
    accept_prob_fn = lambda _: constant_op.constant(1.0)
    batch_size = 32
    # Data must have batch dimension if `enqueue_many` is `True`.
    with self.assertRaises(ValueError):
      sampling_ops.rejection_sample(
          [array_ops.zeros([])], accept_prob_fn, batch_size, enqueue_many=True)
    # Batch dimensions should be equal if `enqueue_many` is `True`.
    with self.assertRaises(ValueError):
      sampling_ops.rejection_sample(
          [array_ops.zeros([5, 1]), array_ops.zeros([4, 1])],
          accept_prob_fn,
          batch_size,
          enqueue_many=True)

  def testRuntimeFailures(self):
    """Out-of-range acceptance probabilities must fail at run time."""
    prob_ph = array_ops.placeholder(dtypes.float32, [])
    accept_prob_fn = lambda _: prob_ph
    batch_size = 32
    # Set up graph.
    random_seed.set_random_seed(1234)
    sampling_ops.rejection_sample(
        [array_ops.zeros([])],
        accept_prob_fn,
        batch_size,
        runtime_checks=True,
        name='rejection_sample')
    prob_tensor = ops.get_default_graph().get_tensor_by_name(
        'rejection_sample/prob_with_checks:0')
    # Run session that should fail.
    with self.test_session() as sess:
      for illegal_prob in [-0.1, 1.1]:
        with self.assertRaises(errors_impl.InvalidArgumentError):
          sess.run(prob_tensor, feed_dict={prob_ph: illegal_prob})

  def testNormalBehavior(self):
    """Only values with acceptance probability 1 should ever be emitted."""
    tensor_list = [
        control_flow_ops.cond(
            math_ops.greater(.5, random_ops.random_uniform([])),
            lambda: constant_op.constant(1.0),
            lambda: constant_op.constant(2.0))
    ]
    # Accepts 2.0 with probability 1.0 and rejects 1.0 with probability 0.0.
    accept_prob_fn = lambda x: x[0] - 1.0
    batch_size = 10
    # Set up graph.
    sample = sampling_ops.rejection_sample(tensor_list, accept_prob_fn,
                                           batch_size)
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)
      for _ in range(5):
        sample_np = sess.run(sample)[0]
        self.assertListEqual([2.0] * batch_size, list(sample_np))
      coord.request_stop()
      coord.join(threads)
# Run all tests in this module when executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
zetaops/ulakbus | ulakbus/views/reports/base.py | 1 | 6017 | # -*- coding: utf-8 -*-
"""
"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
from io import BytesIO
from zengine.lib.translation import gettext as _, gettext_lazy
import six
from zengine.forms import JsonForm
from zengine.forms import fields
from zengine.views.base import BaseView
import re
import base64
from datetime import datetime
# PDF generation support is optional; if reportlab (used by pdfdocument) is
# missing, reporters can still render on screen but not print.
try:
    from ulakbus.lib.pdfdocument.document import PDFDocument, register_fonts_from_paths
except ImportError:
    # Only swallow a missing dependency; a bare ``except`` would also hide
    # genuine errors inside the pdfdocument module itself.
    print("Warning: Reportlab module not found")
from ulakbus.lib.s3_file_manager import S3FileManager
from ulakbus.lib.common import get_file_url
class ReporterRegistry(type):
    """Metaclass that collects every ``Reporter`` subclass into a registry.

    The base ``Reporter`` class only contributes its default ``Meta`` options;
    every class created afterwards with this metaclass inherits any missing
    ``Meta`` attributes from those defaults and is registered under its class
    name so reports can be listed and dispatched generically.
    """

    # Maps class name -> reporter class, for all non-base reporters.
    registry = {}
    # Default Meta options, captured from the base ``Reporter`` class.
    _meta = None

    def __new__(mcs, name, bases, attrs):
        if name == 'Reporter':
            # The base class defines the default Meta options.
            ReporterRegistry._meta = attrs['Meta']
        elif 'Meta' not in attrs:
            # No Meta given: clone the defaults wholesale.
            attrs['Meta'] = type('Meta', (object,),
                                 ReporterRegistry._meta.__dict__)
        else:
            # Partial Meta: fill in only the missing default options.
            for key, value in ReporterRegistry._meta.__dict__.items():
                if key not in attrs['Meta'].__dict__:
                    setattr(attrs['Meta'], key, value)
        new_class = super(ReporterRegistry, mcs).__new__(mcs, name, bases, attrs)
        if name != 'Reporter':
            ReporterRegistry.registry[name] = new_class
        return new_class

    @staticmethod
    def get_reporters():
        """Returns menu entries (dicts) for every registered reporter."""
        return [{"text": v.get_title(),
                 "wf": 'generic_reporter',
                 "model": k,
                 "kategori": 'Raporlar',
                 "param": 'id'} for k, v in ReporterRegistry.registry.items()]

    @staticmethod
    def get_permissions():
        """Returns (code, title, description) permission triples per reporter."""
        return [("report.%s" % k, v.get_title(), "") for k, v in ReporterRegistry.registry.items()]

    @staticmethod
    def get_reporter(name):
        """Looks up a registered reporter class by its class name."""
        return ReporterRegistry.registry[name]
FILENAME_RE = re.compile(r'[^A-Za-z0-9\-\.]+')
@six.add_metaclass(ReporterRegistry)
class Reporter(BaseView):
    """Base class for report views.

    Subclasses implement :meth:`get_objects` and set :attr:`TITLE`; the
    ``ReporterRegistry`` metaclass registers them so they can be listed and
    dispatched generically. A reporter can render its data on screen
    (``show``) or as a downloadable PDF (``printout``).
    """

    # Human readable report title, shown in menus and in the PDF heading.
    TITLE = ''

    class Meta:
        pass

    def __init__(self, current):
        """Dispatches to ``show`` or ``printout`` based on ``cmd`` input."""
        super(Reporter, self).__init__(current)
        self.cmd = current.input.get('cmd', 'show')
        if self.cmd == 'show':
            self.show()
        elif self.cmd == 'printout':
            self.printout()

    class ReportForm(JsonForm):
        printout = fields.Button(gettext_lazy(u"Yazdır"), cmd="printout")

    def show(self):
        """Renders the report data as a table in the client UI."""
        objects = self.get_objects()
        frm = self.ReportForm(current=self.current, title=self.get_title())
        if objects:
            frm.help_text = ''
            if isinstance(objects[0], dict):
                # A list of dicts renders as a multi-row table.
                self.output['object'] = {'fields': objects,
                                         'type': 'table-multiRow'}
            else:
                # Otherwise the rows are assumed to be (key, value) pairs.
                objects = dict((k, str(v)) for k, v in objects)
                self.output['object'] = objects
        else:
            frm.help_text = _(u'Kayıt bulunamadı')
            self.output['object'] = {}
        self.set_client_cmd('form', 'show')
        self.output['forms'] = frm.serialize()
        self.output['forms']['constraints'] = {}
        self.output['forms']['grouping'] = []
        self.output['meta'] = {}

    def printout(self):
        """Renders the report as a PDF and publishes a download URL."""
        register_fonts_from_paths('Vera.ttf',
                                  'VeraIt.ttf',
                                  'VeraBd.ttf',
                                  'VeraBI.ttf',
                                  'Vera')
        objects = self.get_objects()
        f = BytesIO()
        pdf = PDFDocument(f, font_size=14)
        pdf.init_report()
        pdf.h1(self.tr2ascii(self.get_title()))
        # Guard against an empty result set: the previous implementation
        # raised IndexError on ``objects[0]``; now an empty report with only
        # the title is produced instead.
        if objects:
            ascii_objects = []
            if isinstance(objects[0], dict):
                headers = objects[0].keys()
                ascii_objects.append([self.tr2ascii(h) for h in headers])
                for obj in objects:
                    ascii_objects.append(
                        [self.tr2ascii(v) for v in obj.values()])
            else:
                for o in objects:
                    ascii_objects.append(
                        (self.tr2ascii(o[0]), self.tr2ascii(o[1])))
            pdf.table(ascii_objects)
        pdf.generate()
        download_url = self.generate_temp_file(
            name=self.generate_file_name(),
            content=base64.b64encode(f.getvalue()),
            file_type='application/pdf',
            ext='pdf'
        )
        self.set_client_cmd('download')
        self.output['download_url'] = download_url

    @staticmethod
    def generate_temp_file(name, content, file_type, ext):
        """Stores ``content`` on S3 and returns a URL for it."""
        f = S3FileManager()
        key = f.store_file(name=name, content=content, type=file_type, ext=ext)
        return get_file_url(key)

    def generate_file_name(self):
        """Builds a sanitized, timestamped file name for the PDF."""
        return "{0}-{1}".format(
            FILENAME_RE.sub('-', self.tr2ascii(self.get_title()).lower()),
            datetime.now().strftime("%d.%m.%Y-%H.%M.%S")
        )

    @staticmethod
    def convert_choices(choices_dict_list):
        """Converts a choices sequence to a dict, casting keys to int if possible.

        Args:
            choices_dict_list: iterable of (key, label) pairs.

        Returns:
            dict mapping int(key) (or the original key when not numeric)
            to label.
        """
        result = []
        for d in choices_dict_list:
            try:
                k = int(d[0])
            except (TypeError, ValueError):
                # Keep non-numeric keys as-is; the previous bare ``except``
                # silently swallowed every possible error here.
                k = d[0]
            result.append((k, d[1]))
        return dict(result)

    def get_headers(self):
        # ``HEADERS`` is not defined on this base class; subclasses are
        # expected to provide it.
        return self.HEADERS

    @classmethod
    def get_title(cls):
        """Returns the report title as text."""
        return six.text_type(cls.TITLE)

    def get_objects(self):
        """Returns the report rows; subclasses must implement this."""
        raise NotImplementedError

    def tr2ascii(self, inp):
        """Transliterates Turkish characters in ``inp`` to ASCII equivalents."""
        inp = six.text_type(inp)
        shtlst = [
            ('ğ', 'g'),
            ('ı', 'i'),
            ('İ', 'I'),
            ('ç', 'c'),
            ('ö', 'o'),
            ('ü', 'u'),
            ('ş', 's'),
            ('Ğ', 'G'),
            ('Ş', 'S'),
            ('Ö', 'O'),
            ('Ü', 'U'),
            ('Ç', 'C'),
        ]
        for t, a in shtlst:
            inp = inp.replace(t, a)
        return inp
def ReportDispatcher(current):
    """Instantiates the reporter selected by the request's ``model`` input."""
    reporter_class = ReporterRegistry.get_reporter(current.input['model'])
    reporter_class(current)
| gpl-3.0 |
marcoantoniooliveira/labweb | oscar/apps/order/south_migrations/0001_initial.py | 16 | 48240 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
depends_on = (
('catalogue', '0001_initial'),
('customer', '0001_initial'),
('partner', '0001_initial'),
('address', '0001_initial'),
)
def forwards(self, orm):
    """Creates the initial tables and columns for the order app.

    Auto-generated South schema migration; kept byte-stable because it is
    part of the historical migration chain.
    """
    # Adding model 'PaymentEventQuantity'
    db.create_table('order_paymenteventquantity', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('event', self.gf('django.db.models.fields.related.ForeignKey')(related_name='line_quantities', to=orm['order.PaymentEvent'])),
        ('line', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['order.Line'])),
        ('quantity', self.gf('django.db.models.fields.PositiveIntegerField')()),
    ))
    db.send_create_signal('order', ['PaymentEventQuantity'])

    # Adding model 'ShippingEventQuantity'
    db.create_table('order_shippingeventquantity', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('event', self.gf('django.db.models.fields.related.ForeignKey')(related_name='line_quantities', to=orm['order.ShippingEvent'])),
        ('line', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['order.Line'])),
        ('quantity', self.gf('django.db.models.fields.PositiveIntegerField')()),
    ))
    db.send_create_signal('order', ['ShippingEventQuantity'])

    # Adding model 'Order'
    db.create_table('order_order', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('number', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
        ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
        ('basket_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='orders', null=True, to=orm[AUTH_USER_MODEL])),
        ('billing_address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['order.BillingAddress'], null=True, blank=True)),
        ('total_incl_tax', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('total_excl_tax', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('shipping_incl_tax', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2)),
        ('shipping_excl_tax', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2)),
        ('shipping_address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['order.ShippingAddress'], null=True, blank=True)),
        ('shipping_method', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
        ('status', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ('date_placed', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
    ))
    db.send_create_signal('order', ['Order'])

    # Adding model 'OrderNote'
    db.create_table('order_ordernote', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='notes', to=orm['order.Order'])),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[AUTH_USER_MODEL], null=True)),
        ('note_type', self.gf('django.db.models.fields.CharField')(max_length=128, null=True)),
        ('message', self.gf('django.db.models.fields.TextField')()),
        ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
    ))
    db.send_create_signal('order', ['OrderNote'])

    # Adding model 'CommunicationEvent'
    db.create_table('order_communicationevent', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='communication_events', to=orm['order.Order'])),
        ('event_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['customer.CommunicationEventType'])),
        ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
    ))
    db.send_create_signal('order', ['CommunicationEvent'])

    # Adding model 'ShippingAddress'
    db.create_table('order_shippingaddress', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
        ('first_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('last_name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('line1', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('line2', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('line3', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('line4', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('state', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('postcode', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['address.Country'])),
        ('search_text', self.gf('django.db.models.fields.CharField')(max_length=1000)),
        ('phone_number', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
        ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
    ))
    db.send_create_signal('order', ['ShippingAddress'])

    # Adding model 'BillingAddress'
    db.create_table('order_billingaddress', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
        ('first_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('last_name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ('line1', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('line2', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('line3', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('line4', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('state', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('postcode', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['address.Country'])),
        ('search_text', self.gf('django.db.models.fields.CharField')(max_length=1000)),
    ))
    db.send_create_signal('order', ['BillingAddress'])

    # Adding model 'Line'
    db.create_table('order_line', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='lines', to=orm['order.Order'])),
        ('partner', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='order_lines', null=True, to=orm['partner.Partner'])),
        ('partner_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
        ('partner_sku', self.gf('django.db.models.fields.CharField')(max_length=128)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'], null=True, blank=True)),
        ('quantity', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
        ('line_price_incl_tax', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('line_price_excl_tax', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('line_price_before_discounts_incl_tax', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('line_price_before_discounts_excl_tax', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('unit_cost_price', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True)),
        ('unit_price_incl_tax', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True)),
        ('unit_price_excl_tax', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True)),
        ('unit_retail_price', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True)),
        ('partner_line_reference', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
        ('partner_line_notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('status', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('est_dispatch_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
    ))
    db.send_create_signal('order', ['Line'])

    # Adding model 'LinePrice'
    db.create_table('order_lineprice', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='line_prices', to=orm['order.Order'])),
        ('line', self.gf('django.db.models.fields.related.ForeignKey')(related_name='prices', to=orm['order.Line'])),
        ('quantity', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
        ('price_incl_tax', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('price_excl_tax', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('shipping_incl_tax', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2)),
        ('shipping_excl_tax', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2)),
    ))
    db.send_create_signal('order', ['LinePrice'])

    # Adding model 'LineAttribute'
    db.create_table('order_lineattribute', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('line', self.gf('django.db.models.fields.related.ForeignKey')(related_name='attributes', to=orm['order.Line'])),
        ('option', self.gf('django.db.models.fields.related.ForeignKey')(related_name='line_attributes', null=True, to=orm['catalogue.Option'])),
        ('type', self.gf('django.db.models.fields.CharField')(max_length=128)),
        ('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
    ))
    db.send_create_signal('order', ['LineAttribute'])

    # Adding model 'ShippingEvent'
    db.create_table('order_shippingevent', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='shipping_events', to=orm['order.Order'])),
        ('event_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['order.ShippingEventType'])),
        ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
    ))
    db.send_create_signal('order', ['ShippingEvent'])

    # Adding model 'ShippingEventType'
    db.create_table('order_shippingeventtype', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
        ('code', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=128, db_index=True)),
        ('is_required', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('sequence_number', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
    ))
    db.send_create_signal('order', ['ShippingEventType'])

    # Adding model 'PaymentEvent'
    db.create_table('order_paymentevent', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='payment_events', to=orm['order.Order'])),
        ('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2)),
        ('event_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['order.PaymentEventType'])),
        ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
    ))
    db.send_create_signal('order', ['PaymentEvent'])

    # Adding model 'PaymentEventType'
    db.create_table('order_paymenteventtype', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
        ('code', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=128, db_index=True)),
        ('sequence_number', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
    ))
    db.send_create_signal('order', ['PaymentEventType'])

    # Adding model 'OrderDiscount'
    db.create_table('order_orderdiscount', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='discounts', to=orm['order.Order'])),
        ('offer_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
        ('voucher_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
        ('voucher_code', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, db_index=True)),
        ('amount', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2)),
    ))
    db.send_create_signal('order', ['OrderDiscount'])
def backwards(self, orm):
    """Reverse the migration: drop every table created by forwards()."""
    # (model name, table name) pairs, listed in the exact order the
    # original migration dropped them.
    dropped = [
        ('PaymentEventQuantity', 'order_paymenteventquantity'),
        ('ShippingEventQuantity', 'order_shippingeventquantity'),
        ('Order', 'order_order'),
        ('OrderNote', 'order_ordernote'),
        ('CommunicationEvent', 'order_communicationevent'),
        ('ShippingAddress', 'order_shippingaddress'),
        ('BillingAddress', 'order_billingaddress'),
        ('Line', 'order_line'),
        ('LinePrice', 'order_lineprice'),
        ('LineAttribute', 'order_lineattribute'),
        ('ShippingEvent', 'order_shippingevent'),
        ('ShippingEventType', 'order_shippingeventtype'),
        ('PaymentEvent', 'order_paymentevent'),
        ('PaymentEventType', 'order_paymenteventtype'),
        ('OrderDiscount', 'order_orderdiscount'),
    ]
    for model_name, table_name in dropped:
        # Deleting model '<model_name>'
        db.delete_table(table_name)
models = {
'address.country': {
'Meta': {'ordering': "('-is_highlighted', 'name')", 'object_name': 'Country'},
'is_highlighted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "'Order related'", 'max_length': '255'}),
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
},
'order.billingaddress': {
'Meta': {'object_name': 'BillingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.communicationevent': {
'Meta': {'object_name': 'CommunicationEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customer.CommunicationEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communication_events'", 'to': "orm['order.Order']"})
},
'order.line': {
'Meta': {'object_name': 'Line'},
'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': "orm['order.Order']"}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'to': "orm['partner.Partner']"}),
'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']", 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'order.lineattribute': {
'Meta': {'object_name': 'LineAttribute'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['order.Line']"}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_attributes'", 'null': 'True', 'to': "orm['catalogue.Option']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'order.lineprice': {
'Meta': {'object_name': 'LinePrice'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': "orm['order.Line']"}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_prices'", 'to': "orm['order.Order']"}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
'order.order': {
'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
'basket_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.BillingAddress']", 'null': 'True', 'blank': 'True'}),
'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingAddress']", 'null': 'True', 'blank': 'True'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'order.orderdiscount': {
'Meta': {'object_name': 'OrderDiscount'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discounts'", 'to': "orm['order.Order']"}),
'voucher_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'voucher_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'order.ordernote': {
'Meta': {'object_name': 'OrderNote'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'note_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['order.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'})
},
'order.paymentevent': {
'Meta': {'object_name': 'PaymentEvent'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.PaymentEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.PaymentEventQuantity']", 'symmetrical': 'False'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'to': "orm['order.Order']"})
},
'order.paymenteventquantity': {
'Meta': {'object_name': 'PaymentEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.PaymentEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.paymenteventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'PaymentEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'order.shippingaddress': {
'Meta': {'object_name': 'ShippingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.shippingevent': {
'Meta': {'ordering': "['-date']", 'object_name': 'ShippingEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.ShippingEventQuantity']", 'symmetrical': 'False'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': "orm['order.Order']"})
},
'order.shippingeventquantity': {
'Meta': {'object_name': 'ShippingEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.ShippingEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.shippingeventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'ShippingEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['order']
| bsd-3-clause |
venomJ/AndroidViewClient | examples/settings-sound-phone_ringtone.py | 9 | 2025 | #! /usr/bin/env python
'''
Copyright (C) 2012 Diego Torres Milano
Created on Sep 8, 2012
@author: diego
@see: http://code.google.com/p/android/issues/detail?id=36544
'''
import re
import sys
import os
# This must be imported before MonkeyRunner and MonkeyDevice,
# otherwise the import fails.
# PyDev sets PYTHONPATH, use it
try:
for p in os.environ['PYTHONPATH'].split(':'):
if not p in sys.path:
sys.path.append(p)
except:
pass
try:
sys.path.append(os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src'))
except:
pass
from com.dtmilano.android.viewclient import ViewClient, View
device, serialno = ViewClient.connectToDeviceOrExit()
DEBUG = True
FLAG_ACTIVITY_NEW_TASK = 0x10000000
# We are not using Settings as the bug describes because there's no WiFi dialog in emulator
componentName = 'com.android.settings/.Settings'
device.startActivity(component=componentName, flags=FLAG_ACTIVITY_NEW_TASK)
ViewClient.sleep(3)
vc = ViewClient(device=device, serialno=serialno)
if DEBUG: vc.traverse(transform=ViewClient.TRAVERSE_CIT)
sound = vc.findViewWithText('Sound')
if sound:
sound.touch()
vc.dump()
phoneRingtone = vc.findViewWithText('Phone ringtone')
if phoneRingtone:
phoneRingtone.touch()
vc.dump()
vespa = vc.findViewWithText('Vespa')
if vespa:
vespa.touch()
ViewClient.sleep(1)
ok = vc.findViewById('id/button1')
if ok:
ok.touch()
vc.dump()
vespa = vc.findViewWithText('Vespa')
# If for some reason the dialog is still there we will have Vespa and OK
ok = vc.findViewById('id/button1')
if vespa and not ok:
print "OK"
else:
print "FAIL to set ringtone Vespa"
sys.exit(1)
else:
print >> sys.stderr, "'OK' not found"
else:
print >> sys.stderr, "'Phone ringtone' not found"
else:
print >> sys.stderr, "'Sound' not found"
| apache-2.0 |
hsluo/youtube-dl | youtube_dl/extractor/npo.py | 85 | 17188 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
)
class NPOBaseIE(InfoExtractor):
    def _get_token(self, video_id):
        """Fetch and descramble the NPO player token for *video_id*."""
        token_page = self._download_webpage(
            'http://ida.omroep.nl/npoplayer/i.js',
            video_id, note='Downloading token')
        token = self._search_regex(
            r'npoplayer\.token = "(.+?)"', token_page, 'token')

        # Decryption algorithm extracted from
        # http://npoplayer.omroep.nl/csjs/npoplayer-min.js: swap the first
        # two digit characters found in token[5:len-4]; when fewer than two
        # digits exist there, swap the fixed positions 12 and 13 instead.
        chars = list(token)
        digit_positions = [
            pos for pos in range(5, len(chars) - 4) if chars[pos].isdigit()
        ]
        if len(digit_positions) >= 2:
            pos_a, pos_b = digit_positions[0], digit_positions[1]
        else:
            pos_a, pos_b = 12, 13
        chars[pos_a], chars[pos_b] = chars[pos_b], chars[pos_a]
        return ''.join(chars)
class NPOIE(NPOBaseIE):
    """Extractor for on-demand videos on npo.nl / ntr.nl / omroepwnl.nl.

    Also reachable through internal 'npo:<id>' URLs emitted by the
    playlist extractors further down in this module.
    """
    IE_NAME = 'npo'
    IE_DESC = 'npo.nl and ntr.nl'
    _VALID_URL = r'''(?x)
                    (?:
                        npo:|
                        https?://
                            (?:www\.)?
                            (?:
                                npo\.nl/(?!live|radio)(?:[^/]+/){2}|
                                ntr\.nl/(?:[^/]+/){2,}|
                                omroepwnl\.nl/video/fragment/[^/]+__
                            )
                        )
                        (?P<id>[^/?#]+)
                '''
    _TESTS = [
        {
            'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
            'md5': '4b3f9c429157ec4775f2c9cb7b911016',
            'info_dict': {
                'id': 'VPWON_1220719',
                'ext': 'm4v',
                'title': 'Nieuwsuur',
                'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
                'upload_date': '20140622',
            },
        },
        {
            'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
            'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
            'info_dict': {
                'id': 'VARA_101191800',
                'ext': 'm4v',
                'title': 'De Mega Mike & Mega Thomas show: The best of.',
                'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
                'upload_date': '20090227',
                'duration': 2400,
            },
        },
        {
            'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
            'info_dict': {
                'id': 'VPWON_1169289',
                'ext': 'm4v',
                'title': 'Tegenlicht: De toekomst komt uit Afrika',
                'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
                'upload_date': '20130225',
                'duration': 3000,
            },
        },
        {
            'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
            'info_dict': {
                'id': 'WO_VPRO_043706',
                'ext': 'wmv',
                'title': 'De nieuwe mens - Deel 1',
                'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
                'duration': 4680,
            },
            'params': {
                # mplayer mms download
                'skip_download': True,
            }
        },
        # non asf in streams
        {
            'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
            'md5': 'b3da13de374cbe2d5332a7e910bef97f',
            'info_dict': {
                'id': 'WO_NOS_762771',
                'ext': 'mp4',
                'title': 'Hoe gaat Europa verder na Parijs?',
            },
        },
        {
            'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content',
            'md5': '01c6a2841675995da1f0cf776f03a9c3',
            'info_dict': {
                'id': 'VPWON_1233944',
                'ext': 'm4v',
                'title': 'Aap, poot, pies',
                'description': 'md5:c9c8005d1869ae65b858e82c01a91fde',
                'upload_date': '20150508',
                'duration': 599,
            },
        },
        {
            'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698',
            'md5': 'd30cd8417b8b9bca1fdff27428860d08',
            'info_dict': {
                'id': 'POW_00996502',
                'ext': 'm4v',
                'title': '''"Dit is wel een 'landslide'..."''',
                'description': 'md5:f8d66d537dfb641380226e31ca57b8e8',
                'upload_date': '20150508',
                'duration': 462,
            },
        }
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._get_info(video_id)
    def _get_info(self, video_id):
        """Download metadata + per-format stream info and build the info dict."""
        metadata = self._download_json(
            'http://e.omroep.nl/metadata/%s' % video_id,
            video_id,
            # We have to remove the javascript callback
            transform_source=strip_jsonp,
        )
        # For some videos actual video id (prid) is different (e.g. for
        # http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698
        # video id is POMS_WNL_853698 but prid is POW_00996502)
        video_id = metadata.get('prid') or video_id
        # titel is too generic in some cases so utilize aflevering_titel as well
        # when available (e.g. http://tegenlicht.vpro.nl/afleveringen/2014-2015/access-to-africa.html)
        title = metadata['titel']
        sub_title = metadata.get('aflevering_titel')
        if sub_title and sub_title != title:
            title += ': %s' % sub_title
        token = self._get_token(video_id)
        formats = []
        pubopties = metadata.get('pubopties')
        if pubopties:
            # Ordered worst-to-best for youtube-dl's quality ranking.
            quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
            for format_id in pubopties:
                format_info = self._download_json(
                    'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
                    % (video_id, format_id, token),
                    video_id, 'Downloading %s JSON' % format_id)
                if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
                    continue
                streams = format_info.get('streams')
                if streams:
                    # The first streams entry is a URL that returns the real
                    # stream description when queried with &type=json.
                    video_info = self._download_json(
                        streams[0] + '&type=json',
                        video_id, 'Downloading %s stream JSON' % format_id)
                else:
                    video_info = format_info
                video_url = video_info.get('url')
                if not video_url:
                    continue
                if format_id == 'adaptive':
                    formats.extend(self._extract_m3u8_formats(video_url, video_id))
                else:
                    formats.append({
                        'url': video_url,
                        'format_id': format_id,
                        'quality': quality(format_id),
                    })
        # Legacy direct streams listed in the metadata itself; .asf entries
        # are ASX playlists that must be resolved to the real URL first.
        streams = metadata.get('streams')
        if streams:
            for i, stream in enumerate(streams):
                stream_url = stream.get('url')
                if not stream_url:
                    continue
                if '.asf' not in stream_url:
                    formats.append({
                        'url': stream_url,
                        'quality': stream.get('kwaliteit'),
                    })
                    continue
                asx = self._download_xml(
                    stream_url, video_id,
                    'Downloading stream %d ASX playlist' % i,
                    transform_source=fix_xml_ampersands)
                ref = asx.find('./ENTRY/Ref')
                if ref is None:
                    continue
                video_url = ref.get('href')
                if not video_url:
                    continue
                formats.append({
                    'url': video_url,
                    'ext': stream.get('formaat', 'asf'),
                    'quality': stream.get('kwaliteit'),
                })
        self._sort_formats(formats)
        subtitles = {}
        # tt888 == 'ja' signals Dutch teletext-888 subtitles are available.
        if metadata.get('tt888') == 'ja':
            subtitles['nl'] = [{
                'ext': 'vtt',
                'url': 'http://e.omroep.nl/tt888/%s' % video_id,
            }]
        return {
            'id': video_id,
            'title': title,
            'description': metadata.get('info'),
            'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
            'upload_date': unified_strdate(metadata.get('gidsdatum')),
            'duration': parse_duration(metadata.get('tijdsduur')),
            'formats': formats,
            'subtitles': subtitles,
        }
class NPOLiveIE(NPOBaseIE):
    """Live TV streams on npo.nl/live/<channel>."""
    IE_NAME = 'npo.nl:live'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
    _TEST = {
        'url': 'http://www.npo.nl/live/npo-1',
        'info_dict': {
            'id': 'LI_NEDERLAND1_136692',
            'display_id': 'npo-1',
            'ext': 'mp4',
            'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'Livestream',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page-internal live id (data-prid) differs from the URL slug.
        live_id = self._search_regex(
            r'data-prid="([^"]+)"', webpage, 'live id')
        metadata = self._download_json(
            'http://e.omroep.nl/metadata/%s' % live_id,
            display_id, transform_source=strip_jsonp)
        token = self._get_token(display_id)
        formats = []
        streams = metadata.get('streams')
        if streams:
            for stream in streams:
                stream_type = stream.get('type').lower()
                # smooth streaming is not supported
                if stream_type in ['ss', 'ms']:
                    continue
                stream_info = self._download_json(
                    'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
                    % (stream.get('url'), token),
                    display_id, 'Downloading %s JSON' % stream_type)
                if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
                    continue
                # The aapi answer contains a second URL that resolves (again
                # as JSONP) to the final stream URL.
                stream_url = self._download_json(
                    stream_info['stream'], display_id,
                    'Downloading %s URL' % stream_type,
                    'Unable to download %s URL' % stream_type,
                    transform_source=strip_jsonp, fatal=False)
                if not stream_url:
                    continue
                if stream_type == 'hds':
                    f4m_formats = self._extract_f4m_formats(stream_url, display_id)
                    # f4m downloader downloads only piece of live stream
                    for f4m_format in f4m_formats:
                        f4m_format['preference'] = -1
                    formats.extend(f4m_formats)
                elif stream_type == 'hls':
                    formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
                else:
                    # Unknown protocol: keep it but rank it below hds/hls.
                    formats.append({
                        'url': stream_url,
                        'preference': -10,
                    })
        self._sort_formats(formats)
        return {
            'id': live_id,
            'display_id': display_id,
            'title': self._live_title(metadata['titel']),
            'description': metadata['info'],
            'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
            'formats': formats,
            'is_live': True,
        }
class NPORadioIE(InfoExtractor):
    """Live radio streams on npo.nl/radio/<station>."""
    IE_NAME = 'npo.nl:radio'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-1',
        'info_dict': {
            'id': 'radio-1',
            'ext': 'mp3',
            'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }
    @staticmethod
    def _html_get_attribute_regex(attribute):
        # The station page stores its data in single-quoted HTML attributes.
        return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
    def _real_extract(self, url):
        station_id = self._match_id(url)
        webpage = self._download_webpage(url, station_id)
        # Channel name feeds the live title; stream info is inline JSON.
        channel_name = self._html_search_regex(
            self._html_get_attribute_regex('data-channel'), webpage, 'title')
        streams_json = self._html_search_regex(
            self._html_get_attribute_regex('data-streams'), webpage, 'data-streams')
        stream = self._parse_json(streams_json, station_id)
        codec = stream.get('codec')
        return {
            'id': station_id,
            'url': stream['url'],
            'title': self._live_title(channel_name),
            'acodec': codec,
            'ext': codec,
            'is_live': True,
        }
class NPORadioFragmentIE(InfoExtractor):
    """Recorded audio fragments under npo.nl/radio/<station>/fragment/<id>."""
    IE_NAME = 'npo.nl:radio:fragment'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
        'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
        'info_dict': {
            'id': '174356',
            'ext': 'mp3',
            'title': 'Jubileumconcert Willeke Alberti',
        },
    }
    def _real_extract(self, url):
        fragment_id = self._match_id(url)
        webpage = self._download_webpage(url, fragment_id)
        # The fragment's own anchor carries the human-readable title.
        fragment_title = self._html_search_regex(
            r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % fragment_id,
            webpage, 'title')
        stream_url = self._search_regex(
            r"data-streams='([^']+)'", webpage, 'audio url')
        return {
            'id': fragment_id,
            'url': stream_url,
            'title': fragment_title,
        }
class VPROIE(NPOIE):
    """VPRO pages: single videos and playlists; media ids delegate to NPOIE."""
    IE_NAME = 'vpro'
    _VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'
    _TESTS = [
        {
            'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
            'info_dict': {
                'id': 'VPWON_1169289',
                'ext': 'm4v',
                'title': 'De toekomst komt uit Afrika',
                'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
                'upload_date': '20130225',
            },
        },
        {
            'url': 'http://www.vpro.nl/programmas/2doc/2015/sergio-herman.html',
            'info_dict': {
                'id': 'sergio-herman',
                'title': 'Sergio Herman: Fucking perfect',
            },
            'playlist_count': 2,
        },
        {
            # playlist with youtube embed
            'url': 'http://www.vpro.nl/programmas/2doc/2015/education-education.html',
            'info_dict': {
                'id': 'education-education',
                'title': '2Doc',
            },
            'playlist_count': 2,
        }
    ]
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        entries = []
        for media_id in re.findall(r'data-media-id="([^"]+)"', webpage):
            # Absolute URLs (e.g. youtube embeds) pass through unchanged;
            # bare ids are routed to NPOIE via the internal npo: scheme.
            if media_id.startswith('http'):
                entries.append(self.url_result(media_id))
            else:
                entries.append(self.url_result('npo:%s' % media_id))
        playlist_title = self._search_regex(
            r'<title>\s*([^>]+?)\s*-\s*Teledoc\s*-\s*VPRO\s*</title>',
            webpage, 'playlist title', default=None)
        if not playlist_title:
            playlist_title = self._og_search_title(webpage)
        return self.playlist_result(entries, playlist_id, playlist_title)
class WNLIE(InfoExtractor):
    """Multi-part WNL broadcasts; each 'Deel N' part is delegated to NPOIE."""
    _VALID_URL = r'https?://(?:www\.)?omroepwnl\.nl/video/detail/(?P<id>[^/]+)__\d+'
    _TEST = {
        'url': 'http://www.omroepwnl.nl/video/detail/vandaag-de-dag-6-mei__060515',
        'info_dict': {
            'id': 'vandaag-de-dag-6-mei',
            'title': 'Vandaag de Dag 6 mei',
        },
        'playlist_count': 4,
    }
    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        # Each part anchor pairs a media id with its 'Deel N' label; only the
        # id is used to build the npo: delegation URL.
        part_links = re.findall(
            r'<a[^>]+href="([^"]+)"[^>]+class="js-mid"[^>]*>(Deel \d+)', webpage)
        entries = []
        for media_id, _part_label in part_links:
            entries.append(self.url_result('npo:%s' % media_id, 'NPO'))
        playlist_title = self._html_search_regex(
            r'(?s)<h1[^>]+class="subject"[^>]*>(.+?)</h1>',
            webpage, 'playlist title')
        return self.playlist_result(entries, playlist_id, playlist_title)
| unlicense |
TheWardoctor/Wardoctors-repo | plugin.video.salts/js2py/legecy_translators/nparser.py | 96 | 99391 | # JUST FOR NOW, later I will write my own - much faster and better.
# Copyright (C) 2013 Ariya Hidayat <ariya.hidayat@gmail.com>
# Copyright (C) 2013 Thaddee Tyl <thaddee.tyl@gmail.com>
# Copyright (C) 2012 Ariya Hidayat <ariya.hidayat@gmail.com>
# Copyright (C) 2012 Mathias Bynens <mathias@qiwi.be>
# Copyright (C) 2012 Joost-Wim Boekesteijn <joost-wim@boekesteijn.nl>
# Copyright (C) 2012 Kris Kowal <kris.kowal@cixar.com>
# Copyright (C) 2012 Yusuke Suzuki <utatane.tea@gmail.com>
# Copyright (C) 2012 Arpad Borsos <arpad.borsos@googlemail.com>
# Copyright (C) 2011 Ariya Hidayat <ariya.hidayat@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -*- coding: latin-1 -*-
from __future__ import print_function
import re
def typeof(t):
    """Emulate the JavaScript ``typeof`` operator for a Python value."""
    if t is None:
        return 'undefined'
    # bool must be tested before int: in Python, bool is an int subclass.
    if isinstance(t, bool):
        return 'boolean'
    if isinstance(t, str):
        return 'string'
    if isinstance(t, (int, float)):
        return 'number'
    if hasattr(t, '__call__'):
        return 'function'
    return 'object'
def list_indexOf(l, v):
    """Emulate JS ``Array.prototype.indexOf``: index of *v* in *l*, or -1.

    The original used a bare ``except``, which silently swallowed *every*
    exception (including KeyboardInterrupt/SystemExit); only the
    "not found" ValueError raised by ``list.index`` should map to -1.
    """
    try:
        return l.index(v)
    except ValueError:
        return -1
# JS-style global aliases mapped onto Python's constructors.
# NOTE(review): semantics differ from JS for strings with trailing junk
# (JS parses the numeric prefix; Python raises ValueError) -- confirm
# whether translated code ever relies on the lenient JS behaviour.
parseFloat = float
parseInt = int
class jsdict(object):
    """JS-like object: attribute and item access are interchangeable, and
    reading a missing member yields None (emulating JS ``undefined``)."""
    def __init__(self, d):
        # Bypasses __setattr__ on purpose: bulk-load the initial members.
        self.__dict__.update(d)
    def __getitem__(self, name):
        # Missing keys read as None, like JS 'undefined'.
        return self.__dict__.get(name)
    def __setitem__(self, name, value):
        self.__dict__[name] = value
        return value
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, so the member is
        # known to be absent: return None directly.
        # BUG FIX: the original called getattr(self, name) here, recursing
        # until the stack overflowed and relying on a bare ``except`` to
        # return None -- same observable result, pathological mechanism.
        return None
    def __setattr__(self, name, value):
        self[name] = value
        return value
    def __contains__(self, name):
        return name in self.__dict__
    def __repr__(self):
        return str(self.__dict__)
class RegExp(object):
    """Minimal stand-in for a JS RegExp backed by Python's ``re``.

    Supports the 'm' (multiline) and 'i' (ignore case) flags; any other
    flag characters are accepted but ignored, as before.
    """
    def __init__(self, pattern, flags=''):
        self.flags = flags
        # BUG FIX: the original wrote
        #   0 | re.M if 'm' in flags else 0 | re.I if 'i' in flags else 0
        # which Python parses as a nested conditional, so flags 'mi' kept
        # only re.M and silently dropped re.I.  OR the flags independently.
        pyflags = (re.M if 'm' in flags else 0) | (re.I if 'i' in flags else 0)
        self.source = pattern
        self.pattern = re.compile(pattern, pyflags)
    def test(self, s):
        """JS RegExp.test: True if the pattern matches anywhere in *s*."""
        return self.pattern.search(s) is not None
console = jsdict({"log":print})
# ---------------------------------------------------------------------------
# AST node constructors (machine-generated by the js2py translator from
# esprima's SyntaxTreeDelegate).  Each __temp__N builds one jsdict AST node;
# they are wired into the SyntaxTreeDelegate object elsewhere in this module.
# The numbering runs backwards (__temp__42 .. __temp__0) because the
# translator emitted them bottom-up.  The final four (__temp__3..__temp__0)
# are the range/loc marker helpers rather than node builders; they read the
# module globals `extra`, `state`, `index`, `lineNumber` and `lineStart`.
# ---------------------------------------------------------------------------
def __temp__42(object=None, body=None):
    return jsdict({
        "type": Syntax.WithStatement,
        "object": object,
        "body": body,
    })
def __temp__41(test=None, body=None):
    return jsdict({
        "type": Syntax.WhileStatement,
        "test": test,
        "body": body,
    })
def __temp__40(id=None, init=None):
    return jsdict({
        "type": Syntax.VariableDeclarator,
        "id": id,
        "init": init,
    })
def __temp__39(declarations=None, kind=None):
    return jsdict({
        "type": Syntax.VariableDeclaration,
        "declarations": declarations,
        "kind": kind,
    })
# Prefix ++/-- become UpdateExpression; all other prefix operators
# (typeof, !, -, ...) become UnaryExpression.
def __temp__38(operator=None, argument=None):
    if (operator == "++") or (operator == "--"):
        return jsdict({
            "type": Syntax.UpdateExpression,
            "operator": operator,
            "argument": argument,
            "prefix": True,
        })
    return jsdict({
        "type": Syntax.UnaryExpression,
        "operator": operator,
        "argument": argument,
        "prefix": True,
    })
def __temp__37(block=None, guardedHandlers=None, handlers=None, finalizer=None):
    return jsdict({
        "type": Syntax.TryStatement,
        "block": block,
        "guardedHandlers": guardedHandlers,
        "handlers": handlers,
        "finalizer": finalizer,
    })
def __temp__36(argument=None):
    return jsdict({
        "type": Syntax.ThrowStatement,
        "argument": argument,
    })
def __temp__35():
    return jsdict({
        "type": Syntax.ThisExpression,
    })
def __temp__34(discriminant=None, cases=None):
    return jsdict({
        "type": Syntax.SwitchStatement,
        "discriminant": discriminant,
        "cases": cases,
    })
def __temp__33(test=None, consequent=None):
    return jsdict({
        "type": Syntax.SwitchCase,
        "test": test,
        "consequent": consequent,
    })
def __temp__32(expressions=None):
    return jsdict({
        "type": Syntax.SequenceExpression,
        "expressions": expressions,
    })
def __temp__31(argument=None):
    return jsdict({
        "type": Syntax.ReturnStatement,
        "argument": argument,
    })
def __temp__30(kind=None, key=None, value=None):
    return jsdict({
        "type": Syntax.Property,
        "key": key,
        "value": value,
        "kind": kind,
    })
def __temp__29(body=None):
    return jsdict({
        "type": Syntax.Program,
        "body": body,
    })
# Postfix ++/-- (prefix=False); the prefix variant is __temp__38.
def __temp__28(operator=None, argument=None):
    return jsdict({
        "type": Syntax.UpdateExpression,
        "operator": operator,
        "argument": argument,
        "prefix": False,
    })
def __temp__27(properties=None):
    return jsdict({
        "type": Syntax.ObjectExpression,
        "properties": properties,
    })
def __temp__26(callee=None, args=None):
    return jsdict({
        "type": Syntax.NewExpression,
        "callee": callee,
        "arguments": args,
    })
def __temp__25(accessor=None, object=None, property=None):
    # accessor is '[' for computed (a[b]) access, '.' for static (a.b).
    return jsdict({
        "type": Syntax.MemberExpression,
        "computed": accessor == "[",
        "object": object,
        "property": property,
    })
def __temp__24(token=None):
    # 'raw' is sliced straight out of the module-global `source` text.
    return jsdict({
        "type": Syntax.Literal,
        "value": token.value,
        "raw": source[token.range[0]:token.range[1]],
    })
def __temp__23(label=None, body=None):
    return jsdict({
        "type": Syntax.LabeledStatement,
        "label": label,
        "body": body,
    })
def __temp__22(test=None, consequent=None, alternate=None):
    return jsdict({
        "type": Syntax.IfStatement,
        "test": test,
        "consequent": consequent,
        "alternate": alternate,
    })
def __temp__21(name=None):
    return jsdict({
        "type": Syntax.Identifier,
        "name": name,
    })
def __temp__20(id=None, params=None, defaults=None, body=None):
    return jsdict({
        "type": Syntax.FunctionExpression,
        "id": id,
        "params": params,
        "defaults": defaults,
        "body": body,
        "rest": None,
        "generator": False,
        "expression": False,
    })
def __temp__19(id=None, params=None, defaults=None, body=None):
    return jsdict({
        "type": Syntax.FunctionDeclaration,
        "id": id,
        "params": params,
        "defaults": defaults,
        "body": body,
        "rest": None,
        "generator": False,
        "expression": False,
    })
def __temp__18(left=None, right=None, body=None):
    return jsdict({
        "type": Syntax.ForInStatement,
        "left": left,
        "right": right,
        "body": body,
        "each": False,
    })
def __temp__17(init=None, test=None, update=None, body=None):
    return jsdict({
        "type": Syntax.ForStatement,
        "init": init,
        "test": test,
        "update": update,
        "body": body,
    })
def __temp__16(expression=None):
    return jsdict({
        "type": Syntax.ExpressionStatement,
        "expression": expression,
    })
def __temp__15():
    return jsdict({
        "type": Syntax.EmptyStatement,
    })
def __temp__14(body=None, test=None):
    return jsdict({
        "type": Syntax.DoWhileStatement,
        "body": body,
        "test": test,
    })
def __temp__13():
    return jsdict({
        "type": Syntax.DebuggerStatement,
    })
def __temp__12(label=None):
    return jsdict({
        "type": Syntax.ContinueStatement,
        "label": label,
    })
def __temp__11(test=None, consequent=None, alternate=None):
    return jsdict({
        "type": Syntax.ConditionalExpression,
        "test": test,
        "consequent": consequent,
        "alternate": alternate,
    })
def __temp__10(param=None, body=None):
    return jsdict({
        "type": Syntax.CatchClause,
        "param": param,
        "body": body,
    })
def __temp__9(callee=None, args=None):
    return jsdict({
        "type": Syntax.CallExpression,
        "callee": callee,
        "arguments": args,
    })
def __temp__8(label=None):
    return jsdict({
        "type": Syntax.BreakStatement,
        "label": label,
    })
def __temp__7(body=None):
    return jsdict({
        "type": Syntax.BlockStatement,
        "body": body,
    })
# '&&' and '||' become LogicalExpression; every other operator is Binary.
def __temp__6(operator=None, left=None, right=None):
    type = (Syntax.LogicalExpression if (operator == "||") or (operator == "&&") else Syntax.BinaryExpression)
    return jsdict({
        "type": type,
        "operator": operator,
        "left": left,
        "right": right,
    })
def __temp__5(operator=None, left=None, right=None):
    return jsdict({
        "type": Syntax.AssignmentExpression,
        "operator": operator,
        "left": left,
        "right": right,
    })
def __temp__4(elements=None):
    return jsdict({
        "type": Syntax.ArrayExpression,
        "elements": elements,
    })
# postProcess: attach the source name to a node's loc when configured.
def __temp__3(node=None):
    if extra.source:
        node.loc.source = extra.source
    return node
# markEndIf: pop unused markers when the node was already annotated,
# otherwise delegate to markEnd.
def __temp__2(node=None):
    if node.range or node.loc:
        if extra.loc:
            state.markerStack.pop()
            state.markerStack.pop()
        if extra.range:
            state.markerStack.pop()
    else:
        SyntaxTreeDelegate.markEnd(node)
    return node
# markEnd: pop the markers pushed by __temp__0 and record range/loc.
def __temp__1(node=None):
    if extra.range:
        node.range = [state.markerStack.pop(), index]
    if extra.loc:
        node.loc = jsdict({
            "start": jsdict({
                "line": state.markerStack.pop(),
                "column": state.markerStack.pop(),
            }),
            "end": jsdict({
                "line": lineNumber,
                "column": index - lineStart,
            }),
        })
    SyntaxTreeDelegate.postProcess(node)
    return node
# markStart: push the current position so markEnd can compute range/loc.
def __temp__0():
    if extra.loc:
        state.markerStack.append(index - lineStart)
        state.markerStack.append(lineNumber)
    if extra.range:
        state.markerStack.append(index)
# --- Module-global tokenizer/parser state.  Declared up front (None) and
# --- (re)initialised by the parser entry points further down the file.
Token = None
TokenName = None
FnExprTokens = None
Syntax = None
PropertyKind = None
Messages = None
Regex = None
SyntaxTreeDelegate = None
source = None
strict = None
index = None
lineNumber = None
lineStart = None
length = None
delegate = None
lookahead = None
state = None
extra = None
# Token type tags used by the lexer.
Token = jsdict({
"BooleanLiteral": 1,
"EOF": 2,
"Identifier": 3,
"Keyword": 4,
"NullLiteral": 5,
"NumericLiteral": 6,
"Punctuator": 7,
"StringLiteral": 8,
"RegularExpression": 9,
})
# Human-readable names for the token type tags above.
TokenName = jsdict({
})
TokenName[Token.BooleanLiteral] = "Boolean"
TokenName[Token.EOF] = "<end>"
TokenName[Token.Identifier] = "Identifier"
TokenName[Token.Keyword] = "Keyword"
TokenName[Token.NullLiteral] = "Null"
TokenName[Token.NumericLiteral] = "Numeric"
TokenName[Token.Punctuator] = "Punctuator"
TokenName[Token.StringLiteral] = "String"
TokenName[Token.RegularExpression] = "RegularExpression"
# Tokens after which a '/' starts a regex in a 'function' expression context
# (used to disambiguate division from regular-expression literals).
FnExprTokens = ["(", "{", "[", "in", "typeof", "instanceof", "new", "return", "case", "delete", "throw", "void", "=", "+=", "-=", "*=", "/=", "%=", "<<=", ">>=", ">>>=", "&=", "|=", "^=", ",", "+", "-", "*", "/", "%", "++", "--", "<<", ">>", ">>>", "&", "|", "^", "!", "~", "&&", "||", "?", ":", "===", "==", ">=", "<=", "<", ">", "!=", "!=="]
# AST node type names (mirrors the Mozilla/esprima Syntax enumeration).
Syntax = jsdict({
"AssignmentExpression": "AssignmentExpression",
"ArrayExpression": "ArrayExpression",
"BlockStatement": "BlockStatement",
"BinaryExpression": "BinaryExpression",
"BreakStatement": "BreakStatement",
"CallExpression": "CallExpression",
"CatchClause": "CatchClause",
"ConditionalExpression": "ConditionalExpression",
"ContinueStatement": "ContinueStatement",
"DoWhileStatement": "DoWhileStatement",
"DebuggerStatement": "DebuggerStatement",
"EmptyStatement": "EmptyStatement",
"ExpressionStatement": "ExpressionStatement",
"ForStatement": "ForStatement",
"ForInStatement": "ForInStatement",
"FunctionDeclaration": "FunctionDeclaration",
"FunctionExpression": "FunctionExpression",
"Identifier": "Identifier",
"IfStatement": "IfStatement",
"Literal": "Literal",
"LabeledStatement": "LabeledStatement",
"LogicalExpression": "LogicalExpression",
"MemberExpression": "MemberExpression",
"NewExpression": "NewExpression",
"ObjectExpression": "ObjectExpression",
"Program": "Program",
"Property": "Property",
"ReturnStatement": "ReturnStatement",
"SequenceExpression": "SequenceExpression",
"SwitchStatement": "SwitchStatement",
"SwitchCase": "SwitchCase",
"ThisExpression": "ThisExpression",
"ThrowStatement": "ThrowStatement",
"TryStatement": "TryStatement",
"UnaryExpression": "UnaryExpression",
"UpdateExpression": "UpdateExpression",
"VariableDeclaration": "VariableDeclaration",
"VariableDeclarator": "VariableDeclarator",
"WhileStatement": "WhileStatement",
"WithStatement": "WithStatement",
})
# Bit flags classifying object-literal properties (data vs get/set accessor).
PropertyKind = jsdict({
"Data": 1,
"Get": 2,
"Set": 4,
})
# Error message templates; %0/%1 are substituted at raise time.
Messages = jsdict({
"UnexpectedToken": "Unexpected token %0",
"UnexpectedNumber": "Unexpected number",
"UnexpectedString": "Unexpected string",
"UnexpectedIdentifier": "Unexpected identifier",
"UnexpectedReserved": "Unexpected reserved word",
"UnexpectedEOS": "Unexpected end of input",
"NewlineAfterThrow": "Illegal newline after throw",
"InvalidRegExp": "Invalid regular expression",
"UnterminatedRegExp": "Invalid regular expression: missing /",
"InvalidLHSInAssignment": "Invalid left-hand side in assignment",
"InvalidLHSInForIn": "Invalid left-hand side in for-in",
"MultipleDefaultsInSwitch": "More than one default clause in switch statement",
"NoCatchOrFinally": "Missing catch or finally after try",
"UnknownLabel": "Undefined label '%0'",
"Redeclaration": "%0 '%1' has already been declared",
"IllegalContinue": "Illegal continue statement",
"IllegalBreak": "Illegal break statement",
"IllegalReturn": "Illegal return statement",
"StrictModeWith": "Strict mode code may not include a with statement",
"StrictCatchVariable": "Catch variable may not be eval or arguments in strict mode",
"StrictVarName": "Variable name may not be eval or arguments in strict mode",
"StrictParamName": "Parameter name eval or arguments is not allowed in strict mode",
"StrictParamDupe": "Strict mode function may not have duplicate parameter names",
"StrictFunctionName": "Function name may not be eval or arguments in strict mode",
"StrictOctalLiteral": "Octal literals are not allowed in strict mode.",
"StrictDelete": "Delete of an unqualified identifier in strict mode.",
"StrictDuplicateProperty": "Duplicate data property in object literal not allowed in strict mode",
"AccessorDataProperty": "Object literal may not have data and accessor property with the same name",
"AccessorGetSet": "Object literal may not have multiple get/set accessors with the same name",
"StrictLHSAssignment": "Assignment to eval or arguments is not allowed in strict mode",
"StrictLHSPostfix": "Postfix increment/decrement may not have eval or arguments operand in strict mode",
"StrictLHSPrefix": "Prefix increment/decrement may not have eval or arguments operand in strict mode",
"StrictReservedWord": "Use of future reserved word in strict mode",
})
Regex = jsdict({
"NonAsciiIdentifierStart": RegExp(u"[\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\
u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2
a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc]"),
"NonAsciiIdentifierPart": RegExp(u"[\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u05d0-\u05ea\u05f0-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u0800-\u082d\u0840-\u085b\u08a0\u08a2-\u08ac\u08e4-\u08fe\u0900-\u0963\u0966-\u096f\u0971-\u0977\u0979-\u097f\u0981-\u0983\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7\u09c8\u09cb-\u09ce\u09d7\u09dc\u09dd\u09df-\u09e3\u09e6-\u09f1\u0a01-\u0a03\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5c\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c58\u0c59\u0c60-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1\u0cf2\u0d02\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d57\u0d60-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0
-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772\u1773\u1780-\u17d3\u17d7\u17dc\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1877\u1880-\u18aa\u18b0-\u18f5\u1900-\u191c\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19d9\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1cd0-\u1cd2\u1cd4-\u1cf6\u1d00-\u1de6\u1dfc-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u200c\u200d\u203f\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u2e2f\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041
-\u3096\u3099\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua697\ua69f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua827\ua840-\ua873\ua880-\ua8c4\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua900-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a\uaa7b\uaa80-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabea\uabec\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\ufe70-\ufe74\ufe76-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc]"),
})
def assert__py__(condition=None, message=None):
    """Runtime stand-in for the JS assert(): raise when *condition* is falsy.

    Raises RuntimeError with "ASSERT: " prepended to *message*.
    """
    if condition:
        return
    raise RuntimeError("ASSERT: " + message)
def isDecimalDigit(ch=None):
    """True when code point *ch* is an ASCII digit "0"-"9" (48-57)."""
    return 48 <= ch <= 57
def isHexDigit(ch=None):
    """True when character *ch* is a hexadecimal digit (either case)."""
    return ch in "0123456789abcdefABCDEF"
def isOctalDigit(ch=None):
    """True when character *ch* is an octal digit "0"-"7"."""
    return ch in "01234567"
def isWhiteSpace(ch=None):
    """True when code point *ch* is ECMAScript whitespace: space, tab,
    vertical tab, form feed, NBSP, or one of the Unicode space separators
    (plus U+FEFF BOM).
    """
    if ch in (32, 9, 11, 12, 160):
        return True
    # Compare code points directly instead of `string.find(unichr(ch)) > 0`:
    # the original's `> 0` rejected U+1680 (OGHAM SPACE MARK), which sat at
    # index 0 of the lookup string but is whitespace per the ECMAScript spec.
    # This also avoids the unichr() round-trip entirely.
    return (ch >= 5760) and ch in (
        0x1680, 0x180E, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005,
        0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x202F, 0x205F, 0x3000,
        0xFEFF,
    )
def isLineTerminator(ch=None):
    """True when code point *ch* is an ECMAScript line terminator:
    LF, CR, U+2028 (LS) or U+2029 (PS).
    """
    return ch in (10, 13, 8232, 8233)
def isIdentifierStart(ch=None):
    """True when code point *ch* may start an identifier: "$", "_",
    ASCII letters, a backslash (escape-sequence opener), or a non-ASCII
    character matched by Regex.NonAsciiIdentifierStart.
    """
    if ch == 36 or ch == 95 or ch == 92:
        return True
    if (65 <= ch <= 90) or (97 <= ch <= 122):
        return True
    return (ch >= 128) and Regex.NonAsciiIdentifierStart.test(unichr(ch))
def isIdentifierPart(ch=None):
    """True when code point *ch* may continue an identifier: "$", "_",
    ASCII letters or digits, a backslash (escape-sequence opener), or a
    non-ASCII character matched by Regex.NonAsciiIdentifierPart.
    """
    if ch == 36 or ch == 95 or ch == 92:
        return True
    if (65 <= ch <= 90) or (97 <= ch <= 122) or (48 <= ch <= 57):
        return True
    return (ch >= 128) and Regex.NonAsciiIdentifierPart.test(unichr(ch))
def isFutureReservedWord(id=None):
    """True for ECMAScript future reserved words (class, enum, export,
    extends, import, super).
    """
    return id in ("class", "enum", "export", "extends", "import", "super")
def isStrictModeReservedWord(id=None):
    """True for identifiers that are reserved words only in strict mode."""
    return id in (
        "implements", "interface", "package", "private", "protected",
        "public", "static", "yield", "let",
    )
def isRestrictedWord(id=None):
    """True for the strict-mode restricted names "eval" and "arguments"."""
    return id in ("eval", "arguments")
def isKeyword(id=None):
    """True when *id* is an ECMAScript keyword; in strict mode the
    strict-mode reserved words count as keywords too.
    """
    if strict and isStrictModeReservedWord(id):
        return True
    # Keywords grouped by length, mirroring the original length-switch.
    keywords_by_length = {
        2: ("if", "in", "do"),
        3: ("var", "for", "new", "try", "let"),
        4: ("this", "else", "case", "void", "with", "enum"),
        5: ("while", "break", "catch", "throw", "const", "yield", "class", "super"),
        6: ("return", "typeof", "delete", "switch", "export", "import"),
        7: ("default", "finally", "extends"),
        8: ("function", "continue", "debugger"),
        10: ("instanceof",),
    }
    return id in keywords_by_length.get(len(id), ())
def addComment(type=None, value=None, start=None, end=None, loc=None):
    """Record a scanned comment on extra.comments.

    `type` is "Line" or "Block"; `value` is the comment text without its
    delimiters; `start`/`end` are absolute source offsets; `loc` is the
    line/column location pair built by the caller.
    """
    comment = None
    # Generated guard translating JS `typeof start === 'number'`; `typeof` is
    # presumably a runtime shim defined elsewhere in this file.
    assert__py__(('undefined' if not 'start' in locals() else typeof(start)) == "number", "Comment must have valid position")
    # The scanner can revisit a position after rewinding; keep only the first
    # comment recorded at any given start offset.
    if state.lastCommentStart >= start:
        return
    state.lastCommentStart = start
    comment = jsdict({
        "type": type,
        "value": value,
    })
    if extra.range:
        comment.range = [start, end]
    if extra.loc:
        comment.loc = loc
    extra.comments.append(comment)
def skipSingleLineComment():
    """Advance past the body of a "//" comment; the two slashes have already
    been consumed by the caller (skipComment).

    Updates the scanner globals (index/lineNumber/lineStart) and, when
    comment collection is enabled, records the comment via addComment().
    """
    global index, lineNumber, lineStart
    start = None
    loc = None
    ch = None
    comment = None
    # `index - 2` points back at the leading "//".
    start = index - 2
    loc = jsdict({
        "start": jsdict({
            "line": lineNumber,
            "column": (index - lineStart) - 2,
        }),
    })
    while index < length:
        ch = (ord(source[index]) if index < len(source) else None)
        index += 1
        # Bare `index` statements in this file are harmless artifacts of the
        # JS `index++` expression-statement translation.
        index
        if isLineTerminator(ch):
            if extra.comments:
                comment = source[(start + 2):(index - 1)]
                loc.end = jsdict({
                    "line": lineNumber,
                    "column": (index - lineStart) - 1,
                })
                addComment("Line", comment, start, index - 1, loc)
            # CRLF counts as a single line terminator.
            if (ch == 13) and ((ord(source[index]) if index < len(source) else None) == 10):
                index += 1
                index
            lineNumber += 1
            lineNumber
            lineStart = index
            return
    # Comment ran to end of input without a line terminator.
    if extra.comments:
        comment = source[(start + 2):index]
        loc.end = jsdict({
            "line": lineNumber,
            "column": index - lineStart,
        })
        addComment("Line", comment, start, index, loc)
def skipMultiLineComment():
    """Advance past the body of a "/* ... */" comment; the opening delimiter
    has already been consumed.

    Raises via throwError on an unterminated comment; records the comment
    via addComment() when collection is enabled.
    """
    global index, lineNumber, lineStart
    start = None
    loc = None
    ch = None
    comment = None
    if extra.comments:
        start = index - 2
        loc = jsdict({
            "start": jsdict({
                "line": lineNumber,
                "column": (index - lineStart) - 2,
            }),
        })
    while index < length:
        ch = (ord(source[index]) if index < len(source) else None)
        if isLineTerminator(ch):
            # CRLF counts as a single line terminator.
            if (ch == 13) and ((ord(source[index + 1]) if (index + 1) < len(source) else None) == 10):
                index += 1
                index
            lineNumber += 1
            lineNumber
            index += 1
            index
            lineStart = index
            if index >= length:
                throwError(jsdict({
                }), Messages.UnexpectedToken, "ILLEGAL")
        elif ch == 42:
            # 42 == "*": "*/" (47 == "/") closes the comment.
            if (ord(source[index + 1]) if (index + 1) < len(source) else None) == 47:
                index += 1
                index
                index += 1
                index
                if extra.comments:
                    comment = source[(start + 2):(index - 2)]
                    loc.end = jsdict({
                        "line": lineNumber,
                        "column": index - lineStart,
                    })
                    addComment("Block", comment, start, index, loc)
                return
            index += 1
            index
        else:
            index += 1
            index
    # Ran off the end of the source without seeing "*/".
    throwError(jsdict({
    }), Messages.UnexpectedToken, "ILLEGAL")
def skipComment():
    """Skip whitespace, line terminators and comments, leaving `index` at the
    next significant character.  Delegates to skipSingleLineComment /
    skipMultiLineComment once a "//" or "/*" opener is consumed.
    """
    global index, lineNumber, lineStart
    ch = None
    while index < length:
        ch = (ord(source[index]) if index < len(source) else None)
        if isWhiteSpace(ch):
            index += 1
            index
        elif isLineTerminator(ch):
            index += 1
            index
            # CRLF counts as a single line terminator.
            if (ch == 13) and ((ord(source[index]) if index < len(source) else None) == 10):
                index += 1
                index
            lineNumber += 1
            lineNumber
            lineStart = index
        elif ch == 47:
            # 47 == "/": peek one ahead for "//" (47) or "/*" (42).
            ch = (ord(source[index + 1]) if (index + 1) < len(source) else None)
            if ch == 47:
                index += 1
                index
                index += 1
                index
                skipSingleLineComment()
            elif ch == 42:
                index += 1
                index
                index += 1
                index
                skipMultiLineComment()
            else:
                break
        else:
            break
def scanHexEscape(prefix=None):
    """Scan the digits of a hex escape: four digits after "u", two after "x".

    Returns the decoded character, or "" on a non-hex digit.  Note the
    failure path may already have consumed some digits — callers that need
    to back out save and restore `index` themselves.
    """
    global len__py__, index
    i = None
    len__py__ = None
    ch = None
    code = 0
    len__py__ = (4 if prefix == "u" else 2)
    i = 0
    while 1:
        if not (i < len__py__):
            break
        if (index < length) and isHexDigit(source[index]):
            index += 1
            ch = source[index - 1]
            # Lower-casing lets one digit string cover both cases.
            code = (code * 16) + "0123456789abcdef".find(ch.lower())
        else:
            return ""
        i += 1
    return unichr(code)
def getEscapedIdentifier():
    """Scan an identifier that may contain \\uXXXX escape sequences.

    Called with `index` at the identifier's first character (possibly a
    backslash); returns the decoded identifier text and leaves `index` just
    past it.  Raises via throwError on a malformed escape or when a decoded
    character is not valid in an identifier.
    """
    global index
    ch = None
    id = None
    # Translation of JS `ch = source.charCodeAt(index++)`.  The generated
    # original incremented `index` twice here, skipping the first character
    # and misaligning every escaped identifier; a single increment matches
    # the unescaped path in getIdentifier() and upstream esprima.
    index += 1
    ch = (ord(source[index - 1]) if index - 1 < len(source) else None)
    id = unichr(ch)
    if ch == 92:
        # 92 == "\\": only \\uXXXX escapes are legal here (117 == "u").
        if (ord(source[index]) if index < len(source) else None) != 117:
            throwError(jsdict({
            }), Messages.UnexpectedToken, "ILLEGAL")
        index += 1
        index
        ch = scanHexEscape("u")
        if ((not ch) or (ch == "\\")) or (not isIdentifierStart((ord(ch[0]) if 0 < len(ch) else None))):
            throwError(jsdict({
            }), Messages.UnexpectedToken, "ILLEGAL")
        id = ch
    while index < length:
        ch = (ord(source[index]) if index < len(source) else None)
        if not isIdentifierPart(ch):
            break
        index += 1
        index
        id += unichr(ch)
        if ch == 92:
            # Drop the backslash just appended, then decode the escape.
            id = id[0:(0 + (len(id) - 1))]
            if (ord(source[index]) if index < len(source) else None) != 117:
                throwError(jsdict({
                }), Messages.UnexpectedToken, "ILLEGAL")
            index += 1
            index
            ch = scanHexEscape("u")
            if ((not ch) or (ch == "\\")) or (not isIdentifierPart((ord(ch[0]) if 0 < len(ch) else None))):
                throwError(jsdict({
                }), Messages.UnexpectedToken, "ILLEGAL")
            id += ch
    return id
def getIdentifier():
    """Fast-path identifier scan: consume identifier-part characters until a
    non-member character is hit.  Rewinds and retries via
    getEscapedIdentifier() as soon as a backslash (92) is seen.
    """
    global index
    start = None
    ch = None
    # Translation of JS `start = index++`: consume the first character.
    index += 1
    start = index - 1
    while index < length:
        ch = (ord(source[index]) if index < len(source) else None)
        if ch == 92:
            # Escape sequence present: rescan the whole identifier on the
            # slow path from its start.
            index = start
            return getEscapedIdentifier()
        if isIdentifierPart(ch):
            index += 1
            index
        else:
            break
    return source[start:index]
def scanIdentifier():
    """Scan an identifier-like token and classify it as Identifier, Keyword,
    NullLiteral or BooleanLiteral.
    """
    start = None
    id = None
    type = None
    start = index
    # 92 == "\\": identifiers starting with an escape take the slow path.
    id = (getEscapedIdentifier() if (ord(source[index]) if index < len(source) else None) == 92 else getIdentifier())
    # One-character identifiers can never be reserved words, so the keyword
    # checks are skipped for them.
    if len(id) == 1:
        type = Token.Identifier
    elif isKeyword(id):
        type = Token.Keyword
    elif id == "null":
        type = Token.NullLiteral
    elif (id == "true") or (id == "false"):
        type = Token.BooleanLiteral
    else:
        type = Token.Identifier
    return jsdict({
        "type": type,
        "value": id,
        "lineNumber": lineNumber,
        "lineStart": lineStart,
        "range": [start, index],
    })
def scanPunctuator():
    """Scan a punctuator token (braces, operators, ...), longest match first:
    4-character (">>>="), then 3-, 2- and 1-character punctuators.
    Raises via throwError when no punctuator matches.
    """
    global index
    start = index
    code = (ord(source[index]) if index < len(source) else None)
    code2 = None
    ch1 = source[index]
    ch2 = None
    ch3 = None
    ch4 = None
    # Generated `while 1/break` ladder translating the original JS switch.
    while 1:
        # Single-character punctuators that never start a longer operator:
        # ~ ? : ] [ } { , ; ) ( .
        if (code == 126) or ((code == 63) or ((code == 58) or ((code == 93) or ((code == 91) or ((code == 125) or ((code == 123) or ((code == 44) or ((code == 59) or ((code == 41) or ((code == 40) or (code == 46))))))))))):
            index += 1
            index
            if extra.tokenize:
                # Remember where "(" (40) and "{" (123) tokens sit so that
                # advanceSlash() can later tell division from a regex.
                if code == 40:
                    extra.openParenToken = len(extra.tokens)
                elif code == 123:
                    extra.openCurlyToken = len(extra.tokens)
            return jsdict({
                "type": Token.Punctuator,
                "value": unichr(code),
                "lineNumber": lineNumber,
                "lineStart": lineStart,
                "range": [start, index],
            })
        else:
            code2 = (ord(source[index + 1]) if (index + 1) < len(source) else None)
            # 61 == "=": compound assignment (e.g. "+=") or (in)equality.
            if code2 == 61:
                while 1:
                    if (code == 124) or ((code == 94) or ((code == 62) or ((code == 60) or ((code == 47) or ((code == 45) or ((code == 43) or ((code == 42) or ((code == 38) or (code == 37))))))))):
                        index += 2
                        return jsdict({
                            "type": Token.Punctuator,
                            "value": unichr(code) + unichr(code2),
                            "lineNumber": lineNumber,
                            "lineStart": lineStart,
                            "range": [start, index],
                        })
                    elif (code == 61) or (code == 33):
                        # "==" / "!=" optionally followed by a third "=".
                        index += 2
                        if (ord(source[index]) if index < len(source) else None) == 61:
                            index += 1
                            index
                        return jsdict({
                            "type": Token.Punctuator,
                            "value": source[start:index],
                            "lineNumber": lineNumber,
                            "lineStart": lineStart,
                            "range": [start, index],
                        })
                    else:
                        break
                    break
            break
        break
    # Peek up to three characters ahead for the multi-character operators.
    ch2 = source[index + 1] if index + 1 < len(source) else None
    ch3 = source[index + 2] if index + 2 < len(source) else None
    ch4 = source[index + 3] if index + 3 < len(source) else None
    if ((ch1 == ">") and (ch2 == ">")) and (ch3 == ">"):
        if ch4 == "=":
            index += 4
            return jsdict({
                "type": Token.Punctuator,
                "value": ">>>=",
                "lineNumber": lineNumber,
                "lineStart": lineStart,
                "range": [start, index],
            })
    # The same ">>>" test is repeated on purpose (as in upstream esprima):
    # the first copy only handles ">>>=".
    if ((ch1 == ">") and (ch2 == ">")) and (ch3 == ">"):
        index += 3
        return jsdict({
            "type": Token.Punctuator,
            "value": ">>>",
            "lineNumber": lineNumber,
            "lineStart": lineStart,
            "range": [start, index],
        })
    if ((ch1 == "<") and (ch2 == "<")) and (ch3 == "="):
        index += 3
        return jsdict({
            "type": Token.Punctuator,
            "value": "<<=",
            "lineNumber": lineNumber,
            "lineStart": lineStart,
            "range": [start, index],
        })
    if ((ch1 == ">") and (ch2 == ">")) and (ch3 == "="):
        index += 3
        return jsdict({
            "type": Token.Punctuator,
            "value": ">>=",
            "lineNumber": lineNumber,
            "lineStart": lineStart,
            "range": [start, index],
        })
    # Doubled operators: ++ -- << >> && ||.
    if (ch1 == ch2) and ("+-<>&|".find(ch1) >= 0):
        index += 2
        return jsdict({
            "type": Token.Punctuator,
            "value": ch1 + ch2,
            "lineNumber": lineNumber,
            "lineStart": lineStart,
            "range": [start, index],
        })
    # Remaining single-character operators.
    if "<>=!+-*%&|^/".find(ch1) >= 0:
        index += 1
        index
        return jsdict({
            "type": Token.Punctuator,
            "value": ch1,
            "lineNumber": lineNumber,
            "lineStart": lineStart,
            "range": [start, index],
        })
    throwError(jsdict({
    }), Messages.UnexpectedToken, "ILLEGAL")
def scanHexLiteral(start=None):
    """Scan the digits of a hex literal after "0x"/"0X" has been consumed;
    `start` is the offset of the leading "0".
    """
    global index
    number = ""
    while index < length:
        if not isHexDigit(source[index]):
            break
        index += 1
        number += source[index - 1]
    # "0x" with no digits at all is illegal.
    if len(number) == 0:
        throwError(jsdict({
        }), Messages.UnexpectedToken, "ILLEGAL")
    # A literal may not run straight into an identifier (e.g. "0x1z").
    if isIdentifierStart((ord(source[index]) if index < len(source) else None)):
        throwError(jsdict({
        }), Messages.UnexpectedToken, "ILLEGAL")
    return jsdict({
        "type": Token.NumericLiteral,
        "value": parseInt("0x" + number, 16),
        "lineNumber": lineNumber,
        "lineStart": lineStart,
        "range": [start, index],
    })
def scanOctalLiteral(start=None):
    """Scan a legacy octal literal ("0" followed by octal digits); `index`
    sits on the first digit after the leading "0" when called.  The token's
    "octal" flag lets strict-mode checks reject it later.
    """
    global index
    index += 1
    number = "0" + source[index - 1]
    while index < length:
        if not isOctalDigit(source[index]):
            break
        index += 1
        number += source[index - 1]
    # Neither an identifier character nor a decimal digit may follow
    # (so "08" and "07a" are both illegal).
    if isIdentifierStart((ord(source[index]) if index < len(source) else None)) or isDecimalDigit((ord(source[index]) if index < len(source) else None)):
        throwError(jsdict({
        }), Messages.UnexpectedToken, "ILLEGAL")
    return jsdict({
        "type": Token.NumericLiteral,
        "value": parseInt(number, 8),
        "octal": True,
        "lineNumber": lineNumber,
        "lineStart": lineStart,
        "range": [start, index],
    })
def scanNumericLiteral():
    """Scan a numeric literal starting at `index`: decimal (with optional
    fraction and exponent), hex via scanHexLiteral, or legacy octal via
    scanOctalLiteral.

    Returns a NumericLiteral token dict; raises via throwError on malformed
    input such as "08", "0x" without digits, a dangling exponent, or a digit
    run followed immediately by an identifier character.
    """
    global index
    number = None
    start = None
    ch = None
    ch = source[index]
    assert__py__(isDecimalDigit((ord(ch[0]) if 0 < len(ch) else None)) or (ch == "."), "Numeric literal must start with a decimal digit or a decimal point")
    start = index
    number = ""
    if ch != ".":
        index += 1
        number = source[index - 1]
        ch = source[index] if index < len(source) else None
        if number == "0":
            # "0x"/"0X" introduces hex; a following octal digit legacy octal.
            if (ch == "x") or (ch == "X"):
                index += 1
                index
                return scanHexLiteral(start)
            if isOctalDigit(ch):
                return scanOctalLiteral(start)
            # "0" directly followed by 8 or 9 is illegal.
            if ch and isDecimalDigit((ord(ch[0]) if 0 < len(ch) else None)):
                throwError(jsdict({
                }), Messages.UnexpectedToken, "ILLEGAL")
        while isDecimalDigit((ord(source[index]) if index < len(source) else None)):
            index += 1
            number += source[index - 1]
        ch = source[index] if index < len(source) else None
    if ch == ".":
        index += 1
        number += source[index - 1]
        while isDecimalDigit((ord(source[index]) if index < len(source) else None)):
            index += 1
            number += source[index - 1]
        # Guarded read: the original indexed source[index] unconditionally,
        # raising IndexError for a literal like "1." at end of input.
        ch = source[index] if index < len(source) else None
    if (ch == "e") or (ch == "E"):
        index += 1
        number += source[index - 1]
        # Guarded read: same end-of-input fix — "1e" at EOF now reports an
        # illegal token below instead of crashing.
        ch = source[index] if index < len(source) else None
        if (ch == "+") or (ch == "-"):
            index += 1
            number += source[index - 1]
        if isDecimalDigit((ord(source[index]) if index < len(source) else None)):
            while isDecimalDigit((ord(source[index]) if index < len(source) else None)):
                index += 1
                number += source[index - 1]
        else:
            # An exponent marker with no digits is illegal.
            throwError(jsdict({
            }), Messages.UnexpectedToken, "ILLEGAL")
    # The literal may not run straight into an identifier (e.g. "3in").
    if isIdentifierStart((ord(source[index]) if index < len(source) else None)):
        throwError(jsdict({
        }), Messages.UnexpectedToken, "ILLEGAL")
    return jsdict({
        "type": Token.NumericLiteral,
        "value": parseFloat(number),
        "lineNumber": lineNumber,
        "lineStart": lineStart,
        "range": [start, index],
    })
def scanStringLiteral():
    """Scan a single- or double-quoted string literal starting at `index`,
    decoding escape sequences as it goes.

    Returns a StringLiteral token dict whose "octal" flag records whether a
    legacy octal escape was seen (needed for strict-mode checks).  Raises
    via throwError when the string is unterminated.
    """
    global index, lineNumber
    str = ""
    quote = None
    start = None
    ch = None
    code = None
    unescaped = None
    restore = None
    octal = False
    quote = source[index]
    assert__py__((quote == "'") or (quote == "\""), "String literal must starts with a quote")
    start = index
    index += 1
    index
    while index < length:
        index += 1
        ch = source[index - 1]
        if ch == quote:
            # Closing quote found; empty `quote` marks success for the
            # unterminated-string check after the loop.
            quote = ""
            break
        elif ch == "\\":
            index += 1
            ch = source[index - 1]
            if (not ch) or (not isLineTerminator((ord(ch[0]) if 0 < len(ch) else None))):
                # Generated `while 1/break` ladder for the escape switch.
                while 1:
                    if ch == "n":
                        str += u"\x0a"
                        break
                    elif ch == "r":
                        str += u"\x0d"
                        break
                    elif ch == "t":
                        str += u"\x09"
                        break
                    elif (ch == "x") or (ch == "u"):
                        # Hex/unicode escape; on failure rewind and keep the
                        # raw character instead.
                        restore = index
                        unescaped = scanHexEscape(ch)
                        if unescaped:
                            str += unescaped
                        else:
                            index = restore
                            str += ch
                        break
                    elif ch == "b":
                        str += u"\x08"
                        break
                    elif ch == "f":
                        str += u"\x0c"
                        break
                    elif ch == "v":
                        str += u"\x0b"
                        break
                    else:
                        if isOctalDigit(ch):
                            # Legacy octal escape, up to three digits; a lone
                            # "\0" does not set the octal flag.
                            code = "01234567".find(ch)
                            if code != 0:
                                octal = True
                            if (index < length) and isOctalDigit(source[index]):
                                octal = True
                                index += 1
                                code = (code * 8) + "01234567".find(source[index - 1])
                                if (("0123".find(ch) >= 0) and (index < length)) and isOctalDigit(source[index]):
                                    index += 1
                                    code = (code * 8) + "01234567".find(source[index - 1])
                            str += unichr(code)
                        else:
                            str += ch
                        break
                    break
            else:
                # Line continuation: a backslash before a line terminator is
                # swallowed and contributes nothing to the value.
                lineNumber += 1
                lineNumber
                # NOTE(review): source[index] is read unguarded here; a
                # backslash+CR at the very end of input would raise
                # IndexError rather than a parse error — confirm intended.
                if (ch == u"\x0d") and (source[index] == u"\x0a"):
                    index += 1
                    index
        elif isLineTerminator((ord(ch[0]) if 0 < len(ch) else None)):
            # Raw line terminator inside the string: unterminated.
            break
        else:
            str += ch
    if quote != "":
        throwError(jsdict({
        }), Messages.UnexpectedToken, "ILLEGAL")
    return jsdict({
        "type": Token.StringLiteral,
        "value": str,
        "octal": octal,
        "lineNumber": lineNumber,
        "lineStart": lineStart,
        "range": [start, index],
    })
def scanRegExp():
    """Scan a regular expression literal (pattern plus flags) starting at the
    leading "/".  Used only when the parser knows the "/" cannot be division.

    Invalidates `lookahead` (regex scanning bypasses the normal token
    stream) and re-primes it via peek().  Raises via throwError when the
    literal is unterminated or the pattern fails to compile.
    """
    global lookahead, index
    str = None
    ch = None
    start = None
    pattern = None
    flags = None
    value = None
    classMarker = False
    restore = None
    terminated = False
    lookahead = None
    skipComment()
    start = index
    ch = source[index]
    assert__py__(ch == "/", "Regular expression literal must start with a slash")
    index += 1
    str = source[index - 1]
    while index < length:
        index += 1
        ch = source[index - 1]
        str += ch
        if classMarker:
            # Inside a character class "/" does not terminate the literal.
            if ch == "]":
                classMarker = False
        else:
            if ch == "\\":
                # Escaped character: consume it blindly (it may be "/").
                index += 1
                ch = source[index - 1]
                if isLineTerminator((ord(ch[0]) if 0 < len(ch) else None)):
                    throwError(jsdict({
                    }), Messages.UnterminatedRegExp)
                str += ch
            elif ch == "/":
                terminated = True
                break
            elif ch == "[":
                classMarker = True
            elif isLineTerminator((ord(ch[0]) if 0 < len(ch) else None)):
                throwError(jsdict({
                }), Messages.UnterminatedRegExp)
    if not terminated:
        throwError(jsdict({
        }), Messages.UnterminatedRegExp)
    # Strip the enclosing slashes to get the bare pattern.
    pattern = str[1:(1 + (len(str) - 2))]
    flags = ""
    while index < length:
        ch = source[index]
        if not isIdentifierPart((ord(ch[0]) if 0 < len(ch) else None)):
            break
        index += 1
        index
        if (ch == "\\") and (index < length):
            # Flags may be written with \uXXXX escapes; decode them into
            # `flags` while keeping the literal text in `str`.
            ch = source[index]
            if ch == "u":
                index += 1
                index
                restore = index
                ch = scanHexEscape("u")
                if ch:
                    flags += ch
                    str += "\\u"
                    while 1:
                        if not (restore < index):
                            break
                        str += source[restore]
                        restore += 1
                else:
                    index = restore
                    flags += "u"
                    str += "\\u"
            else:
                str += "\\"
        else:
            flags += ch
            str += ch
    try:
        value = RegExp(pattern, flags)
    except Exception as e:
        throwError(jsdict({
        }), Messages.InvalidRegExp)
    # Regex scanning bypassed the token stream; recompute the lookahead.
    peek()
    if extra.tokenize:
        return jsdict({
            "type": Token.RegularExpression,
            "value": value,
            "lineNumber": lineNumber,
            "lineStart": lineStart,
            "range": [start, index],
        })
    return jsdict({
        "literal": str,
        "value": value,
        "range": [start, index],
    })
def isIdentifierName(token=None):
    """True when `token` can serve as a property name after "." — any
    identifier, keyword, or boolean/null literal qualifies there.
    """
    return (((token.type == Token.Identifier) or (token.type == Token.Keyword)) or (token.type == Token.BooleanLiteral)) or (token.type == Token.NullLiteral)
def advanceSlash():
    """Tokenizer-mode heuristic deciding whether a "/" starts a regular
    expression literal or is a division punctuator, based on the previously
    emitted tokens.
    """
    prevToken = None
    checkToken = None
    prevToken = extra.tokens[len(extra.tokens) - 1]
    if not prevToken:
        # Nothing before the slash: must be a regex.
        return scanRegExp()
    if prevToken.type == "Punctuator":
        if prevToken.value == ")":
            # After ")": regex only when the "(" belonged to if/while/for/with.
            checkToken = extra.tokens[extra.openParenToken - 1]
            if (checkToken and (checkToken.type == "Keyword")) and ((((checkToken.value == "if") or (checkToken.value == "while")) or (checkToken.value == "for")) or (checkToken.value == "with")):
                return scanRegExp()
            return scanPunctuator()
        if prevToken.value == "}":
            # After "}": division only when the block closed a function
            # *expression*; inspect the tokens around the matching "{".
            if extra.tokens[extra.openCurlyToken - 3] and (extra.tokens[extra.openCurlyToken - 3].type == "Keyword"):
                # Anonymous function.
                checkToken = extra.tokens[extra.openCurlyToken - 4]
                if not checkToken:
                    return scanPunctuator()
            elif extra.tokens[extra.openCurlyToken - 4] and (extra.tokens[extra.openCurlyToken - 4].type == "Keyword"):
                # Named function.
                checkToken = extra.tokens[extra.openCurlyToken - 5]
                if not checkToken:
                    return scanRegExp()
            else:
                return scanPunctuator()
            # NOTE(review): FnExprTokens and its .indexOf look like shims
            # defined elsewhere in this generated file — confirm they exist.
            if FnExprTokens.indexOf(checkToken.value) >= 0:
                return scanPunctuator()
            return scanRegExp()
        return scanRegExp()
    if prevToken.type == "Keyword":
        return scanRegExp()
    return scanPunctuator()
def advance():
    """Scan and return the next token, dispatching on the first significant
    character; returns an EOF token at end of input.
    """
    ch = None
    skipComment()
    if index >= length:
        return jsdict({
            "type": Token.EOF,
            "lineNumber": lineNumber,
            "lineStart": lineStart,
            "range": [index, index],
        })
    ch = (ord(source[index]) if index < len(source) else None)
    # Very common punctuators first: "(" (40), ")" (41), ":" (58).
    if ((ch == 40) or (ch == 41)) or (ch == 58):
        return scanPunctuator()
    # 39 == "'", 34 == '"'.
    if (ch == 39) or (ch == 34):
        return scanStringLiteral()
    if isIdentifierStart(ch):
        return scanIdentifier()
    # 46 == ".": a numeric literal only when a digit follows.
    if ch == 46:
        if isDecimalDigit((ord(source[index + 1]) if (index + 1) < len(source) else None)):
            return scanNumericLiteral()
        return scanPunctuator()
    if isDecimalDigit(ch):
        return scanNumericLiteral()
    # 47 == "/": in tokenizer mode, decide regex vs. division heuristically.
    if extra.tokenize and (ch == 47):
        return advanceSlash()
    return scanPunctuator()
def lex():
    """Return the current lookahead token and advance the scanner by one.

    The scanner position is deliberately restored to the end of the returned
    token after computing the new lookahead, so errors are reported at the
    token the parser is actually consuming.
    """
    global index, lineNumber, lineStart, lookahead
    token = None
    token = lookahead
    index = token.range[1]
    lineNumber = token.lineNumber
    lineStart = token.lineStart
    lookahead = advance()
    # Not redundant: advance() moved the globals past the next token; rewind
    # them to the end of the token being returned.
    index = token.range[1]
    lineNumber = token.lineNumber
    lineStart = token.lineStart
    return token
def peek():
    """Compute `lookahead` (the next token) without moving the scanner:
    the position globals are saved, advance() runs, then they are restored.
    """
    global lookahead, index, lineNumber, lineStart
    pos = None
    line = None
    start = None
    pos = index
    line = lineNumber
    start = lineStart
    lookahead = advance()
    index = pos
    lineNumber = line
    lineStart = start
# Delegate bundle mapping AST-builder names onto the generated __temp__N
# closures defined earlier in this file.  The parser reaches these through
# the `delegate` global (e.g. delegate.createLiteral, delegate.markStart).
SyntaxTreeDelegate = jsdict({
    "name": "SyntaxTree",
    "markStart": __temp__0,
    "markEnd": __temp__1,
    "markEndIf": __temp__2,
    "postProcess": __temp__3,
    "createArrayExpression": __temp__4,
    "createAssignmentExpression": __temp__5,
    "createBinaryExpression": __temp__6,
    "createBlockStatement": __temp__7,
    "createBreakStatement": __temp__8,
    "createCallExpression": __temp__9,
    "createCatchClause": __temp__10,
    "createConditionalExpression": __temp__11,
    "createContinueStatement": __temp__12,
    "createDebuggerStatement": __temp__13,
    "createDoWhileStatement": __temp__14,
    "createEmptyStatement": __temp__15,
    "createExpressionStatement": __temp__16,
    "createForStatement": __temp__17,
    "createForInStatement": __temp__18,
    "createFunctionDeclaration": __temp__19,
    "createFunctionExpression": __temp__20,
    "createIdentifier": __temp__21,
    "createIfStatement": __temp__22,
    "createLabeledStatement": __temp__23,
    "createLiteral": __temp__24,
    "createMemberExpression": __temp__25,
    "createNewExpression": __temp__26,
    "createObjectExpression": __temp__27,
    "createPostfixExpression": __temp__28,
    "createProgram": __temp__29,
    "createProperty": __temp__30,
    "createReturnStatement": __temp__31,
    "createSequenceExpression": __temp__32,
    "createSwitchCase": __temp__33,
    "createSwitchStatement": __temp__34,
    "createThisExpression": __temp__35,
    "createThrowStatement": __temp__36,
    "createTryStatement": __temp__37,
    "createUnaryExpression": __temp__38,
    "createVariableDeclaration": __temp__39,
    "createVariableDeclarator": __temp__40,
    "createWhileStatement": __temp__41,
    "createWithStatement": __temp__42,
})
def peekLineTerminator():
    """Return True when a line terminator separates the current position from
    the next significant character (used for automatic semicolon insertion
    and the restricted productions).  Scanner state is saved and restored.
    """
    global index, lineNumber, lineStart
    pos = None
    line = None
    start = None
    found = None
    pos = index
    line = lineNumber
    start = lineStart
    skipComment()
    # skipComment() bumps lineNumber when it crosses a terminator.
    found = lineNumber != line
    index = pos
    lineNumber = line
    lineStart = start
    return found
def throwError(token=None, messageFormat=None, a=None):
    """Raise a RuntimeError describing a parse error at `token` (or at the
    current scanner position when `token` carries no line number), expanding
    "%N" placeholders in `messageFormat` from the extra arguments.

    NOTE(review): `arguments`, `Array.prototype` and the regex-based
    str.replace are untranslated JS idioms; they must be supplied by runtime
    shims defined elsewhere in this generated file — confirm.
    """
    def __temp__43(whole=None, index=None):
        assert__py__(index < len(args), "Message reference must be in range")
        return args[index]
    error = None
    args = Array.prototype.slice.call(arguments, 2)
    msg = messageFormat.replace(RegExp(r'%(\d)'), __temp__43)
    # Tokens carry their own position; otherwise fall back to the scanner's.
    if ('undefined' if not ('lineNumber' in token) else typeof(token.lineNumber)) == "number":
        error = RuntimeError((("Line " + token.lineNumber) + ": ") + msg)
        error.index = token.range[0]
        error.lineNumber = token.lineNumber
        error.column = (token.range[0] - lineStart) + 1
    else:
        error = RuntimeError((("Line " + lineNumber) + ": ") + msg)
        error.index = index
        error.lineNumber = lineNumber
        error.column = (index - lineStart) + 1
    error.description = msg
    raise error
def throwErrorTolerant():
    """In tolerant mode, record the error raised by throwError() on
    extra.errors and continue; otherwise re-raise it.

    NOTE(review): forwards the JS `arguments` object — assumed to be
    provided by a runtime shim elsewhere in this generated file; confirm.
    """
    try:
        throwError.apply(None, arguments)
    except Exception as e:
        if extra.errors:
            extra.errors.append(e)
        else:
            raise
def throwUnexpected(token=None):
    """Report `token` as unexpected with the most specific message available
    (EOF, number, string, identifier, reserved word, or generic token).

    Always raises via throwError(), except in the tolerated strict-mode
    reserved-word case, which records the error and returns.
    """
    if token.type == Token.EOF:
        throwError(token, Messages.UnexpectedEOS)
    if token.type == Token.NumericLiteral:
        throwError(token, Messages.UnexpectedNumber)
    if token.type == Token.StringLiteral:
        throwError(token, Messages.UnexpectedString)
    if token.type == Token.Identifier:
        throwError(token, Messages.UnexpectedIdentifier)
    if token.type == Token.Keyword:
        if isFutureReservedWord(token.value):
            throwError(token, Messages.UnexpectedReserved)
        elif strict and isStrictModeReservedWord(token.value):
            throwErrorTolerant(token, Messages.StrictReservedWord)
            return
        throwError(token, Messages.UnexpectedToken, token.value)
    # The original repeated this call twice; throwError() always raises, so
    # the duplicate was unreachable and has been dropped.
    throwError(token, Messages.UnexpectedToken, token.value)
def expect(value=None):
    """Consume the next token and verify it is the punctuator `value`;
    otherwise report it as unexpected.
    """
    token = lex()
    if (token.type != Token.Punctuator) or (token.value != value):
        throwUnexpected(token)
def expectKeyword(keyword=None):
    """Consume the next token and verify it is the keyword `keyword`;
    otherwise report it as unexpected.
    """
    token = lex()
    if (token.type != Token.Keyword) or (token.value != keyword):
        throwUnexpected(token)
def match(value=None):
    """True when the lookahead token is the punctuator `value` (no consume)."""
    return (lookahead.type == Token.Punctuator) and (lookahead.value == value)
def matchKeyword(keyword=None):
    """True when the lookahead token is the keyword `keyword` (no consume)."""
    return (lookahead.type == Token.Keyword) and (lookahead.value == keyword)
def matchAssign():
    """True when the lookahead token is any assignment operator."""
    if lookahead.type != Token.Punctuator:
        return False
    return lookahead.value in (
        "=", "*=", "/=", "%=", "+=", "-=",
        "<<=", ">>=", ">>>=", "&=", "^=", "|=",
    )
def consumeSemicolon():
    """Consume a statement terminator with automatic semicolon insertion: an
    explicit ";" is eaten; otherwise a line terminator, "}" or EOF is
    accepted silently, and anything else is reported as unexpected.
    """
    line = None
    # 59 == ";": cheap character test before the full token comparison.
    if (ord(source[index]) if index < len(source) else None) == 59:
        lex()
        return
    line = lineNumber
    skipComment()
    # A line terminator before the next token permits ASI.
    if lineNumber != line:
        return
    if match(";"):
        lex()
        return
    if (lookahead.type != Token.EOF) and (not match("}")):
        throwUnexpected(lookahead)
def isLeftHandSide(expr=None):
    """True when `expr` may appear as an assignment target (an identifier or
    a member expression).
    """
    return (expr.type == Syntax.Identifier) or (expr.type == Syntax.MemberExpression)
def parseArrayInitialiser():
    """Parse an array literal "[...]"; a bare comma contributes a None
    element (elision), matching the JS AST convention of null holes.
    """
    elements = []
    expect("[")
    while not match("]"):
        if match(","):
            lex()
            elements.append(None)
        else:
            elements.append(parseAssignmentExpression())
            if not match("]"):
                expect(",")
    expect("]")
    return delegate.createArrayExpression(elements)
def parsePropertyFunction(param=None, first=None):
    """Parse the body of a getter/setter and wrap it in an anonymous function
    expression node.  `first` is the setter's parameter token, passed so a
    strict-mode restricted name ("eval"/"arguments") is reported on it.
    """
    global strict
    previousStrict = None
    body = None
    previousStrict = strict
    skipComment()
    delegate.markStart()
    body = parseFunctionSourceElements()
    if (first and strict) and isRestrictedWord(param[0].name):
        throwErrorTolerant(first, Messages.StrictParamName)
    # The body may have switched strict mode on; restore the caller's mode.
    strict = previousStrict
    return delegate.markEnd(delegate.createFunctionExpression(None, param, [], body))
def parseObjectPropertyKey():
    """Parse an object-property key: string/numeric literals become Literal
    nodes (legacy octal rejected in strict mode), anything else an
    Identifier node.
    """
    token = None
    skipComment()
    delegate.markStart()
    token = lex()
    if (token.type == Token.StringLiteral) or (token.type == Token.NumericLiteral):
        if strict and token.octal:
            throwErrorTolerant(token, Messages.StrictOctalLiteral)
        return delegate.markEnd(delegate.createLiteral(token))
    return delegate.markEnd(delegate.createIdentifier(token.value))
def parseObjectProperty():
    """Parse one property of an object literal: a "get"/"set" accessor or an
    ordinary "key: value" initializer.
    """
    token = None
    key = None
    id = None
    value = None
    param = None
    token = lookahead
    skipComment()
    delegate.markStart()
    if token.type == Token.Identifier:
        id = parseObjectPropertyKey()
        # "get"/"set" introduce an accessor only when NOT followed by ":".
        if (token.value == "get") and (not match(":")):
            key = parseObjectPropertyKey()
            expect("(")
            expect(")")
            value = parsePropertyFunction([])
            return delegate.markEnd(delegate.createProperty("get", key, value))
        if (token.value == "set") and (not match(":")):
            key = parseObjectPropertyKey()
            expect("(")
            token = lookahead
            if token.type != Token.Identifier:
                # A setter needs exactly one named parameter; tolerate the
                # error and parse with an empty parameter list.
                expect(")")
                throwErrorTolerant(token, Messages.UnexpectedToken, token.value)
                value = parsePropertyFunction([])
            else:
                param = [parseVariableIdentifier()]
                expect(")")
                value = parsePropertyFunction(param, token)
            return delegate.markEnd(delegate.createProperty("set", key, value))
        expect(":")
        value = parseAssignmentExpression()
        return delegate.markEnd(delegate.createProperty("init", id, value))
    if (token.type == Token.EOF) or (token.type == Token.Punctuator):
        throwUnexpected(token)
    else:
        # Keywords and literals are also valid property keys.
        key = parseObjectPropertyKey()
        expect(":")
        value = parseAssignmentExpression()
        return delegate.markEnd(delegate.createProperty("init", key, value))
def parseObjectInitialiser():
    """Parse an object literal "{...}", tracking property names seen so far
    so strict-mode duplicate and accessor/data conflicts can be reported.
    """
    properties = []
    property = None
    name = None
    key = None
    kind = None
    # Maps "$<name>" -> bitmask of PropertyKind values already seen for that
    # name; the "$" prefix avoids clashes with dict internals (as upstream).
    map = jsdict({
    })
    toString = str
    expect("{")
    while not match("}"):
        property = parseObjectProperty()
        if property.key.type == Syntax.Identifier:
            name = property.key.name
        else:
            name = toString(property.key.value)
        kind = (PropertyKind.Data if property.kind == "init" else (PropertyKind.Get if property.kind == "get" else PropertyKind.Set))
        key = "$" + name
        if key in map:
            if map[key] == PropertyKind.Data:
                # Duplicate data property: an error only in strict mode;
                # mixing data with an accessor is always an error.
                if strict and (kind == PropertyKind.Data):
                    throwErrorTolerant(jsdict({
                    }), Messages.StrictDuplicateProperty)
                elif kind != PropertyKind.Data:
                    throwErrorTolerant(jsdict({
                    }), Messages.AccessorDataProperty)
            else:
                if kind == PropertyKind.Data:
                    throwErrorTolerant(jsdict({
                    }), Messages.AccessorDataProperty)
                elif map[key] & kind:
                    # Same accessor (get or set) defined twice.
                    throwErrorTolerant(jsdict({
                    }), Messages.AccessorGetSet)
            map[key] |= kind
        else:
            map[key] = kind
        properties.append(property)
        if not match("}"):
            expect(",")
    expect("}")
    return delegate.createObjectExpression(properties)
def parseGroupExpression():
    """Parse a parenthesized expression "(expr)"; the grouping itself
    produces no AST node of its own.
    """
    expr = None
    expect("(")
    expr = parseExpression()
    expect(")")
    return expr
def parsePrimaryExpression():
    """Parse a primary expression: grouping, identifier, literal, `this`,
    function expression, array/object initialiser, or a regex literal.
    Reports the next token as unexpected when nothing matches.
    """
    type = None
    token = None
    expr = None
    if match("("):
        return parseGroupExpression()
    type = lookahead.type
    delegate.markStart()
    if type == Token.Identifier:
        expr = delegate.createIdentifier(lex().value)
    elif (type == Token.StringLiteral) or (type == Token.NumericLiteral):
        if strict and lookahead.octal:
            throwErrorTolerant(lookahead, Messages.StrictOctalLiteral)
        expr = delegate.createLiteral(lex())
    elif type == Token.Keyword:
        if matchKeyword("this"):
            lex()
            expr = delegate.createThisExpression()
        elif matchKeyword("function"):
            expr = parseFunctionExpression()
    elif type == Token.BooleanLiteral:
        token = lex()
        # Convert the raw "true"/"false" text into an actual boolean.
        token.value = token.value == "true"
        expr = delegate.createLiteral(token)
    elif type == Token.NullLiteral:
        token = lex()
        token.value = None
        expr = delegate.createLiteral(token)
    elif match("["):
        expr = parseArrayInitialiser()
    elif match("{"):
        expr = parseObjectInitialiser()
    elif match("/") or match("/="):
        # A "/" in primary position cannot be division: rescan as regex.
        expr = delegate.createLiteral(scanRegExp())
    if expr:
        return delegate.markEnd(expr)
    throwUnexpected(lex())
def parseArguments():
    """Parse a parenthesized, comma-separated argument list (may be empty)."""
    args = []
    expect("(")
    if not match(")"):
        while index < length:
            args.append(parseAssignmentExpression())
            if match(")"):
                break
            expect(",")
    expect(")")
    return args
def parseNonComputedProperty():
    """Parse the property name after ".": keywords and boolean/null literals
    are permitted there, anything else is unexpected.
    """
    token = None
    delegate.markStart()
    token = lex()
    if not isIdentifierName(token):
        throwUnexpected(token)
    return delegate.markEnd(delegate.createIdentifier(token.value))
def parseNonComputedMember():
    """Parse a ".name" member access (the "." is consumed here)."""
    expect(".")
    return parseNonComputedProperty()
def parseComputedMember():
    """Parse a "[expr]" computed member access, returning the inner
    expression.
    """
    expr = None
    expect("[")
    expr = parseExpression()
    expect("]")
    return expr
def parseNewExpression():
    """Parse "new Callee(args?)"; the argument list is optional, as in
    "new Foo".
    """
    callee = None
    args = None
    delegate.markStart()
    expectKeyword("new")
    callee = parseLeftHandSideExpression()
    args = (parseArguments() if match("(") else [])
    return delegate.markEnd(delegate.createNewExpression(callee, args))
def parseLeftHandSideExpressionAllowCall():
    """Parse a left-hand-side expression where calls are allowed: a primary
    or new expression followed by any mix of ".name", "[expr]" and "(args)"
    suffixes.
    """
    marker = None
    expr = None
    args = None
    property = None
    marker = createLocationMarker()
    expr = (parseNewExpression() if matchKeyword("new") else parsePrimaryExpression())
    while (match(".") or match("[")) or match("("):
        if match("("):
            args = parseArguments()
            expr = delegate.createCallExpression(expr, args)
        elif match("["):
            property = parseComputedMember()
            expr = delegate.createMemberExpression("[", expr, property)
        else:
            property = parseNonComputedMember()
            expr = delegate.createMemberExpression(".", expr, property)
        # Re-stamp location info so the node spans all suffixes so far.
        if marker:
            marker.end()
            marker.apply(expr)
    return expr
def parseLeftHandSideExpression():
    """Parse a left-hand-side expression without call suffixes (used for the
    callee of `new`): a primary or new expression followed only by ".name"
    and "[expr]" accesses.
    """
    marker = None
    expr = None
    property = None
    marker = createLocationMarker()
    expr = (parseNewExpression() if matchKeyword("new") else parsePrimaryExpression())
    while match(".") or match("["):
        if match("["):
            property = parseComputedMember()
            expr = delegate.createMemberExpression("[", expr, property)
        else:
            property = parseNonComputedMember()
            expr = delegate.createMemberExpression(".", expr, property)
        # Re-stamp location info so the node spans all suffixes so far.
        if marker:
            marker.end()
            marker.apply(expr)
    return expr
def parsePostfixExpression():
    """Parse a postfix expression: an LHS expression optionally followed by
    "++"/"--" on the same line (a line terminator blocks the postfix
    operator, per the restricted-production rule).
    """
    expr = None
    token = None
    delegate.markStart()
    expr = parseLeftHandSideExpressionAllowCall()
    if lookahead.type == Token.Punctuator:
        if (match("++") or match("--")) and (not peekLineTerminator()):
            # Strict mode forbids ++/-- on "eval" and "arguments".
            if (strict and (expr.type == Syntax.Identifier)) and isRestrictedWord(expr.name):
                throwErrorTolerant(jsdict({
                }), Messages.StrictLHSPostfix)
            if not isLeftHandSide(expr):
                throwError(jsdict({
                }), Messages.InvalidLHSInAssignment)
            token = lex()
            expr = delegate.createPostfixExpression(token.value, expr)
    return delegate.markEndIf(expr)
def parseUnaryExpression():
    """Parse a unary expression: prefix "++"/"--", the unary operators
    + - ~ !, the keywords delete/void/typeof, or a plain postfix expression.
    """
    token = None
    expr = None
    delegate.markStart()
    if (lookahead.type != Token.Punctuator) and (lookahead.type != Token.Keyword):
        expr = parsePostfixExpression()
    elif match("++") or match("--"):
        token = lex()
        expr = parseUnaryExpression()
        # Strict mode forbids ++/-- on "eval" and "arguments".
        if (strict and (expr.type == Syntax.Identifier)) and isRestrictedWord(expr.name):
            throwErrorTolerant(jsdict({
            }), Messages.StrictLHSPrefix)
        if not isLeftHandSide(expr):
            throwError(jsdict({
            }), Messages.InvalidLHSInAssignment)
        expr = delegate.createUnaryExpression(token.value, expr)
    elif ((match("+") or match("-")) or match("~")) or match("!"):
        token = lex()
        expr = parseUnaryExpression()
        expr = delegate.createUnaryExpression(token.value, expr)
    elif (matchKeyword("delete") or matchKeyword("void")) or matchKeyword("typeof"):
        token = lex()
        expr = parseUnaryExpression()
        expr = delegate.createUnaryExpression(token.value, expr)
        # Strict mode forbids deleting a plain variable reference.
        if (strict and (expr.operator == "delete")) and (expr.argument.type == Syntax.Identifier):
            throwErrorTolerant(jsdict({
            }), Messages.StrictDelete)
    else:
        expr = parsePostfixExpression()
    return delegate.markEndIf(expr)
def binaryPrecedence(token=None, allowIn=None):
    """Return the binding precedence of *token* as a binary operator.

    Returns 0 when the token is not a binary operator.  The ``in`` operator
    only counts as an operator when *allowIn* is truthy (it is disabled
    inside a for-statement header).  Precedence values mirror esprima's
    table: || is lowest (1), * / % are highest (11).
    """
    if (token.type != Token.Punctuator) and (token.type != Token.Keyword):
        return 0
    op = token.value
    # "in" is context sensitive, so it is handled before the table lookup.
    if op == "in":
        return 7 if allowIn else 0
    table = {
        "||": 1,
        "&&": 2,
        "|": 3,
        "^": 4,
        "&": 5,
        "==": 6, "!=": 6, "===": 6, "!==": 6,
        "<": 7, ">": 7, "<=": 7, ">=": 7, "instanceof": 7,
        "<<": 8, ">>": 8, ">>>": 8,
        "+": 9, "-": 9,
        "*": 11, "/": 11, "%": 11,
    }
    return table.get(op, 0)
def parseBinaryExpression():
    """Parse a chain of binary operators using an operator-precedence
    (shunting-yard style) stack.  `stack` alternates operand/operator
    entries; `markers` tracks source-location markers in parallel."""
    marker = None
    markers = None
    expr = None
    token = None
    prec = None
    previousAllowIn = None
    stack = None
    right = None
    operator = None
    left = None
    i = None
    previousAllowIn = state.allowIn
    state.allowIn = True
    marker = createLocationMarker()
    left = parseUnaryExpression()
    token = lookahead
    prec = binaryPrecedence(token, previousAllowIn)
    if prec == 0:
        # Not followed by a binary operator: just the unary expression.
        return left
    token.prec = prec
    lex()
    markers = [marker, createLocationMarker()]
    right = parseUnaryExpression()
    stack = [left, token, right]
    prec = binaryPrecedence(lookahead, previousAllowIn)
    while prec > 0:
        # Reduce while the incoming operator binds no tighter than the
        # operator on top of the stack (left associativity).
        while (len(stack) > 2) and (prec <= stack[len(stack) - 2].prec):
            right = stack.pop()
            operator = stack.pop().value
            left = stack.pop()
            expr = delegate.createBinaryExpression(operator, left, right)
            markers.pop()
            marker = markers.pop()
            if marker:
                marker.end()
                marker.apply(expr)
            stack.append(expr)
            markers.append(marker)
        # Shift the incoming operator and its right operand.
        token = lex()
        token.prec = prec
        stack.append(token)
        markers.append(createLocationMarker())
        expr = parseUnaryExpression()
        stack.append(expr)
        prec = binaryPrecedence(lookahead, previousAllowIn)
    state.allowIn = previousAllowIn
    # Final reduction of whatever remains on the stack, right to left.
    i = len(stack) - 1
    expr = stack[i]
    markers.pop()
    while i > 1:
        expr = delegate.createBinaryExpression(stack[i - 1].value, stack[i - 2], expr)
        i -= 2
        marker = markers.pop()
        if marker:
            marker.end()
            marker.apply(expr)
    return expr
def parseConditionalExpression():
    """Parse a conditional (ternary ?:) expression.  `in` is re-enabled
    inside the consequent, per the ECMAScript grammar."""
    expr = None
    previousAllowIn = None
    consequent = None
    alternate = None
    delegate.markStart()
    expr = parseBinaryExpression()
    if match("?"):
        lex()
        previousAllowIn = state.allowIn
        state.allowIn = True
        consequent = parseAssignmentExpression()
        state.allowIn = previousAllowIn
        expect(":")
        alternate = parseAssignmentExpression()
        expr = delegate.markEnd(delegate.createConditionalExpression(expr, consequent, alternate))
    else:
        # NOTE(review): markEnd is called on an empty jsdict and the result is
        # discarded — likely generator artifact balancing the markStart above;
        # confirm against upstream esprima before changing.
        delegate.markEnd(jsdict({
        }))
    return expr
def parseAssignmentExpression():
    """Parse an assignment expression (possibly compound, e.g. `+=`),
    falling back to a plain conditional expression when no assignment
    operator follows."""
    token = None
    left = None
    right = None
    node = None
    token = lookahead
    delegate.markStart()
    left = parseConditionalExpression()
    node = left
    if matchAssign():
        if not isLeftHandSide(left):
            throwError(jsdict({
            }), Messages.InvalidLHSInAssignment)
        # Strict mode forbids assigning to eval/arguments.
        if (strict and (left.type == Syntax.Identifier)) and isRestrictedWord(left.name):
            throwErrorTolerant(token, Messages.StrictLHSAssignment)
        token = lex()
        right = parseAssignmentExpression()
        node = delegate.createAssignmentExpression(token.value, left, right)
    return delegate.markEndIf(node)
def parseExpression():
    """Parse an expression, wrapping comma-separated assignment
    expressions into a SequenceExpression."""
    expr = None
    delegate.markStart()
    expr = parseAssignmentExpression()
    if match(","):
        expr = delegate.createSequenceExpression([expr])
        while index < length:
            if not match(","):
                break
            lex()
            expr.expressions.append(parseAssignmentExpression())
    return delegate.markEndIf(expr)
def parseStatementList():
    """Parse source elements until '}' or end of input; returns the list.

    The locals()/typeof dance is the generator's translation of JS
    `typeof statement === 'undefined'` (parseSourceElement may return
    nothing at EOF)."""
    list__py__ = []
    statement = None
    while index < length:
        if match("}"):
            break
        statement = parseSourceElement()
        if ('undefined' if not 'statement' in locals() else typeof(statement)) == "undefined":
            break
        list__py__.append(statement)
    return list__py__
def parseBlock():
    """Parse a brace-delimited block statement: '{' StatementList '}'."""
    block = None
    skipComment()
    delegate.markStart()
    expect("{")
    block = parseStatementList()
    expect("}")
    return delegate.markEnd(delegate.createBlockStatement(block))
def parseVariableIdentifier():
    """Consume one token and require it to be an identifier; returns the
    corresponding Identifier node."""
    token = None
    skipComment()
    delegate.markStart()
    token = lex()
    if token.type != Token.Identifier:
        throwUnexpected(token)
    return delegate.markEnd(delegate.createIdentifier(token.value))
def parseVariableDeclaration(kind=None):
    """Parse a single declarator (`name` or `name = init`).  For
    kind == "const" an initializer is mandatory."""
    init = None
    id = None
    skipComment()
    delegate.markStart()
    id = parseVariableIdentifier()
    # Strict mode forbids declaring eval/arguments.
    if strict and isRestrictedWord(id.name):
        throwErrorTolerant(jsdict({
        }), Messages.StrictVarName)
    if kind == "const":
        expect("=")
        init = parseAssignmentExpression()
    elif match("="):
        lex()
        init = parseAssignmentExpression()
    return delegate.markEnd(delegate.createVariableDeclarator(id, init))
def parseVariableDeclarationList(kind=None):
    """Parse one or more comma-separated declarators (do-while shape:
    at least one declarator is always consumed)."""
    list__py__ = []
    while 1:
        list__py__.append(parseVariableDeclaration(kind))
        if not match(","):
            break
        lex()
        if not (index < length):
            break
    return list__py__
def parseVariableStatement():
    """Parse a `var` statement: keyword, declarator list, semicolon."""
    declarations = None
    expectKeyword("var")
    declarations = parseVariableDeclarationList()
    consumeSemicolon()
    return delegate.createVariableDeclaration(declarations, "var")
def parseConstLetDeclaration(kind=None):
    """Parse a `const` or `let` declaration; *kind* is the keyword text."""
    declarations = None
    skipComment()
    delegate.markStart()
    expectKeyword(kind)
    declarations = parseVariableDeclarationList(kind)
    consumeSemicolon()
    return delegate.markEnd(delegate.createVariableDeclaration(declarations, kind))
def parseEmptyStatement():
    """Parse a lone ';' into an EmptyStatement node."""
    expect(";")
    return delegate.createEmptyStatement()
def parseExpressionStatement():
    """Parse an expression followed by (possibly inserted) semicolon."""
    expr = parseExpression()
    consumeSemicolon()
    return delegate.createExpressionStatement(expr)
def parseIfStatement():
    """Parse `if (test) consequent [else alternate]`."""
    test = None
    consequent = None
    alternate = None
    expectKeyword("if")
    expect("(")
    test = parseExpression()
    expect(")")
    consequent = parseStatement()
    if matchKeyword("else"):
        lex()
        alternate = parseStatement()
    else:
        alternate = None
    return delegate.createIfStatement(test, consequent, alternate)
def parseDoWhileStatement():
    """Parse `do body while (test);` — the body is parsed with
    state.inIteration set so break/continue are legal inside it."""
    body = None
    test = None
    oldInIteration = None
    expectKeyword("do")
    oldInIteration = state.inIteration
    state.inIteration = True
    body = parseStatement()
    state.inIteration = oldInIteration
    expectKeyword("while")
    expect("(")
    test = parseExpression()
    expect(")")
    # Trailing semicolon after do-while is optional (ASI).
    if match(";"):
        lex()
    return delegate.createDoWhileStatement(body, test)
def parseWhileStatement():
    """Parse `while (test) body`, flagging the iteration context."""
    test = None
    body = None
    oldInIteration = None
    expectKeyword("while")
    expect("(")
    test = parseExpression()
    expect(")")
    oldInIteration = state.inIteration
    state.inIteration = True
    body = parseStatement()
    state.inIteration = oldInIteration
    return delegate.createWhileStatement(test, body)
def parseForVariableDeclaration():
    """Parse the `var`/`let` declaration in a for-statement header; the
    consumed keyword token supplies the declaration kind."""
    token = None
    declarations = None
    delegate.markStart()
    token = lex()
    declarations = parseVariableDeclarationList()
    return delegate.markEnd(delegate.createVariableDeclaration(declarations, token.value))
def parseForStatement():
    """Parse both `for (init; test; update)` and `for (x in obj)` forms.

    `left` stays unbound in the classic three-part form; the
    locals()/typeof checks are the generated translation of JS
    `typeof left === 'undefined'` and select between the two shapes."""
    init = None
    test = None
    update = None
    left = None
    right = None
    body = None
    oldInIteration = None
    update = None
    test = update
    init = test
    expectKeyword("for")
    expect("(")
    if match(";"):
        lex()
    else:
        if matchKeyword("var") or matchKeyword("let"):
            # allowIn is disabled so a bare `in` in the declarator init is
            # not parsed as a binary operator here.
            state.allowIn = False
            init = parseForVariableDeclaration()
            state.allowIn = True
            if (len(init.declarations) == 1) and matchKeyword("in"):
                lex()
                left = init
                right = parseExpression()
                init = None
        else:
            state.allowIn = False
            init = parseExpression()
            state.allowIn = True
            if matchKeyword("in"):
                if not isLeftHandSide(init):
                    throwError(jsdict({
                    }), Messages.InvalidLHSInForIn)
                lex()
                left = init
                right = parseExpression()
                init = None
        if ('undefined' if not 'left' in locals() else typeof(left)) == "undefined":
            expect(";")
    if ('undefined' if not 'left' in locals() else typeof(left)) == "undefined":
        if not match(";"):
            test = parseExpression()
        expect(";")
        if not match(")"):
            update = parseExpression()
    expect(")")
    oldInIteration = state.inIteration
    state.inIteration = True
    body = parseStatement()
    state.inIteration = oldInIteration
    return (delegate.createForStatement(init, test, update, body) if ('undefined' if not 'left' in locals() else typeof(left)) == "undefined" else delegate.createForInStatement(left, right, body))
def parseContinueStatement():
    """Parse `continue [label];` — legal only inside an iteration, and a
    label must be declared in the enclosing label set."""
    label = None
    key = None
    expectKeyword("continue")
    # 59 == ord(';'): an immediate semicolon means an unlabeled continue.
    if (ord(source[index]) if index < len(source) else None) == 59:
        lex()
        if not state.inIteration:
            throwError(jsdict({
            }), Messages.IllegalContinue)
        return delegate.createContinueStatement(None)
    # A line break after `continue` triggers ASI: also unlabeled.
    if peekLineTerminator():
        if not state.inIteration:
            throwError(jsdict({
            }), Messages.IllegalContinue)
        return delegate.createContinueStatement(None)
    if lookahead.type == Token.Identifier:
        label = parseVariableIdentifier()
        # Labels are namespaced with '$' to avoid dict-key collisions.
        key = "$" + label.name
        if not (key in state.labelSet):
            throwError(jsdict({
            }), Messages.UnknownLabel, label.name)
    consumeSemicolon()
    if (label == None) and (not state.inIteration):
        throwError(jsdict({
        }), Messages.IllegalContinue)
    return delegate.createContinueStatement(label)
def parseBreakStatement():
    """Parse `break [label];` — legal inside an iteration or switch, and
    a label must be declared in the enclosing label set."""
    label = None
    key = None
    expectKeyword("break")
    # 59 == ord(';'): an immediate semicolon means an unlabeled break.
    if (ord(source[index]) if index < len(source) else None) == 59:
        lex()
        if not (state.inIteration or state.inSwitch):
            throwError(jsdict({
            }), Messages.IllegalBreak)
        return delegate.createBreakStatement(None)
    # A line break after `break` triggers ASI: also unlabeled.
    if peekLineTerminator():
        if not (state.inIteration or state.inSwitch):
            throwError(jsdict({
            }), Messages.IllegalBreak)
        return delegate.createBreakStatement(None)
    if lookahead.type == Token.Identifier:
        label = parseVariableIdentifier()
        key = "$" + label.name
        if not (key in state.labelSet):
            throwError(jsdict({
            }), Messages.UnknownLabel, label.name)
    consumeSemicolon()
    if (label == None) and (not (state.inIteration or state.inSwitch)):
        throwError(jsdict({
        }), Messages.IllegalBreak)
    return delegate.createBreakStatement(label)
def parseReturnStatement():
    """Parse `return [argument];` — only legal inside a function body,
    and a newline after `return` ends the statement (ASI)."""
    argument = None
    expectKeyword("return")
    if not state.inFunctionBody:
        throwErrorTolerant(jsdict({
        }), Messages.IllegalReturn)
    # 32 == ord(' '): fast path for `return <identifier...>` on one line.
    if (ord(source[index]) if index < len(source) else None) == 32:
        if isIdentifierStart((ord(source[index + 1]) if (index + 1) < len(source) else None)):
            argument = parseExpression()
            consumeSemicolon()
            return delegate.createReturnStatement(argument)
    if peekLineTerminator():
        return delegate.createReturnStatement(None)
    if not match(";"):
        if (not match("}")) and (lookahead.type != Token.EOF):
            argument = parseExpression()
    consumeSemicolon()
    return delegate.createReturnStatement(argument)
def parseWithStatement():
    """Parse `with (object) body`; rejected (tolerantly) in strict mode."""
    object = None
    body = None
    if strict:
        throwErrorTolerant(jsdict({
        }), Messages.StrictModeWith)
    expectKeyword("with")
    expect("(")
    object = parseExpression()
    expect(")")
    body = parseStatement()
    return delegate.createWithStatement(object, body)
def parseSwitchCase():
    """Parse one `case expr:` or `default:` clause and its statements.
    The clause's `test` is None for `default`."""
    test = None
    consequent = []
    statement = None
    skipComment()
    delegate.markStart()
    if matchKeyword("default"):
        lex()
        test = None
    else:
        expectKeyword("case")
        test = parseExpression()
    expect(":")
    while index < length:
        # A '}' or the start of the next clause terminates this one.
        if (match("}") or matchKeyword("default")) or matchKeyword("case"):
            break
        statement = parseStatement()
        consequent.append(statement)
    return delegate.markEnd(delegate.createSwitchCase(test, consequent))
def parseSwitchStatement():
    """Parse a full switch statement, rejecting multiple `default`
    clauses and flagging the switch context for `break`."""
    discriminant = None
    cases = None
    clause = None
    oldInSwitch = None
    defaultFound = None
    expectKeyword("switch")
    expect("(")
    discriminant = parseExpression()
    expect(")")
    expect("{")
    if match("}"):
        # Empty switch body: node carries no cases list.
        lex()
        return delegate.createSwitchStatement(discriminant)
    cases = []
    oldInSwitch = state.inSwitch
    state.inSwitch = True
    defaultFound = False
    while index < length:
        if match("}"):
            break
        clause = parseSwitchCase()
        if clause.test == None:
            if defaultFound:
                throwError(jsdict({
                }), Messages.MultipleDefaultsInSwitch)
            defaultFound = True
        cases.append(clause)
    state.inSwitch = oldInSwitch
    expect("}")
    return delegate.createSwitchStatement(discriminant, cases)
def parseThrowStatement():
    """Parse `throw argument;` — a newline after `throw` is an error."""
    argument = None
    expectKeyword("throw")
    if peekLineTerminator():
        throwError(jsdict({
        }), Messages.NewlineAfterThrow)
    argument = parseExpression()
    consumeSemicolon()
    return delegate.createThrowStatement(argument)
def parseCatchClause():
    """Parse `catch (param) { ... }`; the parameter is mandatory and may
    not be eval/arguments in strict mode."""
    param = None
    body = None
    skipComment()
    delegate.markStart()
    expectKeyword("catch")
    expect("(")
    if match(")"):
        throwUnexpected(lookahead)
    param = parseVariableIdentifier()
    if strict and isRestrictedWord(param.name):
        throwErrorTolerant(jsdict({
        }), Messages.StrictCatchVariable)
    expect(")")
    body = parseBlock()
    return delegate.markEnd(delegate.createCatchClause(param, body))
def parseTryStatement():
    """Parse try/catch/finally; at least one of catch or finally is
    required."""
    block = None
    handlers = []
    finalizer = None
    expectKeyword("try")
    block = parseBlock()
    if matchKeyword("catch"):
        handlers.append(parseCatchClause())
    if matchKeyword("finally"):
        lex()
        finalizer = parseBlock()
    if (len(handlers) == 0) and (not finalizer):
        throwError(jsdict({
        }), Messages.NoCatchOrFinally)
    # The empty list argument is the (always empty) guardedHandlers slot.
    return delegate.createTryStatement(block, [], handlers, finalizer)
def parseDebuggerStatement():
    """Parse `debugger;`."""
    expectKeyword("debugger")
    consumeSemicolon()
    return delegate.createDebuggerStatement()
def parseStatement():
    """Dispatch to the statement parser selected by the lookahead token.

    The `while 1: ... break` blocks are the generator's translation of JS
    switch statements.  Falls through to expression / labeled-statement
    parsing when no keyword or punctuator matches."""
    type = lookahead.type
    expr = None
    labeledBody = None
    key = None
    if type == Token.EOF:
        throwUnexpected(lookahead)
    skipComment()
    delegate.markStart()
    if type == Token.Punctuator:
        while 1:
            if lookahead.value == ";":
                return delegate.markEnd(parseEmptyStatement())
            elif lookahead.value == "{":
                return delegate.markEnd(parseBlock())
            elif lookahead.value == "(":
                return delegate.markEnd(parseExpressionStatement())
            else:
                break
            break
    if type == Token.Keyword:
        while 1:
            if lookahead.value == "break":
                return delegate.markEnd(parseBreakStatement())
            elif lookahead.value == "continue":
                return delegate.markEnd(parseContinueStatement())
            elif lookahead.value == "debugger":
                return delegate.markEnd(parseDebuggerStatement())
            elif lookahead.value == "do":
                return delegate.markEnd(parseDoWhileStatement())
            elif lookahead.value == "for":
                return delegate.markEnd(parseForStatement())
            elif lookahead.value == "function":
                return delegate.markEnd(parseFunctionDeclaration())
            elif lookahead.value == "if":
                return delegate.markEnd(parseIfStatement())
            elif lookahead.value == "return":
                return delegate.markEnd(parseReturnStatement())
            elif lookahead.value == "switch":
                return delegate.markEnd(parseSwitchStatement())
            elif lookahead.value == "throw":
                return delegate.markEnd(parseThrowStatement())
            elif lookahead.value == "try":
                return delegate.markEnd(parseTryStatement())
            elif lookahead.value == "var":
                return delegate.markEnd(parseVariableStatement())
            elif lookahead.value == "while":
                return delegate.markEnd(parseWhileStatement())
            elif lookahead.value == "with":
                return delegate.markEnd(parseWithStatement())
            else:
                break
            break
    expr = parseExpression()
    # `identifier :` introduces a labeled statement; labels are tracked in
    # state.labelSet (keys prefixed with '$') for break/continue validation.
    if (expr.type == Syntax.Identifier) and match(":"):
        lex()
        key = "$" + expr.name
        if key in state.labelSet:
            throwError(jsdict({
            }), Messages.Redeclaration, "Label", expr.name)
        state.labelSet[key] = True
        labeledBody = parseStatement()
        del state.labelSet[key]
        return delegate.markEnd(delegate.createLabeledStatement(expr, labeledBody))
    consumeSemicolon()
    return delegate.markEnd(delegate.createExpressionStatement(expr))
def parseFunctionSourceElements():
    """Parse a function body `{ ... }`, handling the directive prologue
    ("use strict" etc.) and saving/restoring label/iteration/switch state
    so the function body gets a fresh statement context."""
    global strict
    sourceElement = None
    sourceElements = []
    token = None
    directive = None
    firstRestricted = None
    oldLabelSet = None
    oldInIteration = None
    oldInSwitch = None
    oldInFunctionBody = None
    skipComment()
    delegate.markStart()
    expect("{")
    # Directive prologue: leading string-literal expression statements.
    while index < length:
        if lookahead.type != Token.StringLiteral:
            break
        token = lookahead
        sourceElement = parseSourceElement()
        sourceElements.append(sourceElement)
        if sourceElement.expression.type != Syntax.Literal:
            break
        # Strip the surrounding quotes to get the directive text.
        directive = source[(token.range[0] + 1):(token.range[1] - 1)]
        if directive == "use strict":
            strict = True
            if firstRestricted:
                throwErrorTolerant(firstRestricted, Messages.StrictOctalLiteral)
        else:
            # Remember the first octal-escaped directive in case strict
            # mode is enabled by a later directive.
            if (not firstRestricted) and token.octal:
                firstRestricted = token
    oldLabelSet = state.labelSet
    oldInIteration = state.inIteration
    oldInSwitch = state.inSwitch
    oldInFunctionBody = state.inFunctionBody
    state.labelSet = jsdict({
    })
    state.inIteration = False
    state.inSwitch = False
    state.inFunctionBody = True
    while index < length:
        if match("}"):
            break
        sourceElement = parseSourceElement()
        if ('undefined' if not 'sourceElement' in locals() else typeof(sourceElement)) == "undefined":
            break
        sourceElements.append(sourceElement)
    expect("}")
    state.labelSet = oldLabelSet
    state.inIteration = oldInIteration
    state.inSwitch = oldInSwitch
    state.inFunctionBody = oldInFunctionBody
    return delegate.markEnd(delegate.createBlockStatement(sourceElements))
def parseParams(firstRestricted=None):
    """Parse a parenthesized parameter list.

    Returns a jsdict with the params plus bookkeeping for strict-mode
    validation: `stricted`/`firstRestricted` are offending tokens (dupes
    or restricted words) and `message` the matching error, reported by the
    caller once it knows whether the function body is strict."""
    param = None
    params = []
    token = None
    stricted = None
    paramSet = None
    key = None
    message = None
    expect("(")
    if not match(")"):
        paramSet = jsdict({
        })
        while index < length:
            token = lookahead
            param = parseVariableIdentifier()
            # '$' prefix namespaces parameter names in the dup-check set.
            key = "$" + token.value
            if strict:
                if isRestrictedWord(token.value):
                    stricted = token
                    message = Messages.StrictParamName
                if key in paramSet:
                    stricted = token
                    message = Messages.StrictParamDupe
            elif not firstRestricted:
                if isRestrictedWord(token.value):
                    firstRestricted = token
                    message = Messages.StrictParamName
                elif isStrictModeReservedWord(token.value):
                    firstRestricted = token
                    message = Messages.StrictReservedWord
                elif key in paramSet:
                    firstRestricted = token
                    message = Messages.StrictParamDupe
            params.append(param)
            paramSet[key] = True
            if match(")"):
                break
            expect(",")
    expect(")")
    return jsdict({
        "params": params,
        "stricted": stricted,
        "firstRestricted": firstRestricted,
        "message": message,
    })
def parseFunctionDeclaration():
    """Parse `function name(params) { body }` as a declaration.  Strict
    violations in the name/params are reported after the body is parsed,
    since a "use strict" directive inside the body applies to them."""
    global strict
    id = None
    params = []
    body = None
    token = None
    stricted = None
    tmp = None
    firstRestricted = None
    message = None
    previousStrict = None
    skipComment()
    delegate.markStart()
    expectKeyword("function")
    token = lookahead
    id = parseVariableIdentifier()
    if strict:
        if isRestrictedWord(token.value):
            throwErrorTolerant(token, Messages.StrictFunctionName)
    else:
        if isRestrictedWord(token.value):
            firstRestricted = token
            message = Messages.StrictFunctionName
        elif isStrictModeReservedWord(token.value):
            firstRestricted = token
            message = Messages.StrictReservedWord
    tmp = parseParams(firstRestricted)
    params = tmp.params
    stricted = tmp.stricted
    firstRestricted = tmp.firstRestricted
    if tmp.message:
        message = tmp.message
    previousStrict = strict
    body = parseFunctionSourceElements()
    if strict and firstRestricted:
        throwError(firstRestricted, message)
    if strict and stricted:
        throwErrorTolerant(stricted, message)
    strict = previousStrict
    return delegate.markEnd(delegate.createFunctionDeclaration(id, params, [], body))
def parseFunctionExpression():
    """Parse a function expression; unlike a declaration its name is
    optional.  Deferred strict-mode validation mirrors
    parseFunctionDeclaration."""
    global strict
    token = None
    id = None
    stricted = None
    firstRestricted = None
    message = None
    tmp = None
    params = []
    body = None
    previousStrict = None
    delegate.markStart()
    expectKeyword("function")
    if not match("("):
        token = lookahead
        id = parseVariableIdentifier()
        if strict:
            if isRestrictedWord(token.value):
                throwErrorTolerant(token, Messages.StrictFunctionName)
        else:
            if isRestrictedWord(token.value):
                firstRestricted = token
                message = Messages.StrictFunctionName
            elif isStrictModeReservedWord(token.value):
                firstRestricted = token
                message = Messages.StrictReservedWord
    tmp = parseParams(firstRestricted)
    params = tmp.params
    stricted = tmp.stricted
    firstRestricted = tmp.firstRestricted
    if tmp.message:
        message = tmp.message
    previousStrict = strict
    body = parseFunctionSourceElements()
    if strict and firstRestricted:
        throwError(firstRestricted, message)
    if strict and stricted:
        throwErrorTolerant(stricted, message)
    strict = previousStrict
    return delegate.markEnd(delegate.createFunctionExpression(id, params, [], body))
def parseSourceElement():
    """Parse one top-level source element: a let/const declaration, a
    function declaration, or a plain statement.  Returns nothing at EOF."""
    if lookahead.type == Token.Keyword:
        while 1:
            if (lookahead.value == "let") or (lookahead.value == "const"):
                return parseConstLetDeclaration(lookahead.value)
            elif lookahead.value == "function":
                return parseFunctionDeclaration()
            else:
                return parseStatement()
            break
    if lookahead.type != Token.EOF:
        return parseStatement()
def parseSourceElements():
    """Parse the program body: directive prologue (possibly enabling
    strict mode) followed by all remaining source elements."""
    global strict
    sourceElement = None
    sourceElements = []
    token = None
    directive = None
    firstRestricted = None
    while index < length:
        token = lookahead
        if token.type != Token.StringLiteral:
            break
        sourceElement = parseSourceElement()
        sourceElements.append(sourceElement)
        if sourceElement.expression.type != Syntax.Literal:
            break
        # Strip the quotes around the string literal to get the directive.
        directive = source[(token.range[0] + 1):(token.range[1] - 1)]
        if directive == "use strict":
            strict = True
            if firstRestricted:
                throwErrorTolerant(firstRestricted, Messages.StrictOctalLiteral)
        else:
            if (not firstRestricted) and token.octal:
                firstRestricted = token
    while index < length:
        sourceElement = parseSourceElement()
        if ('undefined' if not 'sourceElement' in locals() else typeof(sourceElement)) == "undefined":
            break
        sourceElements.append(sourceElement)
    return sourceElements
def parseProgram():
    """Top-level parse entry: prime the lookahead and build the Program
    node.  Strict mode starts off and is enabled by directives."""
    global strict
    body = None
    skipComment()
    delegate.markStart()
    strict = False
    peek()
    body = parseSourceElements()
    return delegate.markEnd(delegate.createProgram(body))
def collectToken():
    """Replacement for advance() installed by patch(): scan the next token
    via the saved extra.advance and record it (type name, text, range,
    loc) into extra.tokens."""
    start = None
    loc = None
    token = None
    range = None
    value = None
    skipComment()
    start = index
    loc = jsdict({
        "start": jsdict({
            "line": lineNumber,
            "column": index - lineStart,
        }),
    })
    token = extra.advance()
    loc.end = jsdict({
        "line": lineNumber,
        "column": index - lineStart,
    })
    if token.type != Token.EOF:
        range = [token.range[0], token.range[1]]
        value = source[token.range[0]:token.range[1]]
        extra.tokens.append(jsdict({
            "type": TokenName[token.type],
            "value": value,
            "range": range,
            "loc": loc,
        }))
    return token
def collectRegex():
    """Replacement for scanRegExp() installed by patch(): scan a regex
    literal and record it in extra.tokens.  If the last collected token
    was the '/' or '/=' that actually begins this regex, drop it first so
    the stream is not double-counted (skipped while pure-tokenizing)."""
    pos = None
    loc = None
    regex = None
    token = None
    skipComment()
    pos = index
    loc = jsdict({
        "start": jsdict({
            "line": lineNumber,
            "column": index - lineStart,
        }),
    })
    regex = extra.scanRegExp()
    loc.end = jsdict({
        "line": lineNumber,
        "column": index - lineStart,
    })
    if not extra.tokenize:
        if len(extra.tokens) > 0:
            token = extra.tokens[len(extra.tokens) - 1]
            if (token.range[0] == pos) and (token.type == "Punctuator"):
                if (token.value == "/") or (token.value == "/="):
                    extra.tokens.pop()
    extra.tokens.append(jsdict({
        "type": "RegularExpression",
        "value": regex.literal,
        "range": [pos, index],
        "loc": loc,
    }))
    return regex
def filterTokenLocation():
    """Rebuild extra.tokens keeping only the fields the caller asked for:
    range/loc are copied onto each token only when the corresponding
    option flag is set."""
    i = None
    entry = None
    token = None
    tokens = []
    i = 0
    while 1:
        if not (i < len(extra.tokens)):
            break
        entry = extra.tokens[i]
        token = jsdict({
            "type": entry.type,
            "value": entry.value,
        })
        if extra.range:
            token.range = entry.range
        if extra.loc:
            token.loc = entry.loc
        tokens.append(token)
        i += 1
    extra.tokens = tokens
class LocationMarker(object):
    """Captures a start position at construction and an end position via
    end(); apply() writes range/loc onto a node (per the extra flags) and
    runs the delegate's postProcess hook.

    marker layout: [startIdx, startLine, startCol, endIdx, endLine, endCol].
    """
    def __init__(self=None):
        self.marker = [index, lineNumber, index - lineStart, 0, 0, 0]
    def end(self=None):
        # Record the current scanner position as the end of the span.
        self.marker[3] = index
        self.marker[4] = lineNumber
        self.marker[5] = index - lineStart
    def apply(self=None, node=None):
        if extra.range:
            node.range = [self.marker[0], self.marker[3]]
        if extra.loc:
            node.loc = jsdict({
                "start": jsdict({
                    "line": self.marker[1],
                    "column": self.marker[2],
                }),
                "end": jsdict({
                    "line": self.marker[4],
                    "column": self.marker[5],
                }),
            })
        # NOTE(review): postProcess result is assigned to the local only —
        # mirrors upstream esprima, where nodes are mutated in place.
        node = delegate.postProcess(node)
def createLocationMarker():
    """Return a LocationMarker, or None when neither loc nor range
    tracking was requested (callers treat None as a no-op marker)."""
    if (not extra.loc) and (not extra.range):
        return None
    skipComment()
    return LocationMarker()
def patch():
    """Swap in the token-collecting scanner functions, stashing the
    originals on `extra` so unpatch() can restore them."""
    global advance, scanRegExp
    if ('undefined' if not ('tokens' in extra) else typeof(extra.tokens)) != "undefined":
        extra.advance = advance
        extra.scanRegExp = scanRegExp
        advance = collectToken
        scanRegExp = collectRegex
def unpatch():
    """Restore the original scanner functions saved by patch()."""
    global advance, scanRegExp
    if ('undefined' if not ('scanRegExp' in extra) else typeof(extra.scanRegExp)) == "function":
        advance = extra.advance
        scanRegExp = extra.scanRegExp
def tokenize(code, **options):
    """Tokenize *code* and return the token list (public API).

    Resets all module-level scanner state, forces token collection on,
    honors range/loc/comment/tolerant options, and lexes to EOF.  In
    tolerant mode lex errors are appended to the result's `errors`
    attribute instead of raised.
    """
    global delegate, source, index, lineNumber, lineStart, length, lookahead, state, extra
    options = jsdict(options)
    toString = None
    token = None
    tokens = None
    toString = str
    if (('undefined' if not 'code' in locals() else typeof(code)) != "string") and (not isinstance(code, str)):
        code = toString(code)
    delegate = SyntaxTreeDelegate
    source = code
    index = 0
    lineNumber = (1 if len(source) > 0 else 0)
    lineStart = 0
    length = len(source)
    lookahead = None
    state = jsdict({
        "allowIn": True,
        "labelSet": jsdict({
        }),
        "inFunctionBody": False,
        "inIteration": False,
        "inSwitch": False,
        "lastCommentStart": -1,
    })
    extra = jsdict({
    })
    options = options or jsdict({
    })
    # Token collection is mandatory for tokenize().
    options.tokens = True
    extra.tokens = []
    extra.tokenize = True
    extra.openParenToken = -1
    extra.openCurlyToken = -1
    extra.range = (('undefined' if not ('range' in options) else typeof(options.range)) == "boolean") and options.range
    extra.loc = (('undefined' if not ('loc' in options) else typeof(options.loc)) == "boolean") and options.loc
    if (('undefined' if not ('comment' in options) else typeof(options.comment)) == "boolean") and options.comment:
        extra.comments = []
    if (('undefined' if not ('tolerant' in options) else typeof(options.tolerant)) == "boolean") and options.tolerant:
        extra.errors = []
    if length > 0:
        if (typeof(source[0])) == "undefined":
            if isinstance(code, str):
                source = code.valueOf()
    patch()
    try:
        peek()
        if lookahead.type == Token.EOF:
            return extra.tokens
        token = lex()
        while lookahead.type != Token.EOF:
            try:
                token = lex()
            except Exception as lexError:
                token = lookahead
                if extra.errors:
                    # Tolerant mode: record the error and stop tokenizing.
                    extra.errors.append(lexError)
                    break
                else:
                    raise
        filterTokenLocation()
        tokens = extra.tokens
        if ('undefined' if not ('comments' in extra) else typeof(extra.comments)) != "undefined":
            tokens.comments = extra.comments
        if ('undefined' if not ('errors' in extra) else typeof(extra.errors)) != "undefined":
            tokens.errors = extra.errors
    except Exception as e:
        raise
    finally:
        # Always restore the scanner functions and drop collection state.
        unpatch()
        extra = jsdict({
        })
    return tokens
def parse(code, **options):
    """Parse *code* and return the Program AST node (public API).

    Resets all module-level parser state, applies the range/loc/source/
    tokens/comment/tolerant options, installs collection hooks via
    patch(), and attaches comments/tokens/errors to the result when the
    corresponding options were requested.
    """
    global delegate, source, index, lineNumber, lineStart, length, lookahead, state, extra
    options = jsdict(options)
    program = None
    toString = None
    toString = str
    if (('undefined' if not 'code' in locals() else typeof(code)) != "string") and (not isinstance(code, str)):
        code = toString(code)
    delegate = SyntaxTreeDelegate
    source = code
    index = 0
    lineNumber = (1 if len(source) > 0 else 0)
    lineStart = 0
    length = len(source)
    lookahead = None
    state = jsdict({
        "allowIn": True,
        "labelSet": jsdict({
        }),
        "inFunctionBody": False,
        "inIteration": False,
        "inSwitch": False,
        "lastCommentStart": -1,
        "markerStack": [],
    })
    extra = jsdict({
    })
    if ('undefined' if not 'options' in locals() else typeof(options)) != "undefined":
        extra.range = (('undefined' if not ('range' in options) else typeof(options.range)) == "boolean") and options.range
        extra.loc = (('undefined' if not ('loc' in options) else typeof(options.loc)) == "boolean") and options.loc
        if (extra.loc and (options.source != None)) and (options.source != undefined):
            extra.source = toString(options.source)
        if (('undefined' if not ('tokens' in options) else typeof(options.tokens)) == "boolean") and options.tokens:
            extra.tokens = []
        if (('undefined' if not ('comment' in options) else typeof(options.comment)) == "boolean") and options.comment:
            extra.comments = []
        if (('undefined' if not ('tolerant' in options) else typeof(options.tolerant)) == "boolean") and options.tolerant:
            extra.errors = []
    if length > 0:
        if (typeof(source[0])) == "undefined":
            if isinstance(code, str):
                source = code.valueOf()
    patch()
    try:
        program = parseProgram()
        if ('undefined' if not ('comments' in extra) else typeof(extra.comments)) != "undefined":
            program.comments = extra.comments
        if ('undefined' if not ('tokens' in extra) else typeof(extra.tokens)) != "undefined":
            filterTokenLocation()
            program.tokens = extra.tokens
        if ('undefined' if not ('errors' in extra) else typeof(extra.errors)) != "undefined":
            program.errors = extra.errors
    except Exception as e:
        raise
    finally:
        # Always restore the scanner functions and drop collection state.
        unpatch()
        extra = jsdict({
        })
    return program
parse('var = 490 \n a=4;') | apache-2.0 |
lordmuffin/aws-cfn-plex | functions/credstash/cryptography/exceptions.py | 27 | 1234 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from enum import Enum
class _Reasons(Enum):
    """Internal enumeration of reasons an algorithm may be unsupported;
    carried by UnsupportedAlgorithm via its `reason` argument."""
    BACKEND_MISSING_INTERFACE = 0
    UNSUPPORTED_HASH = 1
    UNSUPPORTED_CIPHER = 2
    UNSUPPORTED_PADDING = 3
    UNSUPPORTED_MGF = 4
    UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5
    UNSUPPORTED_ELLIPTIC_CURVE = 6
    UNSUPPORTED_SERIALIZATION = 7
    UNSUPPORTED_X509 = 8
    UNSUPPORTED_EXCHANGE_ALGORITHM = 9
    UNSUPPORTED_DIFFIE_HELLMAN = 10
class UnsupportedAlgorithm(Exception):
    """Raised when the requested algorithm is not supported.

    *reason* (typically a ``_Reasons`` member, may be None) records why
    support is missing and is kept on the private ``_reason`` attribute.
    """

    def __init__(self, message, reason=None):
        self._reason = reason
        super(UnsupportedAlgorithm, self).__init__(message)
class AlreadyFinalized(Exception):
    """Raised on use of an already-finalized object (name-derived; no extra state)."""
    pass
class AlreadyUpdated(Exception):
    """Raised on an update after updating is no longer allowed (name-derived; no extra state)."""
    pass
class NotYetFinalized(Exception):
    """Raised when an operation requires finalization first (name-derived; no extra state)."""
    pass
class InvalidTag(Exception):
    """Raised when an authentication tag fails to verify (name-derived; no extra state)."""
    pass
class InvalidSignature(Exception):
    """Raised when a signature fails to verify (name-derived; no extra state)."""
    pass
class InternalError(Exception):
    """Unexpected internal failure; carries a numeric code on ``err_code``."""

    def __init__(self, msg, err_code):
        self.err_code = err_code
        super(InternalError, self).__init__(msg)
class InvalidKey(Exception):
    """Raised when a key fails verification/derivation checks (name-derived; no extra state)."""
    pass
| mit |
pombredanne/nTLP | examples/gridworlds/gw_bm_analysis.py | 1 | 4888 | # Copyright (c) 2011, 2012 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
# Take averages of the output from the gridworld benchmark script.
#
# Usage: gw_bm_analysis.py [data file] [x-col] [y-col] <exp/lin>
# Reads whitespace-separated benchmark rows, averages them per solver and
# grid dimension, and emits a gnuplot script that fits and plots the data.
# NOTE: uses the Python 2 print statement.
import numpy as np
import sys
import os
import string
# Fit formulas: each is (gnuplot Template parameterised by solver name,
# printf-style display format used in the plot legend).
expform = (string.Template("exp(${SOLVER}_a*x + ${SOLVER}_b)"), "exp(%.3f*x + %.3f)")
linform = (string.Template("${SOLVER}_a*x + ${SOLVER}_b"), "%.3f*x + %.3f")
# Gnuplot "fit" stanza per solver.  "$$" escapes a literal "$" so the output
# contains e.g. "$6" (gnuplot column reference), not a Template substitution.
plotfit = string.Template("""${SOLVER}_a = ${SOLVER}_b = 0.5
${SOLVER}_f(x) = $FORMULA
fit ${SOLVER}_f(x) \"$FILENAME\" using $XCOL:((stringcolumn(1) eq "$SOLVER") ? $$$YCOL : 1/0) via ${SOLVER}_a, ${SOLVER}_b
""")
# One "plot" clause per solver: data with errorbars plus the fitted curve.
plottpl = string.Template("\"$FILENAME\" using $XCOL:((stringcolumn(1) eq \"$SOLVER\") ? $$$YCOL : 1/0):$ERRCOL with errorbars \
title \"$SOLVER\" lt $COLOR, ${SOLVER}_f(x) title sprintf(\"$SOLVER fit: $FORMULA\", ${SOLVER}_a, ${SOLVER}_b) lt $COLOR")
# Common gnuplot preamble (axis labels, PNG terminal/output).
pf = string.Template("""
set xlabel "$XAXIS"
set ylabel "$YAXIS"
set terminal png font "" 10
set output "$FN_PNG"
""")
# Column layout of the averaged output file (index 0 unused so that indices
# match gnuplot's 1-based column numbers).
columns = ["", "Solver", "Cells", "Goals", "WDensity", "AvgTime", "StDevTime", "AvgStates", "StDevStates"]
colnames = ["", "Solver", "Grid cells", "Number of goals", "Wall Density", "CPU time (s)", "", "Number of states", ""]
# Maps a mean column to the column holding its standard error.
err = { columns.index("AvgTime") : columns.index("StDevTime"),
        columns.index("AvgStates") : columns.index("StDevStates") }
if len(sys.argv) < 4:
    print "Usage: gw_bm_analysis.py [data file] [x-col] [y-col] <exp/lin>"
    sys.exit(0)
# Structured array; field names are taken from the data file's header row.
d = np.genfromtxt(sys.argv[1], dtype="S16, i4, i4, i4, f8, f8, i4", names=True)
xcol = columns.index(sys.argv[2])
ycol = columns.index(sys.argv[3])
if len(sys.argv) >= 5:
    EXP = (sys.argv[4] == "exp")
else:
    # Default linear fit
    EXP = False
if EXP: eqn = expform
else: eqn = linform
# Accumulates one averaged row per (solver, grid dimension) pair.
avgs = []
solvers = ["NuSMV", "jtlv", "gr1c", "SPIN"]
# List of columns specifying dimension of a grid
dimension = ["W", "H", "Goals", "WDensity"]
for solver in solvers:
    s_data = d[d["Solver"] == solver]
    for dim in np.unique(s_data[dimension]):
        # Mean & error in the mean
        times = s_data[s_data[dimension] == dim]["Time"]
        time_mean = times.mean()
        time_stdev = times.std()/np.sqrt(len(times))
        states = s_data[s_data[dimension] == dim]["NStates"]
        states_mean = states.mean()
        states_stdev = states.std()/np.sqrt(len(states))
        # dim[0]*dim[1] = W*H = total grid cells; dim[2] = goals, dim[3] = density.
        avgs.append((solver, dim[0] * dim[1], dim[2], dim[3], time_mean,
                    time_stdev, states_mean, states_stdev))
# Derive output names from the input file: averaged data, gnuplot script, PNG.
(prefix, ext) = os.path.splitext(sys.argv[1])
outfile = prefix + ".avg" + ext
pltfile = prefix + ".avg.plt"
pngfile = prefix + ".png"
with open(outfile, "w") as f:
    f.write(" ".join(columns[1:]) + "\n")
    for a in avgs:
        f.write("%s %d %d %.4f %.4f %.4f %.4f %.4f\n" % a)
# Emit the gnuplot script: a fit stanza per solver, then one combined plot.
with open(pltfile, "w") as f:
    pl = []
    for (n, solver) in enumerate(solvers):
        fx = eqn[0].substitute(SOLVER=solver)
        s = plotfit.substitute(SOLVER=solver, FILENAME=outfile, XCOL=xcol,
                               YCOL=ycol, FORMULA=fx)
        f.write(s)
        s = plottpl.substitute(SOLVER=solver, FILENAME=outfile, XCOL=xcol,
                               YCOL=ycol, ERRCOL=err[ycol], COLOR=n, FORMULA=eqn[1])
        pl.append(s)
    # safe_substitute: the preamble is not parameterised by solver.
    s = pf.safe_substitute(FN_PNG=pngfile, XAXIS=colnames[xcol],
                           YAXIS=colnames[ycol])
    f.write(s)
    if EXP: f.write("set log y\n")
    f.write("plot " + ", ".join(pl))
| bsd-3-clause |
hazelnusse/sympy-old | sympy/geometry/curve.py | 5 | 1190 | from sympy.core import sympify
from sympy.geometry.exceptions import GeometryError
from entity import GeometryEntity
class Curve(GeometryEntity):
    """
    A curve in space, defined by parametric component functions of a
    single parameter together with that parameter's limits.

    Example:
    ========
    >>> from sympy import sin, cos, Symbol
    >>> t = Symbol("t")
    >>> C = Curve([sin(t), cos(t)], (t, 0, 2))
    >>> C.functions
    [sin(t), cos(t)]
    >>> C.limits
    (t, 0, 2)
    >>> C.parameter
    t
    """

    def __new__(cls, function, limits):
        """Create a new curve.

        ``function`` is sympified; ``limits`` must be a 3-element list or
        tuple of the form (parameter, lower, upper).

        Raises GeometryError when ``function`` sympifies to a falsy value,
        and ValueError when ``limits`` has the wrong shape.
        """
        fun = sympify(function)
        if not fun:
            # Stray trailing semicolons removed (unidiomatic in Python).
            raise GeometryError("%s.__new__ don't know how to handle" % cls.__name__)
        if not isinstance(limits, (list, tuple)) or len(limits) != 3:
            raise ValueError("Limits argument has wrong syntax")
        return GeometryEntity.__new__(cls, fun, limits)

    @property
    def functions(self):
        """The functions specifying the curve."""
        return self.__getitem__(0)

    @property
    def parameter(self):
        """The curve function variable (first element of the limits)."""
        return self.__getitem__(1)[0]

    @property
    def limits(self):
        """The limits for the curve as (parameter, lower, upper)."""
        return self.__getitem__(1)
| bsd-3-clause |
bleib1dj/neomodel | neomodel/core.py | 3 | 8157 | import os
from .exception import DoesNotExist
from .properties import Property, PropertyManager
from .signals import hooks
from .util import Database, deprecated, classproperty
# Neo4j REST endpoint; overridable via the NEO4J_REST_URL environment variable.
DATABASE_URL = os.environ.get('NEO4J_REST_URL', 'http://localhost:7474/db/data/')
# Single module-level connection shared by all node classes in this module.
db = Database(DATABASE_URL)
def install_labels(cls):
    """Create a Neo4j index or unique constraint for every indexed property
    defined on ``cls`` (one Cypher statement per property)."""
    # TODO when to execute this?
    for key, prop in cls.defined_properties(aliases=False, rels=False).items():
        if prop.index:
            db.cypher_query("CREATE INDEX on :{}({}); ".format(cls.__label__, key))
        elif prop.unique_index:
            # Note: elif means unique_index is ignored when index is also set.
            db.cypher_query("CREATE CONSTRAINT on (n:{}) ASSERT n.{} IS UNIQUE; ".format(
                cls.__label__, key))
class NodeMeta(type):
    """Metaclass for node classes: wires up properties, labels and indexes."""
    def __new__(mcs, name, bases, dct):
        # Give every node class its own DoesNotExist subclass so callers can
        # catch SomeNode.DoesNotExist specifically.
        dct.update({'DoesNotExist': type('DoesNotExist', (DoesNotExist,), dct)})
        inst = super(NodeMeta, mcs).__new__(mcs, name, bases, dct)
        if hasattr(inst, '__abstract_node__'):
            # Abstract bases get no label/index machinery; drop the marker so
            # it is not inherited by concrete subclasses.
            delattr(inst, '__abstract_node__')
        else:
            for key, value in dct.items():
                if key == 'deleted':
                    raise ValueError("Class property called 'deleted' "
                                     + "conflicts with neomodel internals")
                if issubclass(value.__class__, Property):
                    # Tell each Property which attribute and class owns it.
                    value.name = key
                    value.owner = inst
                    # support for 'magic' properties
                    if hasattr(value, 'setup') and hasattr(value.setup, '__call__'):
                        value.setup()
            if '__label__' in dct:
                inst.__label__ = dct['__label__']
            else:
                # Default label is simply the class name.
                inst.__label__ = inst.__name__
            install_labels(inst)
            from .index import NodeIndexManager
            inst.index = NodeIndexManager(inst, inst.__label__)
        return inst
NodeBase = NodeMeta('NodeBase', (PropertyManager,), {'__abstract_node__': True})
class StructuredNode(NodeBase):
    """Base class for user-defined node models.

    Instances track their database node via ``self._id`` once saved; the
    ``deleted`` attribute is reserved to flag removed nodes.
    """
    __abstract_node__ = True
    @classproperty
    def nodes(cls):
        # Imported lazily to avoid a circular import with .match.
        from .match import NodeSet
        return NodeSet(cls)
    def __init__(self, *args, **kwargs):
        if 'deleted' in kwargs:
            raise ValueError("deleted property is reserved for neomodel")
        # Attach a manager object for every defined relationship slot.
        for key, val in self.defined_properties(aliases=False, properties=False).items():
            self.__dict__[key] = val.build_manager(self, key)
        super(StructuredNode, self).__init__(*args, **kwargs)
    def __eq__(self, other):
        # Nodes compare equal only when both are saved and share a node id.
        if not isinstance(other, (StructuredNode,)):
            return False
        if hasattr(self, '_id') and hasattr(other, '_id'):
            return self._id == other._id
        return False
    def __ne__(self, other):
        return not self.__eq__(other)
    def labels(self):
        """Return the labels of the underlying node (hits the database)."""
        self._pre_action_check('labels')
        return self.cypher("START self=node({self}) RETURN labels(self)")[0][0][0]
    def cypher(self, query, params=None):
        """Run a Cypher query with {self} bound to this node's id."""
        self._pre_action_check('cypher')
        params = params or {}
        params.update({'self': self._id})
        return db.cypher_query(query, params)
    @classmethod
    def inherited_labels(cls):
        """Labels contributed by this class and its non-abstract ancestors."""
        return [scls.__label__ for scls in cls.mro()
                if hasattr(scls, '__label__') and not hasattr(scls, '__abstract_node__')]
    @classmethod
    @deprecated("Category nodes are now deprecated, the functionality is emulated using labels")
    def category(cls):
        return FakeCategory(cls)
    @hooks
    def save(self):
        """Create or update this node in the database; returns self."""
        # create or update instance node
        if hasattr(self, '_id'):
            # update
            query = "START self=node({self})\n"
            query += "\n".join(["SET self.{} = {{{}}}".format(key, key) + "\n"
                for key in self.__properties__.keys()])
            for label in self.inherited_labels():
                query += "SET self:`{}`\n".format(label)
            params = self.deflate(self.__properties__, self)
            self.cypher(query, params)
        elif hasattr(self, 'deleted') and self.deleted:
            raise ValueError("{}.save() attempted on deleted node".format(self.__class__.__name__))
        else: # create
            self._id = self.create(self.__properties__)[0]._id
        return self
    def _pre_action_check(self, action):
        # Shared guard: refuse operations on deleted or never-saved nodes.
        if hasattr(self, 'deleted') and self.deleted:
            raise ValueError("{}.{}() attempted on deleted node".format(self.__class__.__name__, action))
        if not hasattr(self, '_id'):
            raise ValueError("{}.{}() attempted on unsaved node".format(self.__class__.__name__, action))
    @hooks
    def delete(self):
        """Delete this node and all attached relationships; returns True."""
        self._pre_action_check('delete')
        self.cypher("START self=node({self}) OPTIONAL MATCH (self)-[r]-() DELETE r, self")
        del self.__dict__['_id']
        self.deleted = True
        return True
    def refresh(self):
        """Reload this object from its node id in the database"""
        self._pre_action_check('refresh')
        if hasattr(self, '_id'):
            node = self.inflate(self.cypher("START n=node({self}) RETURN n")[0][0][0])
            for key, val in node.__properties__.items():
                setattr(self, key, val)
        else:
            raise ValueError("Can't refresh unsaved node")
    @classmethod
    def create(cls, *props):
        """Create one node per property dict; returns the inflated instances."""
        query = ""
        deflated = [cls.deflate(p) for p in list(props)]
        params = {}
        for i in range(0, len(deflated)):
            # Build "CREATE (n0 {key: {n0_key}, ...})" with unique param names.
            props = ", ".join(["{}: {{n{}_{}}}".format(key, i, key)
                for key, value in deflated[i].items()])
            query += "CREATE (n{} {{{}}})\n".format(i, props)
            for label in cls.inherited_labels():
                query += "SET n{}:`{}`\n".format(i, label)
            for key, value in deflated[i].items():
                params["n{}_{}".format(i, key)] = value
        query += "RETURN "
        query += ", ".join(["n" + str(i) for i in range(0, len(deflated))])
        results, meta = db.cypher_query(query, params)
        if hasattr(cls, 'post_create'):
            # NOTE(review): this iterates result *rows*, not inflated nodes --
            # confirm post_create is really expected on raw rows.
            for node in results:
                node.post_create()
        return [cls.inflate(node) for node in results[0]]
    @classmethod
    def inflate(cls, node):
        """Build a class instance from a raw driver node object."""
        props = {}
        for key, prop in cls.defined_properties(aliases=False, rels=False).items():
            if key in node._properties:
                props[key] = prop.inflate(node._properties[key], node)
            elif prop.has_default:
                props[key] = prop.default_value()
            else:
                props[key] = None
        snode = cls(**props)
        snode._id = node._id
        return snode
class FakeCategory(object):
    """
    Category nodes are no longer required with the introduction of labels.
    This class behaves like the old category nodes used in earlier version of neomodel
    but uses labels under the hood calling the traversal api.
    """

    def __init__(self, cls):
        self.instance = FakeInstanceRel(cls)

    def cypher(self, *args, **kwargs):
        # Bug fix: the original raised ``NotImplemented(...)``, but
        # NotImplemented is a non-callable constant, so callers got a
        # confusing TypeError.  NotImplementedError is the intended exception.
        raise NotImplementedError("cypher method on category nodes no longer supported")
class FakeInstanceRel(object):
    """
    Fake rel manager for our fake category node
    """
    def __init__(self, cls):
        # Lazy import avoids a circular dependency with .match.
        from .match import NodeSet
        self._node_set = NodeSet(cls)
    def __len__(self):
        return self._node_set.query_cls(self._node_set)._count()
    def __bool__(self):
        return len(self) > 0
    def __nonzero__(self):
        # Python 2 spelling of __bool__.
        return len(self) > 0
    def count(self):
        return self.__len__()
    def all(self):
        return self._node_set.all()
    def search(self, **kwargs):
        """Return all nodes matching the given property filters."""
        ns = self._node_set
        for field, value in kwargs.items():
            # NOTE(review): the return value of filter() is discarded here;
            # this only works if NodeSet.filter mutates in place -- confirm.
            ns.filter(**{field: value})
        return self._node_set.all()
    def get(self, **kwargs):
        """Return exactly one matching node or raise."""
        result = self.search(**kwargs)
        if len(result) == 1:
            return result[0]
        if len(result) > 1:
            raise Exception("Multiple items returned, use search?")
        if not result:
            raise DoesNotExist("No items exist for the specified arguments")
| mit |
fernandezcuesta/ansible | lib/ansible/plugins/action/net_base.py | 12 | 8156 | # (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils.six import iteritems
from imp import find_module, load_module
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
    """Action plugin that starts a persistent network connection and then
    dispatches to the platform-specific implementation module."""
    def run(self, tmp=None, task_vars=None):
        """Validate the connection, build a play context from the provider
        dict, open the persistent connection and run the implementation
        module for the detected network OS."""
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                'got %s' % self._play_context.connection
            )
        play_context = copy.deepcopy(self._play_context)
        play_context.network_os = self._get_network_os(task_vars)
        self.provider = self._load_provider(play_context.network_os)
        # junos talks netconf (default port 830); everything else uses the
        # CLI over SSH (default port 22).
        if play_context.network_os == 'junos':
            play_context.connection = 'netconf'
            play_context.port = self.provider['port'] or self._play_context.port or 830
        else:
            play_context.connection = 'network_cli'
            play_context.port = self.provider['port'] or self._play_context.port or 22
        # Provider values win over play-context values when both are set.
        play_context.remote_addr = self.provider['host'] or self._play_context.remote_addr
        play_context.remote_user = self.provider['username'] or self._play_context.connection_user
        play_context.password = self.provider['password'] or self._play_context.password
        play_context.private_key_file = self.provider['ssh_keyfile'] or self._play_context.private_key_file
        play_context.timeout = self.provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT
        if 'authorize' in self.provider.keys():
            play_context.become = self.provider['authorize'] or False
            play_context.become_pass = self.provider['auth_pass']
        socket_path = self._start_connection(play_context)
        task_vars['ansible_socket'] = socket_path
        if 'fail_on_missing_module' not in self._task.args:
            self._task.args['fail_on_missing_module'] = False
        result = super(ActionModule, self).run(tmp, task_vars)
        module = self._get_implementation_module(play_context.network_os, self._task.action)
        if not module:
            if self._task.args['fail_on_missing_module']:
                result['failed'] = True
            else:
                result['failed'] = False
            result['msg'] = ('Could not find implementation module %s for %s' %
                             (self._task.action, play_context.network_os))
        else:
            new_module_args = self._task.args.copy()
            # perhaps delete the provider argument here as well since the
            # module code doesn't need the information, the connection is
            # already started
            if 'network_os' in new_module_args:
                del new_module_args['network_os']
            del new_module_args['fail_on_missing_module']
            display.vvvv('Running implementation module %s' % module)
            # NOTE(review): ``self._task.async`` is py2-era; ``async`` became
            # a reserved keyword in Python 3.7.
            result.update(self._execute_module(module_name=module,
                module_args=new_module_args, task_vars=task_vars,
                wrap_async=self._task.async))
            display.vvvv('Caching network OS %s in facts' % play_context.network_os)
            result['ansible_facts'] = {'network_os': play_context.network_os}
        return result
    def _start_connection(self, play_context):
        """Open (or reuse) the persistent connection; returns its socket path
        or a failure dict when no socket could be created."""
        display.vvv('using connection plugin %s' % play_context.connection, play_context.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent',
            play_context, sys.stdin)
        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, play_context.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
        # make sure we are in the right cli context which should be
        # enable mode and not config module
        rc, out, err = connection.exec_command('prompt()')
        if str(out).strip().endswith(')#'):
            display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
            connection.exec_command('exit')
        if self._play_context.become_method == 'enable':
            # Privilege escalation already happened on the device prompt.
            self._play_context.become = False
            self._play_context.become_method = None
        return socket_path
    def _get_network_os(self, task_vars):
        """Resolve the network OS from (in order): task args, play context,
        cached facts; returns None when undiscoverable."""
        if ('network_os' in self._task.args and self._task.args['network_os']):
            display.vvvv('Getting network OS from task argument')
            network_os = self._task.args['network_os']
        elif (self._play_context.network_os):
            display.vvvv('Getting network OS from inventory')
            network_os = self._play_context.network_os
        elif ('network_os' in task_vars['ansible_facts'] and
                task_vars['ansible_facts']['network_os']):
            display.vvvv('Getting network OS from fact')
            network_os = task_vars['ansible_facts']['network_os']
        else:
            # this will be replaced by the call to get_capabilities() on the
            # connection
            display.vvvv('Getting network OS from net discovery')
            network_os = None
        return network_os
    def _get_implementation_module(self, network_os, platform_agnostic_module):
        """Map e.g. ('eos', 'net_command') to 'eos_command'; None if the
        resulting module does not exist in the loader."""
        implementation_module = network_os + '_' + platform_agnostic_module.partition('_')[2]
        if implementation_module not in self._shared_loader_obj.module_loader:
            implementation_module = None
        return implementation_module
    def _load_provider(self, network_os):
        """Build the provider dict from task args, filling gaps from the
        platform module's argspec fallbacks."""
        # we should be able to stream line this a bit by creating a common
        # provider argument spec in module_utils/network_common.py or another
        # option is that there isn't a need to push provider into the module
        # since the connection is started in the action handler.
        f, p, d = find_module('ansible')
        f2, p2, d2 = find_module('module_utils', [p])
        f3, p3, d3 = find_module(network_os, [p2])
        module = load_module('ansible.module_utils.' + network_os, f3, p3, d3)
        provider = self._task.args.get('provider', {})
        for key, value in iteritems(module.get_argspec()):
            if key != 'provider' and key not in provider:
                if key in self._task.args:
                    provider[key] = self._task.args[key]
                elif 'fallback' in value:
                    provider[key] = self._fallback(value['fallback'])
                elif key not in provider:
                    provider[key] = None
        return provider
    def _fallback(self, fallback):
        """Evaluate an argspec fallback spec: (strategy, args..., kwargs?);
        returns None (implicitly) when the fallback is not found."""
        strategy = fallback[0]
        args = []
        kwargs = {}
        for item in fallback[1:]:
            if isinstance(item, dict):
                kwargs = item
            else:
                args = item
        try:
            return strategy(*args, **kwargs)
        except AnsibleFallbackNotFound:
            pass
| gpl-3.0 |
zhangpanrobot/myblog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/styles/autumn.py | 364 | 2144 | # -*- coding: utf-8 -*-
"""
pygments.styles.autumn
~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class AutumnStyle(Style):
    """
    A colorful style, inspired by the terminal highlighting style.
    """
    default_style = ""
    # Token -> style-string mapping (pygments format: flags + hex colors).
    styles = {
        Whitespace:                 '#bbbbbb',
        Comment:                    'italic #aaaaaa',
        Comment.Preproc:            'noitalic #4c8317',
        Comment.Special:            'italic #0000aa',
        Keyword:                    '#0000aa',
        Keyword.Type:               '#00aaaa',
        Operator.Word:              '#0000aa',
        Name.Builtin:               '#00aaaa',
        Name.Function:              '#00aa00',
        Name.Class:                 'underline #00aa00',
        Name.Namespace:             'underline #00aaaa',
        Name.Variable:              '#aa0000',
        Name.Constant:              '#aa0000',
        Name.Entity:                'bold #800',
        Name.Attribute:             '#1e90ff',
        Name.Tag:                   'bold #1e90ff',
        Name.Decorator:             '#888888',
        String:                     '#aa5500',
        String.Symbol:              '#0000aa',
        String.Regex:               '#009999',
        Number:                     '#009999',
        Generic.Heading:            'bold #000080',
        Generic.Subheading:         'bold #800080',
        Generic.Deleted:            '#aa0000',
        Generic.Inserted:           '#00aa00',
        Generic.Error:              '#aa0000',
        Generic.Emph:               'italic',
        Generic.Strong:             'bold',
        Generic.Prompt:             '#555555',
        Generic.Output:             '#888888',
        Generic.Traceback:          '#aa0000',
        Error:                      '#F00 bg:#FAA'
    }
| mit |
lyhiving/livestreamer | src/livestreamer/plugins/cybergame.py | 34 | 2269 | import re
from livestreamer.compat import urlparse
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.stream import RTMPStream
PLAYLIST_URL = "http://api.cybergame.tv/p/playlist.smil"
_url_re = re.compile("""
http(s)?://(\w+\.)?cybergame.tv
(?:
/videos/(?P<video_id>\d+)
)?
(?:
/(?P<channel>[^/&?]+)
)?
""", re.VERBOSE)
_playlist_schema = validate.Schema(
validate.union({
"base": validate.all(
validate.xml_find("./head/meta"),
validate.get("base"),
validate.url(scheme="rtmp")
),
"videos": validate.all(
validate.xml_findall(".//video"),
[
validate.union({
"src": validate.all(
validate.get("src"),
validate.text
),
"height": validate.all(
validate.get("height"),
validate.text,
validate.transform(int)
)
})
]
)
})
)
class Cybergame(Plugin):
    """Livestreamer plugin for cybergame.tv live channels and VODs."""

    @classmethod
    def can_handle_url(self, url):
        return _url_re.match(url)

    def _get_playlist(self, **params):
        """Fetch the SMIL playlist and build one RTMP stream per quality."""
        res = http.get(PLAYLIST_URL, params=params)
        playlist = http.xml(res, schema=_playlist_schema)
        streams = {}
        for video in playlist["videos"]:
            name = "{0}p".format(video["height"])
            # Bug fix: the original dict literal specified "rtmp" twice
            # ("<base>/<src>" first, then just "<base>"); the second silently
            # won, so the dead first entry is removed.  Effective parameters
            # are unchanged: base URL in "rtmp" plus a separate "playpath".
            stream = RTMPStream(self.session, {
                "rtmp": playlist["base"],
                "app": urlparse(playlist["base"]).path[1:],
                "pageUrl": self.url,
                "playpath": video["src"],
                "live": True
            })
            streams[name] = stream
        return streams

    def _get_streams(self):
        match = _url_re.match(self.url)
        video_id = match.group("video_id")
        channel = match.group("channel")
        if video_id:
            return self._get_playlist(vod=video_id)
        elif channel:
            return self._get_playlist(channel=channel)
__plugin__ = Cybergame
| bsd-2-clause |
mathiasertl/django-ca | ca/django_ca/deprecation.py | 1 | 1194 | # This file is part of django-ca (https://github.com/mathiasertl/django-ca).
#
# django-ca is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# django-ca is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-ca. If not,
# see <http://www.gnu.org/licenses/>.
"""Deprecation classes in django-ca."""
class RemovedInDjangoCA120Warning(PendingDeprecationWarning):
    """Warning if a feature will be removed in django-ca==1.20."""
class RemovedInDjangoCA121Warning(PendingDeprecationWarning):
    """Warning if a feature will be removed in django-ca==1.21."""
class RemovedInDjangoCA122Warning(PendingDeprecationWarning):
    """Warning if a feature will be removed in django-ca==1.22."""
# Alias that always points at the warning class for the next release.
RemovedInNextVersionWarning = RemovedInDjangoCA120Warning
| gpl-3.0 |
Desarrollo-CeSPI/meran | dev-plugins/node/lib/node/wafadmin/__init__.py | 4 | 1914 | #!/usr/bin/env python
# Meran - MERAN UNLP is a ILS (Integrated Library System) wich provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Meran - MERAN UNLP is a ILS (Integrated Library System) wich provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# Thomas Nagy, 2005 (ita)
| gpl-3.0 |
ForAP/Advanc3d-Pr0graming | Kivy-Resources/kivy-examples/widgets/effectwidget.py | 38 | 5814 | '''
Example usage of the effectwidget.
Currently highly experimental.
'''
from kivy.app import App
from kivy.uix.effectwidget import EffectWidget
from kivy.uix.image import Image
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scatter import Scatter
from kivy.uix.button import Button
from kivy.uix.spinner import Spinner
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivy.uix.effectwidget import (MonochromeEffect,
InvertEffect,
ScanlinesEffect,
ChannelMixEffect,
ScanlinesEffect,
FXAAEffect,
PixelateEffect,
HorizontalBlurEffect,
VerticalBlurEffect)
class ComparisonWidget(EffectWidget):
    """EffectWidget whose canvas content comes from the kv rule below.

    Bug fix: this class was defined twice back-to-back; the redundant
    duplicate definition has been removed (behavior unchanged).
    """
    pass
class EffectSpinner(Spinner):
    # Values and appearance are supplied by the <EffectSpinner> kv rule below.
    pass
class SpinnerRow(BoxLayout):
    """A row of EffectSpinners that rebuilds the target EffectWidget's
    effect list whenever one of its child spinners changes."""

    effectwidget = ObjectProperty()

    # Maps spinner text to the effect class to instantiate.  'none' simply
    # has no entry.  Bug fix: the original also had a 'flash' branch that
    # referenced an undefined name (FlashEffect) and would have raised
    # NameError; 'flash' was never offered by the spinner, so dropping the
    # broken branch changes no reachable behavior.
    _effect_classes = {
        'fxaa': FXAAEffect,
        'monochrome': MonochromeEffect,
        'invert': InvertEffect,
        'mix': ChannelMixEffect,
        'blur_h': HorizontalBlurEffect,
        'blur_v': VerticalBlurEffect,
        'postprocessing': ScanlinesEffect,
        'pixelate': PixelateEffect,
    }

    def update_effectwidget(self, *args):
        """Rebuild and assign the effect list from the spinners' texts."""
        effects = []
        # children is in reverse add-order; iterate backwards to preserve
        # the spinners' left-to-right order.
        for child in self.children[::-1]:
            effect_cls = self._effect_classes.get(child.text)
            if effect_cls is not None:
                effects.append(effect_cls())
        if self.effectwidget:
            self.effectwidget.effects = effects
example = Builder.load_string('''
#:import Vector kivy.vector.Vector
BoxLayout:
orientation: 'vertical'
FloatLayout:
ComparisonWidget:
pos_hint: {'x': 0, 'y': 0}
size_hint: 0.5, 1
id: effect1
ComparisonWidget:
pos_hint: {'x': pos_slider.value, 'y': 0}
size_hint: 0.5, 1
id: effect2
background_color: (rs.value, gs.value, bs.value, als.value)
SpinnerRow:
effectwidget: effect1
text: 'left effects'
SpinnerRow:
effectwidget: effect2
text: 'right effects'
BoxLayout:
size_hint_y: None
height: sp(40)
Label:
text: 'control overlap:'
Slider:
min: 0
max: 0.5
value: 0.5
id: pos_slider
BoxLayout:
size_hint_y: None
height: sp(40)
Label:
text: 'right bg r,g,b,a'
Slider:
min: 0
max: 1
value: 0
id: rs
Slider:
min: 0
max: 1
value: 0
id: gs
Slider:
min: 0
max: 1
value: 0
id: bs
Slider:
min: 0
max: 1
value: 0
id: als
<ComparisonWidget>:
Widget:
canvas:
Color:
rgba: 1, 0, 0, 1
Ellipse:
pos: Vector(self.pos) + 0.5*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 0, 1, 0.3, 1
Ellipse:
pos: Vector(self.pos) + 0.1*Vector(self.size)
size: 0.6*Vector(self.size)
Color:
rgba: 0.5, 0.3, 0.8, 1
Ellipse:
pos: Vector(self.pos) + Vector([0, 0.6])*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 1, 0.8, 0.1, 1
Ellipse:
pos: Vector(self.pos) + Vector([0.5, 0])*Vector(self.size)
size: 0.4*Vector(self.size)
Color:
rgba: 0, 0, 0.8, 1
Line:
points:
[self.x, self.y,
self.x + self.width, self.y + 0.3*self.height,
self.x + 0.2*self.width, self.y + 0.1*self.height,
self.x + 0.85*self.width, self.y + 0.72*self.height,
self.x + 0.31*self.width, self.y + 0.6*self.height,
self.x, self.top]
width: 1
Color:
rgba: 0, 0.9, 0.1, 1
Line:
points:
[self.x + self.width, self.y + self.height,
self.x + 0.35*self.width, self.y + 0.6*self.height,
self.x + 0.7*self.width, self.y + 0.15*self.height,
self.x + 0.2*self.width, self.y + 0.22*self.height,
self.x + 0.3*self.width, self.y + 0.92*self.height]
width: 2
<SpinnerRow>:
orientation: 'horizontal'
size_hint_y: None
height: dp(40)
text: ''
Label:
text: root.text
EffectSpinner:
on_text: root.update_effectwidget()
EffectSpinner:
on_text: root.update_effectwidget()
EffectSpinner:
on_text: root.update_effectwidget()
<EffectSpinner>:
text: 'none'
values:
['none', 'fxaa', 'monochrome',
'invert', 'mix',
'blur_h', 'blur_v',
'postprocessing', 'pixelate',]
''')
class EffectApp(App):
    """Tiny app that shows the comparison UI built from the kv string above."""
    def build(self):
        return example
EffectApp().run()
| gpl-2.0 |
rcharp/toyota-flask | venv/lib/python2.7/site-packages/requests/packages/urllib3/request.py | 853 | 5751 | try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # HTTP methods whose fields belong in the URL query string.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Bug fix: the original did ``raise NotImplemented(...)`` --
        # NotImplemented is a non-callable builtin constant, so callers got a
        # TypeError instead of the intended exception.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()
        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers
        extra_kw = {'headers': {}}
        if fields:
            if 'body' in urlopen_kw:
                raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.')
            if encode_multipart:
                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
            else:
                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
            extra_kw['body'] = body
            extra_kw['headers'] = {'Content-Type': content_type}
        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)
        return self.urlopen(method, url, **extra_kw)
| apache-2.0 |
lubosz/cerbero | test/test_cerbero_packages_packagesstore.py | 27 | 5247 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import tempfile
from cerbero.config import Platform
from cerbero.errors import PackageNotFoundError
from cerbero.packages.package import Package, MetaPackage, SDKPackage,\
InstallerPackage, App
from cerbero.packages.packagesstore import PackagesStore
from test import test_packages_common as common
PACKAGE = '''
class Package(package.Package):
name = 'test-package'
def test_imports(self):
Platform.WINDOWS
Distro.WINDOWS
DistroVersion.WINDOWS_7
Architecture.X86
'''
SDKPACKAGE = '''
class SDKPackage(package.SDKPackage):
name = 'test-package'
'''
INSTALLERPACKAGE = '''
class InstallerPackage(package.InstallerPackage):
name = 'test-package'
'''
class PackageTest(unittest.TestCase):
def setUp(self):
self.config = common.DummyConfig()
self.config.packages_dir = '/test'
self.config.target_platform = Platform.LINUX
self.store = PackagesStore(self.config, False)
def testAddPackage(self):
package = common.Package1(self.config, None, None)
self.assertEquals(len(self.store._packages), 0)
self.store.add_package(package)
self.assertEquals(len(self.store._packages), 1)
self.assertEquals(package, self.store._packages[package.name])
def testGetPackage(self):
package = common.Package1(self.config, None, None)
self.store.add_package(package)
self.assertEquals(package, self.store.get_package(package.name))
def testPackageNotFound(self):
self.failUnlessRaises(PackageNotFoundError, self.store.get_package,
'unknown')
def testPackagesList(self):
package = common.Package1(self.config, None, None)
metapackage = common.MetaPackage(self.config, None)
self.store.add_package(package)
self.store.add_package(metapackage)
l = sorted([package, metapackage], key=lambda x: x.name)
self.assertEquals(l, self.store.get_packages_list())
def testPackageDeps(self):
package = common.Package1(self.config, None, None)
package2 = common.Package2(self.config, None, None)
self.store.add_package(package)
self.store.add_package(package2)
self.assertEquals(package.deps,
[x.name for x in self.store.get_package_deps(package.name)])
def testMetaPackageDeps(self):
metapackage = common.MetaPackage(self.config, None)
self.store.add_package(metapackage)
# the metapackage depends on package that are not yet in the store
self.failUnlessRaises(PackageNotFoundError,
self.store.get_package_deps, metapackage.name)
for klass in [common.Package1, common.Package2, common.Package3,
common.Package4]:
p = klass(self.config, None, None)
self.store.add_package(p)
for klass in [common.MetaPackage]:
p = klass(self.config, None)
self.store.add_package(p)
deps = ['gstreamer-test-bindings', 'gstreamer-test1',
'gstreamer-test2', 'gstreamer-test3']
res = [x.name for x in self.store.get_package_deps(metapackage.name)]
self.assertEquals(sorted(deps), sorted(res))
def testLoadPackageFromFile(self):
package_file = tempfile.NamedTemporaryFile()
package_file.write(PACKAGE)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
self.assertIsInstance(p, Package)
self.assertEquals('test-package', p.name)
def testLoadMetaPackageFromFile(self):
for x, t in [(SDKPACKAGE, SDKPackage),
(INSTALLERPACKAGE, InstallerPackage)]:
package_file = tempfile.NamedTemporaryFile()
package_file.write(x)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
print p, type(p)
self.assertIsInstance(p, t)
self.assertEquals('test-package', p.name)
def testImports(self):
package_file = tempfile.NamedTemporaryFile()
package_file.write(PACKAGE)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
self.assertIsInstance(p, Package)
try:
p.test_imports()
except ImportError, e:
self.fail("Import error raised, %s", e)
| lgpl-2.1 |
isandlaTech/cohorte-runtime | python/src/lib/python/unidecode/x0bd.py | 253 | 4752 | data = (
'bols', # 0x00
'bolt', # 0x01
'bolp', # 0x02
'bolh', # 0x03
'bom', # 0x04
'bob', # 0x05
'bobs', # 0x06
'bos', # 0x07
'boss', # 0x08
'bong', # 0x09
'boj', # 0x0a
'boc', # 0x0b
'bok', # 0x0c
'bot', # 0x0d
'bop', # 0x0e
'boh', # 0x0f
'bwa', # 0x10
'bwag', # 0x11
'bwagg', # 0x12
'bwags', # 0x13
'bwan', # 0x14
'bwanj', # 0x15
'bwanh', # 0x16
'bwad', # 0x17
'bwal', # 0x18
'bwalg', # 0x19
'bwalm', # 0x1a
'bwalb', # 0x1b
'bwals', # 0x1c
'bwalt', # 0x1d
'bwalp', # 0x1e
'bwalh', # 0x1f
'bwam', # 0x20
'bwab', # 0x21
'bwabs', # 0x22
'bwas', # 0x23
'bwass', # 0x24
'bwang', # 0x25
'bwaj', # 0x26
'bwac', # 0x27
'bwak', # 0x28
'bwat', # 0x29
'bwap', # 0x2a
'bwah', # 0x2b
'bwae', # 0x2c
'bwaeg', # 0x2d
'bwaegg', # 0x2e
'bwaegs', # 0x2f
'bwaen', # 0x30
'bwaenj', # 0x31
'bwaenh', # 0x32
'bwaed', # 0x33
'bwael', # 0x34
'bwaelg', # 0x35
'bwaelm', # 0x36
'bwaelb', # 0x37
'bwaels', # 0x38
'bwaelt', # 0x39
'bwaelp', # 0x3a
'bwaelh', # 0x3b
'bwaem', # 0x3c
'bwaeb', # 0x3d
'bwaebs', # 0x3e
'bwaes', # 0x3f
'bwaess', # 0x40
'bwaeng', # 0x41
'bwaej', # 0x42
'bwaec', # 0x43
'bwaek', # 0x44
'bwaet', # 0x45
'bwaep', # 0x46
'bwaeh', # 0x47
'boe', # 0x48
'boeg', # 0x49
'boegg', # 0x4a
'boegs', # 0x4b
'boen', # 0x4c
'boenj', # 0x4d
'boenh', # 0x4e
'boed', # 0x4f
'boel', # 0x50
'boelg', # 0x51
'boelm', # 0x52
'boelb', # 0x53
'boels', # 0x54
'boelt', # 0x55
'boelp', # 0x56
'boelh', # 0x57
'boem', # 0x58
'boeb', # 0x59
'boebs', # 0x5a
'boes', # 0x5b
'boess', # 0x5c
'boeng', # 0x5d
'boej', # 0x5e
'boec', # 0x5f
'boek', # 0x60
'boet', # 0x61
'boep', # 0x62
'boeh', # 0x63
'byo', # 0x64
'byog', # 0x65
'byogg', # 0x66
'byogs', # 0x67
'byon', # 0x68
'byonj', # 0x69
'byonh', # 0x6a
'byod', # 0x6b
'byol', # 0x6c
'byolg', # 0x6d
'byolm', # 0x6e
'byolb', # 0x6f
'byols', # 0x70
'byolt', # 0x71
'byolp', # 0x72
'byolh', # 0x73
'byom', # 0x74
'byob', # 0x75
'byobs', # 0x76
'byos', # 0x77
'byoss', # 0x78
'byong', # 0x79
'byoj', # 0x7a
'byoc', # 0x7b
'byok', # 0x7c
'byot', # 0x7d
'byop', # 0x7e
'byoh', # 0x7f
'bu', # 0x80
'bug', # 0x81
'bugg', # 0x82
'bugs', # 0x83
'bun', # 0x84
'bunj', # 0x85
'bunh', # 0x86
'bud', # 0x87
'bul', # 0x88
'bulg', # 0x89
'bulm', # 0x8a
'bulb', # 0x8b
'buls', # 0x8c
'bult', # 0x8d
'bulp', # 0x8e
'bulh', # 0x8f
'bum', # 0x90
'bub', # 0x91
'bubs', # 0x92
'bus', # 0x93
'buss', # 0x94
'bung', # 0x95
'buj', # 0x96
'buc', # 0x97
'buk', # 0x98
'but', # 0x99
'bup', # 0x9a
'buh', # 0x9b
'bweo', # 0x9c
'bweog', # 0x9d
'bweogg', # 0x9e
'bweogs', # 0x9f
'bweon', # 0xa0
'bweonj', # 0xa1
'bweonh', # 0xa2
'bweod', # 0xa3
'bweol', # 0xa4
'bweolg', # 0xa5
'bweolm', # 0xa6
'bweolb', # 0xa7
'bweols', # 0xa8
'bweolt', # 0xa9
'bweolp', # 0xaa
'bweolh', # 0xab
'bweom', # 0xac
'bweob', # 0xad
'bweobs', # 0xae
'bweos', # 0xaf
'bweoss', # 0xb0
'bweong', # 0xb1
'bweoj', # 0xb2
'bweoc', # 0xb3
'bweok', # 0xb4
'bweot', # 0xb5
'bweop', # 0xb6
'bweoh', # 0xb7
'bwe', # 0xb8
'bweg', # 0xb9
'bwegg', # 0xba
'bwegs', # 0xbb
'bwen', # 0xbc
'bwenj', # 0xbd
'bwenh', # 0xbe
'bwed', # 0xbf
'bwel', # 0xc0
'bwelg', # 0xc1
'bwelm', # 0xc2
'bwelb', # 0xc3
'bwels', # 0xc4
'bwelt', # 0xc5
'bwelp', # 0xc6
'bwelh', # 0xc7
'bwem', # 0xc8
'bweb', # 0xc9
'bwebs', # 0xca
'bwes', # 0xcb
'bwess', # 0xcc
'bweng', # 0xcd
'bwej', # 0xce
'bwec', # 0xcf
'bwek', # 0xd0
'bwet', # 0xd1
'bwep', # 0xd2
'bweh', # 0xd3
'bwi', # 0xd4
'bwig', # 0xd5
'bwigg', # 0xd6
'bwigs', # 0xd7
'bwin', # 0xd8
'bwinj', # 0xd9
'bwinh', # 0xda
'bwid', # 0xdb
'bwil', # 0xdc
'bwilg', # 0xdd
'bwilm', # 0xde
'bwilb', # 0xdf
'bwils', # 0xe0
'bwilt', # 0xe1
'bwilp', # 0xe2
'bwilh', # 0xe3
'bwim', # 0xe4
'bwib', # 0xe5
'bwibs', # 0xe6
'bwis', # 0xe7
'bwiss', # 0xe8
'bwing', # 0xe9
'bwij', # 0xea
'bwic', # 0xeb
'bwik', # 0xec
'bwit', # 0xed
'bwip', # 0xee
'bwih', # 0xef
'byu', # 0xf0
'byug', # 0xf1
'byugg', # 0xf2
'byugs', # 0xf3
'byun', # 0xf4
'byunj', # 0xf5
'byunh', # 0xf6
'byud', # 0xf7
'byul', # 0xf8
'byulg', # 0xf9
'byulm', # 0xfa
'byulb', # 0xfb
'byuls', # 0xfc
'byult', # 0xfd
'byulp', # 0xfe
'byulh', # 0xff
)
| apache-2.0 |
was4444/chromium.src | build/android/play_services/update_test.py | 19 | 14850 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unittests for update.py.
They set up a temporary directory that is used to mock a bucket, the directory
containing the configuration files and the android sdk directory.
Tests run the script with various inputs and check the status of the filesystem
'''
import shutil
import tempfile
import unittest
import os
import sys
import zipfile
import contextlib
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from play_services import update
class TestFunctions(unittest.TestCase):
DEFAULT_CONFIG_VERSION = 42
DEFAULT_LICENSE = 'Default License'
DEFAULT_ZIP_SHA1 = 'zip0and0filling0to0forty0chars0000000000'
def __init__(self, *args, **kwargs):
super(TestFunctions, self).__init__(*args, **kwargs)
self.paths = None # Initialized in SetUpWorkdir
self.workdir = None # Initialized in setUp
#override
def setUp(self):
self.workdir = tempfile.mkdtemp()
#override
def tearDown(self):
shutil.rmtree(self.workdir)
self.workdir = None
def testUpload(self):
version = 1337
self.SetUpWorkdir(
xml_version=version,
gms_lib=True,
source_prop=True)
status = update.main([
'upload',
'--dry-run',
'--skip-git',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', self.paths.sdk_root
])
self.assertEqual(status, 0, 'the command should have succeeded.')
# bucket should contain license, name = license.sha1
self.assertTrue(os.path.isfile(self.paths.config_license_sha1))
license_sha1 = _GetFileContent(self.paths.config_license_sha1)
bucket_license = os.path.join(self.paths.bucket, str(version),
license_sha1)
self.assertTrue(os.path.isfile(bucket_license))
self.assertEqual(_GetFileContent(bucket_license), self.DEFAULT_LICENSE)
# bucket should contain zip, name = zip.sha1
self.assertTrue(os.path.isfile(self.paths.config_zip_sha1))
bucket_zip = os.path.join(self.paths.bucket, str(version),
_GetFileContent(self.paths.config_zip_sha1))
self.assertTrue(os.path.isfile(bucket_zip))
# unzip, should contain expected files
with zipfile.ZipFile(bucket_zip, "r") as bucket_zip_file:
self.assertEqual(bucket_zip_file.namelist(),
['dummy_file', 'res/values/version.xml'])
def testUploadAlreadyLatestVersion(self):
self.SetUpWorkdir(
xml_version=self.DEFAULT_CONFIG_VERSION,
gms_lib=True,
source_prop=True)
status = update.main([
'upload',
'--dry-run',
'--skip-git',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', self.paths.sdk_root,
])
self.assertEqual(status, 0, 'the command should have succeeded.')
# bucket should be empty
self.assertFalse(os.listdir(self.paths.bucket))
self.assertFalse(os.path.isfile(self.paths.config_license_sha1))
self.assertFalse(os.path.isfile(self.paths.config_zip_sha1))
def testDownload(self):
self.SetUpWorkdir(populate_bucket=True)
with _MockedInput('y'):
status = update.main([
'download',
'--dry-run',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', self.paths.sdk_root,
])
self.assertEqual(status, 0, 'the command should have succeeded.')
# sdk_root should contain zip contents, zip sha1, license
self.assertTrue(os.path.isfile(os.path.join(self.paths.gms_lib,
'dummy_file')))
self.assertTrue(os.path.isfile(self.paths.gms_root_sha1))
self.assertTrue(os.path.isfile(self.paths.gms_root_license))
self.assertEquals(_GetFileContent(self.paths.gms_root_license),
self.DEFAULT_LICENSE)
def testDownloadBot(self):
self.SetUpWorkdir(populate_bucket=True, bot_env=True)
# No need to type 'y' on bots
status = update.main([
'download',
'--dry-run',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', self.paths.sdk_root,
])
self.assertEqual(status, 0, 'the command should have succeeded.')
# sdk_root should contain zip contents, zip sha1, license
self.assertTrue(os.path.isfile(os.path.join(self.paths.gms_lib,
'dummy_file')))
self.assertTrue(os.path.isfile(self.paths.gms_root_sha1))
self.assertTrue(os.path.isfile(self.paths.gms_root_license))
self.assertEquals(_GetFileContent(self.paths.gms_root_license),
self.DEFAULT_LICENSE)
def testDownloadAlreadyUpToDate(self):
self.SetUpWorkdir(
populate_bucket=True,
existing_zip_sha1=self.DEFAULT_ZIP_SHA1)
status = update.main([
'download',
'--dry-run',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', self.paths.sdk_root,
])
self.assertEqual(status, 0, 'the command should have succeeded.')
# there should not be new files downloaded to sdk_root
self.assertFalse(os.path.isfile(os.path.join(self.paths.gms_lib,
'dummy_file')))
self.assertFalse(os.path.isfile(self.paths.gms_root_license))
def testDownloadAcceptedLicense(self):
self.SetUpWorkdir(
populate_bucket=True,
existing_license=self.DEFAULT_LICENSE)
# License already accepted, no need to type
status = update.main([
'download',
'--dry-run',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', self.paths.sdk_root,
])
self.assertEqual(status, 0, 'the command should have succeeded.')
# sdk_root should contain zip contents, zip sha1, license
self.assertTrue(os.path.isfile(os.path.join(self.paths.gms_lib,
'dummy_file')))
self.assertTrue(os.path.isfile(self.paths.gms_root_sha1))
self.assertTrue(os.path.isfile(self.paths.gms_root_license))
self.assertEquals(_GetFileContent(self.paths.gms_root_license),
self.DEFAULT_LICENSE)
def testDownloadNewLicense(self):
self.SetUpWorkdir(
populate_bucket=True,
existing_license='Old license')
with _MockedInput('y'):
status = update.main([
'download',
'--dry-run',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', self.paths.sdk_root,
])
self.assertEqual(status, 0, 'the command should have succeeded.')
# sdk_root should contain zip contents, zip sha1, NEW license
self.assertTrue(os.path.isfile(os.path.join(self.paths.gms_lib,
'dummy_file')))
self.assertTrue(os.path.isfile(self.paths.gms_root_sha1))
self.assertTrue(os.path.isfile(self.paths.gms_root_license))
self.assertEquals(_GetFileContent(self.paths.gms_root_license),
self.DEFAULT_LICENSE)
def testDownloadRefusedLicense(self):
self.SetUpWorkdir(
populate_bucket=True,
existing_license='Old license')
with _MockedInput('n'):
status = update.main([
'download',
'--dry-run',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', self.paths.sdk_root,
])
self.assertEqual(status, 0, 'the command should have succeeded.')
# there should not be new files downloaded to sdk_root
self.assertFalse(os.path.isfile(os.path.join(self.paths.gms_lib,
'dummy_file')))
self.assertEquals(_GetFileContent(self.paths.gms_root_license),
'Old license')
def testDownloadNoAndroidSDK(self):
self.SetUpWorkdir(
populate_bucket=True,
existing_license='Old license')
non_existing_sdk_root = os.path.join(self.workdir, 'non_existing_sdk_root')
# Should not run, no typing needed
status = update.main([
'download',
'--dry-run',
'--bucket', self.paths.bucket,
'--config', self.paths.config_file,
'--sdk-root', non_existing_sdk_root,
])
self.assertEqual(status, 0, 'the command should have succeeded.')
self.assertFalse(os.path.isdir(non_existing_sdk_root))
def SetUpWorkdir(self,
bot_env=False,
config_version=DEFAULT_CONFIG_VERSION,
existing_license=None,
existing_zip_sha1=None,
gms_lib=False,
populate_bucket=False,
source_prop=None,
xml_version=None):
'''Prepares workdir by putting it in the specified state
Args:
- general
bot_env: sets or unsets CHROME_HEADLESS
- bucket
populate_bucket: boolean. Populate the bucket with a zip and license
file. The sha1s will be copied to the config directory
- config
config_version: number. Version of the current SDK. Defaults to
`self.DEFAULT_CONFIG_VERSION`
- sdk_root
existing_license: string. Create a LICENSE file setting the specified
text as content of the currently accepted license.
existing_zip_sha1: string. Create a sha1 file setting the specified
hash as hash of the SDK supposed to be installed
gms_lib: boolean. Create a dummy file in the location of the play
services SDK.
source_prop: boolean. Create a source.properties file that contains
the license to upload.
xml_version: number. Create a version.xml file with the specified
version that is used when uploading
'''
self.paths = Paths(self.workdir)
# Create the main directories
_MakeDirs(self.paths.sdk_root)
_MakeDirs(self.paths.config_dir)
_MakeDirs(self.paths.bucket)
# is not configured via argument.
update.SHA1_DIRECTORY = self.paths.config_dir
os.environ['CHROME_HEADLESS'] = '1' if bot_env else ''
if config_version:
_MakeDirs(os.path.dirname(self.paths.config_file))
with open(self.paths.config_file, 'w') as stream:
stream.write(('{"version_number":%d,'
'"version_xml_path": "res/values/version.xml"}'
'\n') % config_version)
if existing_license:
_MakeDirs(self.paths.gms_root)
with open(self.paths.gms_root_license, 'w') as stream:
stream.write(existing_license)
if existing_zip_sha1:
_MakeDirs(self.paths.gms_root)
with open(self.paths.gms_root_sha1, 'w') as stream:
stream.write(existing_zip_sha1)
if gms_lib:
_MakeDirs(self.paths.gms_lib)
with open(os.path.join(self.paths.gms_lib, 'dummy_file'), 'w') as stream:
stream.write('foo\n')
if source_prop:
_MakeDirs(os.path.dirname(self.paths.source_prop))
with open(self.paths.source_prop, 'w') as stream:
stream.write('Foo=Bar\n'
'Pkg.License=%s\n'
'Baz=Fizz\n' % self.DEFAULT_LICENSE)
if populate_bucket:
_MakeDirs(self.paths.config_dir)
bucket_dir = os.path.join(self.paths.bucket, str(config_version))
_MakeDirs(bucket_dir)
# TODO(dgn) should we use real sha1s? comparison with the real sha1 is
# done but does not do anything other than displaying a message.
config_license_sha1 = 'license0and0filling0to0forty0chars000000'
with open(self.paths.config_license_sha1, 'w') as stream:
stream.write(config_license_sha1)
with open(os.path.join(bucket_dir, config_license_sha1), 'w') as stream:
stream.write(self.DEFAULT_LICENSE)
config_zip_sha1 = self.DEFAULT_ZIP_SHA1
with open(self.paths.config_zip_sha1, 'w') as stream:
stream.write(config_zip_sha1)
pre_zip_lib = os.path.join(self.workdir, 'pre_zip_lib')
post_zip_lib = os.path.join(bucket_dir, config_zip_sha1)
_MakeDirs(pre_zip_lib)
with open(os.path.join(pre_zip_lib, 'dummy_file'), 'w') as stream:
stream.write('foo\n')
shutil.make_archive(post_zip_lib, 'zip', pre_zip_lib)
# make_archive appends .zip
shutil.move(post_zip_lib + '.zip', post_zip_lib)
if xml_version:
_MakeDirs(os.path.dirname(self.paths.xml_version))
with open(self.paths.xml_version, 'w') as stream:
stream.write(
'<?xml version="1.0" encoding="utf-8"?>\n'
'<resources>\n'
' <integer name="google_play_services_version">%d</integer>\n'
'</resources>\n' % xml_version)
class Paths(object):
'''Declaration of the paths commonly manipulated in the tests.'''
def __init__(self, workdir):
self.bucket = os.path.join(workdir, 'bucket')
self.config_dir = os.path.join(workdir, 'config')
self.config_file = os.path.join(self.config_dir, 'config.json')
self.config_license_sha1 = os.path.join(self.config_dir, 'LICENSE.sha1')
self.config_zip_sha1 = os.path.join(
self.config_dir,
'google_play_services_library.zip.sha1')
self.sdk_root = os.path.join(workdir, 'sdk_root')
self.gms_root = os.path.join(self.sdk_root, 'extras', 'google',
'google_play_services')
self.gms_root_sha1 = os.path.join(self.gms_root,
'google_play_services_library.zip.sha1')
self.gms_root_license = os.path.join(self.gms_root, 'LICENSE')
self.source_prop = os.path.join(self.gms_root, 'source.properties')
self.gms_lib = os.path.join(self.gms_root, 'libproject',
'google-play-services_lib')
self.xml_version = os.path.join(self.gms_lib, 'res', 'values',
'version.xml')
def _GetFileContent(file_path):
with open(file_path, 'r') as stream:
return stream.read()
def _MakeDirs(path):
'''Avoids having to do the error handling everywhere.'''
if not os.path.exists(path):
os.makedirs(path)
@contextlib.contextmanager
def _MockedInput(typed_string):
'''Makes raw_input return |typed_string| while inside the context.'''
try:
original_raw_input = __builtins__.raw_input
__builtins__.raw_input = lambda _: typed_string
yield
finally:
__builtins__.raw_input = original_raw_input
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
DataDog/integrations-core | couchbase/tests/test_unit.py | 1 | 3050 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from copy import deepcopy
import mock
import pytest
from datadog_checks.couchbase import Couchbase
def test_camel_case_to_joined_lower(instance):
couchbase = Couchbase('couchbase', {}, [instance])
CAMEL_CASE_TEST_PAIRS = {
'camelCase': 'camel_case',
'FirstCapital': 'first_capital',
'joined_lower': 'joined_lower',
'joined_Upper1': 'joined_upper1',
'Joined_upper2': 'joined_upper2',
'Joined_Upper3': 'joined_upper3',
'_leading_Underscore': 'leading_underscore',
'Trailing_Underscore_': 'trailing_underscore',
'DOubleCAps': 'd_ouble_c_aps',
'@@@super--$$-Funky__$__$$%': 'super_funky',
}
for test_input, expected_output in CAMEL_CASE_TEST_PAIRS.items():
test_output = couchbase.camel_case_to_joined_lower(test_input)
assert test_output == expected_output, 'Input was {}, expected output was {}, actual output was {}'.format(
test_input, expected_output, test_output
)
def test_extract_seconds_value(instance):
couchbase = Couchbase('couchbase', {}, [instance])
EXTRACT_SECONDS_TEST_PAIRS = {
'3.45s': 3.45,
'12ms': 0.012,
'700.5us': 0.0007005,
u'733.364\u00c2s': 0.000733364,
'0': 0,
}
for test_input, expected_output in EXTRACT_SECONDS_TEST_PAIRS.items():
test_output = couchbase.extract_seconds_value(test_input)
assert test_output == expected_output, 'Input was {}, expected output was {}, actual output was {}'.format(
test_input, expected_output, test_output
)
def test__get_query_monitoring_data(instance_query):
"""
`query_monitoring_url` can potentially fail, be sure we don't raise when the
endpoint is not reachable
"""
couchbase = Couchbase('couchbase', {}, [instance_query])
couchbase._get_query_monitoring_data()
@pytest.mark.parametrize(
'test_case, extra_config, expected_http_kwargs',
[
(
"new auth config",
{'username': 'new_foo', 'password': 'bar', 'tls_verify': False},
{'auth': ('new_foo', 'bar'), 'verify': False},
),
("legacy config", {'user': 'new_foo', 'ssl_verify': False}, {'auth': ('new_foo', 'password'), 'verify': False}),
],
)
def test_config(test_case, extra_config, expected_http_kwargs, instance):
instance = deepcopy(instance)
instance.update(extra_config)
check = Couchbase('couchbase', {}, [instance])
with mock.patch('datadog_checks.base.utils.http.requests') as r:
r.get.return_value = mock.MagicMock(status_code=200)
check.check(instance)
http_wargs = dict(
auth=mock.ANY, cert=mock.ANY, headers=mock.ANY, proxies=mock.ANY, timeout=mock.ANY, verify=mock.ANY
)
http_wargs.update(expected_http_kwargs)
r.get.assert_called_with('http://localhost:8091/pools/default/tasks', **http_wargs)
| bsd-3-clause |
tarzasai/Flexget | flexget/plugins/metainfo/rottentomatoes_lookup.py | 3 | 4365 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils.log import log_once
try:
from flexget.plugins.internal.api_rottentomatoes import lookup_movie, API_KEY
except ImportError:
raise plugin.DependencyError(issued_by='rottentomatoes_lookup', missing='api_rottentomatoes',
message='rottentomatoes_lookup requires the `api_rottentomatoes` plugin')
log = logging.getLogger('rottentomatoes_lookup')
def get_rt_url(movie):
for link in movie.links:
if link.name == 'alternate':
return link.url
class PluginRottenTomatoesLookup(object):
"""
Retrieves Rotten Tomatoes information for entries.
Example::
rottentomatoes_lookup: yes
"""
field_map = {
'rt_name': 'title',
'rt_id': 'id',
'rt_year': 'year',
'rt_genres': lambda movie: [genre.name for genre in movie.genres],
'rt_mpaa_rating': 'mpaa_rating',
'rt_runtime': 'runtime',
'rt_critics_consensus': 'critics_consensus',
'rt_releases': lambda movie: dict((release.name, release.date) for
release in movie.release_dates),
'rt_critics_rating': 'critics_rating',
'rt_critics_score': 'critics_score',
'rt_audience_rating': 'audience_rating',
'rt_audience_score': 'audience_score',
'rt_average_score': lambda movie: (movie.critics_score + movie.audience_score) / 2,
'rt_synopsis': 'synopsis',
'rt_posters': lambda movie: dict((poster.name, poster.url) for poster in movie.posters),
'rt_actors': lambda movie: [actor.name for actor in movie.cast],
'rt_directors': lambda movie: [director.name for director in movie.directors],
'rt_studio': 'studio',
'rt_alternate_ids': lambda movie: dict((alt_id.name, alt_id.id)
for alt_id in movie.alternate_ids),
'rt_url': get_rt_url,
# Generic fields filled by all movie lookup plugins:
'movie_name': 'title',
'movie_year': 'year'}
schema = {'oneOf': [
{'type': 'boolean'},
{'type': 'string', 'description': 'provide a custom api key'}
]}
def __init__(self):
self.key = None
def lazy_loader(self, entry):
"""Does the lookup for this entry and populates the entry fields.
:param entry: entry to perform lookup on
:param field: the field to be populated (others may be populated as well)
:returns: the field value
"""
try:
self.lookup(entry, key=self.key)
except plugin.PluginError as e:
log_once(e.value.capitalize(), logger=log)
def lookup(self, entry, search_allowed=True, key=None):
"""
Perform Rotten Tomatoes lookup for entry.
:param entry: Entry instance
:param search_allowed: Allow fallback to search
:param key: optionally specify an API key to use
:raises PluginError: Failure reason
"""
if not key:
key = self.key or API_KEY
movie = lookup_movie(smart_match=entry['title'],
rottentomatoes_id=entry.get('rt_id', eval_lazy=False),
only_cached=(not search_allowed),
api_key=key
)
log.debug(u'Got movie: %s' % movie)
entry.update_using_map(self.field_map, movie)
if not entry.get('imdb_id', eval_lazy=False):
for alt_id in movie.alternate_ids:
if alt_id.name == 'imdb':
entry['imdb_id'] = 'tt' + alt_id.id
break
def on_task_metainfo(self, task, config):
if not config:
return
if isinstance(config, basestring):
self.key = config.lower()
else:
self.key = None
for entry in task.entries:
entry.register_lazy_func(self.lazy_loader, self.field_map)
@event('plugin.register')
def register_plugin():
plugin.register(PluginRottenTomatoesLookup, 'rottentomatoes_lookup', api_ver=2)
| mit |
seem-sky/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
| lgpl-3.0 |
mzbotr/python-gsmmodem | gsmmodem/pdu.py | 10 | 32236 | # -*- coding: utf8 -*-
""" SMS PDU encoding methods """
from __future__ import unicode_literals
import sys, codecs, math
from datetime import datetime, timedelta, tzinfo
from copy import copy
from .exceptions import EncodingError
# For Python 3 support
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION >= 3:
MAX_INT = sys.maxsize
dictItemsIter = dict.items
xrange = range
unichr = chr
toByteArray = lambda x: bytearray(codecs.decode(x, 'hex_codec')) if type(x) == bytes else bytearray(codecs.decode(bytes(x, 'ascii'), 'hex_codec')) if type(x) == str else x
rawStrToByteArray = lambda x: bytearray(bytes(x, 'latin-1'))
else: #pragma: no cover
MAX_INT = sys.maxint
dictItemsIter = dict.iteritems
toByteArray = lambda x: bytearray(x.decode('hex')) if type(x) in (str, unicode) else x
rawStrToByteArray = bytearray
# Tables can be found at: http://en.wikipedia.org/wiki/GSM_03.38#GSM_7_bit_default_alphabet_and_extension_table_of_3GPP_TS_23.038_.2F_GSM_03.38
GSM7_BASIC = ('@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞ\x1bÆæßÉ !\"#¤%&\'()*+,-./0123456789:;<=>?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑÜ`¿abcdefghijklmnopqrstuvwxyzäöñüà')
GSM7_EXTENDED = {chr(0xFF): 0x0A,
#CR2: chr(0x0D),
'^': chr(0x14),
#SS2: chr(0x1B),
'{': chr(0x28),
'}': chr(0x29),
'\\': chr(0x2F),
'[': chr(0x3C),
'~': chr(0x3D),
']': chr(0x3E),
'|': chr(0x40),
'€': chr(0x65)}
# Maximum message sizes for each data coding
MAX_MESSAGE_LENGTH = {0x00: 160, # GSM-7
0x04: 140, # 8-bit
0x08: 70} # UCS2
class SmsPduTzInfo(tzinfo):
""" Simple implementation of datetime.tzinfo for handling timestamp GMT offsets specified in SMS PDUs """
def __init__(self, pduOffsetStr=None):
"""
:param pduOffset: 2 semi-octet timezone offset as specified by PDU (see GSM 03.40 spec)
:type pduOffset: str
Note: pduOffsetStr is optional in this constructor due to the special requirement for pickling
mentioned in the Python docs. It should, however, be used (or otherwise pduOffsetStr must be
manually set)
"""
self._offset = None
if pduOffsetStr != None:
self._setPduOffsetStr(pduOffsetStr)
def _setPduOffsetStr(self, pduOffsetStr):
# See if the timezone difference is positive/negative by checking MSB of first semi-octet
tzHexVal = int(pduOffsetStr, 16)
if tzHexVal & 0x80 == 0: # positive
self._offset = timedelta(minutes=(int(pduOffsetStr) * 15))
else: # negative
self._offset = timedelta(minutes=(int('{0:0>2X}'.format(tzHexVal & 0x7F)) * -15))
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
""" We do not have enough info in the SMS PDU to implement daylight savings time """
return timedelta(0)
class InformationElement(object):
    """ User Data Header (UDH) Information Element (IE) implementation

    This represents a single field ("information element") in the PDU's
    User Data Header. The UDH itself contains one or more of these
    information elements.

    If the IEI (IE identifier) is recognized, the class will automatically
    specialize into one of the subclasses of InformationElement,
    e.g. Concatenation or PortAddress, allowing the user to easily
    access the specific (and useful) attributes of these special cases.
    """

    def __new__(cls, *args, **kwargs):
        """ Causes a new InformationElement class, or subclass
        thereof, to be created. If the IEI is recognized, a specific
        subclass of InformationElement is returned """
        if len(args) > 0:
            iei = args[0]
        elif 'iei' in kwargs:
            iei = kwargs['iei']
        else:
            # No IEI supplied - no specialization possible
            return super(InformationElement, cls).__new__(cls)
        # Dispatch to the registered subclass for this IEI (if any)
        targetClass = IEI_CLASS_MAP.get(iei, cls)
        return super(InformationElement, targetClass).__new__(targetClass)

    def __init__(self, iei, ieLen=0, ieData=None):
        self.id = iei             # IEI
        self.dataLength = ieLen   # IE Length
        self.data = ieData or []  # raw IE data

    @classmethod
    def decode(cls, byteIter):
        """ Decodes a single IE at the current position in the specified
        byte iterator

        :return: An InformationElement (or subclass) instance for the decoded IE
        :rtype: InformationElement, or subclass thereof
        """
        iei = next(byteIter)
        ieLen = next(byteIter)
        payload = [next(byteIter) for _ in xrange(ieLen)]
        return InformationElement(iei, ieLen, payload)

    def encode(self):
        """ Encodes this IE and returns the resulting bytes """
        encoded = bytearray((self.id, self.dataLength))
        encoded.extend(self.data)
        return encoded

    def __len__(self):
        """ Exposes the IE's total length (including the IEI and IE length octet) in octets """
        return self.dataLength + 2
class Concatenation(InformationElement):
    """ IE that indicates SMS concatenation.

    This implementation handles both 8-bit and 16-bit concatenation
    indication, and exposes the specific useful details of this
    IE as instance variables.

    Exposes:

    reference
        CSMS reference number; must be the same for all the SMS parts in the CSMS
    parts
        total number of parts. The value shall remain constant for every short
        message which makes up the concatenated short message. If the value is
        zero then the receiving entity shall ignore the whole information element
    number
        this part's number in the sequence. The value shall start at 1 and
        increment for every short message which makes up the concatenated short message
    """

    def __init__(self, iei=0x00, ieLen=0, ieData=None):
        super(Concatenation, self).__init__(iei, ieLen, ieData)
        if ieData != None:
            if iei == 0x00:
                # 8-bit reference: [reference, parts, number]
                self.reference, self.parts, self.number = ieData
            else:
                # 0x08: 16-bit reference: [ref high, ref low, parts, number]
                self.reference = (ieData[0] << 8) | ieData[1]
                self.parts = ieData[2]
                self.number = ieData[3]

    def encode(self):
        # Pick the narrowest IEI that can represent the reference number
        if self.reference > 0xFF:
            self.id = 0x08  # 16-bit reference
            self.data = [self.reference >> 8, self.reference & 0xFF,
                         self.parts, self.number]
        else:
            self.id = 0x00  # 8-bit reference
            self.data = [self.reference, self.parts, self.number]
        self.dataLength = len(self.data)
        return super(Concatenation, self).encode()
class PortAddress(InformationElement):
    """ IE that indicates an Application Port Addressing Scheme.

    This implementation handles both 8-bit and 16-bit port addressing,
    and exposes the specific useful details of this IE as instance
    variables.

    Exposes:

    destination: The destination port number
    source: The source port number
    """

    def __init__(self, iei=0x04, ieLen=0, ieData=None):
        super(PortAddress, self).__init__(iei, ieLen, ieData)
        if ieData != None:
            if iei == 0x04:
                # 8-bit port addressing scheme: [destination, source]
                self.destination, self.source = ieData
            else:
                # 0x05: 16-bit port addressing scheme (big-endian pairs)
                self.destination = (ieData[0] << 8) | ieData[1]
                self.source = (ieData[2] << 8) | ieData[3]

    def encode(self):
        # Use the 16-bit scheme only when one of the ports requires it
        if self.destination > 0xFF or self.source > 0xFF:
            self.id = 0x05  # 16-bit
            self.data = [self.destination >> 8, self.destination & 0xFF,
                         self.source >> 8, self.source & 0xFF]
        else:
            self.id = 0x04  # 8-bit
            self.data = [self.destination, self.source]
        self.dataLength = len(self.data)
        return super(PortAddress, self).encode()
# Map of recognized IEIs to the InformationElement subclass that handles them.
# InformationElement.__new__ consults this map to specialize new instances.
IEI_CLASS_MAP = {0x00: Concatenation, # Concatenated short messages, 8-bit reference number
                 0x08: Concatenation, # Concatenated short messages, 16-bit reference number
                 0x04: PortAddress, # Application port addressing scheme, 8 bit address
                 0x05: PortAddress # Application port addressing scheme, 16 bit address
                 }
class Pdu(object):
    """ Encoded SMS PDU. Contains raw PDU data and related meta-information """

    def __init__(self, data, tpduLength):
        """ Constructor

        :param data: the raw PDU data (as bytes)
        :type data: bytearray
        :param tpduLength: Length (in bytes) of the TPDU
        :type tpduLength: int
        """
        self.data = data
        self.tpduLength = tpduLength

    def __str__(self):
        # Return the PDU as an upper-case hex string (e.g. for AT commands)
        global PYTHON_VERSION
        if PYTHON_VERSION < 3:
            # Python 2: str objects support the 'hex' codec directly
            return str(self.data).encode('hex').upper()
        else: #pragma: no cover
            # Python 3: hex-encode via the codecs module, then decode to str
            return str(codecs.encode(self.data, 'hex_codec'), 'ascii').upper()
def encodeSmsSubmitPdu(number, text, reference=0, validity=None, smsc=None, requestStatusReport=True, rejectDuplicates=False, sendFlash=False):
    """ Creates an SMS-SUBMIT PDU for sending a message with the specified text to the specified number

    :param number: the destination mobile number
    :type number: str
    :param text: the message text
    :type text: str
    :param reference: message reference number (see also: rejectDuplicates parameter)
    :type reference: int
    :param validity: message validity period (absolute or relative)
    :type validity: datetime.timedelta (relative) or datetime.datetime (absolute)
    :param smsc: SMSC number to use (leave None to use default)
    :type smsc: str
    :param requestStatusReport: if True, set TP-SRR to request a delivery status report
    :type requestStatusReport: bool
    :param rejectDuplicates: Flag that controls the TP-RD parameter (messages with same destination and reference may be rejected if True)
    :type rejectDuplicates: bool
    :param sendFlash: if True, use a data coding scheme with message class 0 ("flash" SMS)
    :type sendFlash: bool

    :raise TypeError: if validity is neither a datetime.timedelta nor a datetime.datetime

    :return: A list of one or more Pdu objects, each containing the encoded PDU
        data (bytearray) and the length of its TPDU part
    :rtype: list of Pdu
    """
    tpduFirstOctet = 0x01  # SMS-SUBMIT PDU
    if validity != None:
        # Validity period format (TP-VPF) is stored in bits 4,3 of the first TPDU octet
        if type(validity) == timedelta:
            # Relative (TP-VP is integer)
            tpduFirstOctet |= 0x10  # bit4 == 1, bit3 == 0
            validityPeriod = [_encodeRelativeValidityPeriod(validity)]
        elif type(validity) == datetime:
            # Absolute (TP-VP is semi-octet encoded date)
            tpduFirstOctet |= 0x18  # bit4 == 1, bit3 == 1
            validityPeriod = _encodeTimestamp(validity)
        else:
            raise TypeError('"validity" must be of type datetime.timedelta (for relative value) or datetime.datetime (for absolute value)')
    else:
        validityPeriod = None
    if rejectDuplicates:
        tpduFirstOctet |= 0x04  # bit2 == 1
    if requestStatusReport:
        tpduFirstOctet |= 0x20  # bit5 == 1
    # Encode message text and set data coding scheme based on text contents
    try:
        encodedText = encodeGsm7(text)
    except ValueError:
        # Cannot encode text using GSM-7; use UCS2 instead
        alphabet = 0x08  # UCS2
    else:
        alphabet = 0x00  # GSM-7
    # Check if message should be concatenated
    if len(text) > MAX_MESSAGE_LENGTH[alphabet]:
        # Text too long for single PDU - add "concatenation" User Data Header
        concatHeaderPrototype = Concatenation()
        concatHeaderPrototype.reference = reference
        # Each part carries a 6-byte concatenation UDH, which reduces the
        # usable payload to 153 GSM-7 characters or 67 UCS2 characters per
        # part (matching the slice sizes used below). Computing the part
        # count from MAX_MESSAGE_LENGTH (the previous behaviour) under-counted
        # parts for long messages and silently dropped trailing characters.
        partCapacity = 153 if alphabet == 0x00 else 67
        pduCount = int(math.ceil(len(text) / float(partCapacity)))
        concatHeaderPrototype.parts = pduCount
        tpduFirstOctet |= 0x40  # bit6 == 1: UDH present
    else:
        concatHeaderPrototype = None
        pduCount = 1
    # Construct required PDU(s)
    pdus = []
    for i in xrange(pduCount):
        pdu = bytearray()
        if smsc:
            pdu.extend(_encodeAddressField(smsc, smscField=True))
        else:
            pdu.append(0x00)  # Don't supply an SMSC number - use the one configured in the device
        udh = bytearray()
        if concatHeaderPrototype != None:
            concatHeader = copy(concatHeaderPrototype)
            concatHeader.number = i + 1
            # Slice out this part's text (153/67 chars fit alongside the UDH)
            if alphabet == 0x00:
                pduText = text[i * 153:(i + 1) * 153]
            elif alphabet == 0x08:
                pduText = text[i * 67:(i + 1) * 67]
            udh.extend(concatHeader.encode())
        else:
            pduText = text
        udhLen = len(udh)
        pdu.append(tpduFirstOctet)
        pdu.append(reference)  # message reference
        # Add destination number
        pdu.extend(_encodeAddressField(number))
        pdu.append(0x00)  # Protocol identifier - no higher-level protocol
        # Data coding scheme; flash messages additionally set message class 0
        pdu.append(alphabet if not sendFlash else (0x10 if alphabet == 0x00 else 0x18))
        if validityPeriod:
            pdu.extend(validityPeriod)
        if alphabet == 0x00:  # GSM-7
            encodedText = encodeGsm7(pduText)
            userDataLength = len(encodedText)  # Payload size in septets/characters
            if udhLen > 0:
                shift = ((udhLen + 1) * 8) % 7  # "fill bits" needed to make the UDH end on a septet boundary
                userData = packSeptets(encodedText, padBits=shift)
                if shift > 0:
                    userDataLength += 1  # take padding bits into account
            else:
                userData = packSeptets(encodedText)
        elif alphabet == 0x08:  # UCS2
            userData = encodeUcs2(pduText)
            userDataLength = len(userData)
        if udhLen > 0:
            userDataLength += udhLen + 1  # +1 for the UDH length indicator byte
            pdu.append(userDataLength)
            pdu.append(udhLen)
            pdu.extend(udh)  # UDH
        else:
            pdu.append(userDataLength)
        pdu.extend(userData)  # User Data (message payload)
        # NOTE(review): this assumes the SMSC field occupies a single octet;
        # when an explicit smsc is supplied the TPDU length may be overstated
        # - verify against callers.
        tpdu_length = len(pdu) - 1
        pdus.append(Pdu(pdu, tpdu_length))
    return pdus
def decodeSmsPdu(pdu):
    """ Decodes SMS pdu data and returns a tuple in format (number, text)

    :param pdu: PDU data as a hex string, or a bytearray containing PDU octects
    :type pdu: str or bytearray

    :raise EncodingError: If the specified PDU data cannot be decoded

    :return: The decoded SMS data as a dictionary
    :rtype: dict
    """
    try:
        pdu = toByteArray(pdu)
    except Exception as e:
        # Python 2 raises TypeError, Python 3 raises binascii.Error
        raise EncodingError(e)
    result = {}
    pduIter = iter(pdu)
    # The PDU starts with the SMSC address field
    smscNumber, smscBytesRead = _decodeAddressField(pduIter, smscField=True)
    result['smsc'] = smscNumber
    result['tpdu_length'] = len(pdu) - smscBytesRead
    tpduFirstOctet = next(pduIter)
    pduType = tpduFirstOctet & 0x03 # bits 1-0: TP-MTI (message type indicator)
    if pduType == 0x00: # SMS-DELIVER or SMS-DELIVER REPORT
        result['type'] = 'SMS-DELIVER'
        result['number'] = _decodeAddressField(pduIter)[0]
        result['protocol_id'] = next(pduIter)
        dataCoding = _decodeDataCoding(next(pduIter))
        result['time'] = _decodeTimestamp(pduIter)
        userDataLen = next(pduIter)
        udhPresent = (tpduFirstOctet & 0x40) != 0 # bit 6: TP-UDHI
        ud = _decodeUserData(pduIter, userDataLen, dataCoding, udhPresent)
        result.update(ud)
    elif pduType == 0x01: # SMS-SUBMIT or SMS-SUBMIT-REPORT
        result['type'] = 'SMS-SUBMIT'
        result['reference'] = next(pduIter) # message reference - we don't really use this
        result['number'] = _decodeAddressField(pduIter)[0]
        result['protocol_id'] = next(pduIter)
        dataCoding = _decodeDataCoding(next(pduIter))
        validityPeriodFormat = (tpduFirstOctet & 0x18) >> 3 # bits 4,3: TP-VPF
        if validityPeriodFormat == 0x02: # TP-VP field present and integer represented (relative)
            result['validity'] = _decodeRelativeValidityPeriod(next(pduIter))
        elif validityPeriodFormat == 0x03: # TP-VP field present and semi-octet represented (absolute)
            result['validity'] = _decodeTimestamp(pduIter)
        userDataLen = next(pduIter)
        udhPresent = (tpduFirstOctet & 0x40) != 0 # bit 6: TP-UDHI
        ud = _decodeUserData(pduIter, userDataLen, dataCoding, udhPresent)
        result.update(ud)
    elif pduType == 0x02: # SMS-STATUS-REPORT or SMS-COMMAND
        result['type'] = 'SMS-STATUS-REPORT'
        result['reference'] = next(pduIter)
        result['number'] = _decodeAddressField(pduIter)[0]
        result['time'] = _decodeTimestamp(pduIter)
        result['discharge'] = _decodeTimestamp(pduIter)
        result['status'] = next(pduIter)
    else:
        raise EncodingError('Unknown SMS message type: {0}. First TPDU octet was: {1}'.format(pduType, tpduFirstOctet))
    return result
def _decodeUserData(byteIter, userDataLen, dataCoding, udhPresent):
    """ Decodes PDU user data (UDHI (if present) and message text)

    :param byteIter: iterator over the remaining PDU octets
    :param userDataLen: the TP-UDL value (septets for GSM-7, octets otherwise)
    :param dataCoding: alphabet indicator as returned by _decodeDataCoding
    :param udhPresent: True if the TP-UDHI flag indicated a User Data Header

    :return: dictionary containing 'text' and, when a UDH is present, 'udh'
    :rtype: dict
    """
    result = {}
    if udhPresent:
        # User Data Header is present
        result['udh'] = []
        udhLen = next(byteIter)
        ieLenRead = 0
        # Parse and store UDH fields
        while ieLenRead < udhLen:
            ie = InformationElement.decode(byteIter)
            ieLenRead += len(ie)
            result['udh'].append(ie)
        del ieLenRead
        if dataCoding == 0x00: # GSM-7
            # Since we are using 7-bit data, "fill bits" may have been added to make the UDH end on a septet boundary
            shift = ((udhLen + 1) * 8) % 7 # "fill bits" needed to make the UDH end on a septet boundary
            # Simulate another "shift" in the unpackSeptets algorithm in order to ignore the fill bits
            prevOctet = next(byteIter)
            shift += 1

    if dataCoding == 0x00: # GSM-7
        if udhPresent:
            # Resume unpacking mid-stream, skipping the UDH fill bits
            userDataSeptets = unpackSeptets(byteIter, userDataLen, prevOctet, shift)
        else:
            userDataSeptets = unpackSeptets(byteIter, userDataLen)
        result['text'] = decodeGsm7(userDataSeptets)
    elif dataCoding == 0x02: # UCS2
        result['text'] = decodeUcs2(byteIter, userDataLen)
    else: # 8-bit (data)
        # Note: this consumes the remainder of the iterator rather than
        # stopping after userDataLen octets
        userData = []
        for b in byteIter:
            userData.append(unichr(b))
        result['text'] = ''.join(userData)
    return result
def _decodeRelativeValidityPeriod(tpVp):
""" Calculates the relative SMS validity period (based on the table in section 9.2.3.12 of GSM 03.40)
:rtype: datetime.timedelta
"""
if tpVp <= 143:
return timedelta(minutes=((tpVp + 1) * 5))
elif 144 <= tpVp <= 167:
return timedelta(hours=12, minutes=((tpVp - 143) * 30))
elif 168 <= tpVp <= 196:
return timedelta(days=(tpVp - 166))
elif 197 <= tpVp <= 255:
return timedelta(weeks=(tpVp - 192))
else:
raise ValueError('tpVp must be in range [0, 255]')
def _encodeRelativeValidityPeriod(validityPeriod):
""" Encodes the specified relative validity period timedelta into an integer for use in an SMS PDU
(based on the table in section 9.2.3.12 of GSM 03.40)
:param validityPeriod: The validity period to encode
:type validityPeriod: datetime.timedelta
:rtype: int
"""
# Python 2.6 does not have timedelta.total_seconds(), so compute it manually
#seconds = validityPeriod.total_seconds()
seconds = validityPeriod.seconds + (validityPeriod.days * 24 * 3600)
if seconds <= 43200: # 12 hours
tpVp = int(seconds / 300) - 1 # divide by 5 minutes, subtract 1
elif seconds <= 86400: # 24 hours
tpVp = int((seconds - 43200) / 1800) + 143 # subtract 12 hours, divide by 30 minutes. add 143
elif validityPeriod.days <= 30: # 30 days
tpVp = validityPeriod.days + 166 # amount of days + 166
elif validityPeriod.days <= 441: # max value of tpVp is 255
tpVp = int(validityPeriod.days / 7) + 192 # amount of weeks + 192
else:
raise ValueError('Validity period too long; tpVp limited to 1 octet (max value: 255)')
return tpVp
def _decodeTimestamp(byteIter):
    """ Decodes a 7-octet timestamp

    The first 6 octets are the semi-octet-encoded date/time (YYMMDDhhmmss);
    the final octet is the timezone offset, handed to SmsPduTzInfo.

    :return: the decoded timestamp, with timezone information attached
    :rtype: datetime.datetime
    """
    dateStr = decodeSemiOctets(byteIter, 7)
    timeZoneStr = dateStr[-2:] # last 2 decoded digits: timezone offset field
    return datetime.strptime(dateStr[:-2], '%y%m%d%H%M%S').replace(tzinfo=SmsPduTzInfo(timeZoneStr))
def _encodeTimestamp(timestamp):
    """ Encodes a 7-octet timestamp from the specified date

    Note: the specified timestamp must have a UTC offset set; you can use gsmmodem.util.SimpleOffsetTzInfo for simple cases

    :param timestamp: The timestamp to encode
    :type timestamp: datetime.datetime

    :raise ValueError: if the timestamp carries no timezone information

    :return: The encoded timestamp
    :rtype: bytearray
    """
    if timestamp.tzinfo == None:
        raise ValueError('Please specify time zone information for the timestamp (e.g. by using gsmmodem.util.SimpleOffsetTzInfo)')
    # See if the timezone difference is positive/negative
    tzDelta = timestamp.utcoffset()
    if tzDelta.days >= 0:
        # Positive offset: format the quarter-hour count as 2 decimal digits,
        # which is also its BCD representation
        tzValStr = '{0:0>2}'.format(int(tzDelta.seconds / 60 / 15))
    else: # negative
        tzVal = int((tzDelta.days * -3600 * 24 - tzDelta.seconds) / 60 / 15) # calculate offset in 0.25 hours
        # Cast as literal hex value and set MSB of first semi-octet of timezone to 1 to indicate negative value
        tzVal = int('{0:0>2}'.format(tzVal), 16) | 0x80
        tzValStr = '{0:0>2X}'.format(tzVal)
    dateStr = timestamp.strftime('%y%m%d%H%M%S') + tzValStr
    return encodeSemiOctets(dateStr)
def _decodeDataCoding(octet):
if octet & 0xC0 == 0:
#compressed = octect & 0x20
alphabet = (octet & 0x0C) >> 2
return alphabet # 0x00 == GSM-7, 0x01 == 8-bit data, 0x02 == UCS2
# We ignore other coding groups
return 0
def _decodeAddressField(byteIter, smscField=False, log=False):
    """ Decodes the address field at the current position of the bytearray iterator

    :param byteIter: Iterator over bytearray
    :type byteIter: iter(bytearray)
    :param smscField: True if this is the SMSC address field (its length octet
        counts octets rather than semi-octets)
    :type smscField: bool

    :return: Tuple containing the address value and amount of bytes read (value is or None if it is empty (zero-length))
    :rtype: tuple
    """
    addressLen = next(byteIter)
    if addressLen > 0:
        toa = next(byteIter)
        ton = (toa & 0x70) # bits 6,5,4 of type-of-address == type-of-number
        if ton == 0x50:
            # Alphanumberic number: GSM-7 packed into the address digits
            addressLen = int(math.ceil(addressLen / 2.0))
            septets = unpackSeptets(byteIter, addressLen)
            addressValue = decodeGsm7(septets)
            # +2 accounts for the length octet and the type-of-address octet
            return (addressValue, (addressLen + 2))
        else:
            # ton == 0x00: Unknown (might be international, local, etc) - leave as is
            # ton == 0x20: National number
            if smscField:
                # SMSC length counts octets including the TOA octet
                addressValue = decodeSemiOctets(byteIter, addressLen-1)
            else:
                # Non-SMSC length counts semi-octets (digits); round up to octets
                if addressLen % 2:
                    addressLen = int(addressLen / 2) + 1
                else:
                    addressLen = int(addressLen / 2)
                addressValue = decodeSemiOctets(byteIter, addressLen)
                addressLen += 1 # for the return value, add the toa byte
            if ton == 0x10: # International number
                addressValue = '+' + addressValue
            return (addressValue, (addressLen + 1))
    else:
        # Zero-length address: only the length octet was consumed
        return (None, 1)
def _encodeAddressField(address, smscField=False):
    """ Encodes the address into an address field

    :param address: The address to encode (phone number or alphanumeric)
    :type address: str
    :param smscField: True if this is the SMSC address field (its length octet
        then counts octets rather than semi-octets)
    :type smscField: bool

    :return: Encoded SMS PDU address field
    :rtype: bytearray
    """
    # First, see if this is a number or an alphanumeric string
    toa = 0x80 | 0x00 | 0x01 # Type-of-address start | Unknown type-of-number | ISDN/tel numbering plan
    alphaNumeric = False
    if address.isalnum():
        # Might just be a local number
        if address.isdigit():
            # Local number
            toa |= 0x20
        else:
            # Alphanumeric address
            toa |= 0x50
            toa &= 0xFE # switch to "unknown" numbering plan
            alphaNumeric = True
    else:
        if address[0] == '+' and address[1:].isdigit():
            # International number
            toa |= 0x10
            # Remove the '+' prefix
            address = address[1:]
        else:
            # Alphanumeric address
            toa |= 0x50
            toa &= 0xFE # switch to "unknown" numbering plan
            alphaNumeric = True
    if alphaNumeric:
        # Alphanumeric addresses are GSM-7 encoded and packed into septets
        addressValue = packSeptets(encodeGsm7(address, False))
        addressLen = len(addressValue) * 2 # length is given in semi-octets
    else:
        addressValue = encodeSemiOctets(address)
        if smscField:
            addressLen = len(addressValue) + 1 # octet count, including the TOA octet
        else:
            addressLen = len(address) # semi-octet (digit) count
    result = bytearray()
    result.append(addressLen)
    result.append(toa)
    result.extend(addressValue)
    return result
def encodeSemiOctets(number):
    """ Semi-octet encoding algorithm (e.g. for phone numbers)

    Each pair of digits is nibble-swapped into one octet; an odd-length
    number is padded with the 0xF "end of number" marker.

    :return: bytearray containing the encoded octets
    :rtype: bytearray
    """
    if len(number) % 2:
        number += 'F'  # append the "end" indicator
    # Swap each digit pair: the first digit goes in the low nibble
    return bytearray(int(hi + lo, 16) for hi, lo in zip(number[1::2], number[::2]))
def decodeSemiOctets(encodedNumber, numberOfOctets=None):
    """ Semi-octet decoding algorithm (e.g. for phone numbers)

    :param encodedNumber: The semi-octet-encoded telephone number (in bytearray format or hex string)
    :type encodedNumber: bytearray, str or iter(bytearray)
    :param numberOfOctets: The expected amount of octets after decoding (i.e. when to stop)
    :type numberOfOctets: int

    :return: decoded telephone number
    :rtype: str
    """
    if type(encodedNumber) in (str, bytes):
        # Hex-string input: convert to raw octets first
        encodedNumber = bytearray(codecs.decode(encodedNumber, 'hex_codec'))
    digits = []
    decoded = 0
    for octet in encodedNumber:
        pair = '{0:02x}'.format(octet)
        digits.append(pair[1])  # low nibble holds the earlier digit
        if pair[0] == 'f':
            break  # 0xF in the high nibble marks end-of-number
        digits.append(pair[0])
        decoded += 1
        if decoded == numberOfOctets:  # never true when numberOfOctets is None
            break
    return ''.join(digits)
def encodeGsm7(plaintext, discardInvalid=False):
    """ GSM-7 text encoding algorithm

    Encodes the specified text string into GSM-7 octets (characters). This method does not pack
    the characters into septets.

    :param plaintext: the text string to encode
    :param discardInvalid: if True, characters that cannot be encoded will be silently discarded

    :raise ValueError: if the text string cannot be encoded using GSM-7 encoding (unless discardInvalid == True)

    :return: A bytearray containing the string encoded in GSM-7 encoding
    :rtype: bytearray
    """
    result = bytearray()
    if PYTHON_VERSION >= 3:
        plaintext = str(plaintext)
    for char in plaintext:
        idx = GSM7_BASIC.find(char)
        if idx != -1:
            # Basic-alphabet character: its table index is its GSM-7 value
            result.append(idx)
        elif char in GSM7_EXTENDED:
            # Extended-table character: prefix with the escape octet
            result.append(0x1B) # ESC - switch to extended table
            result.append(ord(GSM7_EXTENDED[char]))
        elif not discardInvalid:
            raise ValueError('Cannot encode char "{0}" using GSM-7 encoding'.format(char))
    return result
def decodeGsm7(encodedText):
    """ GSM-7 text decoding algorithm

    Decodes the specified GSM-7-encoded string into a plaintext string.

    :param encodedText: the GSM-7-encoded text to decode
    :type encodedText: bytearray or str

    :return: A string containing the decoded text
    :rtype: str
    """
    result = []
    if type(encodedText) == str:
        encodedText = rawStrToByteArray(encodedText) #bytearray(encodedText)
    iterEncoded = iter(encodedText)
    for b in iterEncoded:
        if b == 0x1B: # ESC - switch to extended table
            # Reverse-lookup the character in the extended table
            c = chr(next(iterEncoded))
            for char, value in dictItemsIter(GSM7_EXTENDED):
                if c == value:
                    result.append(char)
                    break
        else:
            result.append(GSM7_BASIC[b])
    return ''.join(result)
def packSeptets(octets, padBits=0):
    """ Packs the specified octets into septets

    Typically the output of encodeGsm7 would be used as input to this function. The resulting
    bytearray contains the original GSM-7 characters packed into septets ready for transmission.

    :param octets: GSM-7 character values, one septet per byte
    :type octets: bytearray, str or iterator
    :param padBits: number of leading fill bits (used to septet-align data
        that follows a User Data Header)
    :type padBits: int

    :rtype: bytearray
    """
    packed = bytearray()
    if type(octets) == str:
        octets = iter(rawStrToByteArray(octets))
    elif type(octets) == bytearray:
        octets = iter(octets)
    bitOffset = padBits
    # "carry" holds the septet whose remaining bits still need to be emitted
    carry = next(octets) if padBits == 0 else 0x00
    for value in octets:
        septet = value & 0x7F
        if bitOffset == 7:
            # carry was already emitted completely in the previous iteration
            bitOffset = 0
            carry = septet
            continue
        packed.append(((septet << (7 - bitOffset)) & 0xFF) | (carry >> bitOffset))
        carry = septet
        bitOffset += 1
    if bitOffset != 7:
        # Flush the bits of the final septet that are still "left over"
        packed.append(carry >> bitOffset)
    return packed
def unpackSeptets(septets, numberOfSeptets=None, prevOctet=None, shift=7):
    """ Unpacks the specified septets into octets

    :param septets: Iterator or iterable containing the septets packed into octets
    :type septets: iter(bytearray), bytearray or str
    :param numberOfSeptets: The amount of septets to unpack (or None for all remaining in "septets")
    :type numberOfSeptets: int or None
    :param prevOctet: a packed octet already consumed by the caller, or None
        (used by _decodeUserData to resume unpacking after skipping UDH fill bits)
    :type prevOctet: int or None
    :param shift: initial bit-shift state; 7 starts a fresh unpacking cycle
    :type shift: int

    :return: The septets unpacked into octets
    :rtype: bytearray
    """
    result = bytearray()
    if type(septets) == str:
        septets = iter(rawStrToByteArray(septets))
    elif type(septets) == bytearray:
        septets = iter(septets)
    if numberOfSeptets == None:
        numberOfSeptets = MAX_INT # Loop until StopIteration
    i = 0
    for octet in septets:
        i += 1
        if shift == 7:
            # Start of an 8-octet unpacking cycle: the current octet's low
            # 7 bits form a complete septet on their own
            shift = 1
            if prevOctet != None:
                result.append(prevOctet >> 1)
            if i <= numberOfSeptets:
                result.append(octet & 0x7F)
                prevOctet = octet
                if i == numberOfSeptets:
                    break
                else:
                    continue
        # Combine this octet's low bits with the carried-over high bits of
        # the previous octet to form the next septet
        b = ((octet << shift) & 0x7F) | (prevOctet >> (8 - shift))
        prevOctet = octet
        result.append(b)
        shift += 1
        if i == numberOfSeptets:
            break
    if shift == 7:
        b = prevOctet >> (8 - shift)
        if b:
            # The final septet value still needs to be unpacked
            result.append(b)
    return result
def decodeUcs2(byteIter, numBytes):
    """ Decodes UCS2-encoded text from the specified byte iterator, up to a maximum of numBytes

    :param byteIter: iterator over the encoded octets
    :param numBytes: maximum number of octets to consume (2 per character)

    :return: the decoded text
    :rtype: str
    """
    userData = []
    i = 0
    try:
        while i < numBytes:
            # Each character is two big-endian octets: high byte first
            userData.append(unichr((next(byteIter) << 8) | next(byteIter)))
            i += 2
    except StopIteration:
        # Not enough bytes in iterator to reach numBytes; return what we have
        pass
    return ''.join(userData)
def encodeUcs2(text):
    """ UCS2 text encoding algorithm

    Encodes the specified text string into UCS2-encoded bytes
    (two big-endian bytes per character).

    :param text: the text string to encode

    :return: A bytearray containing the string encoded in UCS2 encoding
    :rtype: bytearray
    """
    encoded = bytearray()
    for char in text:
        codePoint = ord(char)
        encoded.append(codePoint >> 8)    # high byte first (big-endian)
        encoded.append(codePoint & 0xFF)  # then the low byte
    return encoded
| lgpl-3.0 |
mrrrgn/olympia | apps/amo/tests/test_redirects.py | 15 | 8955 | # -*- coding: utf-8 -*-
"""Check all our redirects from remora to zamboni."""
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Category
from django.db import connection
class TestRedirects(amo.tests.TestCase):
    """ Checks that legacy (remora) URLs redirect to their zamboni equivalents. """

    fixtures = ['reviews/test_models', 'addons/persona', 'base/global-stats']

    def test_persona_category(self):
        """`/personas/film and tv` should go to /themes/film-and-tv"""
        r = self.client.get('/personas/film and tv', follow=True)
        assert r.redirect_chain[-1][0].endswith(
            '/en-US/firefox/themes/film-and-tv')

    def test_top_tags(self):
        """`/top-tags/?` should 301 to `/tags/top`."""
        response = self.client.get(u'/top-tags/', follow=True)
        self.assert3xx(response, '/en-US/firefox/tags/top',
                       status_code=301)

    def test_contribute_installed(self):
        r"""`/addon/\d+/about` should go to
        `/addon/\d+/contribute/installed`."""
        r = self.client.get(u'/addon/5326/about', follow=True)
        redirect = r.redirect_chain[-1][0]
        assert redirect.endswith(
            '/en-US/firefox/addon/5326/contribute/installed/')

    def test_contribute(self):
        """`/addons/contribute/$id` should go to `/addon/$id/contribute`."""
        response = self.client.get(u'/addon/5326/contribute', follow=True)
        redirect = response.redirect_chain[-1][0]
        assert redirect.endswith('/en-US/firefox/addon/5326/contribute/')

    def test_utf8(self):
        """Without proper unicode handling this will fail."""
        response = self.client.get(u'/api/1.5/search/ツールバー',
                                   follow=True)
        # Sphinx will be off so let's just test that it redirects.
        eq_(response.redirect_chain[0][1], 301)

    def test_parameters(self):
        """Bug 554976. Make sure when we redirect, we preserve our query
        strings."""
        url = u'/users/login?to=/en-US/firefox/users/edit'
        r = self.client.get(url, follow=True)
        self.assert3xx(r, '/en-US/firefox' + url, status_code=301)

    def test_reviews(self):
        response = self.client.get('/reviews/display/4', follow=True)
        self.assert3xx(response, '/en-US/firefox/addon/a4/reviews/',
                       status_code=301)

    def test_browse(self):
        response = self.client.get('/browse/type:3', follow=True)
        self.assert3xx(response, '/en-US/firefox/language-tools/',
                       status_code=301)

        response = self.client.get('/browse/type:2', follow=True)
        self.assert3xx(response, '/en-US/firefox/complete-themes/',
                       status_code=301)

        # Drop the category.
        response = self.client.get('/browse/type:2/cat:all', follow=True)
        self.assert3xx(response, '/en-US/firefox/complete-themes/',
                       status_code=301)

    def test_accept_language(self):
        """
        Given an Accept Language header, do the right thing. See bug 439568
        for juicy details.
        """
        response = self.client.get('/', follow=True, HTTP_ACCEPT_LANGUAGE='de')
        self.assert3xx(response, '/de/firefox/', status_code=301)

        response = self.client.get('/', follow=True,
                                   HTTP_ACCEPT_LANGUAGE='en-us, de')
        self.assert3xx(response, '/en-US/firefox/', status_code=301)

        response = self.client.get('/', follow=True,
                                   HTTP_ACCEPT_LANGUAGE='fr, en')
        self.assert3xx(response, '/fr/firefox/', status_code=301)

        response = self.client.get('/', follow=True,
                                   HTTP_ACCEPT_LANGUAGE='pt-XX, xx, yy')
        self.assert3xx(response, '/pt-PT/firefox/', status_code=301)

        response = self.client.get('/', follow=True,
                                   HTTP_ACCEPT_LANGUAGE='pt')
        self.assert3xx(response, '/pt-PT/firefox/', status_code=301)

        response = self.client.get('/', follow=True,
                                   HTTP_ACCEPT_LANGUAGE='pt, de')
        self.assert3xx(response, '/pt-PT/firefox/', status_code=301)

        response = self.client.get('/', follow=True,
                                   HTTP_ACCEPT_LANGUAGE='pt-XX, xx, de')
        self.assert3xx(response, '/pt-PT/firefox/', status_code=301)

        # Unsupported languages fall back to en-US.
        response = self.client.get('/', follow=True,
                                   HTTP_ACCEPT_LANGUAGE='xx, yy, zz')
        self.assert3xx(response, '/en-US/firefox/', status_code=301)

        # A malformed header must not crash the redirect.
        response = self.client.get(
            '/', follow=True,
            HTTP_ACCEPT_LANGUAGE='some,thing-very;very,,,broken!\'jj')
        self.assert3xx(response, '/en-US/firefox/', status_code=301)

        # Quality values are honoured.
        response = self.client.get('/', follow=True,
                                   HTTP_ACCEPT_LANGUAGE='en-us;q=0.5, de')
        self.assert3xx(response, '/de/firefox/', status_code=301)

    def test_users(self):
        response = self.client.get('/users/info/1', follow=True)
        self.assert3xx(response, '/en-US/firefox/user/1/',
                       status_code=301)

    def test_extension_sorting(self):
        # Each legacy sort parameter maps to its new name.
        r = self.client.get('/browse/type:1?sort=updated', follow=True)
        self.assert3xx(r, '/en-US/firefox/extensions/?sort=updated',
                       status_code=301)
        r = self.client.get('/browse/type:1?sort=name', follow=True)
        self.assert3xx(r, '/en-US/firefox/extensions/?sort=name',
                       status_code=301)
        r = self.client.get('/browse/type:1?sort=newest', follow=True)
        self.assert3xx(r, '/en-US/firefox/extensions/?sort=created',
                       status_code=301)
        r = self.client.get('/browse/type:1?sort=weeklydownloads', follow=True)
        self.assert3xx(r, '/en-US/firefox/extensions/?sort=popular',
                       status_code=301)
        r = self.client.get('/browse/type:1?sort=averagerating', follow=True)
        self.assert3xx(r, '/en-US/firefox/extensions/?sort=rating',
                       status_code=301)
        # If we don't recognize the sort, they get nothing.
        r = self.client.get('/browse/type:1?sort=xxx', follow=True)
        self.assert3xx(r, '/en-US/firefox/extensions/',
                       status_code=301)

        Category.objects.create(pk=12, slug='woo', type=amo.ADDON_EXTENSION,
                                application=amo.FIREFOX.id, count=1, weight=0)
        r = self.client.get('/browse/type:1/cat:12?sort=averagerating',
                            follow=True)
        url, code = r.redirect_chain[-1]
        eq_(code, 301)
        assert url.endswith('/en-US/firefox/extensions/woo/?sort=rating')

    def test_addons_versions(self):
        r = self.client.get('/addons/versions/4', follow=True)
        self.assert3xx(r, '/en-US/firefox/addon/a4/versions/', status_code=301)

    def test_addons_versions_rss(self):
        r = self.client.get('/addons/versions/4/format:rss', follow=True)
        self.assert3xx(r, '/en-US/firefox/addon/4/versions/format:rss',
                       status_code=301)

    def test_addons_reviews_rss(self):
        r = self.client.get('/addons/reviews/4/format:rss', follow=True)
        self.assert3xx(r, '/en-US/firefox/addon/4/reviews/format:rss',
                       status_code=301)

    def test_mobile_to_android(self):
        """
        'Mobile' is the legacy XUL-based Firefox for Android.
        'Android' is the new hotness.
        """
        res = self.client.get('/mobile', follow=True)
        self.assert3xx(res, '/en-US/android/', status_code=301)

        res = self.client.get('/mobile/', follow=True)
        self.assert3xx(res, '/en-US/android/', status_code=301)

        res = self.client.get('/mobile/extensions/', follow=True)
        self.assert3xx(res, '/en-US/mobile/extensions/', status_code=301)
class TestPersonaRedirect(amo.tests.TestCase):
    """ Checks redirects from legacy /persona/<id> URLs to addon detail pages. """

    fixtures = ['addons/persona']

    def test_persona_redirect(self):
        r"""`/persona/\d+` should go to `/addon/\d+`."""
        r = self.client.get('/persona/813', follow=True)
        self.assert3xx(r, '/en-US/firefox/addon/a15663/', status_code=301)

    def test_persona_redirect_addon_no_exist(self):
        """When the persona exists but not its addon, throw a 404."""
        # We have to get shady to separate Persona/Addons.
        try:
            connection.cursor().execute("""
                SET FOREIGN_KEY_CHECKS = 0;
                UPDATE personas SET addon_id=123 WHERE persona_id=813;
                SET FOREIGN_KEY_CHECKS = 1;
            """)
            r = self.client.get('/persona/813', follow=True)
            eq_(r.status_code, 404)
        finally:
            # Restore the original addon link so other tests are unaffected.
            connection.cursor().execute("""
                SET FOREIGN_KEY_CHECKS = 0;
                UPDATE personas SET addon_id=15663 WHERE persona_id=813;
                SET FOREIGN_KEY_CHECKS = 1;
            """)
| bsd-3-clause |
lz1988/flaskweb | Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py | 1010 | 4507 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
    """
    authurl is a random URL on the server that is protected by NTLM.
    user is the Windows user, probably in the DOMAIN\\username format.
    pw is the password for the user.
    """
    super(NTLMConnectionPool, self).__init__(*args, **kwargs)
    self.authurl = authurl
    self.rawuser = user
    # Split "DOMAIN\\username"; NTLM expects the domain upper-cased.
    # NOTE(review): a user string without a backslash raises IndexError
    # here - confirm callers always pass the DOMAIN\\username form.
    domainAndUser = user.split('\\', 1)
    self.domain = domainAndUser[0].upper()
    self.user = domainAndUser[1]
    self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
| bsd-2-clause |
shantanu561993/volatility | volatility/plugins/gui/windows.py | 54 | 4108 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2010,2011,2012 Michael Hale Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.plugins.gui.messagehooks as messagehooks
class WinTree(messagehooks.MessageHooks):
 """Print Z-Order Desktop Windows Tree"""
 def render_text(self, outfd, data):
  """Write one indented line per window, depth-first in Z-order."""
  divider = "*" * 50 + "\n"
  for station, atom_tables in data:
   for desk in station.desktops():
    outfd.write(divider)
    outfd.write("Window context: {0}\\{1}\\{2}\n\n".format(
     station.dwSessionId, station.Name, desk.Name))
    for win, depth in desk.windows(desk.DeskInfo.spwnd):
     # Fall back to the handle value when the window has no name.
     label = str(win.strName or '') or "#{0:x}".format(win.head.h)
     visibility = "(visible)" if win.Visible else ""
     wnd_class = self.translate_atom(station, atom_tables, win.ClassAtom)
     outfd.write("{0}{1} {2} {3}:{4} {5}\n".format(
      "." * depth,
      label,
      visibility,
      win.Process.ImageFileName,
      win.Process.UniqueProcessId,
      wnd_class,
      ))
class Windows(messagehooks.MessageHooks):
 """Print Desktop Windows (verbose details)"""
 def render_text(self, outfd, data):
  """Write a verbose record for every window on every desktop.

  data yields (window station, atom tables) pairs; class atoms are
  resolved to class-name strings via translate_atom().
  """
  for winsta, atom_tables in data:
   for desktop in winsta.desktops():
    outfd.write("*" * 50 + "\n")
    outfd.write("Window context: {0}\\{1}\\{2}\n\n".format(
     winsta.dwSessionId, winsta.Name, desktop.Name))
    for wnd, _level in desktop.windows(desktop.DeskInfo.spwnd):
     outfd.write("Window Handle: #{0:x} at {1:#x}, Name: {2}\n".format(
      wnd.head.h, wnd.obj_offset, str(wnd.strName or '')
      ))
     outfd.write("ClassAtom: {0:#x}, Class: {1}\n".format(
      wnd.ClassAtom,
      self.translate_atom(winsta, atom_tables, wnd.ClassAtom),
      ))
     outfd.write("SuperClassAtom: {0:#x}, SuperClass: {1}\n".format(
      wnd.SuperClassAtom,
      self.translate_atom(winsta, atom_tables, wnd.SuperClassAtom),
      ))
     outfd.write("pti: {0:#x}, Tid: {1} at {2:#x}\n".format(
      wnd.head.pti.v(),
      wnd.Thread.Cid.UniqueThread,
      wnd.Thread.obj_offset,
      ))
     outfd.write("ppi: {0:#x}, Process: {1}, Pid: {2}\n".format(
      wnd.head.pti.ppi.v(),
      wnd.Process.ImageFileName,
      wnd.Process.UniqueProcessId,
      ))
     outfd.write("Visible: {0}\n".format("Yes" if wnd.Visible else "No"))
     # BUG FIX: the arguments were previously passed as (right, bottom),
     # so the "Bottom" label printed the right coordinate and vice versa.
     outfd.write("Left: {0}, Top: {1}, Bottom: {2}, Right: {3}\n".format(
      wnd.rcClient.left,
      wnd.rcClient.top,
      wnd.rcClient.bottom, wnd.rcClient.right
      ))
     outfd.write("Style Flags: {0}\n".format(wnd.style))
     outfd.write("ExStyle Flags: {0}\n".format(wnd.ExStyle))
     outfd.write("Window procedure: {0:#x}\n".format(
      wnd.lpfnWndProc,
      ))
     outfd.write("\n")
| gpl-2.0 |
sahiljain/catapult | third_party/gsutil/third_party/boto/boto/vpc/customergateway.py | 170 | 1968 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Customer Gateway
"""
from boto.ec2.ec2object import TaggedEC2Object
class CustomerGateway(TaggedEC2Object):
 """EC2 customer gateway parsed from the API's XML response."""
 def __init__(self, connection=None):
  super(CustomerGateway, self).__init__(connection)
  # All fields start empty; endElement() fills them in during parsing.
  for attr in ('id', 'type', 'state', 'ip_address', 'bgp_asn'):
   setattr(self, attr, None)
 def __repr__(self):
  return 'CustomerGateway:%s' % self.id
 def endElement(self, name, value, connection):
  """Map a closing XML element onto the matching attribute."""
  renamed = {
   'customerGatewayId': 'id',
   'ipAddress': 'ip_address',
   'type': 'type',
   'state': 'state',
  }
  if name == 'bgpAsn':
   # The BGP ASN is the only numeric field.
   self.bgp_asn = int(value)
  elif name in renamed:
   setattr(self, renamed[name], value)
  else:
   # Unknown elements become plain attributes, unchanged.
   setattr(self, name, value)
| bsd-3-clause |
TwolDE2/enigma2 | lib/python/Plugins/SystemPlugins/VideoTune/VideoFinetune.py | 6 | 11055 | from enigma import gFont, getDesktop, gMainDC, eSize, RT_HALIGN_RIGHT, RT_WRAP
from Screens.Screen import Screen
from Components.Sources.CanvasSource import CanvasSource
from Components.ActionMap import NumberActionMap
from Tools.Directories import fileExists
def RGB(r, g, b):
 """Pack three 8-bit channel values into a single 0xRRGGBB integer."""
 return b | (g << 8) | (r << 16)
class OverscanTestScreen(Screen):
 """Full-screen overscan test pattern.

 Switches the desktop to the requested resolution (default 1280x720) if
 needed and restores the previous one on close. The screen closes with
 True (OK), False (cancel) or the pressed number key, which the caller
 (VideoFinetune.testpicCallback) interprets.
 """
 skin = """
 <screen position="fill">
  <ePixmap pixmap="skin_default/overscan.png" position="0,0" size="1920,1080" zPosition="1" alphatest="on" />
 </screen>"""
 def __init__(self, session, xres=1280, yres=720):
  Screen.__init__(self, session)
  self.setTitle(_("Overscan Test"))
  # Remember the current desktop resolution so __close() can restore it.
  self.xres, self.yres = getDesktop(0).size().width(), getDesktop(0).size().height()
  if (self.xres, self.yres) != (xres, yres):
   gMainDC.getInstance().setResolution(xres, yres)
   getDesktop(0).resize(eSize(xres, yres))
   self.onClose.append(self.__close)
  self["actions"] = NumberActionMap(["InputActions", "OkCancelActions"],
  {
   "1": self.keyNumber,
   "2": self.keyNumber,
   "3": self.keyNumber,
   "4": self.keyNumber,
   "5": self.keyNumber,
   "7": self.keyNumber,
   "ok": self.ok,
   "cancel": self.cancel
  })
 def __close(self):
  # Restore the resolution that was active before this screen opened.
  gMainDC.getInstance().setResolution(self.xres, self.yres)
  getDesktop(0).resize(eSize(self.xres, self.yres))
 def ok(self):
  self.close(True)
 def cancel(self):
  self.close(False)
 def keyNumber(self, key):
  # Hand the pressed digit back to the caller for direct test selection.
  self.close(key)
class FullHDTestScreen(OverscanTestScreen):
 """1920x1080 test pattern.

 Reuses OverscanTestScreen's resolution switch/restore logic but binds
 number keys 1-6 (instead of 1-5 and 7) and shows a different pixmap.
 """
 skin = """
 <screen position="fill">
  <ePixmap pixmap="skin_default/testscreen.png" position="0,0" size="1920,1080" zPosition="1" alphatest="on" />
 </screen>"""
 def __init__(self, session):
  OverscanTestScreen.__init__(self, session, 1920, 1080)
  self.setTitle(_("FullHD Test"))
  # Rebind the action map set up by the base class with this screen's keys.
  self["actions"] = NumberActionMap(["InputActions", "OkCancelActions"],
  {
   "1": self.keyNumber,
   "2": self.keyNumber,
   "3": self.keyNumber,
   "4": self.keyNumber,
   "5": self.keyNumber,
   "6": self.keyNumber,
   "ok": self.ok,
   "cancel": self.cancel
  })
class VideoFinetune(Screen):
 """Test-pattern wizard for adjusting TV picture settings.

 Draws a chained sequence of test pictures (brightness, contrast, color,
 filter, gamma, overscan, full HD) on a CanvasSource. OK advances to the
 next picture; number keys 1-7 jump directly to a specific one.
 """
 skin = """
 <screen position="fill">
  <widget source="Canvas" render="Canvas" position="fill" />
 </screen>"""
 def __init__(self, session):
  Screen.__init__(self, session)
  self.setTitle(_("VideoFinetune"))
  self.skinAttributes = None
  self["Canvas"] = CanvasSource()
  self.basic_colors = [RGB(255, 255, 255), RGB(255, 255, 0), RGB(0, 255, 255), RGB(0, 255, 0), RGB(255, 0, 255), RGB(255, 0, 0), RGB(0, 0, 255), RGB(0, 0, 0)]
  # Larger font on desktops wider than 1280 px.
  self.MyFontSize = 20
  if getDesktop(0).size().width() > 1280:
   self.MyFontSize = 28
  if fileExists("/proc/stb/fb/dst_left"):
   # Save the framebuffer destination rectangle so __close() can restore
   # it, then reset it to the full area if it was changed.
   self.left = open("/proc/stb/fb/dst_left", "r").read()
   self.width = open("/proc/stb/fb/dst_width", "r").read()
   self.top = open("/proc/stb/fb/dst_top", "r").read()
   self.height = open("/proc/stb/fb/dst_height", "r").read()
   if self.left != "00000000" or self.top != "00000000" or self.width != "000002d0" or self.height != "0000000240":
    open("/proc/stb/fb/dst_left", "w").write("00000000")
    open("/proc/stb/fb/dst_width", "w").write("000002d0")
    open("/proc/stb/fb/dst_top", "w").write("00000000")
    open("/proc/stb/fb/dst_height", "w").write("0000000240")
   self.onClose.append(self.__close)
  self["actions"] = NumberActionMap(["InputActions", "OkCancelActions"],
  {
   "1": self.keyNumber,
   "2": self.keyNumber,
   "3": self.keyNumber,
   "4": self.keyNumber,
   "5": self.keyNumber,
   "6": self.keyNumber,
   "7": self.keyNumber,
   "ok": self.callNext,
   "cancel": self.close,
  })
  self.testpic_brightness()
 def __close(self):
  # Restore the framebuffer destination rectangle saved in __init__.
  open("/proc/stb/fb/dst_left", "w").write(self.left)
  open("/proc/stb/fb/dst_width", "w").write(self.width)
  open("/proc/stb/fb/dst_top", "w").write(self.top)
  open("/proc/stb/fb/dst_height", "w").write(self.height)
 def keyNumber(self, key):
  # Jump straight to the test picture selected with a number key (1-7).
  (self.testpic_brightness, self.testpic_contrast, self.testpic_colors, self.testpic_filter, self.testpic_gamma, self.testpic_overscan, self.testpic_fullhd)[key-1]()
 def callNext(self):
  # Each testpic_* method sets self.next to its successor before drawing.
  if self.next:
   self.next()
 def bbox(self, x, y, width, height, col, xx, yy):
  """Draw xx * yy corner markers in each corner of the given rectangle."""
  c = self["Canvas"]
  c.fill(x, y, xx, yy, col)
  c.fill(x + width - xx, y, xx, yy, col)
  c.fill(x, y + height - yy, xx, yy, col)
  c.fill(x + width - xx, y + height - yy, xx, yy, col)
 def testpic_brightness(self):
  """Dark gray staircase for setting the TV's brightness control."""
  self.next = self.testpic_contrast
  self.show()
  c = self["Canvas"]
  xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
  bbw, bbh = xres / 192, yres / 192
  c.fill(0, 0, xres, yres, RGB(0,0,0))
  for i in range(15):
   col = i * 116 / 14
   height = yres / 3
   eh = height / 8
   offset = yres/6 + eh * i
   x = xres * 2 / 3
   width = yres / 6
   c.fill(x, offset, width, eh, RGB(col, col, col))
   if col == 0 or col == 16 or col == 116:
    c.fill(x, offset, width, 2, RGB(255, 255, 255))
   if i < 2:
    c.writeText(x + width, offset, width, eh, RGB(255, 255, 255), RGB(0,0,0), gFont("Regular", self.MyFontSize), "%d." % (i+1))
  c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,255,255), RGB(0,0,0), gFont("Regular", self.MyFontSize * 2),
   _("Brightness"))
  c.writeText(xres / 10, yres / 5, xres / 2, yres * 4 / 6, RGB(255,255,255), RGB(0,0,0), gFont("Regular", self.MyFontSize),
   _("If your TV has a brightness or contrast enhancement, disable it. If there is something called \"dynamic\", "
    "set it to standard. Adjust the backlight level to a value suiting your taste. "
    "Turn down contrast on your TV as much as possible.\nThen turn the brightness setting as "
    "low as possible, but make sure that the two lowermost shades of gray stay distinguishable.\n"
    "Do not care about the bright shades now. They will be set up in the next step.\n"
    "If you are happy with the result, press OK."),
   RT_WRAP)
  c.flush()
 def testpic_contrast(self):
  """Bright gray staircase for setting the TV's contrast control."""
  self.next = self.testpic_colors
  self.show()
  c = self["Canvas"]
  xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
  bbw, bbh = xres / 192, yres / 192
  c.fill(0, 0, xres, yres, RGB(0,0,0))
  bbw = xres / 192
  bbh = yres / 192
  c.fill(0, 0, xres, yres, RGB(255,255,255))
  for i in range(15):
   col = 185 + i * 5
   height = yres / 3
   eh = height / 8
   offset = yres/6 + eh * i
   x = xres * 2 / 3
   width = yres / 6
   c.fill(x, offset, width, eh, RGB(col, col, col))
   if col == 185 or col == 235 or col == 255:
    c.fill(x, offset, width, 2, RGB(0,0,0))
   if i >= 13:
    c.writeText(x + width, offset, width, eh, RGB(0, 0, 0), RGB(255, 255, 255), gFont("Regular", self.MyFontSize), "%d." % (i-13+1))
  c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,0,0), RGB(255,255,255), gFont("Regular", self.MyFontSize * 2),
   _("Contrast"))
  c.writeText(xres / 10, yres / 5, xres / 2, yres * 4 / 6, RGB(0,0,0), RGB(255,255,255), gFont("Regular", self.MyFontSize),
   _("Now, use the contrast setting to turn up the brightness of the background as much as possible, "
    "but make sure that you can still see the difference between the two brightest levels of shades."
    "If you have done that, press OK."),
   RT_WRAP)
  c.flush()
 def testpic_colors(self):
  """Color gradients and primary-color bars for saturation adjustment."""
  self.next = self.testpic_filter
  self.show()
  c = self["Canvas"]
  xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
  bbw = xres / 192
  bbh = yres / 192
  c.fill(0, 0, xres, yres, RGB(255,255,255))
  for i in range(33):
   col = i * 255 / 32
   width = xres - xres/5
   ew = width / 33
   offset = xres/10 + ew * i
   y = yres * 2 / 3
   height = yres / 20
   o = yres / 60
   if i < 16:
    c1 = 0xFF
    c2 = 0xFF - (0xFF * i / 16)
   else:
    c1 = 0xFF - (0xFF * (i - 16) / 16)
    c2 = 0
   c.fill(offset, y, ew, height, RGB(c1, c2, c2))
   c.fill(offset, y + (height + o) * 1, ew, height, RGB(c2, c1, c2))
   c.fill(offset, y + (height + o) * 2, ew, height, RGB(c2, c2, c1))
   c.fill(offset, y + (height + o) * 3, ew, height, RGB(col, col, col))
   if i == 0:
    self.bbox(offset, y, ew, height, RGB(0,0,0), bbw, bbh)
    self.bbox(offset, y + (height + o) * 1, ew, height, RGB(0,0,0), bbw, bbh)
    self.bbox(offset, y + (height + o) * 2, ew, height, RGB(0,0,0), bbw, bbh)
  for i in range(8):
   height = yres / 3
   eh = height / 8
   offset = yres/6 + eh * i
   x = xres * 2 / 3
   width = yres / 6
   c.fill(x, offset, width, eh, self.basic_colors[i])
   if i == 0:
    self.bbox(x, offset, width, eh, RGB(0,0,0), bbw, bbh)
  c.writeText(xres / 10, yres / 6 - 40, xres * 3 / 5, 40, RGB(128,0,0), RGB(255,255,255), gFont("Regular", self.MyFontSize * 2),
   _("Color"))
  c.writeText(xres / 10, yres / 5, xres / 2, yres * 4 / 6, RGB(0,0,0), RGB(255,255,255), gFont("Regular", self.MyFontSize),
   _("Adjust the color settings so that all the color shades are distinguishable, but appear as saturated as possible. "
    "If you are happy with the result, press OK to close the video fine-tuning, or use the number keys to select other test screens."),
   RT_WRAP)
  c.flush()
 def testpic_filter(self):
  """Alternating 1/2/4 px stripes to reveal horizontal filtering."""
  self.next = self.testpic_gamma
  self.show()
  c = self["Canvas"]
  xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
  c.fill(0, 0, xres, yres, RGB(64, 64, 64))
  width = xres - xres/5
  offset = xres/10
  yb = yres * 2 / 3
  height = yres / 20
  o = yres / 60
  border = xres / 60
  g1 = 255
  g2 = 128
  c.fill(offset - border, yb - border, border * 2 + width, border * 2 + (height * 3 + o * 2), RGB(g1, g1, g1))
  for x in xrange(0, width, 2):
   c.fill(offset + x, yb, 1, height, RGB(g2,g2,g2))
  for x in xrange(0, width, 4):
   c.fill(offset + x, yb + (o + height), 2, height, RGB(g2,g2,g2))
  for x in xrange(0, width, 8):
   c.fill(offset + x, yb + (o + height) * 2, 4, height, RGB(g2,g2,g2))
  c.flush()
 def testpic_gamma(self):
  """Dithered stripes vs. solid grays for judging the gamma curve."""
  self.next = self.testpic_overscan
  self.show()
  c = self["Canvas"]
  xres, yres = getDesktop(0).size().width(), getDesktop(0).size().height()
  c.fill(0, 0, xres, yres, RGB(0, 0, 0))
  width = xres - xres/5
  offset_x = xres/10
  height = yres - yres/5
  offset_y = yres/10
  for y in xrange(0, height, 4):
   c.fill(offset_x, offset_y + y, width/2, 2, RGB(255,255,255))
  l = 0
  fnt = gFont("Regular", height / 14)
  import math
  for i in xrange(1, 15):
   y = i * height / 14
   h = y - l
   gamma = 0.6 + i * 0.2
   # Gray level that matches the 50% dither at the given gamma.
   col = int(math.pow(.5, 1.0/gamma) * 256.0)
   c.fill(offset_x + width/2, offset_y + l, width/2, h, RGB(col,col,col))
   c.writeText(offset_x + width/2, offset_y + l, width/2, h, RGB(0,0,0), RGB(col,col,col), fnt, "%1.2f" % gamma, RT_WRAP|RT_HALIGN_RIGHT)
   l = y
  c.flush()
 def testpic_overscan(self):
  self.next = self.testpic_fullhd
  self.hide()
  self.session.openWithCallback(self.testpicCallback, OverscanTestScreen)
 def testpic_fullhd(self):
  self.next = self.testpic_brightness
  self.hide()
  self.session.openWithCallback(self.testpicCallback, FullHDTestScreen)
 def testpicCallback(self, key):
  # The sub-screens close with True (OK -> advance to the next test),
  # a number key (jump directly to that test) or False/None (cancel).
  # BUG FIX: the previous nested `if key:` made the keyNumber() branch
  # unreachable, so number keys never jumped to the selected test.
  if key is True:
   self.next()
  elif key:
   self.keyNumber(key)
  else:
   self.close()
| gpl-2.0 |
roshchupkin/hase | tools/VCF2hdf5.py | 1 | 4024 |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import PYTHON_PATH
if PYTHON_PATH is not None:
for i in PYTHON_PATH: sys.path.insert(0,i)
import argparse
import h5py
import pandas as pd
import numpy as np
from hdgwas.tools import Timer
import tables
import glob
def probes_VCF2hdf5(data_path, save_path,study_name, chunk_size=1000000):
 # Convert the variant (probe) columns of a VCF-like TSV to an HDF5 table,
 # chunk_size rows at a time. Allele strings are replaced by their hash();
 # the hash -> allele mapping is written to <study_name>_hash_table.csv.gz.
 # NOTE: Python 2 only (print statements).
 if os.path.isfile(os.path.join(save_path,'probes',study_name+'.h5')):
  # Start from a clean file: to_hdf() below appends chunk by chunk.
  os.remove(os.path.join(save_path,'probes',study_name+'.h5'))
 hash_table={'keys':np.array([],dtype=np.int),'allele':np.array([])}
 df=pd.read_csv(data_path,sep='\t',chunksize=chunk_size, header=None,index_col=None)
 for i,chunk in enumerate(df):
  print 'add chunk {}'.format(i)
  print chunk.head()
  chunk.columns=[ "CHR","bp" ,"ID",'allele1','allele2','QUAL','FILTER','INFO'] #TODO (high) parse INFO
  hash_1=chunk.allele1.apply(hash)
  hash_2=chunk.allele2.apply(hash)
  # Keep one representative allele string per distinct hash value, and
  # append only hashes not already present in the running hash table.
  k,indices=np.unique(np.append(hash_1,hash_2),return_index=True)
  s=np.append(chunk.allele1,chunk.allele2)[indices]
  ind=np.invert(np.in1d(k,hash_table['keys']))
  hash_table['keys']=np.append(hash_table['keys'],k[ind])
  hash_table['allele']=np.append(hash_table['allele'],s[ind])
  chunk.allele1=hash_1
  chunk.allele2=hash_2
  chunk.to_hdf(os.path.join(save_path,'probes',study_name+'.h5'),data_columns=["CHR","bp" ,"ID",'allele1','allele2'], key='probes',format='table',append=True,
   min_itemsize = 25, complib='zlib',complevel=9 )
 pd.DataFrame.from_dict(hash_table).to_csv(os.path.join(save_path,'probes',study_name+'_hash_table.csv.gz'),index=False,compression='gzip', sep='\t')
def ind_VCF2hdf5(data_path, save_path,study_name):
 """Convert a one-ID-per-line sample file to an HDF5 'individuals' table.

 data_path: text file with one individual/sample ID per line.
 save_path: output root; the table is written to individuals/<study_name>.h5.
 Any existing output file is overwritten.
 """
 if os.path.isfile(os.path.join(save_path,'individuals',study_name+'.h5')):
  os.remove(os.path.join(save_path,'individuals',study_name+'.h5'))
 n=[]
 # BUG FIX: the previous code stripped the trailing newline with j[:-1],
 # which chopped the last character of the final ID when the file had no
 # trailing newline (and left a stray '\r' for CRLF files).
 with open(data_path,'r') as f:
  for j in f:
   n.append(j.rstrip('\r\n'))
 n=np.array(n)
 chunk=pd.DataFrame.from_dict({"individual":n})
 chunk.to_hdf(os.path.join(save_path,'individuals',study_name+'.h5'), key='individuals',format='table',
  min_itemsize = 25, complib='zlib',complevel=9 )
def genotype_VCF2hdf5(data_path,id, save_path, study_name):
 # Convert one genotype dosage chunk (TSV of floats) into a compressed
 # HDF5 CArray at genotype/<id>_<study_name>.h5, then delete the source.
 # NOTE: Python 2 only (print statements); `id` shadows the builtin.
 df=pd.read_csv(data_path, header=None, index_col=None,sep='\t', dtype=np.float16)
 data=df.as_matrix()
 print data.shape
 print 'Saving chunk...{}'.format(os.path.join(save_path,'genotype',str(id)+'_'+study_name+'.h5'))
 h5_gen_file = tables.open_file(
  os.path.join(save_path,'genotype',str(id)+'_'+study_name+'.h5'), 'w', title=study_name)
 atom = tables.Float16Atom()
 genotype = h5_gen_file.create_carray(h5_gen_file.root, 'genotype', atom,
  (data.shape),
  title='Genotype',
  filters=tables.Filters(complevel=9, complib='zlib'))
 genotype[:] = data
 h5_gen_file.close()
 # The raw TSV is no longer needed once it is stored as HDF5.
 os.remove(data_path)
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Script to convert VCF data')
parser.add_argument("-study_name", required=True, type=str, help="Study specific name")
parser.add_argument("-id", type=str, help="subject id")
parser.add_argument("-data",required=True, type=str, help="path to file")
parser.add_argument("-out",required=True, type=str, help="path to results save folder")
parser.add_argument("-flag",required=True,type=str,choices=['individuals','probes','chunk'], help="path to file with SNPs info")
args = parser.parse_args()
print args
try:
print ('Creating directories...')
os.mkdir(os.path.join(args.out,'genotype') )
os.mkdir(os.path.join(args.out,'individuals') )
os.mkdir(os.path.join(args.out,'probes') )
os.mkdir(os.path.join(args.out,'tmp_files'))
except:
print('Directories "genotype","probes","individuals" are already exist in {}...'.format(args.out))
if args.flag=='probes':
probes_VCF2hdf5(args.data, args.out, args.study_name)
elif args.flag=='individuals':
ind_VCF2hdf5(args.data, args.out,args.study_name)
elif args.flag=='chunk':
genotype_VCF2hdf5(args.data,args.id, args.out,args.study_name)
| gpl-3.0 |
gnemoug/scrapy | scrapy/tests/test_spidermiddleware_offsite.py | 25 | 1694 | from unittest import TestCase
from scrapy.http import Response, Request
from scrapy.spider import BaseSpider
from scrapy.contrib.spidermiddleware.offsite import OffsiteMiddleware
class TestOffsiteMiddleware(TestCase):
def setUp(self):
self.spider = self._get_spider()
self.mw = OffsiteMiddleware()
self.mw.spider_opened(self.spider)
def _get_spider(self):
return BaseSpider('foo', allowed_domains=['scrapytest.org', 'scrapy.org'])
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
onsite_reqs = [Request('http://scrapytest.org/1'),
Request('http://scrapy.org/1'),
Request('http://sub.scrapy.org/1'),
Request('http://offsite.tld/letmepass', dont_filter=True)]
offsite_reqs = [Request('http://scrapy2.org'),
Request('http://offsite.tld/')]
reqs = onsite_reqs + offsite_reqs
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEquals(out, onsite_reqs)
def tearDown(self):
self.mw.spider_closed(self.spider)
class TestOffsiteMiddleware2(TestOffsiteMiddleware):
 """With allowed_domains=None the middleware must pass everything through."""
 def _get_spider(self):
  return BaseSpider('foo', allowed_domains=None)
 def test_process_spider_output(self):
  response = Response('http://scrapytest.org')
  requests = [Request('http://a.com/b.html'), Request('http://b.com/1')]
  passed = list(self.mw.process_spider_output(response, requests, self.spider))
  self.assertEquals(passed, requests)
class TestOffsiteMiddleware3(TestOffsiteMiddleware2):
 # Same expectations as TestOffsiteMiddleware2, but the spider never sets
 # allowed_domains at all (attribute defaulted, not explicitly None).
 def _get_spider(self):
  return BaseSpider('foo')
| bsd-3-clause |
follow99/django | tests/transactions/tests.py | 88 | 19722 | from __future__ import unicode_literals
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from .models import Reporter
@skipUnless(connection.features.uses_savepoints,
 "'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
 """
 Tests for the atomic decorator and context manager.
 The tests make assertions on internal attributes because there isn't a
 robust way to ask the database for its current transaction state.
 Since the decorator syntax is converted into a context manager (see the
 implementation), there are only a few basic tests with the decorator
 syntax and the bulk of the tests use the context manager syntax.
 """
 available_apps = ['transactions']
 # --- decorator syntax: @transaction.atomic and @transaction.atomic() ---
 def test_decorator_syntax_commit(self):
  @transaction.atomic
  def make_reporter():
   Reporter.objects.create(first_name="Tintin")
  make_reporter()
  self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
 def test_decorator_syntax_rollback(self):
  @transaction.atomic
  def make_reporter():
   Reporter.objects.create(first_name="Haddock")
   raise Exception("Oops, that's his last name")
  with six.assertRaisesRegex(self, Exception, "Oops"):
   make_reporter()
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 def test_alternate_decorator_syntax_commit(self):
  @transaction.atomic()
  def make_reporter():
   Reporter.objects.create(first_name="Tintin")
  make_reporter()
  self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
 def test_alternate_decorator_syntax_rollback(self):
  @transaction.atomic()
  def make_reporter():
   Reporter.objects.create(first_name="Haddock")
   raise Exception("Oops, that's his last name")
  with six.assertRaisesRegex(self, Exception, "Oops"):
   make_reporter()
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 # --- context-manager syntax, single block ---
 def test_commit(self):
  with transaction.atomic():
   Reporter.objects.create(first_name="Tintin")
  self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
 def test_rollback(self):
  with six.assertRaisesRegex(self, Exception, "Oops"):
   with transaction.atomic():
    Reporter.objects.create(first_name="Haddock")
    raise Exception("Oops, that's his last name")
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 # --- nested blocks: the inner atomic gets its own savepoint ---
 def test_nested_commit_commit(self):
  with transaction.atomic():
   Reporter.objects.create(first_name="Tintin")
   with transaction.atomic():
    Reporter.objects.create(first_name="Archibald", last_name="Haddock")
  self.assertQuerysetEqual(Reporter.objects.all(),
   ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
 def test_nested_commit_rollback(self):
  with transaction.atomic():
   Reporter.objects.create(first_name="Tintin")
   with six.assertRaisesRegex(self, Exception, "Oops"):
    with transaction.atomic():
     Reporter.objects.create(first_name="Haddock")
     raise Exception("Oops, that's his last name")
  self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
 def test_nested_rollback_commit(self):
  with six.assertRaisesRegex(self, Exception, "Oops"):
   with transaction.atomic():
    Reporter.objects.create(last_name="Tintin")
    with transaction.atomic():
     Reporter.objects.create(last_name="Haddock")
    raise Exception("Oops, that's his first name")
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 def test_nested_rollback_rollback(self):
  with six.assertRaisesRegex(self, Exception, "Oops"):
   with transaction.atomic():
    Reporter.objects.create(last_name="Tintin")
    with six.assertRaisesRegex(self, Exception, "Oops"):
     with transaction.atomic():
      Reporter.objects.create(first_name="Haddock")
      raise Exception("Oops, that's his last name")
    raise Exception("Oops, that's his first name")
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 # --- savepoint=False: inner block merges into the outer transaction ---
 def test_merged_commit_commit(self):
  with transaction.atomic():
   Reporter.objects.create(first_name="Tintin")
   with transaction.atomic(savepoint=False):
    Reporter.objects.create(first_name="Archibald", last_name="Haddock")
  self.assertQuerysetEqual(Reporter.objects.all(),
   ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
 def test_merged_commit_rollback(self):
  with transaction.atomic():
   Reporter.objects.create(first_name="Tintin")
   with six.assertRaisesRegex(self, Exception, "Oops"):
    with transaction.atomic(savepoint=False):
     Reporter.objects.create(first_name="Haddock")
     raise Exception("Oops, that's his last name")
  # Writes in the outer block are rolled back too.
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 def test_merged_rollback_commit(self):
  with six.assertRaisesRegex(self, Exception, "Oops"):
   with transaction.atomic():
    Reporter.objects.create(last_name="Tintin")
    with transaction.atomic(savepoint=False):
     Reporter.objects.create(last_name="Haddock")
    raise Exception("Oops, that's his first name")
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 def test_merged_rollback_rollback(self):
  with six.assertRaisesRegex(self, Exception, "Oops"):
   with transaction.atomic():
    Reporter.objects.create(last_name="Tintin")
    with six.assertRaisesRegex(self, Exception, "Oops"):
     with transaction.atomic(savepoint=False):
      Reporter.objects.create(first_name="Haddock")
      raise Exception("Oops, that's his last name")
    raise Exception("Oops, that's his first name")
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 # --- reusing a single atomic() instance for nested blocks ---
 def test_reuse_commit_commit(self):
  atomic = transaction.atomic()
  with atomic:
   Reporter.objects.create(first_name="Tintin")
   with atomic:
    Reporter.objects.create(first_name="Archibald", last_name="Haddock")
  self.assertQuerysetEqual(Reporter.objects.all(),
   ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
 def test_reuse_commit_rollback(self):
  atomic = transaction.atomic()
  with atomic:
   Reporter.objects.create(first_name="Tintin")
   with six.assertRaisesRegex(self, Exception, "Oops"):
    with atomic:
     Reporter.objects.create(first_name="Haddock")
     raise Exception("Oops, that's his last name")
  self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
 def test_reuse_rollback_commit(self):
  atomic = transaction.atomic()
  with six.assertRaisesRegex(self, Exception, "Oops"):
   with atomic:
    Reporter.objects.create(last_name="Tintin")
    with atomic:
     Reporter.objects.create(last_name="Haddock")
    raise Exception("Oops, that's his first name")
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 def test_reuse_rollback_rollback(self):
  atomic = transaction.atomic()
  with six.assertRaisesRegex(self, Exception, "Oops"):
   with atomic:
    Reporter.objects.create(last_name="Tintin")
    with six.assertRaisesRegex(self, Exception, "Oops"):
     with atomic:
      Reporter.objects.create(first_name="Haddock")
      raise Exception("Oops, that's his last name")
    raise Exception("Oops, that's his first name")
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 # --- manipulating the rollback flag directly ---
 def test_force_rollback(self):
  with transaction.atomic():
   Reporter.objects.create(first_name="Tintin")
   # atomic block shouldn't rollback, but force it.
   self.assertFalse(transaction.get_rollback())
   transaction.set_rollback(True)
  self.assertQuerysetEqual(Reporter.objects.all(), [])
 def test_prevent_rollback(self):
  with transaction.atomic():
   Reporter.objects.create(first_name="Tintin")
   sid = transaction.savepoint()
   # trigger a database error inside an inner atomic without savepoint
   with self.assertRaises(DatabaseError):
    with transaction.atomic(savepoint=False):
     with connection.cursor() as cursor:
      cursor.execute(
       "SELECT no_such_col FROM transactions_reporter")
   # prevent atomic from rolling back since we're recovering manually
   self.assertTrue(transaction.get_rollback())
   transaction.set_rollback(False)
   transaction.savepoint_rollback(sid)
  self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
 """All basic tests for atomic should also pass within an existing transaction."""
 def setUp(self):
  # Open an outer atomic block that stays active for the whole test; the
  # inherited tests then run nested inside it.
  self.atomic = transaction.atomic()
  self.atomic.__enter__()
 def tearDown(self):
  self.atomic.__exit__(*sys.exc_info())
@skipIf(connection.features.autocommits_when_autocommit_is_off,
 "This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicWithoutAutocommitTests(AtomicTests):
 """All basic tests for atomic should also pass when autocommit is turned off."""
 def setUp(self):
  transaction.set_autocommit(False)
 def tearDown(self):
  # The tests access the database after exercising 'atomic', initiating
  # a transaction ; a rollback is required before restoring autocommit.
  transaction.rollback()
  transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints,
            "'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
    """Test merging transactions with savepoint=False."""

    available_apps = ['transactions']

    def test_merged_outer_rollback(self):
        """With savepoint=False throughout, a failure anywhere dooms the whole
        outer transaction."""
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Calculus")
                        raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
                self.assertTrue(transaction.get_rollback())
                transaction.set_rollback(False)
                self.assertEqual(Reporter.objects.count(), 3)
                transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
            # connection as not needing rollback to check it.
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            self.assertEqual(Reporter.objects.count(), 3)
            transaction.set_rollback(True)
        # The first block has a savepoint and must roll back.
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_merged_inner_savepoint_rollback(self):
        """A savepointed middle block confines the rollback of a merged
        (savepoint=False) innermost block."""
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic():
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Calculus")
                        raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
                self.assertTrue(transaction.get_rollback())
                transaction.set_rollback(False)
                self.assertEqual(Reporter.objects.count(), 3)
                transaction.set_rollback(True)
            # The second block has a savepoint and must roll back.
            self.assertEqual(Reporter.objects.count(), 1)
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints,
            "'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
    """Error conditions raised or enforced while inside an atomic block."""

    available_apps = ['transactions']

    def test_atomic_prevents_setting_autocommit(self):
        autocommit = transaction.get_autocommit()
        with transaction.atomic():
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.set_autocommit(not autocommit)
        # Make sure autocommit wasn't changed.
        self.assertEqual(connection.autocommit, autocommit)

    def test_atomic_prevents_calling_transaction_methods(self):
        # Manual commit/rollback are forbidden inside atomic.
        with transaction.atomic():
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.commit()
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.rollback()

    def test_atomic_prevents_queries_in_broken_transaction(self):
        r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        with transaction.atomic():
            # Duplicate primary key -> IntegrityError.
            r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
            with self.assertRaises(IntegrityError):
                r2.save(force_insert=True)
            # The transaction is marked as needing rollback.
            with self.assertRaises(transaction.TransactionManagementError):
                r2.save(force_update=True)
        self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")

    @skipIfDBFeature('atomic_transactions')
    def test_atomic_allows_queries_after_fixing_transaction(self):
        r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        with transaction.atomic():
            r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
            with self.assertRaises(IntegrityError):
                r2.save(force_insert=True)
            # Mark the transaction as no longer needing rollback.
            transaction.set_rollback(False)
            r2.save(force_update=True)
        self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")

    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Archibald", last_name="Haddock")
            connection.close()
            # The connection is closed and the transaction is marked as
            # needing rollback. This will raise an InterfaceError on databases
            # that refuse to create cursors on closed connections (PostgreSQL)
            # and a TransactionManagementError on other databases.
            with self.assertRaises(Error):
                Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
        self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
    """Behaviors specific to MySQL's transaction implementation."""

    available_apps = ['transactions']

    @skipIf(threading is None, "Test requires threading")
    def test_implicit_savepoint_rollback(self):
        """MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
        other_thread_ready = threading.Event()

        def other_thread():
            try:
                with transaction.atomic():
                    Reporter.objects.create(id=1, first_name="Tintin")
                    other_thread_ready.set()
                    # We cannot synchronize the two threads with an event here
                    # because the main thread locks. Sleep for a little while.
                    time.sleep(1)
                    # 2) ... and this line deadlocks. (see below for 1)
                    Reporter.objects.exclude(id=1).update(id=2)
            finally:
                # This is the thread-local connection, not the main connection.
                connection.close()

        other_thread = threading.Thread(target=other_thread)
        other_thread.start()
        other_thread_ready.wait()
        with six.assertRaisesRegex(self, OperationalError, 'Deadlock found'):
            # Double atomic to enter a transaction and create a savepoint.
            with transaction.atomic():
                with transaction.atomic():
                    # 1) This line locks... (see above for 2)
                    Reporter.objects.create(id=1, first_name="Tintin")
        other_thread.join()
class AtomicMiscTests(TransactionTestCase):
    """Miscellaneous regression tests for transaction.atomic."""

    available_apps = []

    def test_wrap_callable_instance(self):
        """#20028 -- Atomic must support wrapping callable instances."""
        class Callable(object):
            def __call__(self):
                pass
        # Must not raise an exception
        transaction.atomic(Callable())

    @skipUnlessDBFeature('can_release_savepoints')
    def test_atomic_does_not_leak_savepoints_on_failure(self):
        """#23074 -- Savepoints must be released after rollback."""
        # Expect an error when rolling back a savepoint that doesn't exist.
        # Done outside of the transaction block to ensure proper recovery.
        with self.assertRaises(Error):
            # Start a plain transaction.
            with transaction.atomic():
                # Swallow the intentional error raised in the sub-transaction.
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    # Start a sub-transaction with a savepoint.
                    with transaction.atomic():
                        sid = connection.savepoint_ids[-1]
                        raise Exception("Oops")
                # This is expected to fail because the savepoint no longer exists.
                connection.savepoint_rollback(sid)

    @skipIf(connection.features.autocommits_when_autocommit_is_off,
            "This test requires a non-autocommit mode that doesn't autocommit.")
    def test_orm_query_without_autocommit(self):
        """#24921 -- ORM queries must be possible after set_autocommit(False)."""
        transaction.set_autocommit(False)
        try:
            Reporter.objects.create(first_name="Tintin")
        finally:
            # Always restore autocommit so later tests see a clean connection.
            transaction.rollback()
            transaction.set_autocommit(True)
| bsd-3-clause |
Designist/audacity | lib-src/lv2/lv2/waflib/Scripting.py | 177 | 10667 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shlex,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
build_dir_override=None
no_climb_commands=['configure']
default_cmd="build"
def waf_entry_point(current_directory, version, wafdir):
    """Main entry point: check versions, locate the project directory
    (climbing up from *current_directory* unless NOCLIMB or a no-climb
    command forbids it), load the wscript and run the requested commands.
    NOTE: this file uses Python 2 `except X, e:` syntax throughout."""
    Logs.init_log()
    if Context.WAFVERSION != version:
        Logs.error('Waf script %r and library %r do not match (directory %r)' % (version, Context.WAFVERSION, wafdir))
        sys.exit(1)
    if '--version' in sys.argv:
        # Only parse options to print the version banner, then quit.
        Context.run_dir = current_directory
        ctx = Context.create_context('options')
        ctx.curdir = current_directory
        ctx.parse_args()
        sys.exit(0)
    Context.waf_dir = wafdir
    Context.launch_dir = current_directory
    # Commands such as 'configure' must run where they were invoked.
    no_climb = os.environ.get('NOCLIMB', None)
    if not no_climb:
        for k in no_climb_commands:
            if k in sys.argv:
                no_climb = True
                break
    cur = current_directory
    while cur:
        lst = os.listdir(cur)
        if Options.lockfile in lst:
            env = ConfigSet.ConfigSet()
            try:
                env.load(os.path.join(cur, Options.lockfile))
                ino = os.stat(cur)[stat.ST_INO]
            except Exception:
                pass
            else:
                # Compare the current folder against the recorded dirs; on
                # win32 compare paths, elsewhere compare inode numbers.
                for x in [env.run_dir, env.top_dir, env.out_dir]:
                    if Utils.is_win32:
                        if cur == x:
                            load = True
                            break
                    else:
                        try:
                            ino2 = os.stat(x)[stat.ST_INO]
                        except OSError:
                            pass
                        else:
                            if ino == ino2:
                                load = True
                                break
                else:
                    # for/else: no recorded dir matched -> stale lock file.
                    Logs.warn('invalid lock file in %s' % cur)
                    load = False
                if load:
                    Context.run_dir = env.run_dir
                    Context.top_dir = env.top_dir
                    Context.out_dir = env.out_dir
                    break
        if not Context.run_dir:
            if Context.WSCRIPT_FILE in lst:
                Context.run_dir = cur
        next = os.path.dirname(cur)
        if next == cur:
            # Reached the filesystem root.
            break
        cur = next
        if no_climb:
            break
    if not Context.run_dir:
        if '-h' in sys.argv or '--help' in sys.argv:
            Logs.warn('No wscript file found: the help message may be incomplete')
            Context.run_dir = current_directory
            ctx = Context.create_context('options')
            ctx.curdir = current_directory
            ctx.parse_args()
            sys.exit(0)
        Logs.error('Waf: Run from a directory containing a file named %r' % Context.WSCRIPT_FILE)
        sys.exit(1)
    try:
        os.chdir(Context.run_dir)
    except OSError:
        Logs.error('Waf: The folder %r is unreadable' % Context.run_dir)
        sys.exit(1)
    try:
        set_main_module(Context.run_dir + os.sep + Context.WSCRIPT_FILE)
    except Errors.WafError, e:
        Logs.pprint('RED', e.verbose_msg)
        Logs.error(str(e))
        sys.exit(1)
    except Exception, e:
        Logs.error('Waf: The wscript in %r is unreadable' % Context.run_dir, e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(2)
    try:
        run_commands()
    except Errors.WafError, e:
        if Logs.verbose > 1:
            Logs.pprint('RED', e.verbose_msg)
        Logs.error(e.msg)
        sys.exit(1)
    except SystemExit:
        raise
    except Exception, e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(2)
    except KeyboardInterrupt:
        Logs.pprint('RED', 'Interrupted')
        sys.exit(68)
def set_main_module(file_path):
    """Load *file_path* as the project's main wscript module (stored in
    Context.g_module) and inject default implementations for the standard
    commands/functions the user did not define."""
    Context.g_module = Context.load_module(file_path)
    Context.g_module.root_path = file_path

    def set_def(obj):
        # Inject obj under its own name only if the wscript lacks it.
        name = obj.__name__
        if not name in Context.g_module.__dict__:
            setattr(Context.g_module, name, obj)
    # Fix: 'update' was listed twice in the original sequence; the duplicate
    # call was a no-op (the name is already set on the first pass).
    for k in [update, dist, distclean, distcheck]:
        set_def(k)
    if not 'init' in Context.g_module.__dict__:
        Context.g_module.init = Utils.nada
    if not 'shutdown' in Context.g_module.__dict__:
        Context.g_module.shutdown = Utils.nada
    if not 'options' in Context.g_module.__dict__:
        Context.g_module.options = Utils.nada
def parse_options():
    """Parse the command line, default the command list, and configure the
    logging verbosity/zones accordingly."""
    Context.create_context('options').execute()
    if not Options.commands:
        Options.commands = [default_cmd]
    # 'options' is consumed here; never forward it as a command.
    Options.commands = [x for x in Options.commands if x != 'options']
    Logs.verbose = Options.options.verbose
    Logs.init_log()
    if Options.options.zones:
        Logs.zones = Options.options.zones.split(',')
        if not Logs.verbose:
            # Zone filtering implies at least minimal verbosity.
            Logs.verbose = 1
    elif Logs.verbose > 0:
        Logs.zones = ['runner']
    if Logs.verbose > 2:
        Logs.zones = ['*']
def run_command(cmd_name):
    """Create the context class registered for *cmd_name*, execute it and
    return the executed context (its log_timer records the elapsed time)."""
    command_ctx = Context.create_context(cmd_name)
    command_ctx.cmd = cmd_name
    command_ctx.options = Options.options
    command_ctx.log_timer = Utils.Timer()
    command_ctx.execute()
    return command_ctx
def run_commands():
    """Run 'init', then each queued command in order, then 'shutdown'."""
    parse_options()
    run_command('init')
    while Options.commands:
        # Commands may enqueue more commands (see autoconfigure below).
        cmd_name = Options.commands.pop(0)
        ctx = run_command(cmd_name)
        Logs.info('%r finished successfully (%s)' % (cmd_name, str(ctx.log_timer)))
    run_command('shutdown')
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
    """Remove build artifacts under *dirname*, then the cache database,
    'config.log' and the 'c4che' folder in the current directory."""
    for (root, dirs, files) in os.walk(dirname):
        for f in files:
            if _can_distclean(f):
                fname = root + os.sep + f
                try:
                    os.remove(fname)
                except OSError:
                    Logs.warn('Could not remove %r' % fname)
    for x in [Context.DBFILE, 'config.log']:
        try:
            os.remove(x)
        except OSError:
            # Missing files are fine; this is best-effort cleanup.
            pass
    try:
        shutil.rmtree('c4che')
    except OSError:
        pass
def distclean(ctx):
    '''removes the build directory'''
    # Scan the current directory for a lock file; it records out_dir/top_dir/
    # run_dir. Remove the build output and the lock files themselves.
    lst = os.listdir('.')
    for f in lst:
        if f == Options.lockfile:
            try:
                proj = ConfigSet.ConfigSet(f)
            except IOError:
                Logs.warn('Could not read %r' % f)
                continue
            if proj['out_dir'] != proj['top_dir']:
                # Out-of-tree build: the whole output dir can go.
                try:
                    shutil.rmtree(proj['out_dir'])
                except IOError:
                    pass
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        Logs.warn('Could not remove %r' % proj['out_dir'])
            else:
                # In-tree build: delete artifacts selectively.
                distclean_dir(proj['out_dir'])
            for k in (proj['out_dir'], proj['top_dir'], proj['run_dir']):
                p = os.path.join(k, Options.lockfile)
                try:
                    os.remove(p)
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        Logs.warn('Could not remove %r' % p)
        if not Options.commands:
            # Also remove unpacked waf library folders (.waf-1.*, etc.).
            for x in '.waf-1. waf-1. .waf3-1. waf3-1.'.split():
                if f.startswith(x):
                    shutil.rmtree(f, ignore_errors=True)
class Dist(Context.Context):
    '''creates an archive containing the project source code'''
    cmd = 'dist'
    fun = 'dist'
    # Default archive format; ext_algo may map an algo to a custom extension.
    algo = 'tar.bz2'
    ext_algo = {}

    def execute(self):
        # Run the user's dist() from the wscript, then build the archive.
        self.recurse([os.path.dirname(Context.g_module.root_path)])
        self.archive()

    def archive(self):
        """Create the tar.* or zip archive and log its SHA1 digest."""
        import tarfile
        arch_name = self.get_arch_name()
        try:
            self.base_path
        except AttributeError:
            # Default the archive root to the current context path.
            self.base_path = self.path
        node = self.base_path.make_node(arch_name)
        try:
            # Remove a previous archive with the same name, if any.
            node.delete()
        except Exception:
            pass
        files = self.get_files()
        if self.algo.startswith('tar.'):
            tar = tarfile.open(arch_name, 'w:' + self.algo.replace('tar.', ''))
            for x in files:
                self.add_tar_file(x, tar)
            tar.close()
        elif self.algo == 'zip':
            import zipfile
            zip = zipfile.ZipFile(arch_name, 'w', compression=zipfile.ZIP_DEFLATED)
            for x in files:
                archive_name = self.get_base_name() + '/' + x.path_from(self.base_path)
                zip.write(x.abspath(), archive_name, zipfile.ZIP_DEFLATED)
            zip.close()
        else:
            self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
        try:
            from hashlib import sha1 as sha
        except ImportError:
            # Very old Pythons only have the 'sha' module.
            from sha import sha
        try:
            digest = " (sha=%r)" % sha(node.read()).hexdigest()
        except Exception:
            digest = ''
        Logs.info('New archive created: %s%s' % (self.arch_name, digest))

    def get_tar_path(self, node):
        # Hook point: subclasses may map nodes to different on-disk paths.
        return node.abspath()

    def add_tar_file(self, x, tar):
        """Add one file node to *tar*, normalizing ownership to root/root."""
        p = self.get_tar_path(x)
        tinfo = tar.gettarinfo(name=p, arcname=self.get_tar_prefix() + '/' + x.path_from(self.base_path))
        tinfo.uid = 0
        tinfo.gid = 0
        tinfo.uname = 'root'
        tinfo.gname = 'root'
        fu = None
        try:
            fu = open(p, 'rb')
            tar.addfile(tinfo, fileobj=fu)
        finally:
            if fu:
                fu.close()

    def get_tar_prefix(self):
        # Folder name files are placed under inside the tarball.
        try:
            return self.tar_prefix
        except AttributeError:
            return self.get_base_name()

    def get_arch_name(self):
        # Computed once and cached on the instance (also read by archive()).
        try:
            self.arch_name
        except AttributeError:
            self.arch_name = self.get_base_name() + '.' + self.ext_algo.get(self.algo, self.algo)
        return self.arch_name

    def get_base_name(self):
        # '<APPNAME>-<VERSION>' taken from the wscript module, cached.
        try:
            self.base_name
        except AttributeError:
            appname = getattr(Context.g_module, Context.APPNAME, 'noname')
            version = getattr(Context.g_module, Context.VERSION, '1.0')
            self.base_name = appname + '-' + version
        return self.base_name

    def get_excl(self):
        """Return the ant_glob exclusion patterns (VCS dirs, waf leftovers,
        editor backups, and the build output directory)."""
        try:
            return self.excl
        except AttributeError:
            self.excl = Node.exclude_regs + ' **/waf-1.7.* **/.waf-1.7* **/waf3-1.7.* **/.waf3-1.7* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
            nd = self.root.find_node(Context.out_dir)
            if nd:
                self.excl += ' ' + nd.path_from(self.base_path)
            return self.excl

    def get_files(self):
        # self.files, when set by the wscript, overrides the glob.
        try:
            files = self.files
        except AttributeError:
            files = self.base_path.ant_glob('**/*', excl=self.get_excl())
        return files
def dist(ctx):
    '''makes a tarball for redistributing the sources'''
    # Intentionally empty: the Dist context class (cmd='dist', fun='dist')
    # performs the archiving; this default is injected into wscripts that do
    # not define their own dist(). The docstring is the command help text.
    pass
class DistCheck(Dist):
    """Create the source archive, then verify it builds and installs cleanly."""
    fun = 'distcheck'
    cmd = 'distcheck'

    def execute(self):
        self.recurse([os.path.dirname(Context.g_module.root_path)])
        self.archive()
        self.check()

    def check(self):
        """Extract the archive and run configure/install/uninstall in it."""
        import tempfile, tarfile
        t = None
        try:
            t = tarfile.open(self.get_arch_name())
            for x in t:
                t.extract(x)
        finally:
            if t:
                t.close()
        cfg = []
        if Options.options.distcheck_args:
            cfg = shlex.split(Options.options.distcheck_args)
        else:
            # Forward the dash-prefixed arguments of the current invocation.
            cfg = [x for x in sys.argv if x.startswith('-')]
        instdir = tempfile.mkdtemp('.inst', self.get_base_name())
        ret = Utils.subprocess.Popen([sys.executable, sys.argv[0], 'configure', 'install', 'uninstall', '--destdir=' + instdir] + cfg, cwd=self.get_base_name()).wait()
        if ret:
            raise Errors.WafError('distcheck failed with code %i' % ret)
        if os.path.exists(instdir):
            # uninstall must leave the destdir empty (and removed).
            raise Errors.WafError('distcheck succeeded, but files were left in %s' % instdir)
        shutil.rmtree(self.get_base_name())
def distcheck(ctx):
    '''checks if the project compiles (tarball from 'dist')'''
    # Intentionally empty: DistCheck (cmd='distcheck') does the work; this
    # default is injected into wscripts lacking a distcheck() function.
    pass
def update(ctx):
    '''updates the plugins from the *waflib/extras* directory'''
    lst = Options.options.files.split(',')
    if not lst:
        # No explicit list: refresh every extra tool currently present.
        lst = [x for x in Utils.listdir(Context.waf_dir + '/waflib/extras') if x.endswith('.py')]
    for x in lst:
        tool = x.replace('.py', '')
        try:
            Configure.download_tool(tool, force=True, ctx=ctx)
        except Errors.WafError:
            Logs.error('Could not find the tool %s in the remote repository' % x)
def autoconfigure(execute_method):
    """Decorator for BuildContext.execute: when Configure.autoconfig is on,
    re-run 'configure' automatically if the lock file is missing, the project
    moved, or any recorded configuration input file changed."""
    def execute(self):
        if not Configure.autoconfig:
            return execute_method(self)
        env = ConfigSet.ConfigSet()
        do_config = False
        try:
            env.load(os.path.join(Context.top_dir, Options.lockfile))
        except Exception:
            Logs.warn('Configuring the project')
            do_config = True
        else:
            if env.run_dir != Context.run_dir:
                do_config = True
            else:
                # Hash the recorded input files and compare with the stored hash.
                h = 0
                for f in env['files']:
                    h = Utils.h_list((h, Utils.readf(f, 'rb')))
                do_config = h != env.hash
        if do_config:
            # Queue 'configure' followed by the current command, then bail out;
            # run_commands() will pick both up from Options.commands.
            Options.commands.insert(0, self.cmd)
            Options.commands.insert(0, 'configure')
            return
        return execute_method(self)
    return execute
# Install the wrapper on the build context at import time.
Build.BuildContext.execute = autoconfigure(Build.BuildContext.execute)
| gpl-2.0 |
FurCode/SubChannel | botcore/reloader.py | 33 | 2824 | import asyncio
import os.path
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class PluginReloader(object):
    """Watches a plugin directory and (re)loads/unloads plugins on changes.

    watchdog callbacks arrive on the observer thread; work is handed over to
    the bot's asyncio event loop via call_soon_threadsafe.
    """

    def __init__(self, bot):
        """
        :type bot: cloudbot.bot.CloudBot
        """
        self.observer = Observer()
        self.bot = bot
        # Paths with a debounce coroutine in flight (see _reload).
        self.reloading = set()
        self.event_handler = PluginEventHandler(self, patterns=["*.py"])

    def start(self, module_path):
        """Starts the plugin reloader

        :type module_path: str
        """
        self.observer.schedule(self.event_handler, module_path, recursive=False)
        self.observer.start()

    def stop(self):
        """Stops the plugin reloader"""
        self.observer.stop()

    def reload(self, path):
        """
        Loads or reloads a module, given its file path. Thread safe.

        :type path: str
        """
        if not os.path.isfile(path):
            # we check if the file still exists here because some programs modify a file before deleting
            return
        if isinstance(path, bytes):
            path = path.decode()
        # asyncio.async is the pre-3.4.4 name of ensure_future.
        self.bot.loop.call_soon_threadsafe(lambda: asyncio.async(self._reload(path), loop=self.bot.loop))

    def unload(self, path):
        """
        Unloads a module, given its file path. Thread safe.

        :type path: str
        """
        if isinstance(path, bytes):
            path = path.decode()
        self.bot.loop.call_soon_threadsafe(lambda: asyncio.async(self._unload(path), loop=self.bot.loop))

    @asyncio.coroutine
    def _reload(self, path):
        if path in self.reloading:
            # we already have a coroutine reloading
            return
        self.reloading.add(path)
        # we don't want to reload more than once every 200 milliseconds, so wait that long to make sure there
        # are no other file changes in that time.
        yield from asyncio.sleep(0.2)
        self.reloading.remove(path)
        yield from self.bot.plugin_manager.load_plugin(path)

    @asyncio.coroutine
    def _unload(self, path):
        yield from self.bot.plugin_manager.unload_plugin(path)
class PluginEventHandler(PatternMatchingEventHandler):
    """Forwards watchdog filesystem events for *.py files to the reloader."""

    def __init__(self, loader, *args, **kwargs):
        """
        :type loader: PluginReloader
        """
        super().__init__(*args, **kwargs)
        self.loader = loader

    def on_created(self, event):
        self.loader.reload(event.src_path)

    def on_deleted(self, event):
        self.loader.unload(event.src_path)

    def on_modified(self, event):
        self.loader.reload(event.src_path)

    def on_moved(self, event):
        # only load if it's moved to a .py file
        # (dest_path may be str or bytes depending on the watchdog backend)
        if event.dest_path.endswith(".py" if isinstance(event.dest_path, str) else b".py"):
            self.loader.reload(event.dest_path)
| gpl-2.0 |
maleficarium/youtube-dl | youtube_dl/extractor/closertotruth.py | 83 | 3095 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class CloserToTruthIE(InfoExtractor):
    """Extractor for closertotruth.com; videos are hosted on Kaltura, so
    results are delegated to the Kaltura extractor via kaltura: URLs."""

    _VALID_URL = r'https?://(?:www\.)?closertotruth\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://closertotruth.com/series/solutions-the-mind-body-problem#video-3688',
        'info_dict': {
            'id': '0_zof1ktre',
            'display_id': 'solutions-the-mind-body-problem',
            'ext': 'mov',
            'title': 'Solutions to the Mind-Body Problem?',
            'upload_date': '20140221',
            'timestamp': 1392956007,
            'uploader_id': 'CTTXML'
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://closertotruth.com/episodes/how-do-brains-work',
        'info_dict': {
            'id': '0_iuxai6g6',
            'display_id': 'how-do-brains-work',
            'ext': 'mov',
            'title': 'How do Brains Work?',
            'upload_date': '20140221',
            'timestamp': 1392956024,
            'uploader_id': 'CTTXML'
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://closertotruth.com/interviews/1725',
        'info_dict': {
            'id': '1725',
            'title': 'AyaFr-002',
        },
        'playlist_mincount': 2,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Kaltura partner id appears in an embedded player script URL.
        partner_id = self._search_regex(
            r'<script[^>]+src=["\'].*?\b(?:partner_id|p)/(\d+)',
            webpage, 'kaltura partner_id')
        title = self._search_regex(
            r'<title>(.+?)\s*\|\s*.+?</title>', webpage, 'video title')
        # A version <select> indicates multiple entries -> build a playlist.
        select = self._search_regex(
            r'(?s)<select[^>]+id="select-version"[^>]*>(.+?)</select>',
            webpage, 'select version', default=None)
        if select:
            entry_ids = set()
            entries = []
            for mobj in re.finditer(
                    r'<option[^>]+value=(["\'])(?P<id>[0-9a-z_]+)(?:#.+?)?\1[^>]*>(?P<title>[^<]+)',
                    webpage):
                entry_id = mobj.group('id')
                if entry_id in entry_ids:
                    # Skip duplicate <option> values.
                    continue
                entry_ids.add(entry_id)
                entries.append({
                    '_type': 'url_transparent',
                    'url': 'kaltura:%s:%s' % (partner_id, entry_id),
                    'ie_key': 'Kaltura',
                    'title': mobj.group('title'),
                })
            if entries:
                return self.playlist_result(entries, display_id, title)
        # Single-video page: the entry id lives on the embed anchor.
        entry_id = self._search_regex(
            r'<a[^>]+id=(["\'])embed-kaltura\1[^>]+data-kaltura=(["\'])(?P<id>[0-9a-z_]+)\2',
            webpage, 'kaltura entry_id', group='id')
        return {
            '_type': 'url_transparent',
            'display_id': display_id,
            'url': 'kaltura:%s:%s' % (partner_id, entry_id),
            'ie_key': 'Kaltura',
            'title': title
        }
| unlicense |
neudesk/neucloud | openstack_dashboard/dashboards/admin/flavors/extras/urls.py | 9 | 1194 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.admin.flavors.extras import views
# URL routes for the admin flavor-extras pages, mapped to class-based views.
urlpatterns = patterns('',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    url(r'^(?P<key>[^/]+)/edit/$', views.EditView.as_view(), name='edit')
)
| apache-2.0 |
eeshangarg/oh-mainline | vendor/packages/scrapy/scrapy/utils/reqser.py | 81 | 2081 | """
Helper functions for serializing (and deserializing) requests.
"""
from scrapy.http import Request
def request_to_dict(request, spider=None):
    """Convert Request object to a dict.

    If a spider is given, it will try to find out the name of the spider method
    used in the callback and store that as the callback.
    """
    cb = request.callback
    if callable(cb):
        # Serialize the bound callback as the spider method's name.
        cb = _find_method(spider, cb)
    eb = request.errback
    if callable(eb):
        eb = _find_method(spider, eb)
    d = {
        'url': request.url.decode('ascii'),  # urls should be safe (safe_string_url)
        'callback': cb,
        'errback': eb,
        'method': request.method,
        'headers': dict(request.headers),
        'body': request.body,
        'cookies': request.cookies,
        'meta': request.meta,
        '_encoding': request._encoding,
        'priority': request.priority,
        'dont_filter': request.dont_filter,
    }
    return d
def request_from_dict(d, spider=None):
    """Create Request object from a dict.

    If a spider is given, it will try to resolve the callbacks looking at the
    spider for methods with the same name.
    """
    cb = d['callback']
    if cb and spider:
        # Stored as a method name (see request_to_dict); resolve it back.
        cb = _get_method(spider, cb)
    eb = d['errback']
    if eb and spider:
        eb = _get_method(spider, eb)
    return Request(
        url=d['url'].encode('ascii'),
        callback=cb,
        errback=eb,
        method=d['method'],
        headers=d['headers'],
        body=d['body'],
        cookies=d['cookies'],
        meta=d['meta'],
        encoding=d['_encoding'],
        priority=d['priority'],
        dont_filter=d['dont_filter'])
def _find_method(obj, func):
if obj and hasattr(func, 'im_self') and func.im_self is obj:
return func.im_func.__name__
else:
raise ValueError("Function %s is not a method of: %s" % (func, obj))
def _get_method(obj, name):
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
raise ValueError("Method %r not found in: %s" % (name, obj))
| agpl-3.0 |
creativecommons/open-ledger | util/scheduled-snapshots/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio is about 25% of the ideal one, still much higher than the RDR
# Detection threshold: per the notes above, the typical observed ratio is
# about 25% of the ideal 3.79 (~0.95); 0.9 is presumably chosen slightly
# below that empirical value — TODO confirm against the detector that uses it.
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
# Size of the char-to-frequency-order table that follows.
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| mit |
ZhangXinNan/tensorflow | tensorflow/compiler/tests/segment_reduction_ops_test.py | 8 | 7759 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class SegmentReductionOpsTest(xla_test.XLATestCase):
  """Test cases for segment reduction ops."""
  def _segmentReduction(self, op, data, indices, num_segments):
    # Core driver: feed `data`/`indices` through placeholders, apply the
    # reduction `op` inside the XLA test scope, and evaluate it.
    # A plain Python int for `indices` is fed as a scalar (0-D) int32
    # placeholder so the scalar-index code path gets exercised.
    with self.test_session() as sess, self.test_scope():
      d = array_ops.placeholder(data.dtype, shape=data.shape)
      if isinstance(indices, int):
        i = array_ops.placeholder(np.int32, shape=[])
      else:
        i = array_ops.placeholder(indices.dtype, shape=indices.shape)
      return sess.run(op(d, i, num_segments), {d: data, i: indices})
  def _unsortedSegmentSum(self, data, indices, num_segments):
    # Convenience wrapper: run unsorted_segment_sum through the XLA driver.
    return self._segmentReduction(math_ops.unsorted_segment_sum, data, indices,
                                  num_segments)
  def _unsortedSegmentProd(self, data, indices, num_segments):
    # Convenience wrapper: run unsorted_segment_prod through the XLA driver.
    return self._segmentReduction(math_ops.unsorted_segment_prod, data, indices,
                                  num_segments)
  def _unsortedSegmentMin(self, data, indices, num_segments):
    # Convenience wrapper: run unsorted_segment_min through the XLA driver.
    return self._segmentReduction(math_ops.unsorted_segment_min, data, indices,
                                  num_segments)
  def _unsortedSegmentMax(self, data, indices, num_segments):
    # Convenience wrapper: run unsorted_segment_max through the XLA driver.
    return self._segmentReduction(math_ops.unsorted_segment_max, data, indices,
                                  num_segments)
  def testUnsortedSegmentSum0DIndices1DData(self):
    # Scalar index 2: the whole 1-D input lands in output row 2; the other
    # of the 4 segment rows stay zero.
    for dtype in self.numeric_types:
      self.assertAllClose(
          np.array(
              [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 1, 2, 3, 4, 5],
               [0, 0, 0, 0, 0, 0]],
              dtype=dtype),
          self._unsortedSegmentSum(
              np.array([0, 1, 2, 3, 4, 5], dtype=dtype), 2, 4))
  def testUnsortedSegmentSum1DIndices1DData(self):
    for dtype in self.numeric_types:
      self.assertAllClose(
          np.array([1, 3, 2, 9], dtype=dtype),
          self._unsortedSegmentSum(
              np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
              np.array([3, 0, 2, 1, 3, 3], dtype=np.int32), 4))
  def testUnsortedSegmentSum1DIndices1DDataNegativeIndices(self):
    # Elements whose segment id is negative are dropped from the sum.
    for dtype in self.numeric_types:
      self.assertAllClose(
          np.array([6, 3, 0, 6], dtype=dtype),
          self._unsortedSegmentSum(
              np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
              np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
  def testUnsortedSegmentSum1DIndices2DDataDisjoint(self):
    # All indices distinct: output rows are permuted input rows plus zeros.
    for dtype in self.numeric_types:
      data = np.array(
          [[0, 1, 2, 3], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43],
           [50, 51, 52, 53]],
          dtype=dtype)
      indices = np.array([8, 1, 0, 3, 7], dtype=np.int32)
      num_segments = 10
      y = self._unsortedSegmentSum(data, indices, num_segments)
      self.assertAllClose(
          np.array(
              [[30, 31, 32, 33], [20, 21, 22, 23], [0, 0, 0, 0],
               [40, 41, 42, 43], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
               [50, 51, 52, 53], [0, 1, 2, 3], [0, 0, 0, 0]],
              dtype=dtype), y)
  def testUnsortedSegmentSum1DIndices2DDataNonDisjoint(self):
    # Repeated indices: rows mapped to the same segment are accumulated.
    for dtype in self.numeric_types:
      data = np.array(
          [[0, 1, 2, 3], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43],
           [50, 51, 52, 53]],
          dtype=dtype)
      indices = np.array([0, 1, 2, 0, 1], dtype=np.int32)
      num_segments = 4
      y = self._unsortedSegmentSum(data, indices, num_segments)
      self.assertAllClose(
          np.array(
              [[40, 42, 44, 46], [70, 72, 74, 76], [30, 31, 32, 33],
               [0, 0, 0, 0]],
              dtype=dtype), y)
  def testUnsortedSegmentSum2DIndices3DData(self):
    # 2-D indices over 3-D data: each (i, j) index scatters the innermost
    # vector data[i, j, :] into the matching segment row.
    for dtype in self.numeric_types:
      data = np.array(
          [[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]], [[
              200, 201, 202
          ], [210, 211, 212]], [[300, 301, 302], [310, 311, 312]]],
          dtype=dtype)
      indices = np.array([[3, 5], [3, 1], [5, 0], [6, 2]], dtype=np.int32)
      num_segments = 8
      y = self._unsortedSegmentSum(data, indices, num_segments)
      self.assertAllClose(
          np.array(
              [[210, 211, 212], [110, 111, 112], [310, 311, 312], [
                  100, 102, 104
              ], [0, 0, 0.], [210, 212, 214], [300, 301, 302], [0, 0, 0]],
              dtype=dtype), y)
  def testUnsortedSegmentSum1DIndices3DData(self):
    # 1-D indices over 3-D data: whole 2-D slices are scattered per index.
    for dtype in self.numeric_types:
      data = np.array(
          [[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]], [[
              200, 201, 202
          ], [210, 211, 212]], [[300, 301, 302], [310, 311, 312]]],
          dtype=dtype)
      indices = np.array([3, 0, 2, 5], dtype=np.int32)
      num_segments = 6
      y = self._unsortedSegmentSum(data, indices, num_segments)
      self.assertAllClose(
          np.array(
              [[[100, 101, 102.], [110, 111, 112]], [[0, 0, 0], [0, 0, 0]],
               [[200, 201, 202], [210, 211, 212]], [[0, 1, 2.], [10, 11, 12]],
               [[0, 0, 0], [0, 0, 0]], [[300, 301, 302], [310, 311, 312]]],
              dtype=dtype), y)
  def testUnsortedSegmentSumShapeError(self):
    # indices shape (3, 2) is not a prefix of data shape (4, 8, 7), so
    # graph construction must fail with ValueError.
    for dtype in self.numeric_types:
      data = np.ones((4, 8, 7), dtype=dtype)
      indices = np.ones((3, 2), dtype=np.int32)
      num_segments = 4
      self.assertRaises(
          ValueError,
          functools.partial(self._segmentReduction,
                            math_ops.unsorted_segment_sum, data, indices,
                            num_segments))
  def testUnsortedSegmentOps1DIndices1DDataNegativeIndices(self):
    """Tests for min, max, and prod ops.
    These share most of their implementation with sum, so we only test basic
    functionality.
    """
    for dtype in self.numeric_types:
      self.assertAllClose(
          np.array([8, 3, 1, 0], dtype=dtype),
          self._unsortedSegmentProd(
              np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
              np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
    # min/max use the dtype's extreme value as the identity for empty
    # segments, so restrict to types where min/max are defined.
    for dtype in self.int_types | self.float_types:
      minval = dtypes.as_dtype(dtype).min
      maxval = dtypes.as_dtype(dtype).max
      self.assertAllClose(
          np.array([2, 3, maxval, 0], dtype=dtype),
          self._unsortedSegmentMin(
              np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
              np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
      self.assertAllClose(
          np.array([4, 3, minval, 6], dtype=dtype),
          self._unsortedSegmentMax(
              np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
              np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
if __name__ == "__main__":
  # Run the segment-reduction test suite under Google's test runner.
  googletest.main()
| apache-2.0 |
georgestarcher/TA-SyncKVStore | bin/ta_synckvstore/solnlib/user_access.py | 6 | 30144 | # Copyright 2016 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Splunk user access control related utilities.
'''
import json
import re
from . import splunk_rest_client as rest_client
from .packages.splunklib import binding
from .utils import retry
# Public API of this module.
__all__ = ['ObjectACLException',
           'ObjectACL',
           'ObjectACLManagerException',
           'ObjectACLManager',
           'AppCapabilityManagerException',
           'AppCapabilityManager',
           'UserAccessException',
           'check_user_access',
           'InvalidSessionKeyException',
           'get_current_username',
           'UserNotExistException',
           'get_user_capabilities',
           'user_is_capable',
           'get_user_roles']
class ObjectACLException(Exception):
    '''Raised when an `ObjectACL` is given invalid perms.'''
    pass
class ObjectACL(object):
    '''Object ACL record.

    :param obj_collection: Collection where object currently stored.
    :type obj_collection: ``string``
    :param obj_id: ID of this object.
    :type obj_id: ``string``
    :param obj_type: Type of this object.
    :type obj_type: ``string``
    :param obj_app: App of this object.
    :type obj_app: ``string``
    :param obj_owner: Owner of this object.
    :type obj_owner: ``string``
    :param obj_perms: Object perms, like: {
        'read': ['*'],
        'write': ['admin'],
        'delete': ['admin']}.
    :type obj_perms: ``dict``
    :param obj_shared_by_inclusion: Flag of object is shared by inclusion.
    :type obj_shared_by_inclusion: ``bool``

    Usage::

       >>> from solnlib import user_access
       >>> obj_acl = user_access.ObjectACL(
       >>>    'test_collection',
       >>>    '9defa6f510d711e6be16a45e60e34295',
       >>>    'test_object',
       >>>    'Splunk_TA_test',
       >>>    'admin',
       >>>    {'read': ['*'], 'write': ['admin'], 'delete': ['admin']},
       >>>    False)
    '''

    # Field names used in the stored KVStore record.
    OBJ_COLLECTION_KEY = 'obj_collection'
    OBJ_ID_KEY = 'obj_id'
    OBJ_TYPE_KEY = 'obj_type'
    OBJ_APP_KEY = 'obj_app'
    OBJ_OWNER_KEY = 'obj_owner'
    OBJ_PERMS_KEY = 'obj_perms'
    OBJ_PERMS_READ_KEY = 'read'
    OBJ_PERMS_WRITE_KEY = 'write'
    OBJ_PERMS_DELETE_KEY = 'delete'
    # Wildcard meaning "any user" in a perms list.
    OBJ_PERMS_ALLOW_ALL = '*'
    OBJ_SHARED_BY_INCLUSION_KEY = 'obj_shared_by_inclusion'

    def __init__(self, obj_collection, obj_id, obj_type,
                 obj_app, obj_owner, obj_perms, obj_shared_by_inclusion):
        self.obj_collection = obj_collection
        self.obj_id = obj_id
        self.obj_type = obj_type
        self.obj_app = obj_app
        self.obj_owner = obj_owner
        self._check_perms(obj_perms)
        # NOTE: the caller's dict is stored by reference (no copy), matching
        # the historical behavior callers may rely on.
        self._obj_perms = obj_perms
        self.obj_shared_by_inclusion = obj_shared_by_inclusion

    @classmethod
    def _check_perms(cls, obj_perms):
        # Validate that perms is a dict carrying all three operation keys.
        if not isinstance(obj_perms, dict):
            raise ObjectACLException(
                'Invalid object acl perms type: %s, should be a dict.' %
                type(obj_perms))

        if not (cls.OBJ_PERMS_READ_KEY in obj_perms and
                cls.OBJ_PERMS_WRITE_KEY in obj_perms and
                cls.OBJ_PERMS_DELETE_KEY in obj_perms):
            raise ObjectACLException(
                'Invalid object acl perms: %s, '
                'should include read, write and delete perms.' % obj_perms)

    @property
    def obj_perms(self):
        return self._obj_perms

    @obj_perms.setter
    def obj_perms(self, obj_perms):
        self._check_perms(obj_perms)
        self._obj_perms = obj_perms

    @property
    def record(self):
        '''Get object acl record.

        :returns: Object acl record, like: {
            '_key': 'test_collection_1234',
            'obj_collection': 'test_collection',
            'obj_id': '1234',
            'obj_type': 'test_object',
            'obj_app': 'Splunk_TA_test',
            'obj_owner': 'admin',
            'obj_perms': {'read': ['*'], 'write': ['admin'], 'delete': ['admin']},
            'obj_shared_by_inclusion': True}
        :rtype: ``dict``
        '''
        return {
            '_key': self.generate_key(self.obj_collection, self.obj_id),
            self.OBJ_COLLECTION_KEY: self.obj_collection,
            self.OBJ_ID_KEY: self.obj_id,
            self.OBJ_TYPE_KEY: self.obj_type,
            self.OBJ_APP_KEY: self.obj_app,
            self.OBJ_OWNER_KEY: self.obj_owner,
            self.OBJ_PERMS_KEY: self._obj_perms,
            self.OBJ_SHARED_BY_INCLUSION_KEY: self.obj_shared_by_inclusion}

    @staticmethod
    def generate_key(obj_collection, obj_id):
        '''Generate object acl record key.

        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_id: ID of this object.
        :type obj_id: ``string``
        :returns: Object acl record key, like: 'test_collection_1234'.
        :rtype: ``string``
        '''
        return '{obj_collection}_{obj_id}'.format(
            obj_collection=obj_collection, obj_id=obj_id)

    @staticmethod
    def parse(obj_acl_record):
        '''Parse object acl record and construct a new `ObjectACL` object from it.

        :param obj_acl_record: Object acl record, see `ObjectACL.record`.
        :type obj_acl_record: ``dict``
        :returns: New `ObjectACL` object.
        :rtype: `ObjectACL`
        '''
        return ObjectACL(
            obj_acl_record[ObjectACL.OBJ_COLLECTION_KEY],
            obj_acl_record[ObjectACL.OBJ_ID_KEY],
            obj_acl_record[ObjectACL.OBJ_TYPE_KEY],
            obj_acl_record[ObjectACL.OBJ_APP_KEY],
            obj_acl_record[ObjectACL.OBJ_OWNER_KEY],
            obj_acl_record[ObjectACL.OBJ_PERMS_KEY],
            obj_acl_record[ObjectACL.OBJ_SHARED_BY_INCLUSION_KEY])

    def merge(self, obj_acl):
        '''Merge current object perms with perms of `obj_acl`.

        The merged user lists replace the current perms in place; any list
        containing the '*' wildcard collapses to ['*'].

        :param obj_acl: Object acl to merge.
        :type obj_acl: ``ObjectACL``
        '''
        for perm_key in self._obj_perms:
            merged = set(self._obj_perms[perm_key])
            # Use the public property instead of reaching into the other
            # instance's private attribute.
            merged.update(obj_acl.obj_perms[perm_key])
            if self.OBJ_PERMS_ALLOW_ALL in merged:
                self._obj_perms[perm_key] = [self.OBJ_PERMS_ALLOW_ALL]
            else:
                self._obj_perms[perm_key] = list(merged)

    def __str__(self):
        return json.dumps(self.record)
@retry(exceptions=[binding.HTTPError])
def _get_collection_data(collection_name, session_key, app, owner,
                         scheme, host, port, **context):
    '''Return the KVStore data handle for `collection_name`, creating the
    collection on the fly when it does not exist yet.

    :raises KeyError: If the collection cannot be located after creation.
    '''
    client = rest_client.SplunkRestClient(session_key,
                                          app,
                                          owner=owner,
                                          scheme=scheme,
                                          host=host,
                                          port=port,
                                          **context)
    kvstore = client.kvstore

    # Sanitize: collapse every run of non-word characters to '_'.
    collection_name = re.sub(r'[^\w]+', '_', collection_name)

    try:
        kvstore.get(name=collection_name)
    except binding.HTTPError as e:
        if e.status != 404:
            raise
        # 404 means the collection is missing: create it, then look it up.
        kvstore.create(collection_name)

    for collection in kvstore.list(search=collection_name):
        if collection.name == collection_name:
            return collection.data
    raise KeyError('Get collection data: %s failed.' % collection_name)
class ObjectACLManagerException(Exception):
    '''Raised when an `ObjectACLManager` cannot be initialized.'''
    pass
class ObjectACLNotExistException(Exception):
    '''Raised when a requested object ACL record does not exist.'''
    pass
class ObjectACLManager(object):
    '''Object ACL manager.

    :param collection_name: Collection name to store object ACL info.
    :type collection_name: ``string``
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param app: App name of namespace.
    :type app: ``string``
    :param owner: (optional) Owner of namespace, default is `nobody`.
    :type owner: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``

    :raises ObjectACLManagerException: If init ObjectACLManager failed.

    Usage::

       >>> from solnlib import user_access
       >>> oaclm = user_access.ObjectACLManager(session_key,
                                                'Splunk_TA_test')
    '''

    def __init__(self, collection_name, session_key, app, owner='nobody',
                 scheme=None, host=None, port=None, **context):
        collection_name = '{app}_{collection_name}'.format(
            app=app, collection_name=collection_name)
        try:
            self._collection_data = _get_collection_data(
                collection_name, session_key, app, owner,
                scheme, host, port, **context)
        except KeyError:
            raise ObjectACLManagerException(
                'Get object acl collection: %s fail.' % collection_name)

    def _build_obj_acl(self, obj_collection, obj_id, obj_type, obj_app,
                       obj_owner, obj_perms, obj_shared_by_inclusion,
                       replace_existing):
        # Construct the new ACL and, unless replacing, merge in any ACL
        # already stored for (obj_collection, obj_id).
        obj_acl = ObjectACL(
            obj_collection, obj_id, obj_type,
            obj_app, obj_owner, obj_perms, obj_shared_by_inclusion)
        if not replace_existing:
            try:
                old_obj_acl = self.get_acl(obj_collection, obj_id)
            except ObjectACLNotExistException:
                old_obj_acl = None
            if old_obj_acl:
                obj_acl.merge(old_obj_acl)
        return obj_acl

    @staticmethod
    def _ids_query(obj_collection, obj_ids):
        # Build the KVStore '$or' query matching the ACL record key of
        # every object id.
        return json.dumps(
            {'$or': [{'_key': ObjectACL.generate_key(obj_collection, obj_id)}
                     for obj_id in obj_ids]})

    @retry(exceptions=[binding.HTTPError])
    def update_acl(self, obj_collection, obj_id, obj_type, obj_app, obj_owner,
                   obj_perms, obj_shared_by_inclusion=True, replace_existing=True):
        '''Update acl info of object.

        Construct a new object acl info first; if `replace_existing` is True
        then replace existing acl info, else merge the new object acl info
        with the old one and store the merged result.

        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_id: ID of this object.
        :type obj_id: ``string``
        :param obj_type: Type of this object.
        :type obj_type: ``string``
        :param obj_app: App of this object.
        :type obj_app: ``string``
        :param obj_owner: Owner of this object.
        :type obj_owner: ``string``
        :param obj_perms: Object perms, like: {
            'read': ['*'],
            'write': ['admin'],
            'delete': ['admin']}.
        :type obj_perms: ``dict``
        :param obj_shared_by_inclusion: (optional) Flag of object is shared by
            inclusion, default is True.
        :type obj_shared_by_inclusion: ``bool``
        :param replace_existing: (optional) Replace existing acl info flag, True
            indicates replace old acl info with new one else merge with old acl
            info, default is True.
        :type replace_existing: ``bool``
        '''
        obj_acl = self._build_obj_acl(
            obj_collection, obj_id, obj_type, obj_app, obj_owner,
            obj_perms, obj_shared_by_inclusion, replace_existing)
        self._collection_data.batch_save(obj_acl.record)

    @retry(exceptions=[binding.HTTPError])
    def update_acls(self, obj_collection, obj_ids, obj_type, obj_app, obj_owner,
                    obj_perms, obj_shared_by_inclusion=True, replace_existing=True):
        '''Batch update object acl info to all provided `obj_ids`.

        :param obj_collection: Collection where objects currently stored.
        :type obj_collection: ``string``
        :param obj_ids: IDs list of objects.
        :type obj_ids: ``list``
        :param obj_type: Type of these objects.
        :type obj_type: ``string``
        :param obj_app: App of these objects.
        :type obj_app: ``string``
        :param obj_owner: Owner of these objects.
        :type obj_owner: ``string``
        :param obj_perms: Object perms, like: {
            'read': ['*'],
            'write': ['admin'],
            'delete': ['admin']}.
        :type obj_perms: ``dict``
        :param obj_shared_by_inclusion: (optional) Flag of object is shared by
            inclusion, default is True.
        :type obj_shared_by_inclusion: ``bool``
        :param replace_existing: (optional) Replace existing acl info flag, True
            indicates replace old acl info with new one else merge with old acl
            info, default is True.
        :type replace_existing: ``bool``
        '''
        obj_acl_records = [
            self._build_obj_acl(
                obj_collection, obj_id, obj_type, obj_app, obj_owner,
                obj_perms, obj_shared_by_inclusion, replace_existing).record
            for obj_id in obj_ids]
        self._collection_data.batch_save(*obj_acl_records)

    @retry(exceptions=[binding.HTTPError])
    def get_acl(self, obj_collection, obj_id):
        '''Get acl info.

        Query object acl info with parameter of the combination of
        `obj_collection` and `obj_id` from `self.collection_name` and
        return it.

        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_id: ID of this object.
        :type obj_id: ``string``
        :returns: Object acl info.
        :rtype: ``ObjectACL``

        :raises ObjectACLNotExistException: If object ACL info does not exist.
        '''
        key = ObjectACL.generate_key(obj_collection, obj_id)
        try:
            obj_acl = self._collection_data.query_by_id(key)
        except binding.HTTPError as e:
            if e.status != 404:
                raise
            raise ObjectACLNotExistException(
                'Object ACL info of %s_%s does not exist.' %
                (obj_collection, obj_id))
        return ObjectACL.parse(obj_acl)

    @retry(exceptions=[binding.HTTPError])
    def get_acls(self, obj_collection, obj_ids):
        '''Batch get acl info.

        Query objects acl info with parameter of the combination of
        `obj_collection` and `obj_ids` from KVStore and return them.

        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_ids: IDs of objects.
        :type obj_ids: ``list``
        :returns: List of `ObjectACL` instances.
        :rtype: ``list``
        '''
        query = self._ids_query(obj_collection, obj_ids)
        obj_acls = self._collection_data.query(query=query)
        return [ObjectACL.parse(obj_acl) for obj_acl in obj_acls]

    @retry(exceptions=[binding.HTTPError])
    def delete_acl(self, obj_collection, obj_id):
        '''Delete acl info.

        Query object acl info with parameter of the combination of
        `obj_collection` and `obj_id` from KVStore and delete it.

        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_id: ID of this object.
        :type obj_id: ``string``

        :raises ObjectACLNotExistException: If object ACL info does not exist.
        '''
        key = ObjectACL.generate_key(obj_collection, obj_id)
        try:
            self._collection_data.delete_by_id(key)
        except binding.HTTPError as e:
            if e.status != 404:
                raise
            raise ObjectACLNotExistException(
                'Object ACL info of %s_%s does not exist.' %
                (obj_collection, obj_id))

    @retry(exceptions=[binding.HTTPError])
    def delete_acls(self, obj_collection, obj_ids):
        '''Batch delete acl info.

        Query objects acl info with parameter of the combination of
        `obj_collection` and `obj_ids` from KVStore and delete them.

        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_ids: IDs of objects.
        :type obj_ids: ``list``
        '''
        self._collection_data.delete(
            query=self._ids_query(obj_collection, obj_ids))

    @retry(exceptions=[binding.HTTPError])
    def get_accessible_object_ids(self, user, operation, obj_collection, obj_ids):
        '''Get accessible IDs of objects from `obj_acls`.

        :param user: User name of current `operation`.
        :type user: ``string``
        :param operation: User operation, possible option: (read/write/delete).
        :type operation: ``string``
        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_ids: IDs of objects.
        :type obj_ids: ``list``
        :returns: List of IDs of accessible objects.
        :rtype: ``list``
        '''
        obj_acls = self.get_acls(obj_collection, obj_ids)
        accessible_obj_ids = []
        for obj_acl in obj_acls:
            perms = obj_acl.obj_perms[operation]
            # '*' grants the operation to every user.
            if ObjectACL.OBJ_PERMS_ALLOW_ALL in perms or user in perms:
                accessible_obj_ids.append(obj_acl.obj_id)
        return accessible_obj_ids
class AppCapabilityManagerException(Exception):
    '''Raised when an `AppCapabilityManager` cannot be initialized.'''
    pass
class AppCapabilityNotExistException(Exception):
    '''Raised when an app has no registered capabilities.'''
    pass
class AppCapabilityManager(object):
    '''App capability manager.

    Stores and retrieves per-app capability maps in a KVStore collection,
    one record per app keyed by the app name.

    :param collection_name: Collection name to store capabilities.
    :type collection_name: ``string``
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param app: App name of namespace.
    :type app: ``string``
    :param owner: (optional) Owner of namespace, default is `nobody`.
    :type owner: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``

    :raises AppCapabilityManagerException: If init AppCapabilityManager failed.

    Usage::

       >>> from solnlib import user_access
       >>> acm = user_access.AppCapabilityManager('test_collection',
                                                  session_key,
                                                  'Splunk_TA_test')
       >>> acm.register_capabilities(...)
       >>> acm.unregister_capabilities(...)
    '''

    def __init__(self, collection_name, session_key, app, owner='nobody',
                 scheme=None, host=None, port=None, **context):
        self._app = app

        full_name = '{app}_{collection_name}'.format(
            app=app, collection_name=collection_name)
        try:
            self._collection_data = _get_collection_data(
                full_name, session_key, app, owner,
                scheme, host, port, **context)
        except KeyError:
            raise AppCapabilityManagerException(
                'Get app capabilities collection: %s failed.' % full_name)

    @retry(exceptions=[binding.HTTPError])
    def register_capabilities(self, capabilities):
        '''Register app capabilities.

        :param capabilities: App capabilities, example: {
            'object_type1': {
                'read': 'read_app_object_type1',
                'write': 'write_app_object_type1',
                'delete': 'delete_app_object_type1'},
            'object_type2': {
                'read': 'read_app_object_type2',
                'write': 'write_app_object_type2',
                'delete': 'delete_app_object_type2'},
            ...}
        :type capabilities: ``dict``
        '''
        self._collection_data.batch_save(
            {'_key': self._app, 'capabilities': capabilities})

    @retry(exceptions=[binding.HTTPError])
    def unregister_capabilities(self):
        '''Unregister app capabilities.

        :raises AppCapabilityNotExistException: If app capabilities are
            not registered.
        '''
        try:
            self._collection_data.delete_by_id(self._app)
        except binding.HTTPError as exc:
            if exc.status == 404:
                raise AppCapabilityNotExistException(
                    'App capabilities for %s have not been registered.' %
                    self._app)
            raise

    @retry(exceptions=[binding.HTTPError])
    def capabilities_are_registered(self):
        '''Check if app capabilities are registered.

        :returns: True if app capabilities are registered else False.
        :rtype: ``bool``
        '''
        try:
            self._collection_data.query_by_id(self._app)
            return True
        except binding.HTTPError as exc:
            if exc.status != 404:
                raise
            return False

    @retry(exceptions=[binding.HTTPError])
    def get_capabilities(self):
        '''Get app capabilities.

        :returns: App capabilities.
        :rtype: ``dict``

        :raises AppCapabilityNotExistException: If app capabilities are
            not registered.
        '''
        try:
            entry = self._collection_data.query_by_id(self._app)
        except binding.HTTPError as exc:
            if exc.status == 404:
                raise AppCapabilityNotExistException(
                    'App capabilities for %s have not been registered.' %
                    self._app)
            raise
        return entry['capabilities']
class UserAccessException(Exception):
    '''Raised when a user lacks the capability for an operation.'''
    pass
def check_user_access(session_key, capabilities, obj_type, operation,
                      scheme=None, host=None, port=None, **context):
    '''User access checker.

    Resolves the current user from `session_key`, looks up the capability
    registered for (`obj_type`, `operation`) in `capabilities`, and raises
    unless the user holds that capability.

    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param capabilities: App capabilities, example: {
        'object_type1': {
            'read': 'read_app_object_type1',
            'write': 'write_app_object_type1',
            'delete': 'delete_app_object_type1'},
        'object_type2': {
            'read': 'read_app_object_type2',
            'write': 'write_app_object_type2',
            'delete': 'delete_app_object_type2'},
        ...}
    :type capabilities: ``dict``
    :param obj_type: Object type.
    :type obj_type: ``string``
    :param operation: User operation, possible option: (read/write/delete).
    :type operation: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``

    :raises UserAccessException: If user access permission is denied.

    Usage::

       >>> from solnlib.user_access import check_user_access
       >>> def fun():
       >>>     check_user_access(
       >>>         session_key, capabilities, 'test_object', 'read')
       >>>     ...
    '''
    username = get_current_username(
        session_key, scheme=scheme, host=host, port=port, **context)
    required_capability = capabilities[obj_type][operation]
    is_capable = user_is_capable(
        session_key, username, required_capability,
        scheme=scheme, host=host, port=port, **context)
    if is_capable:
        return
    raise UserAccessException(
        'Permission denied, %s does not have the capability: %s.' %
        (username, required_capability))
class InvalidSessionKeyException(Exception):
    '''Raised when a Splunk session key is invalid (HTTP 401).'''
    pass
@retry(exceptions=[binding.HTTPError])
def get_current_username(session_key,
                         scheme=None, host=None, port=None, **context):
    '''Get current user name from `session_key`.

    Queries the `authentication/current-context` endpoint and extracts the
    user name from the JSON response.

    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :returns: Current user name.
    :rtype: ``string``

    :raises InvalidSessionKeyException: If `session_key` is invalid.

    Usage::

       >>> from solnlib import user_access
       >>> user_name = user_access.get_current_username(session_key)
    '''
    client = rest_client.SplunkRestClient(
        session_key,
        '-',
        scheme=scheme,
        host=host,
        port=port,
        **context)
    try:
        content = client.get('/services/authentication/current-context',
                             output_mode='json').body.read()
    except binding.HTTPError as exc:
        # 401 means the token itself is bad; anything else is unexpected.
        if exc.status == 401:
            raise InvalidSessionKeyException('Invalid session key.')
        raise
    entry = json.loads(content)['entry'][0]
    return entry['content']['username']
class UserNotExistException(Exception):
    '''Raised when a queried Splunk user does not exist (HTTP 404).'''
    pass
@retry(exceptions=[binding.HTTPError])
def get_user_capabilities(session_key, username,
                          scheme=None, host=None, port=None, **context):
    '''Get user capabilities.

    Queries the `authentication/users/<username>` endpoint and extracts the
    capability list from the JSON response.

    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param username: User name of capabilities to get.
    :type username: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :returns: User capabilities.
    :rtype: ``list``

    :raises UserNotExistException: If `username` does not exist.

    Usage::

       >>> from solnlib import user_access
       >>> user_capabilities = user_access.get_user_capabilities(
       >>>     session_key, 'test_user')
    '''
    client = rest_client.SplunkRestClient(
        session_key,
        '-',
        scheme=scheme,
        host=host,
        port=port,
        **context)
    try:
        content = client.get(
            '/services/authentication/users/%s' % username,
            output_mode='json').body.read()
    except binding.HTTPError as exc:
        # 404 means the user record is absent; anything else is unexpected.
        if exc.status == 404:
            raise UserNotExistException('User: %s does not exist.' % username)
        raise
    return json.loads(content)['entry'][0]['content']['capabilities']
def user_is_capable(session_key, username, capability,
                    scheme=None, host=None, port=None, **context):
    '''Return True if `username` owns the given `capability`.

    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param username: (optional) User name of roles to get.
    :type username: ``string``
    :param capability: The capability we wish to check for.
    :type capability: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :returns: True if user is capable else False.
    :rtype: ``bool``
    :raises UserNotExistException: If `username` does not exist.

    Usage::
       >>> from solnlib import user_access
       >>> is_capable = user_access.user_is_capable(
       >>>     session_key, 'test_user', 'object_read_capability')
    '''
    owned_capabilities = get_user_capabilities(
        session_key, username, scheme=scheme, host=host, port=port, **context)
    return capability in owned_capabilities
@retry(exceptions=[binding.HTTPError])
def get_user_roles(session_key, username,
                   scheme=None, host=None, port=None, **context):
    '''Return the list of roles assigned to `username`.

    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param username: (optional) User name of roles to get.
    :type username: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :returns: User roles.
    :rtype: ``list``
    :raises UserNotExistException: If `username` does not exist.

    Usage::
       >>> from solnlib import user_access
       >>> user_roles = user_access.get_user_roles(session_key, 'test_user')
    '''
    client = rest_client.SplunkRestClient(
        session_key,
        '-',
        scheme=scheme,
        host=host,
        port=port,
        **context)
    endpoint = '/services/authentication/users/{username}'.format(
        username=username)
    try:
        content = client.get(endpoint, output_mode='json').body.read()
    except binding.HTTPError as exc:
        # A 404 maps to "user not found"; re-raise anything else.
        if exc.status == 404:
            raise UserNotExistException('User: %s does not exist.' % username)
        raise

    return json.loads(content)['entry'][0]['content']['roles']
| mit |
maurizi/otm-core | opentreemap/otm1_migrator/migration_rules/philadelphia.py | 12 | 3768 | from otm1_migrator.migration_rules.standard_otm1 import MIGRATION_RULES
from treemap.models import ITreeCodeOverride, ITreeRegion, User
# Mapping of OTM1 model columns to OTM2 user-defined fields (UDFs).
# Top-level keys are the OTM1 model names ('plot'/'tree'); each inner key
# is the OTM1 column name, 'udf.name' is the OTM2 display name and
# 'udf.choices' (when present) restricts the field to a fixed choice list.
UDFS = {
    'plot': {
        'owner_additional_id': {
            'udf.name': 'Owner Additional Id'
        },
        'owner_additional_properties': {
            'udf.name': 'Owner Additional Properties'
        },
        'type': {
            'udf.name': 'Plot Type',
            'udf.choices': ['Well/Pit', 'Median/Island', 'Tree Lawn',
                            'Park', 'Planter', 'Other', 'Yard',
                            'Natural Area']
        },
        'powerline_conflict_potential': {
            'udf.name': 'Powerlines Overhead',
            'udf.choices': ['Yes', 'No', 'Unknown']
        },
        'sidewalk_damage': {
            'udf.name': 'Sidewalk Damage',
            'udf.choices': ['Minor or No Damage', 'Raised More Than 3/4 Inch']
        }
    },
    'tree': {
        'sponsor': {'udf.name': 'Sponsor'},
        'projects': {'udf.name': 'Projects'},
        'canopy_condition': {
            'udf.name': 'Canopy Condition',
            'udf.choices': ['Full - No Gaps',
                            'Small Gaps (up to 25% missing)',
                            'Moderate Gaps (up to 50% missing)',
                            'Large Gaps (up to 75% missing)',
                            'Little or None (up to 100% missing)']
        },
        'condition': {
            'udf.name': 'Tree Condition',
            'udf.choices': ['Dead', 'Critical', 'Poor',
                            'Fair', 'Good',
                            'Very Good', 'Excellent']
        }
    }
}
# Display sort order for township boundaries, keyed by county name.
# Values start at 3 because 1 is reserved for counties and 2 for
# Philadelphia neighborhoods (see mutate_boundary in this module).
SORT_ORDER_INDEX = {
    'Bucks': 3,
    'Burlington': 4,
    'Camden': 5,
    'Chester': 6,
    'Delaware': 7,
    'Gloucester': 8,
    'Kent': 9,
    'Mercer': 10,
    'Montgomery': 11,
    'New Castle': 12,
    'Salem': 13,
    'Sussex': 14,
}
def create_override(species_obj, species_dict):
    """Attach an i-Tree code override to a migrated species for each of the
    two i-Tree regions relevant to the Philadelphia instance, then return
    the species object unchanged.
    """
    itree_code = species_dict['fields']['itree_code']
    for region_code in ('NoEastXXX', 'PiedmtCLT'):
        code_override = ITreeCodeOverride(
            instance_species_id=species_obj.pk,
            region=ITreeRegion.objects.get(code=region_code),
            itree_code=itree_code)
        code_override.save_with_user(User.system_user())
    return species_obj
# Register create_override to run after each species row is saved so every
# migrated species gets its i-Tree code overrides.
MIGRATION_RULES['species']['postsave_actions'] = (MIGRATION_RULES['species']
                                                  .get('postsave_actions', [])
                                                  + [create_override])
def mutate_boundary(boundary_obj, boundary_dict):
    """Assign a category and sort order to an OTM1 boundary before saving.

    Rules, in priority order:
    * a name containing 'County' (or exactly 'Philadelphia') is a county,
    * anything whose OTM1 ``county`` field is 'Philadelphia' is a
      neighborhood of the city,
    * everything else is a township, ordered via SORT_ORDER_INDEX.

    Returns the (mutated) boundary object.
    """
    otm1_fields = boundary_dict.get('fields')
    # Idiom fix: use the 'in' operator instead of str.find() != -1.
    if 'County' in boundary_obj.name or boundary_obj.name == 'Philadelphia':
        boundary_obj.category = 'County'
        boundary_obj.sort_order = 1
    elif otm1_fields['county'] == 'Philadelphia':
        boundary_obj.category = 'Philadelphia Neighborhood'
        boundary_obj.sort_order = 2
    else:
        county = otm1_fields['county']
        boundary_obj.category = county + ' Township'
        # KeyError here means the fixture contains an unexpected county.
        boundary_obj.sort_order = SORT_ORDER_INDEX[county]
    return boundary_obj
# Register mutate_boundary to run on each boundary row before it is saved.
MIGRATION_RULES['boundary']['presave_actions'] = (MIGRATION_RULES['boundary']
                                                  .get('presave_actions', [])
                                                  + [mutate_boundary])

# 'other' is absent from the Philadelphia species fixture, so mark it missing.
MIGRATION_RULES['species']['missing_fields'] |= {'other'}

# these fields don't exist in the ptm fixture, so can't be specified
# as a value that gets discarded. Remove them.
MIGRATION_RULES['species']['removed_fields'] -= {'family'}
MIGRATION_RULES['tree']['removed_fields'] -= {'pests', 'url'}

# this field doesn't exist, so can no longer have a to -> from def
del MIGRATION_RULES['species']['renamed_fields']['other_part_of_name']
| agpl-3.0 |
rjeli/scikit-image | skimage/draw/tests/test_draw3d.py | 37 | 4429 | import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from nose.tools import raises
from skimage.draw import ellipsoid, ellipsoid_stats
@raises(ValueError)
def test_ellipsoid_sign_parameters1():
    # A negative semi-axis length must be rejected.
    ellipsoid(-1, 2, 2)


@raises(ValueError)
def test_ellipsoid_sign_parameters2():
    # A zero semi-axis length must be rejected.
    ellipsoid(0, 2, 2)


@raises(ValueError)
def test_ellipsoid_sign_parameters3():
    # Multiple invalid (negative) semi-axes must also be rejected.
    ellipsoid(-3, -2, 2)
def test_ellipsoid_bool():
    # Compare the rasterized boolean ellipsoid against a hand-written 5x5x5
    # expected volume. The anisotropic call doubles the last semi-axis while
    # doubling the spacing on that axis, so it must produce the same voxels.
    test = ellipsoid(2, 2, 2)[1:-1, 1:-1, 1:-1]
    test_anisotropic = ellipsoid(2, 2, 4, spacing=(1., 1., 2.))
    test_anisotropic = test_anisotropic[1:-1, 1:-1, 1:-1]

    expected = np.array([[[0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0],
                          [0, 0, 1, 0, 0],
                          [0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0]],

                         [[0, 0, 0, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 0, 0, 0]],

                         [[0, 0, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [1, 1, 1, 1, 1],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 0, 0]],

                         [[0, 0, 0, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 0, 0, 0]],

                         [[0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0],
                          [0, 0, 1, 0, 0],
                          [0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0]]])

    assert_array_equal(test, expected.astype(bool))
    assert_array_equal(test_anisotropic, expected.astype(bool))
def test_ellipsoid_levelset():
    # Same geometry as test_ellipsoid_bool, but with levelset=True the
    # function returns the signed level-set values (negative inside,
    # zero on the surface, positive outside).
    test = ellipsoid(2, 2, 2, levelset=True)[1:-1, 1:-1, 1:-1]
    test_anisotropic = ellipsoid(2, 2, 4, spacing=(1., 1., 2.),
                                 levelset=True)
    test_anisotropic = test_anisotropic[1:-1, 1:-1, 1:-1]

    expected = np.array([[[ 2.  ,  1.25,  1.  ,  1.25,  2.  ],
                          [ 1.25,  0.5 ,  0.25,  0.5 ,  1.25],
                          [ 1.  ,  0.25,  0.  ,  0.25,  1.  ],
                          [ 1.25,  0.5 ,  0.25,  0.5 ,  1.25],
                          [ 2.  ,  1.25,  1.  ,  1.25,  2.  ]],

                         [[ 1.25,  0.5 ,  0.25,  0.5 ,  1.25],
                          [ 0.5 , -0.25, -0.5 , -0.25,  0.5 ],
                          [ 0.25, -0.5 , -0.75, -0.5 ,  0.25],
                          [ 0.5 , -0.25, -0.5 , -0.25,  0.5 ],
                          [ 1.25,  0.5 ,  0.25,  0.5 ,  1.25]],

                         [[ 1.  ,  0.25,  0.  ,  0.25,  1.  ],
                          [ 0.25, -0.5 , -0.75, -0.5 ,  0.25],
                          [ 0.  , -0.75, -1.  , -0.75,  0.  ],
                          [ 0.25, -0.5 , -0.75, -0.5 ,  0.25],
                          [ 1.  ,  0.25,  0.  ,  0.25,  1.  ]],

                         [[ 1.25,  0.5 ,  0.25,  0.5 ,  1.25],
                          [ 0.5 , -0.25, -0.5 , -0.25,  0.5 ],
                          [ 0.25, -0.5 , -0.75, -0.5 ,  0.25],
                          [ 0.5 , -0.25, -0.5 , -0.25,  0.5 ],
                          [ 1.25,  0.5 ,  0.25,  0.5 ,  1.25]],

                         [[ 2.  ,  1.25,  1.  ,  1.25,  2.  ],
                          [ 1.25,  0.5 ,  0.25,  0.5 ,  1.25],
                          [ 1.  ,  0.25,  0.  ,  0.25,  1.  ],
                          [ 1.25,  0.5 ,  0.25,  0.5 ,  1.25],
                          [ 2.  ,  1.25,  1.  ,  1.25,  2.  ]]])

    assert_allclose(test, expected)
    assert_allclose(test_anisotropic, expected)
def test_ellipsoid_stats():
    # Analytical volume/surface of an ellipsoid; reference values below
    # were generated externally (Wolfram Alpha).
    # Test comparison values generated by Wolfram Alpha
    vol, surf = ellipsoid_stats(6, 10, 16)
    assert_allclose(1280 * np.pi, vol, atol=1e-4)
    assert_allclose(1383.28, surf, atol=1e-2)

    # Test when a <= b <= c does not hold
    vol, surf = ellipsoid_stats(16, 6, 10)
    assert_allclose(1280 * np.pi, vol, atol=1e-4)
    assert_allclose(1383.28, surf, atol=1e-2)

    # Larger test to ensure reliability over broad range
    vol, surf = ellipsoid_stats(17, 27, 169)
    assert_allclose(103428 * np.pi, vol, atol=1e-4)
    assert_allclose(37426.3, surf, atol=1e-1)


# Allow running this module's tests directly from the command line.
if __name__ == "__main__":
    np.testing.run_module_suite()
| bsd-3-clause |
joanayma/pyautorunalize | pyautorunanalize.py | 1 | 5119 | #! /bin/env python
"""
PyAutorunalizer 0.1
Python script for autorunalize: http://sysinternals.com/autoruns.com listing autoruns Windows
items. Version 11.6 or greater needed.
http://Virutotal.com externa database of viruses.
original idea: http://trustedsignal.blogspot.com.es/2012/02/finding-evil-automating-autoruns.html
original implementation uses cygwin32, bash and other blobs.
Virustotal API refer: https://github.com/botherder/virustotal/
Autoruns is part of Sysinternals' suit and owns the copyright. Windows are trademark of Microsoft.
Licence: GPLv2
#Use this script at your own.
This script is not inteded as a substitute for any antivirus. Is just a sanity check.
Individuals htat noncomplain the Virustotal or sysinternals terms or harms the antivirus
industry, are out of my resposability.
"""
import xml.etree.ElementTree as ElementTree
import json
import urllib,urllib.request
import sys,os,getopt,subprocess
# Sink for autorunsc.exe's stderr; kept open for the lifetime of the process.
fnull = open(os.devnull, "w")
def runanalizer(API_KEY):
    """Enumerate autorun entries via autorunsc.exe and scan each image hash.

    Runs ``autorunsc.exe /accepteula -xaf``, parses the UTF-16 XML report,
    and for every item that has an image path queries VirusTotal through
    :func:`scan`, printing the results.

    :param API_KEY: VirusTotal public API key, forwarded to ``scan``.

    Exits the process with code 3 when autorunsc.exe is missing and 1002
    when its XML output cannot be parsed.
    """
    # autorunsc.exe must sit next to the script; bail out early otherwise.
    try:
        with open('./autorunsc.exe'):
            pass
    except IOError:
        print('autorunsc.exe binary not found! Download from https://live.sysinternals.com/autorunsc.exe')
        sys.exit(3)
    # Honor an HTTP_PROXY environment variable when present.
    try:
        if os.environ['HTTP_PROXY'] is not None:
            proxies = {'https': 'http://{0}'.format(os.environ['HTTP_PROXY'])}
            urllib.request.ProxyHandler(proxies)
            print("[Info] Going through proxies: ", proxies)
    except KeyError:
        # HTTP_PROXY is not defined; connect directly.
        pass
    print('[Info] Getting list of files to analise from Autoruns ...')
    autoruns_proc = subprocess.Popen(['autorunsc.exe', "/accepteula", '-xaf'],
                                     stdout=subprocess.PIPE, stderr=fnull)
    autoruns_xml = autoruns_proc.communicate()[0].decode("utf_16")
    # BUG FIX: str.replace() returns a new string; the original call
    # discarded the result, leaving the CRLF line endings untouched.
    autoruns_xml = autoruns_xml.replace('\r\n', '\n')
    # Parse the XML report.
    try:
        autoruns_tree = ElementTree.fromstring(autoruns_xml)
    except ElementTree.ParseError as e:
        # BUG FIX: the file imports the module only under the alias
        # ``ElementTree``; referencing ``xml.etree.ElementTree.ParseError``
        # raised NameError instead of catching the parse failure.
        print('[Error] Error parsing xml autoruns\' output. \n Is Autoruns\' latest version?\n', e)
        sys.exit(1002)
    for item in autoruns_tree:
        text = "[Object]"
        if item is None:
            text = text + " Invalid item (mostly not a binary image)\n"
            break
        imagepath = item.findtext('imagepath')
        name = item.findtext('itemname')
        if imagepath is not None:
            sha256hash = item.findtext('sha256hash')
            text = text + '' + name + '\n ' + imagepath + '\n ' + sha256hash + '\n scanning... '
            print(text)
            result = scan(sha256hash, API_KEY)
            print(result)
def scan(sha256hash, API_KEY):
    """Look up a file hash in VirusTotal and summarize the verdict.

    :param sha256hash: SHA-256 of the file to query, or None when the
        autoruns item had no hash.
    :param API_KEY: VirusTotal public API key.
    :returns: A human-readable result string (never raises on HTTP errors;
        network failures are reported in the returned text).
    """
    VIRUSTOTAL_REPORT_URL = 'https://www.virustotal.com/vtapi/v2/file/report'
    # Kept for a future "upload unknown files" feature; currently unused.
    VIRUSTOTAL_SCAN_URL = 'https://www.virustotal.com/vtapi/v2/file/scan'
    if sha256hash is None:
        return "No valid hash for this file"
    data = urllib.parse.urlencode({
        'resource': sha256hash,
        'apikey': API_KEY
    })
    data = data.encode('utf-8')
    try:
        request = urllib.request.Request(VIRUSTOTAL_REPORT_URL, data)
        reply = urllib.request.urlopen(request)
        answer = reply.read().decode("utf-8")
        report = json.loads(answer)
    except Exception as e:
        # Best-effort: report the failure as text rather than aborting the
        # whole autoruns sweep. (Dead code after the original `return` —
        # an unreachable sys.exit(4) — was removed.)
        return "\n[Error] Cannot obtain results from VirusTotal: {0}\n".format(e)
    if int(report['response_code']) == 0:
        response = (report['verbose_msg'])
    elif int(report['response_code']) < 0:
        response = 'Not found on Virustotal database!'
        # Shall send the file if is not on virustotal.
    else:
        response = 'FOUND'
        # BUG FIX: the original tested `positives >= 0`, which is always
        # true, so the infected branch was unreachable.
        if int(report['positives']) == 0:
            response = response + 'but not infected.'
        else:
            # BUG FIX: each entry of `scans` is a dict like
            # {'detected': bool, 'result': str}; the original compared the
            # dict itself to the string 'detected' (never true) and shadowed
            # this function's name with the loop variable.
            for av, av_report in report['scans'].items():
                if av_report.get('detected'):
                    response = response + ' INFECTED!\n engine:' + av + ',\n malware:' + av_report['result'] + '\n'
    return response
def help():
    """Print the usage text (main's docstring) and exit successfully.

    NOTE: intentionally shadows the ``help`` builtin within this script.
    """
    print(main.__doc__)
    sys.exit(0)
def main(argv):
    """\n
    Script for Windows basic security check using Sysinternal\'s Autoruns
    and Virustotal.com\n
    Thereforce, you need to get a public API Key from http://www.virustotal.com for your
    scripting analysis\n
    and autorunsc.exe binary.\n
    Usage:\n
      autorunalize.exe [--help] --API-KEY YOUR_API_KEY\n
        -h, --help	Shows this help.\n
        -a, --API-KEY	Your public API key from Virustotal.
          This a 64 characters hexadecimal string.\n
    Example:\n
    ./autorunalize.exe --API-KEY YOUR_API_KEY\n
    """
    API_KEY = ''
    try:
        opts, args = getopt.getopt(argv, "ha:", ["help", "API-KEY="])
    except getopt.GetoptError:
        print('pyrunanalizer.py --API-KEY YOUR_API_KEY_HERE')
        sys.exit(2)
    # NOTE(review): when argv is empty the loop body never runs and the
    # script exits silently — confirm whether usage should be printed.
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()  # unreachable: help() already exits
        elif opt in ("-a", "--API-KEY"):
            API_KEY = arg
            runanalizer(API_KEY)
        else:
            help()


if __name__ == "__main__":
    main(sys.argv[1:])
| gpl-2.0 |
openhatch/oh-mainline | vendor/packages/sqlparse/tests/test_regressions.py | 15 | 8709 | # -*- coding: utf-8 -*-
import sys
from tests.utils import TestCaseBase, load_file
import sqlparse
from sqlparse import sql
from sqlparse import tokens as T
class RegressionTests(TestCaseBase):
    """Regression tests pinning fixes for individual sqlparse issues;
    each method is named after the issue it guards against."""

    def test_issue9(self):
        # make sure where doesn't consume parenthesis
        p = sqlparse.parse('(where 1)')[0]
        self.assert_(isinstance(p, sql.Statement))
        self.assertEqual(len(p.tokens), 1)
        self.assert_(isinstance(p.tokens[0], sql.Parenthesis))
        prt = p.tokens[0]
        self.assertEqual(len(prt.tokens), 3)
        self.assertEqual(prt.tokens[0].ttype, T.Punctuation)
        self.assertEqual(prt.tokens[-1].ttype, T.Punctuation)

    def test_issue13(self):
        # escaped quote inside a literal must not break statement splitting
        parsed = sqlparse.parse(("select 'one';\n"
                                 "select 'two\\'';\n"
                                 "select 'three';"))
        self.assertEqual(len(parsed), 3)
        self.assertEqual(str(parsed[1]).strip(), "select 'two\\'';")

    def test_issue26(self):
        # parse stand-alone comments
        p = sqlparse.parse('--hello')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)
        p = sqlparse.parse('-- hello')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)
        p = sqlparse.parse('--hello\n')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)
        p = sqlparse.parse('--')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)
        p = sqlparse.parse('--\n')[0]
        self.assertEqual(len(p.tokens), 1)
        self.assert_(p.tokens[0].ttype is T.Comment.Single)

    def test_issue34(self):
        # keyword matching must be case-insensitive
        t = sqlparse.parse("create")[0].token_first()
        self.assertEqual(t.match(T.Keyword.DDL, "create"), True)
        self.assertEqual(t.match(T.Keyword.DDL, "CREATE"), True)

    def test_issue35(self):
        # missing space before LIMIT
        sql = sqlparse.format("select * from foo where bar = 1 limit 1",
                              reindent=True)
        self.ndiffAssertEqual(sql, "\n".join(["select *",
                                              "from foo",
                                              "where bar = 1 limit 1"]))

    def test_issue38(self):
        # strip_comments must remove single-line and block comments
        sql = sqlparse.format("SELECT foo; -- comment",
                              strip_comments=True)
        self.ndiffAssertEqual(sql, "SELECT foo;")
        sql = sqlparse.format("/* foo */", strip_comments=True)
        self.ndiffAssertEqual(sql, "")

    def test_issue39(self):
        # dotted names must group into a single Identifier
        p = sqlparse.parse('select user.id from user')[0]
        self.assertEqual(len(p.tokens), 7)
        idt = p.tokens[2]
        self.assertEqual(idt.__class__, sql.Identifier)
        self.assertEqual(len(idt.tokens), 3)
        self.assertEqual(idt.tokens[0].match(T.Name, 'user'), True)
        self.assertEqual(idt.tokens[1].match(T.Punctuation, '.'), True)
        self.assertEqual(idt.tokens[2].match(T.Name, 'id'), True)

    def test_issue40(self):
        # make sure identifier lists in subselects are grouped
        p = sqlparse.parse(('SELECT id, name FROM '
                            '(SELECT id, name FROM bar) as foo'))[0]
        self.assertEqual(len(p.tokens), 7)
        self.assertEqual(p.tokens[2].__class__, sql.IdentifierList)
        self.assertEqual(p.tokens[-1].__class__, sql.Identifier)
        self.assertEqual(p.tokens[-1].get_name(), u'foo')
        sp = p.tokens[-1].tokens[0]
        self.assertEqual(sp.tokens[3].__class__, sql.IdentifierList)
        # make sure that formatting works as expected
        self.ndiffAssertEqual(
            sqlparse.format(('SELECT id, name FROM '
                             '(SELECT id, name FROM bar)'),
                            reindent=True),
            ('SELECT id,\n'
             '       name\n'
             'FROM\n'
             '  (SELECT id,\n'
             '          name\n'
             '   FROM bar)'))
        self.ndiffAssertEqual(
            sqlparse.format(('SELECT id, name FROM '
                             '(SELECT id, name FROM bar) as foo'),
                            reindent=True),
            ('SELECT id,\n'
             '       name\n'
             'FROM\n'
             '  (SELECT id,\n'
             '          name\n'
             '   FROM bar) as foo'))
def test_issue78():
    # the bug author provided this nice examples, let's use them!
    # Each variant quotes a different combination of parent/name/alias;
    # all must resolve to the same identifier parts.
    def _get_identifier(sql):
        p = sqlparse.parse(sql)[0]
        return p.tokens[2]
    results = (('get_name', 'z'),
               ('get_real_name', 'y'),
               ('get_parent_name', 'x'),
               ('get_alias', 'z'),
               ('get_typecast', 'text'))
    variants = (
        'select x.y::text as z from foo',
        'select x.y::text as "z" from foo',
        'select x."y"::text as z from foo',
        'select x."y"::text as "z" from foo',
        'select "x".y::text as z from foo',
        'select "x".y::text as "z" from foo',
        'select "x"."y"::text as z from foo',
        'select "x"."y"::text as "z" from foo',
    )
    for variant in variants:
        i = _get_identifier(variant)
        assert isinstance(i, sql.Identifier)
        for func_name, result in results:
            func = getattr(i, func_name)
            assert func() == result
def test_issue83():
    # Dollar-quoted ($_$ ... $_$) function bodies contain semicolons that
    # must not split the enclosing CREATE FUNCTION statements.
    sql = """
CREATE OR REPLACE FUNCTION func_a(text)
  RETURNS boolean LANGUAGE plpgsql STRICT IMMUTABLE AS
$_$
BEGIN
 ...
END;
$_$;

CREATE OR REPLACE FUNCTION func_b(text)
  RETURNS boolean LANGUAGE plpgsql STRICT IMMUTABLE AS
$_$
BEGIN
 ...
END;
$_$;

ALTER TABLE..... ;"""
    t = sqlparse.split(sql)
    assert len(t) == 3
def test_comment_encoding_when_reindent():
    # There was an UnicodeEncodeError in the reindent filter that
    # casted every comment followed by a keyword to str.
    sql = u'select foo -- Comment containing Ümläuts\nfrom bar'
    formatted = sqlparse.format(sql, reindent=True)
    assert formatted == sql


def test_parse_sql_with_binary():
    # See https://github.com/andialbrecht/sqlparse/pull/88
    # Raw binary bytes inside a literal must survive formatting.
    digest = '\x82|\xcb\x0e\xea\x8aplL4\xa1h\x91\xf8N{'
    sql = 'select * from foo where bar = \'%s\'' % digest
    formatted = sqlparse.format(sql, reindent=True)
    tformatted = 'select *\nfrom foo\nwhere bar = \'%s\'' % digest
    if sys.version_info < (3,):
        tformatted = tformatted.decode('unicode-escape')
    assert formatted == tformatted


def test_dont_alias_keywords():
    # The _group_left_right function had a bug where the check for the
    # left side wasn't handled correctly. In one case this resulted in
    # a keyword turning into an identifier.
    p = sqlparse.parse('FROM AS foo')[0]
    assert len(p.tokens) == 5
    assert p.tokens[0].ttype is T.Keyword
    assert p.tokens[2].ttype is T.Keyword


def test_format_accepts_encoding():  # issue20
    # Formatting a cp1251-encoded statement must round-trip the comment.
    sql = load_file('test_cp1251.sql', 'cp1251')
    formatted = sqlparse.format(sql, reindent=True, encoding='cp1251')
    if sys.version_info < (3,):
        tformatted = u'insert into foo\nvalues (1); -- Песня про надежду\n'
    else:
        tformatted = 'insert into foo\nvalues (1); -- Песня про надежду\n'
    assert formatted == tformatted


def test_issue90():
    # A long UPDATE ... SET list must be reindented one column per line.
    sql = ('UPDATE "gallery_photo" SET "owner_id" = 4018, "deleted_at" = NULL,'
           ' "width" = NULL, "height" = NULL, "rating_votes" = 0,'
           ' "rating_score" = 0, "thumbnail_width" = NULL,'
           ' "thumbnail_height" = NULL, "price" = 1, "description" = NULL')
    formatted = sqlparse.format(sql, reindent=True)
    tformatted = '\n'.join(['UPDATE "gallery_photo"',
                            'SET "owner_id" = 4018,',
                            '    "deleted_at" = NULL,',
                            '    "width" = NULL,',
                            '    "height" = NULL,',
                            '    "rating_votes" = 0,',
                            '    "rating_score" = 0,',
                            '    "thumbnail_width" = NULL,',
                            '    "thumbnail_height" = NULL,',
                            '    "price" = 1,',
                            '    "description" = NULL'])
    assert formatted == tformatted


def test_except_formatting():
    # EXCEPT must be treated like UNION: placed on its own line.
    sql = 'SELECT 1 FROM foo WHERE 2 = 3 EXCEPT SELECT 2 FROM bar WHERE 1 = 2'
    formatted = sqlparse.format(sql, reindent=True)
    tformatted = '\n'.join([
        'SELECT 1',
        'FROM foo',
        'WHERE 2 = 3',
        'EXCEPT',
        'SELECT 2',
        'FROM bar',
        'WHERE 1 = 2'
    ])
    assert formatted == tformatted
| agpl-3.0 |
moutai/scikit-learn | sklearn/manifold/locally_linear.py | 37 | 25852 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Y along the first axis

    We estimate the weights to assign to each point in Y[i] to recover
    the point X[i]. The barycenter weights sum to 1.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_dim)

    Z : array-like, shape (n_samples, n_neighbors, n_dim)

    reg : float, optional
        amount of regularization to add for the problem to be
        well-posed in the case of n_neighbors > n_dim

    Returns
    -------
    B : array-like, shape (n_samples, n_neighbors)

    Notes
    -----
    See developers note for more information.
    """
    X = check_array(X, dtype=FLOAT_DTYPES)
    Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)

    n_samples, n_neighbors = X.shape[0], Z.shape[1]
    B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
    v = np.ones(n_neighbors, dtype=X.dtype)

    # this might raise a LinalgError if G is singular and has trace
    # zero
    for i, A in enumerate(Z.transpose(0, 2, 1)):
        C = A.T - X[i]  # broadcasting
        G = np.dot(C, C.T)
        trace = np.trace(G)
        # Regularize proportionally to the trace so the Gram system stays
        # well conditioned when n_neighbors > n_dim.
        if trace > 0:
            R = reg * trace
        else:
            R = reg
        G.flat[::Z.shape[1] + 1] += R
        # COMPAT FIX: scipy.linalg.solve's `sym_pos` keyword was deprecated
        # in SciPy 1.8 and removed in 1.11; `assume_a='pos'` is the exact
        # equivalent (same symmetric positive-definite / Cholesky path).
        w = solve(G, v, assume_a='pos')
        B[i, :] = w / np.sum(w)
    return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
    """Computes the barycenter weighted graph of k-Neighbors for points in X

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.

    n_neighbors : int
        Number of neighbors for each sample.

    reg : float, optional
        Amount of regularization when solving the least-squares
        problem. Only relevant if mode='barycenter'. If None, use the
        default.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    See also
    --------
    sklearn.neighbors.kneighbors_graph
    sklearn.neighbors.radius_neighbors_graph
    """
    nn_model = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
    X = nn_model._fit_X
    n_samples = X.shape[0]
    # Column 0 of the neighbor query is each point itself; drop it.
    neighbor_idx = nn_model.kneighbors(X, return_distance=False)[:, 1:]
    weights = barycenter_weights(X, X[neighbor_idx], reg=reg)
    # Each row of the CSR matrix holds exactly n_neighbors entries.
    row_ptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
    return csr_matrix((weights.ravel(), neighbor_idx.ravel(), row_ptr),
                      shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.

    Returns the `k` eigenvectors of M with the smallest eigenvalues
    (after skipping the `k_skip` smallest) and the sum of the
    corresponding eigenvalues.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite

    k : integer
        Number of eigenvalues/vectors to return

    k_skip : integer, optional
        Number of low eigenvalues to skip.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.

    max_iter : maximum number of iterations for 'arpack' method
        not used if eigen_solver=='dense'

    random_state : numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.
    """
    if eigen_solver == 'auto':
        # Heuristic: shift-invert ARPACK pays off for large matrices when
        # only a few eigenvectors are requested.
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'

    if eigen_solver == 'arpack':
        random_state = check_random_state(random_state)
        # initialize with [-1,1] as in ARPACK
        v0 = random_state.uniform(-1, 1, M.shape[0])
        try:
            eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                tol=tol, maxiter=max_iter,
                                                v0=v0)
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved.  method='dense' is recommended. "
                             "See online documentation for more information."
                             % msg)

        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            # eigh needs a dense array; densify sparse input.
            M = M.toarray()
        eigen_values, eigen_vectors = eigh(
            M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
        X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
        max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
        random_state=None, n_jobs=1):
    """Perform a Locally Linear Embedding analysis on the data.

    Builds the appropriate cost matrix M for the chosen LLE variant and
    embeds the data into ``n_components`` dimensions via the null space
    of M.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.

    n_neighbors : integer
        number of neighbors to consider for each point.

    n_components : integer
        number of coordinates for the manifold.

    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.

    max_iter : integer
        maximum number of iterations for the arpack solver.

    method : {'standard', 'hessian', 'modified', 'ltsa'}
        standard : use the standard locally linear embedding algorithm.
                   see reference [1]_
        hessian  : use the Hessian eigenmap method.  This method requires
                   n_neighbors > n_components * (1 + (n_components + 1) / 2.
                   see reference [2]_
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]_
        ltsa     : use local tangent space alignment algorithm
                   see reference [4]_

    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'

    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if method == 'modified'

    random_state : numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.

    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.

    References
    ----------

    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)

    if method not in ('standard', 'hessian', 'modified', 'ltsa'):
        raise ValueError("unrecognized method '%s'" % method)

    # n_neighbors + 1 because the query set equals the training set, so
    # each point's nearest neighbor is itself (dropped later).
    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
    nbrs.fit(X)
    X = nbrs._fit_X

    N, d_in = X.shape

    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= N:
        raise ValueError("n_neighbors must be less than number of points")

    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")

    M_sparse = (eigen_solver != 'dense')

    if method == 'standard':
        W = barycenter_kneighbors_graph(
            nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)

        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # W = W - I = W - I

    elif method == 'hessian':
        # dp = number of independent entries of a symmetric
        # (n_components x n_components) Hessian.
        dp = n_components * (n_components + 1) // 2

        if n_neighbors <= n_components + dp:
            raise ValueError("for method='hessian', n_neighbors must be "
                             "greater than "
                             "[n_components * (n_components + 3) / 2]")

        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
        Yi[:, 0] = 1

        M = np.zeros((N, N), dtype=np.float64)

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)

            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]

            Yi[:, 1:1 + n_components] = U[:, :n_components]

            j = 1 + n_components
            for k in range(n_components):
                Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
                                                 U[:, k:n_components])
                j += n_components - k

            Q, R = qr(Yi)

            w = Q[:, n_components + 1:]
            S = w.sum(0)

            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'modified':
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires "
                             "n_neighbors >= n_components")

        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])

        # choose the most efficient way to find the eigenvectors
        use_svd = (n_neighbors > d_in)

        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs,
                                        full_matrices=True)
            evals **= 2
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]

        # find regularized weights: this is like normal LLE.
        # because we've already computed the SVD of each covariance matrix,
        # it's faster to use this rather than np.linalg.solve
        reg = 1E-3 * evals.sum(1)
        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]

        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]

        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points. This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)

        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = np.cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues

        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=np.float64)
        for i in range(N):
            s_i = s_range[i]

            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i:]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)

            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))

            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h

            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
                  (1 - alpha_i) * w_reg[i, :, None])

            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'ltsa':
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        M = np.zeros((N, N))

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)

            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]

            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1. / np.sqrt(n_neighbors)

            GiGiT = np.dot(Gi, Gi.T)

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1

    return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
                      tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
    """Locally Linear Embedding

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold
    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition. For this method, M must be an array
            or matrix type. This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.
    max_iter : integer
        maximum number of iterations for the arpack solver.
        Not used if eigen_solver=='dense'.
    method : string ('standard', 'hessian', 'modified' or 'ltsa')
        standard : use the standard locally linear embedding algorithm. see
            reference [1]
        hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2``
            see reference [2]
        modified : use the modified locally linear embedding algorithm.
            see reference [3]
        ltsa : use local tangent space alignment algorithm
            see reference [4]
    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if ``method == 'hessian'``
    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if ``method == 'modified'``
    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance
    random_state : numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors.  (``transform`` indexes its rows by
        training-sample index, so rows correspond to samples.)
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`
    nbrs_ : NearestNeighbors object
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """
    def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
                 eigen_solver='auto', tol=1E-6, max_iter=100,
                 method='standard', hessian_tol=1E-4, modified_tol=1E-12,
                 neighbors_algorithm='auto', random_state=None, n_jobs=1):
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.reg = reg
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.method = method
        self.hessian_tol = hessian_tol
        self.modified_tol = modified_tol
        self.random_state = random_state
        self.neighbors_algorithm = neighbors_algorithm
        self.n_jobs = n_jobs
    def _fit_transform(self, X):
        """Fit the nearest-neighbors model and compute the embedding.

        Stores ``nbrs_``, ``embedding_`` and ``reconstruction_error_`` on
        self; shared helper for :meth:`fit` and :meth:`fit_transform`.
        """
        self.nbrs_ = NearestNeighbors(self.n_neighbors,
                                      algorithm=self.neighbors_algorithm,
                                      n_jobs=self.n_jobs)
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        self.nbrs_.fit(X)
        self.embedding_, self.reconstruction_error_ = \
            locally_linear_embedding(
                self.nbrs_, self.n_neighbors, self.n_components,
                eigen_solver=self.eigen_solver, tol=self.tol,
                max_iter=self.max_iter, method=self.method,
                hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
                random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
    def fit(self, X, y=None):
        """Compute the embedding vectors for data X

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self
    def fit_transform(self, X, y=None):
        """Compute the embedding vectors for data X and transform X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_
    def transform(self, X):
        """
        Transform new points into embedding space.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        X_new : array, shape = [n_samples, n_components]

        Notes
        -----
        Because of scaling performed by this method, it is discouraged to use
        it together with methods that are not scale-invariant (like SVMs)
        """
        check_is_fitted(self, "nbrs_")
        X = check_array(X)
        ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
                                    return_distance=False)
        # Reconstruct each new point as a barycentric combination of its
        # nearest training points (nbrs_._fit_X is the fitted training data).
        weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
                                     reg=self.reg)
        X_new = np.empty((X.shape[0], self.n_components))
        for i in range(X.shape[0]):
            # Apply the same barycentric weights in embedding space.
            X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
        return X_new
| bsd-3-clause |
bilke/OpenSG-1.8 | SConsLocal/scons-local-0.96.1/SCons/Tool/__init__.py | 2 | 13279 | """SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Tool/__init__.py 0.96.1.D001 2004/08/23 09:55:29 knight"
import imp
import sys
import SCons.Errors
import SCons.Defaults
class ToolSpec:
    """Callable wrapper around a tool module (Python 2 code).

    A ToolSpec gets ``generate`` and ``exists`` attributes attached by
    Tool() below.  Calling the spec records the tool's name in the
    environment's TOOLS list and delegates to the module's generate().
    """
    def __init__(self, name):
        self.name = name
    def __call__(self, env, *args, **kw):
        env.Append(TOOLS = [ self.name ])
        # Python 2 apply(): equivalent to self.generate(env, *args, **kw).
        apply(self.generate, ( env, ) + args, kw)
    def __str__(self):
        return self.name
def Tool(name, toolpath=[]):
    "Select a canned Tool specification, optionally searching in toolpath."
    # NOTE(review): the mutable default `toolpath=[]` is safe here only
    # because it is never mutated (just passed to imp.find_module).
    # First, look for the tool module on the caller-supplied toolpath.
    try:
        file, path, desc = imp.find_module(name, toolpath)
        try:
            module = imp.load_module(name, file, path, desc)
            spec = ToolSpec(name)
            spec.generate = module.generate
            spec.exists = module.exists
            return spec
        finally:
            # find_module may return file=None (e.g. for packages).
            if file:
                file.close()
    except ImportError, e:
        pass
    # Fall back to the bundled SCons.Tool package, caching the loaded
    # module in sys.modules / as an attribute of SCons.Tool.
    full_name = 'SCons.Tool.' + name
    if not sys.modules.has_key(full_name):
        try:
            file, path, desc = imp.find_module(name,
                                        sys.modules['SCons.Tool'].__path__)
            mod = imp.load_module(full_name, file, path, desc)
            setattr(SCons.Tool, name, mod)
        except ImportError, e:
            raise SCons.Errors.UserError, "No tool named '%s': %s" % (name, e)
        if file:
            file.close()
    spec = ToolSpec(name)
    spec.generate = sys.modules[full_name].generate
    spec.exists = sys.modules[full_name].exists
    return spec
def createProgBuilder(env):
    """Return the Program Builder registered in *env*.

    If the environment has no Program builder yet, a default one wired
    to the standard link action is created, installed, and returned.
    """
    try:
        return env['BUILDERS']['Program']
    except KeyError:
        pass
    program = SCons.Builder.Builder(action = SCons.Defaults.LinkAction,
                                    emitter = '$PROGEMITTER',
                                    prefix = '$PROGPREFIX',
                                    suffix = '$PROGSUFFIX',
                                    src_suffix = '$OBJSUFFIX',
                                    src_builder = 'Object',
                                    target_scanner = SCons.Defaults.ProgScan)
    env['BUILDERS']['Program'] = program
    return program
def createStaticLibBuilder(env):
    """Return the StaticLibrary Builder registered in *env*.

    Creates a default archiver-based builder on first use and also
    installs it under the generic 'Library' alias.
    """
    try:
        return env['BUILDERS']['StaticLibrary']
    except KeyError:
        pass
    static_lib = SCons.Builder.Builder(action = SCons.Defaults.ArAction,
                                       emitter = '$LIBEMITTER',
                                       prefix = '$LIBPREFIX',
                                       suffix = '$LIBSUFFIX',
                                       src_suffix = '$OBJSUFFIX',
                                       src_builder = 'StaticObject')
    # 'Library' is kept as a synonym for 'StaticLibrary'.
    env['BUILDERS']['StaticLibrary'] = static_lib
    env['BUILDERS']['Library'] = static_lib
    return static_lib
def createSharedLibBuilder(env):
    """Return the SharedLibrary Builder registered in *env*.

    Creates and installs a default one on first use; the action first
    runs the shared-object sanity check, then the shared-link action.
    """
    try:
        return env['BUILDERS']['SharedLibrary']
    except KeyError:
        pass
    shared_lib = SCons.Builder.Builder(
        action = [ SCons.Defaults.SharedCheck,
                   SCons.Defaults.ShLinkAction ],
        emitter = "$SHLIBEMITTER",
        prefix = '$SHLIBPREFIX',
        suffix = '$SHLIBSUFFIX',
        target_scanner = SCons.Defaults.ProgScan,
        src_suffix = '$SHOBJSUFFIX',
        src_builder = 'SharedObject')
    env['BUILDERS']['SharedLibrary'] = shared_lib
    return shared_lib
def createObjBuilders(env):
    """Return the (StaticObject, SharedObject) Builders for *env*.

    Each builder is created and installed on first use; the static one
    is also registered under the generic 'Object' alias.  This is a
    separate function because soooo many Tools use this functionality.

    The return is a 2-tuple of (StaticObject, SharedObject)
    """
    def _ensure(key, alias, prefix, suffix):
        # Fetch the named builder, creating/installing it if absent.
        try:
            builder = env['BUILDERS'][key]
        except KeyError:
            builder = SCons.Builder.Builder(action = {},
                                            emitter = {},
                                            prefix = prefix,
                                            suffix = suffix,
                                            src_builder = ['CFile', 'CXXFile'],
                                            source_scanner = SCons.Defaults.ObjSourceScan,
                                            single_source = 1)
            env['BUILDERS'][key] = builder
            if alias:
                env['BUILDERS'][alias] = builder
        return builder
    static_obj = _ensure('StaticObject', 'Object', '$OBJPREFIX', '$OBJSUFFIX')
    shared_obj = _ensure('SharedObject', None, '$SHOBJPREFIX', '$SHOBJSUFFIX')
    return (static_obj, shared_obj)
def createCFileBuilders(env):
    """Return the (CFile, CXXFile) Builders for *env*.

    Each builder is created and installed on first use, at which point
    the corresponding default suffix variable is also set ('.c' for
    CFile, '.cc' for CXXFile).  This is a separate function because
    soooo many Tools use this functionality.

    The return is a 2-tuple of (CFile, CXXFile)
    """
    def _ensure(key, suffix_var, default_suffix):
        # Fetch the named builder, creating/installing it if absent.
        try:
            builder = env['BUILDERS'][key]
        except KeyError:
            builder = SCons.Builder.Builder(action = {},
                                            emitter = {},
                                            suffix = {None: '$' + suffix_var})
            env['BUILDERS'][key] = builder
            env[suffix_var] = default_suffix
        return builder
    c_file = _ensure('CFile', 'CFILESUFFIX', '.c')
    cxx_file = _ensure('CXXFile', 'CXXFILESUFFIX', '.cc')
    return (c_file, cxx_file)
def FindTool(tools, env):
    """Return the first name in *tools* whose Tool exists for *env*,
    or None when none of them do."""
    for candidate in tools:
        if Tool(candidate).exists(env):
            return candidate
    return None
def FindAllTools(tools, env):
    """Return the list of names in *tools* whose Tool exists for *env*,
    preserving the input order."""
    return [tool for tool in tools if Tool(tool).exists(env)]
def tool_list(platform, env):
    """Return the list of tool names to initialize for *platform*,
    keeping only tools that actually exist in *env* (plus per-category
    fallbacks).  The per-platform preference tables below are described
    in the man page."""
    # XXX this logic about what tool to prefer on which platform
    # should be moved into either the platform files or
    # the tool files themselves.
    # The search orders here are described in the man page.  If you
    # change these search orders, update the man page as well.
    if str(platform) == 'win32':
        "prefer Microsoft tools on Windows"
        linkers = ['mslink', 'gnulink', 'ilink', 'linkloc', 'ilink32' ]
        c_compilers = ['msvc', 'mingw', 'gcc', 'icl', 'icc', 'cc', 'bcc32' ]
        cxx_compilers = ['msvc', 'icc', 'g++', 'c++', 'bcc32' ]
        assemblers = ['masm', 'nasm', 'gas', '386asm' ]
        fortran_compilers = ['g77', 'ifl', 'cvf', 'fortran']
        ars = ['mslib', 'ar', 'tlib']
    elif str(platform) == 'os2':
        "prefer IBM tools on OS/2"
        linkers = ['ilink', 'gnulink', 'mslink']
        c_compilers = ['icc', 'gcc', 'msvc', 'cc']
        cxx_compilers = ['icc', 'g++', 'msvc', 'c++']
        assemblers = ['nasm', 'masm', 'gas']
        fortran_compilers = ['ifl', 'g77']
        ars = ['ar', 'mslib']
    elif str(platform) == 'irix':
        "prefer MIPSPro on IRIX"
        linkers = ['sgilink', 'gnulink']
        c_compilers = ['sgicc', 'gcc', 'cc']
        cxx_compilers = ['sgic++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f77', 'g77', 'fortran']
        ars = ['sgiar']
    elif str(platform) == 'sunos':
        "prefer Forte tools on SunOS"
        linkers = ['sunlink', 'gnulink']
        c_compilers = ['suncc', 'gcc', 'cc']
        cxx_compilers = ['sunc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f77', 'g77', 'fortran']
        ars = ['sunar']
    elif str(platform) == 'hpux':
        "prefer aCC tools on HP-UX"
        linkers = ['hplink', 'gnulink']
        c_compilers = ['hpcc', 'gcc', 'cc']
        cxx_compilers = ['hpc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f77', 'g77', 'fortran']
        ars = ['ar']
    elif str(platform) == 'aix':
        "prefer AIX Visual Age tools on AIX"
        linkers = ['aixlink', 'gnulink']
        c_compilers = ['aixcc', 'gcc', 'cc']
        cxx_compilers = ['aixc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['aixf77', 'g77', 'fortran']
        ars = ['ar']
    else:
        "prefer GNU tools on all other platforms"
        linkers = ['gnulink', 'mslink', 'ilink']
        c_compilers = ['gcc', 'msvc', 'icc', 'cc']
        cxx_compilers = ['g++', 'msvc', 'icc', 'c++']
        assemblers = ['gas', 'nasm', 'masm']
        fortran_compilers = ['g77', 'ifort', 'ifl', 'fortran']
        ars = ['ar', 'mslib']
    # Pick the first existing tool from each category, falling back to
    # the category's first entry when none is found.
    c_compiler = FindTool(c_compilers, env) or c_compilers[0]
    # XXX this logic about what tool provides what should somehow be
    #     moved into the tool files themselves.
    if c_compiler and c_compiler == 'mingw':
        # MinGW contains a linker, C compiler, C++ compiler,
        # Fortran compiler, archiver and assembler:
        cxx_compiler = None
        linker = None
        assembler = None
        fortran_compiler = None
        ar = None
    else:
        # Don't use g++ if the C compiler has built-in C++ support:
        if c_compiler in ('msvc', 'icc'):
            cxx_compiler = None
        else:
            cxx_compiler = FindTool(cxx_compilers, env) or cxx_compilers[0]
        linker = FindTool(linkers, env) or linkers[0]
        assembler = FindTool(assemblers, env) or assemblers[0]
        fortran_compiler = FindTool(fortran_compilers, env) or fortran_compilers[0]
        ar = FindTool(ars, env) or ars[0]
    # Auxiliary tools are included only if they exist (no fallback).
    other_tools = FindAllTools(['BitKeeper', 'CVS',
                                'dmd',
                                'dvipdf', 'dvips', 'gs',
                                'jar', 'javac', 'javah',
                                'latex', 'lex', 'm4', 'midl', 'msvs',
                                'pdflatex', 'pdftex', 'Perforce',
                                'RCS', 'rmic', 'SCCS',
                                # 'Subversion',
                                'swig',
                                'tar', 'tex', 'yacc', 'zip'],
                               env)
    tools = ([linker, c_compiler, cxx_compiler,
              fortran_compiler, assembler, ar]
             + other_tools)
    # Drop the None placeholders (e.g. everything MinGW provides itself).
    return filter(lambda x: x, tools)
| lgpl-2.1 |
juharris/tensorflow | tensorflow/python/tools/print_selective_registration_header.py | 6 | 4243 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Prints a header file to be used with SELECTIVE_REGISTRATION.
Example usage:
print_selective_registration_header \
--graphs=path/to/graph.pb > ops_to_register.h
Then when compiling tensorflow, include ops_to_register.h in the include
search path and pass -DSELECTIVE_REGISTRATION - see
core/framework/selective_registration.h for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python import pywrap_tensorflow
FLAGS = tf.app.flags.FLAGS
# Command-line flags: the serialized format of the input graphs, the list
# of graph files to analyze, and op:kernel pairs that are always included.
tf.app.flags.DEFINE_string('proto_fileformat', 'rawproto',
                           'Format of proto file, either textproto or rawproto')
tf.app.flags.DEFINE_string(
    'graphs', '',
    'Comma-separated list of paths to model files to be analyzed.')
tf.app.flags.DEFINE_string('default_ops', 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp',
                           'Default operator:kernel pairs to always include '
                           'implementation for')
def get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str):
  """Gets the ops and kernels needed from the model files.

  Args:
    proto_fileformat: either 'rawproto' (binary GraphDef) or 'textproto'.
    proto_files: iterable of paths to GraphDef files to analyze.
    default_ops_str: comma-separated 'op:kernel' pairs to always include.

  Returns:
    Sorted list of unique (op name, kernel class name) tuples.
  """
  ops = set()
  for proto_file in proto_files:
    tf.logging.info('Loading proto file %s', proto_file)
    # Load GraphDef.
    file_data = tf.gfile.GFile(proto_file).read()
    if proto_fileformat == 'rawproto':
      graph_def = graph_pb2.GraphDef.FromString(file_data)
    else:
      assert proto_fileformat == 'textproto'
      graph_def = text_format.Parse(file_data, graph_pb2.GraphDef())
    # Find all ops and kernels used by the graph.
    for node_def in graph_def.node:
      if not node_def.device:
        node_def.device = '/cpu:0'
      kernel_class = pywrap_tensorflow.TryFindKernelClass(
          node_def.SerializeToString())
      if kernel_class:
        # set.add is idempotent, so no membership test is needed.
        ops.add((str(node_def.op), kernel_class.decode('utf-8')))
      else:
        print(
            'Warning: no kernel found for op %s' % node_def.op, file=sys.stderr)
  # Add default ops.
  for s in default_ops_str.split(','):
    op, kernel = s.split(':')
    ops.add((op, kernel))
  return list(sorted(ops))
def print_header(ops_and_kernels, ops):
  """Prints a header for use with tensorflow SELECTIVE_REGISTRATION.

  Args:
    ops_and_kernels: iterable of (op name, kernel class name) pairs.
    ops: collection of op names used both for the ShouldRegisterOp guard
      and to decide whether symbolic gradients are required.
  """
  # Emit the ShouldRegisterOp predicate, one strcmp per whitelisted op.
  guard = ['#ifndef OPS_TO_REGISTER',
           '#define OPS_TO_REGISTER',
           'constexpr inline bool ShouldRegisterOp(const char op[]) {',
           ' return false']
  guard.extend(' || (strcmp(op, "%s") == 0)' % op for op in sorted(ops))
  guard.append(' ;')
  guard.append('}')
  for text in guard:
    print(text)
  # Emit the comma-delimited list of required kernel class names.
  kernel_parts = ['"%s,"\n' % kernel_class
                  for _, kernel_class in ops_and_kernels]
  print('const char kNecessaryOpKernelClasses[] = ","\n' +
        ''.join(kernel_parts) + ';')
  needs_gradients = 'true' if 'SymbolicGradient' in ops else 'false'
  print('const bool kRequiresSymbolicGradients = %s;' % needs_gradients)
  print('#endif')
def main(unused_argv):
  """Script entry point: analyze the graphs named by --graphs and print
  the selective-registration header to stdout.  Returns 1 on error."""
  if not FLAGS.graphs:
    print('--graphs is required')
    return 1
  graphs = FLAGS.graphs.split(',')
  ops_and_kernels = get_ops_and_kernels(FLAGS.proto_fileformat, graphs,
                                        FLAGS.default_ops)
  # Bare op names (without kernel classes) drive ShouldRegisterOp().
  ops = set([op for op, _ in ops_and_kernels])
  if not ops:
    print('Error reading graph!')
    return 1
  print_header(ops_and_kernels, ops)
if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
nshalman/qemu | scripts/analyse-9p-simpletrace.py | 333 | 9058 | #!/usr/bin/env python
# Pretty print 9p simpletrace log
# Usage: ./analyse-9p-simpletrace <trace-events> <trace-pid>
#
# Author: Harsh Prateek Bora
import os
import simpletrace
# Map of 9P message type numbers to protocol mnemonics (T* = request
# from the client, R* = reply from the server; requests are even,
# replies odd).  Used to render numeric ids from trace records.
symbol_9p = {
    6 : 'TLERROR',
    7 : 'RLERROR',
    8 : 'TSTATFS',
    9 : 'RSTATFS',
    12 : 'TLOPEN',
    13 : 'RLOPEN',
    14 : 'TLCREATE',
    15 : 'RLCREATE',
    16 : 'TSYMLINK',
    17 : 'RSYMLINK',
    18 : 'TMKNOD',
    19 : 'RMKNOD',
    20 : 'TRENAME',
    21 : 'RRENAME',
    22 : 'TREADLINK',
    23 : 'RREADLINK',
    24 : 'TGETATTR',
    25 : 'RGETATTR',
    26 : 'TSETATTR',
    27 : 'RSETATTR',
    30 : 'TXATTRWALK',
    31 : 'RXATTRWALK',
    32 : 'TXATTRCREATE',
    33 : 'RXATTRCREATE',
    40 : 'TREADDIR',
    41 : 'RREADDIR',
    50 : 'TFSYNC',
    51 : 'RFSYNC',
    52 : 'TLOCK',
    53 : 'RLOCK',
    54 : 'TGETLOCK',
    55 : 'RGETLOCK',
    70 : 'TLINK',
    71 : 'RLINK',
    72 : 'TMKDIR',
    73 : 'RMKDIR',
    74 : 'TRENAMEAT',
    75 : 'RRENAMEAT',
    76 : 'TUNLINKAT',
    77 : 'RUNLINKAT',
    100 : 'TVERSION',
    101 : 'RVERSION',
    102 : 'TAUTH',
    103 : 'RAUTH',
    104 : 'TATTACH',
    105 : 'RATTACH',
    106 : 'TERROR',
    107 : 'RERROR',
    108 : 'TFLUSH',
    109 : 'RFLUSH',
    110 : 'TWALK',
    111 : 'RWALK',
    112 : 'TOPEN',
    113 : 'ROPEN',
    114 : 'TCREATE',
    115 : 'RCREATE',
    116 : 'TREAD',
    117 : 'RREAD',
    118 : 'TWRITE',
    119 : 'RWRITE',
    120 : 'TCLUNK',
    121 : 'RCLUNK',
    122 : 'TREMOVE',
    123 : 'RREMOVE',
    124 : 'TSTAT',
    125 : 'RSTAT',
    126 : 'TWSTAT',
    127 : 'RWSTAT'
}
class VirtFSRequestTracker(simpletrace.Analyzer):
    """Simpletrace analyzer that pretty-prints traced 9p messages.

    simpletrace dispatches each parsed trace record to the method whose
    name matches the trace event; each handler prints the record as a
    readable T*/R* protocol message.  (Python 2 print statements.)
    """
    def begin(self):
        print "Pretty printing 9p simpletrace log ..."
    def v9fs_rerror(self, tag, id, err):
        print "RERROR (tag =", tag, ", id =", symbol_9p[id], ", err = \"", os.strerror(err), "\")"
    def v9fs_version(self, tag, id, msize, version):
        print "TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
    def v9fs_version_return(self, tag, id, msize, version):
        print "RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
    def v9fs_attach(self, tag, id, fid, afid, uname, aname):
        print "TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")"
    def v9fs_attach_return(self, tag, id, type, version, path):
        print "RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
    def v9fs_stat(self, tag, id, fid):
        print "TSTAT (tag =", tag, ", fid =", fid, ")"
    def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
        print "RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")"
    def v9fs_getattr(self, tag, id, fid, request_mask):
        print "TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")"
    def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
        print "RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")"
    def v9fs_walk(self, tag, id, fid, newfid, nwnames):
        print "TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")"
    def v9fs_walk_return(self, tag, id, nwnames, qids):
        print "RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")"
    def v9fs_open(self, tag, id, fid, mode):
        print "TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")"
    def v9fs_open_return(self, tag, id, type, version, path, iounit):
        print "ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
    def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
        print "TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")"
    def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
        print "RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
    def v9fs_fsync(self, tag, id, fid, datasync):
        print "TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")"
    def v9fs_clunk(self, tag, id, fid):
        print "TCLUNK (tag =", tag, ", fid =", fid, ")"
    def v9fs_read(self, tag, id, fid, off, max_count):
        print "TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")"
    def v9fs_read_return(self, tag, id, count, err):
        print "RREAD (tag =", tag, ", count =", count, ", err =", err, ")"
    def v9fs_readdir(self, tag, id, fid, offset, max_count):
        print "TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")"
    def v9fs_readdir_return(self, tag, id, count, retval):
        print "RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")"
    def v9fs_write(self, tag, id, fid, off, count, cnt):
        print "TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")"
    def v9fs_write_return(self, tag, id, total, err):
        print "RWRITE (tag =", tag, ", total =", total, ", err =", err, ")"
    def v9fs_create(self, tag, id, fid, name, perm, mode):
        print "TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")"
    def v9fs_create_return(self, tag, id, type, version, path, iounit):
        print "RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
    def v9fs_symlink(self, tag, id, fid, name, symname, gid):
        print "TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")"
    def v9fs_symlink_return(self, tag, id, type, version, path):
        print "RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
    def v9fs_flush(self, tag, id, flush_tag):
        print "TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")"
    def v9fs_link(self, tag, id, dfid, oldfid, name):
        print "TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")"
    def v9fs_remove(self, tag, id, fid):
        print "TREMOVE (tag =", tag, ", fid =", fid, ")"
    def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
        print "TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")"
    def v9fs_mknod(self, tag, id, fid, mode, major, minor):
        print "TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")"
    def v9fs_lock(self, tag, id, fid, type, start, length):
        print "TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"
    def v9fs_lock_return(self, tag, id, status):
        print "RLOCK (tag =", tag, ", status =", status, ")"
    def v9fs_getlock(self, tag, id, fid, type, start, length):
        print "TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"
    def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
        print "RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")"
    def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
        print "TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")"
    def v9fs_mkdir_return(self, tag, id, type, version, path, err):
        print "RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")"
    def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
        print "TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")"
    def v9fs_xattrwalk_return(self, tag, id, size):
        print "RXATTRWALK (tag =", tag, ", xattrsize  =", size, ")"
    def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
        print "TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")"
    def v9fs_readlink(self, tag, id, fid):
        print "TREADLINK (tag =", tag, ", fid =", fid, ")"
    def v9fs_readlink_return(self, tag, id, target):
        print "RREADLINK (tag =", tag, ", target =", target, ")"
# Script entry point: simpletrace.run parses the command-line arguments
# (trace-events file and trace log) and feeds records to the tracker.
simpletrace.run(VirtFSRequestTracker())
| gpl-2.0 |
CYBAI/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/protocol.py | 4 | 16295 | import traceback
from abc import ABCMeta, abstractmethod
class Protocol(object):
    """Backend for a specific browser-control protocol.
    Each Protocol is composed of a set of ProtocolParts that implement
    the APIs required for specific interactions. This reflects the fact
    that not all implementaions will support exactly the same feature set.
    Each ProtocolPart is exposed directly on the protocol through an accessor
    attribute with a name given by its `name` property.
    :param Executor executor: The Executor instance that's using this Protocol
    :param Browser browser: The Browser using this protocol"""
    # NOTE(review): Py2-style metaclass declaration; ignored on Python 3.
    __metaclass__ = ABCMeta
    # Subclasses list the ProtocolPart classes they support here.
    implements = []
    def __init__(self, executor, browser):
        self.executor = executor
        self.browser = browser
        # Instantiate each part and expose it as an attribute named after
        # the part's `name` class attribute.
        for cls in self.implements:
            name = cls.name
            assert not hasattr(self, name)
            setattr(self, name, cls(self))
    @property
    def logger(self):
        """:returns: Current logger"""
        return self.executor.logger
    def is_alive(self):
        """Is the browser connection still active
        :returns: A boolean indicating whether the connection is still active."""
        return True
    def setup(self, runner):
        """Handle protocol setup, and send a message to the runner to indicate
        success or failure."""
        # `msg` tracks which phase failed so the warning names it; it is
        # reset to None once each phase succeeds.
        msg = None
        try:
            msg = "Failed to start protocol connection"
            self.connect()
            msg = None
            for cls in self.implements:
                getattr(self, cls.name).setup()
            msg = "Post-connection steps failed"
            self.after_connect()
        except Exception:
            if msg is not None:
                self.logger.warning(msg)
            self.logger.warning(traceback.format_exc())
            raise
    @abstractmethod
    def connect(self):
        """Make a connection to the remote browser"""
        pass
    @abstractmethod
    def after_connect(self):
        """Run any post-connection steps. This happens after the ProtocolParts are
        initalized so can depend on a fully-populated object."""
        pass
    def teardown(self):
        """Run cleanup steps after the tests are finished."""
        for cls in self.implements:
            getattr(self, cls.name).teardown()
class ProtocolPart(object):
    """Common base class for all protocol components.

    A part keeps a reference to the Protocol that owns it and proxies
    that protocol's logger.  Subclasses override :meth:`setup` and
    :meth:`teardown` when they need lifecycle hooks; both default to
    doing nothing.

    :param Protocol parent: The parent protocol"""
    __metaclass__ = ABCMeta
    name = None
    def __init__(self, parent):
        self.parent = parent
    @property
    def logger(self):
        """:returns: The owning protocol's current logger"""
        return self.parent.logger
    def setup(self):
        """Part-specific initialisation hook; no-op by default."""
    def teardown(self):
        """Part-specific cleanup hook; no-op by default."""
class BaseProtocolPart(ProtocolPart):
    """Generic bits of protocol that are required for multiple test types"""
    __metaclass__ = ABCMeta
    name = "base"
    @abstractmethod
    def execute_script(self, script, asynchronous=False):
        """Execute javascript in the current Window.
        :param str script: The js source to execute. This is implicitly wrapped in a function.
        :param bool asynchronous: Whether the script is asynchronous in the webdriver
                                  sense i.e. whether the return value is the result of
                                  the initial function call or if it waits for some callback.
        :returns: The result of the script execution.
        """
        pass
    @abstractmethod
    def set_timeout(self, timeout):
        """Set the timeout for script execution.
        :param timeout: Script timeout in seconds"""
        pass
    @abstractmethod
    def wait(self):
        """Wait indefinitely for the browser to close"""
        pass
    # NOTE(review): unlike the sibling members, this property is not marked
    # @abstractmethod, so subclasses are not forced to override it and the
    # default returns None — confirm whether that is intentional.
    @property
    def current_window(self):
        """Return a handle identifying the current top level browsing context
        :returns: A protocol-specific handle"""
        pass
    @abstractmethod
    def set_window(self, handle):
        """Set the top level browsing context to one specified by a given handle.
        :param handle: A protocol-specific handle identifying a top level browsing
                       context."""
        pass
    @abstractmethod
    def window_handles(self):
        """Get a list of handles to top-level browsing contexts"""
        pass
    @abstractmethod
    def load(self, url):
        """Load a url in the current browsing context
        :param url: The url to load"""
        pass
class TestharnessProtocolPart(ProtocolPart):
    """Protocol part required to run testharness tests."""
    __metaclass__ = ABCMeta
    name = "testharness"
    @abstractmethod
    def load_runner(self, url_protocol):
        """Load the initial page used to control the tests.
        :param str url_protocol: "https" or "http" depending on the test metadata.
        """
        pass
    @abstractmethod
    def close_old_windows(self, url_protocol):
        """Close existing windows except for the initial runner window.
        After calling this method there must be exactly one open window that
        contains the initial runner page.
        :param str url_protocol: "https" or "http" depending on the test metadata.
        """
        pass
    @abstractmethod
    def get_test_window(self, window_id, parent):
        """Get the window handle corresponding to the window containing the
        currently active test.
        :param window_id: A string containing the DOM name of the Window that
                          contains the test, or None.
        :param parent: The handle of the runner window.
        :returns: A protocol-specific window handle.
        """
        pass
    @abstractmethod
    def test_window_loaded(self):
        """Wait until the newly opened test window has been loaded."""
class PrefsProtocolPart(ProtocolPart):
    """Protocol part that allows getting and setting browser prefs."""
    __metaclass__ = ABCMeta
    name = "prefs"

    @abstractmethod
    def set(self, name, value):
        """Assign ``value`` to the pref called ``name``.

        :param name: A pref name of browser-specific type
        :param value: A pref value of browser-specific type"""

    @abstractmethod
    def get(self, name):
        """Read the current value of the pref called ``name``.

        :param name: A pref name of browser-specific type
        :returns: A pref value of browser-specific type"""

    @abstractmethod
    def clear(self, name):
        """Restore the pref called ``name`` to its default value.

        :param name: A pref name of browser-specific type"""
class StorageProtocolPart(ProtocolPart):
    """Protocol part for manipulating browser storage."""
    __metaclass__ = ABCMeta
    name = "storage"

    @abstractmethod
    def clear_origin(self, url):
        """Remove all stored data belonging to one origin.

        :param url: A url belonging to the origin"""
class SelectorProtocolPart(ProtocolPart):
    """Protocol part for selecting elements on the page."""
    __metaclass__ = ABCMeta
    name = "select"

    def element_by_selector(self, element_selector):
        """Find the single element matching a CSS selector.

        :param str element_selector: The CSS selector
        :returns: A protocol-specific handle to the matching element
        :raises ValueError: if the selector matches zero or more than one
                            element"""
        matches = self.elements_by_selector(element_selector)
        if not matches:
            raise ValueError("Selector '%s' matches no elements" % (element_selector,))
        if len(matches) > 1:
            raise ValueError("Selector '%s' matches multiple elements" % (element_selector,))
        return matches[0]

    @abstractmethod
    def elements_by_selector(self, selector):
        """Find every element matching a CSS selector.

        :param str selector: The CSS selector
        :returns: A list of protocol-specific handles to elements"""
class ClickProtocolPart(ProtocolPart):
    """Protocol part for performing trusted clicks"""
    __metaclass__ = ABCMeta
    name = "click"

    @abstractmethod
    def element(self, element):
        """Issue a trusted click somewhere inside a given element.

        :param element: A protocol-specific handle to an element."""
class CookiesProtocolPart(ProtocolPart):
    """Protocol part for managing cookies"""
    __metaclass__ = ABCMeta
    name = "cookies"

    @abstractmethod
    def delete_all_cookies(self):
        """Remove every cookie."""
class SendKeysProtocolPart(ProtocolPart):
    """Protocol part for sending keyboard input to an element.

    (The original docstring said "performing trusted clicks", which was
    copy-pasted from ClickProtocolPart.)"""
    __metaclass__ = ABCMeta
    name = "send_keys"

    @abstractmethod
    def send_keys(self, element, keys):
        """Deliver key input to a specific element.

        :param element: A protocol-specific handle to an element.
        :param keys: A protocol-specific handle to a string of input keys."""
class GenerateTestReportProtocolPart(ProtocolPart):
    """Protocol part for generating test reports"""
    __metaclass__ = ABCMeta
    name = "generate_test_report"

    @abstractmethod
    def generate_test_report(self, message):
        """Produce a test report carrying the given message.

        :param message: The message to be contained in the report."""
class SetPermissionProtocolPart(ProtocolPart):
    """Protocol part for setting permissions"""
    __metaclass__ = ABCMeta
    name = "set_permission"

    @abstractmethod
    def set_permission(self, descriptor, state, one_realm=False):
        """Update the state of a permission.

        :param descriptor: A PermissionDescriptor object.
        :param state: The state to set the permission to.
        :param one_realm: Whether to set the permission for only one realm."""
class ActionSequenceProtocolPart(ProtocolPart):
    """Protocol part for dispatching input action sequences.

    (The original docstring said "performing trusted clicks", which was
    copy-pasted from ClickProtocolPart.)"""
    __metaclass__ = ABCMeta
    name = "action_sequence"

    @abstractmethod
    def send_actions(self, actions):
        """Dispatch a sequence of actions to the window.

        :param actions: A protocol-specific handle to an array of actions."""
class TestDriverProtocolPart(ProtocolPart):
    """Protocol part that implements the basic functionality required for
    all testdriver-based tests."""
    __metaclass__ = ABCMeta
    name = "testdriver"

    @abstractmethod
    def send_message(self, cmd_id, message_type, status, message=None):
        """Send a testdriver message to the browser.

        :param int cmd_id: The id of the command to which we're responding
        :param str message_type: The kind of the message.
        :param str status: Either "failure" or "success" depending on whether the
                           previous command succeeded.
        :param str message: Additional data to add to the message."""
        pass

    def switch_to_window(self, wptrunner_id):
        """Switch to a window given a wptrunner window id

        Performs an explicit-stack depth-first search over every top-level
        window and each of its (nested) frames, probing every browsing
        context for a matching ``window.__wptrunner_id``.

        :param str wptrunner_id: window id"""
        if wptrunner_id is None:
            return
        # Seed the DFS stack with every top-level window handle (as strings).
        stack = [str(item) for item in self.parent.base.window_handles()]
        while stack:
            item = stack.pop()
            if item is None:
                # Sentinel: all subframes of the current context have been
                # processed, so climb back up one level.
                self._switch_to_parent_frame()
                continue
            elif isinstance(item, str):
                # A top-level window handle.
                self.parent.base.set_window(item)
            else:
                # An integer frame index within the current context.
                self._switch_to_frame(item)
            try:
                # Probe the context we just entered; best-effort since the
                # window may have been closed or be inaccessible.
                handle_window_id = self.parent.base.execute_script("return window.__wptrunner_id")
                if str(handle_window_id) == wptrunner_id:
                    return
            except Exception:
                pass
            frame_count = self.parent.base.execute_script("return window.length")
            # None here makes us switch back to the parent after we've processed all the subframes
            stack.append(None)
            if frame_count:
                stack.extend(reversed(range(0, frame_count)))
        raise Exception("Window with id %s not found" % wptrunner_id)

    @abstractmethod
    def _switch_to_frame(self, index):
        """Switch to a frame in the current window

        :param int index: Frame id"""
        pass

    @abstractmethod
    def _switch_to_parent_frame(self):
        """Switch to the parent of the current frame"""
        pass
class AssertsProtocolPart(ProtocolPart):
    """ProtocolPart that implements the functionality required to get a count
    of non-fatal assertions triggered"""
    __metaclass__ = ABCMeta
    name = "asserts"

    @abstractmethod
    def get(self):
        """Return the number of assertions seen since the last browser
        start."""
class CoverageProtocolPart(ProtocolPart):
    """Protocol part for collecting per-test coverage data."""
    __metaclass__ = ABCMeta
    name = "coverage"

    @abstractmethod
    def reset(self):
        """Zero out the coverage counters."""

    @abstractmethod
    def dump(self):
        """Write out the coverage counters."""
class VirtualAuthenticatorProtocolPart(ProtocolPart):
    """Protocol part for creating and manipulating virtual authenticators"""
    __metaclass__ = ABCMeta
    name = "virtual_authenticator"

    @abstractmethod
    def add_virtual_authenticator(self, config):
        """Create a virtual authenticator.

        :param config: The Authenticator Configuration"""

    @abstractmethod
    def remove_virtual_authenticator(self, authenticator_id):
        """Destroy an existing virtual authenticator.

        :param str authenticator_id: The ID of the authenticator to remove"""

    @abstractmethod
    def add_credential(self, authenticator_id, credential):
        """Inject a credential onto an authenticator.

        :param str authenticator_id: The ID of the authenticator to add the
                                     credential to
        :param credential: The credential to inject"""

    @abstractmethod
    def get_credentials(self, authenticator_id):
        """List the credentials stored in an authenticator.

        :param str authenticator_id: The ID of the authenticator
        :returns: An array with the credentials stored on the authenticator"""

    @abstractmethod
    def remove_credential(self, authenticator_id, credential_id):
        """Delete one credential stored in an authenticator.

        :param str authenticator_id: The ID of the authenticator
        :param str credential_id: The ID of the credential"""

    @abstractmethod
    def remove_all_credentials(self, authenticator_id):
        """Delete every credential stored in an authenticator.

        :param str authenticator_id: The ID of the authenticator"""

    @abstractmethod
    def set_user_verified(self, authenticator_id, uv):
        """Set the user verified flag on an authenticator.

        :param str authenticator_id: The ID of the authenticator
        :param bool uv: the user verified flag"""
class PrintProtocolPart(ProtocolPart):
    """Protocol part for rendering to a PDF."""
    __metaclass__ = ABCMeta
    name = "pdf_print"

    @abstractmethod
    def render_as_pdf(self, width, height):
        """Render the current document to a PDF of the given dimensions."""
class DebugProtocolPart(ProtocolPart):
    """Protocol part for debugging test failures."""
    __metaclass__ = ABCMeta
    name = "debug"

    @abstractmethod
    def load_devtools(self):
        """Load devtools in the current window"""
        pass

    def load_reftest_analyzer(self, test, result):
        """Open reftest-analyzer in the browser, preloaded with the result of
        one test.

        Formats the test result as a TBPL-style log, then navigates to the
        analyzer page with that log URL-encoded into the fragment.

        :param test: The test object (its ``id`` is logged).
        :param result: A dict with at least a "status" key and optionally
                       "extra" data."""
        import io
        import mozlog
        from urllib.parse import quote, urljoin
        debug_test_logger = mozlog.structuredlog.StructuredLogger("debug_test")
        output = io.StringIO()
        # NOTE(review): suite_start is emitted before the handler is
        # attached, so it never reaches |output| — confirm intended.
        debug_test_logger.suite_start([])
        debug_test_logger.add_handler(mozlog.handlers.StreamHandler(output, formatter=mozlog.formatters.TbplFormatter()))
        debug_test_logger.test_start(test.id)
        # Always use PASS as the expected value so we get output even for expected failures
        debug_test_logger.test_end(test.id, result["status"], "PASS", extra=result.get("extra"))
        self.parent.base.load(urljoin(self.parent.executor.server_url("https"),
                                      "/common/third_party/reftest-analyzer.xhtml#log=%s" %
                                      quote(output.getvalue())))
| mpl-2.0 |
jsheedy/velotronheavyindustries.com | intro-to-d3-grid-map/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/analyzer.py | 1382 | 30567 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
test_targets: unqualified target names to search for. Any target in this list
that depends upon a file in |files| is output regardless of the type of target
or chain of dependencies.
additional_compile_targets: Unqualified targets to search for in addition to
test_targets. Targets in the combined list that depend upon a file in |files|
are not necessarily output. For example, if the target is of type none then the
target is not output (but one of the descendants of the target will be).
The following is output:
error: only supplied if there is an error.
compile_targets: minimal set of targets that directly or indirectly (for
targets of type none) depend on the files in |files| and is one of the
supplied targets or a target that one of the supplied targets depends on.
The expectation is this set of targets is passed into a build step. This list
always contains the output of test_targets as well.
test_targets: set of targets from the supplied |test_targets| that either
directly or indirectly depend upon a file in |files|. This list if useful
if additional processing needs to be done for certain targets after the
build, such as running tests.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case test_targets and compile_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
Example:
Consider a graph like the following:
A D
/ \
B C
A depends upon both B and C, A is of type none and B and C are executables.
D is an executable, has no dependencies and nothing depends on it.
If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
the following is output:
|compile_targets| = ["B"] B must built as it depends upon the changed file b.cc
and the supplied target A depends upon it. A is not output as a build_target
as it is of type none with no rules and actions.
|test_targets| = ["B"] B directly depends upon the change file b.cc.
Even though the file d.cc, which D depends upon, has changed D is not output
as it was not supplied by way of |additional_compile_targets| or |test_targets|.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
In Gyp the "all" target is shorthand for the root targets in the files passed
to gyp. For example, if file "a.gyp" contains targets "a1" and
"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency
on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2".
Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not
directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp
then the "all" target includes "b1" and "b2".
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
# Module-level flags and constants for the analyzer generator.
debug = False
# Strings emitted in the "status" output field (see _WriteOutput callers).
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used to indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
generator_wants_static_library_dependencies_adjusted = False
# This generator emits no build files; path-like variables get the
# placeholder '!!!', which _AddSources() later filters out of source lists.
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|. Each
  source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Additionally each source may contain variables.
  Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked in some other means."""
  # NOTE: gyp paths are always posix style.
  for source in sources:
    # Skip empty entries, '!!!' placeholders (see generator_default_variables)
    # and unexpanded '$' variables.
    if not len(source) or source.startswith('!!!') or source.startswith('$'):
      continue
    # variable expansion may lead to //.
    org_source = source
    # Collapse '//' everywhere except a leading '//' (kept intact).
    source = source[0] + source[1:].replace('//', '/')
    if source.startswith('../'):
      source = _ResolveParent(source, base_path_components)
      # _ResolveParent returns '' for paths outside the tree; drop those.
      if len(source):
        result.append(source)
      continue
    result.append(base_path + source)
    if debug:
      print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
                              results):
  """Adds the 'inputs' of an action/rule dict to |results|, resolved
  against |base_path| (see _AddSources)."""
  if 'inputs' in action:
    _AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
  """Returns the list of source paths (posix, relative to |toplevel_dir|)
  that |target| depends on: its 'sources' plus the 'inputs' of its actions
  and rules."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative to
  # |toplevel_dir_|. This is done to make it easy to build source paths.
  base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
  base_path_components = base_path.split('/')
  # Add a trailing '/' so that _AddSources() can easily build paths.
  if len(base_path):
    base_path += '/'
  if debug:
    print 'ExtractSources', target, base_path
  results = []
  if 'sources' in target_dict:
    _AddSources(target_dict['sources'], base_path, base_path_components,
                results)
  # Include the inputs from any actions. Any changes to these affect the
  # resulting output.
  if 'actions' in target_dict:
    for action in target_dict['actions']:
      _ExtractSourcesFromAction(action, base_path, base_path_components,
                                results)
  if 'rules' in target_dict:
    for rule in target_dict['rules']:
      _ExtractSourcesFromAction(rule, base_path, base_path_components, results)
  return results
class Target(object):
  """Holds information about a particular target:
  deps: set of Targets this Target depends upon. This is not recursive, only
  the direct dependent Targets.
  match_status: one of the MatchStatus values.
  back_deps: set of Targets that have a dependency on this Target.
  visited: used during iteration to indicate whether we've visited this
  target. This is used for two iterations, once in building the set of
  Targets and again in _GetBuildTargets().
  name: fully qualified name of the target.
  requires_build: True if the target type is such that it needs to be built.
  See _DoesTargetTypeRequireBuild for details.
  added_to_compile_targets: used when determining if the target was added to
  the set of targets that needs to be built.
  in_roots: true if this target is a descendant of one of the root nodes.
  is_executable: true if the type of target is executable.
  is_static_library: true if the type of target is static_library.
  is_or_has_linked_ancestor: true if the target does a link (eg executable),
  or if there is a target in back_deps that does a link."""
  def __init__(self, name):
    self.name = name
    self.match_status = MATCH_STATUS_TBD
    # Graph edges, filled in by _GenerateTargets.
    self.deps = set()
    self.back_deps = set()
    # TODO(sky): I don't like hanging this off Target. This state is specific
    # to certain functions and should be isolated there.
    self.visited = False
    self.added_to_compile_targets = False
    self.in_roots = False
    # Type-derived flags, assigned while walking the target dicts.
    self.requires_build = False
    self.is_executable = False
    self.is_static_library = False
    self.is_or_has_linked_ancestor = False
class Config(object):
  """Details what we're looking for
  files: list of paths of files to search for.
  targets: see file description for details.
  additional_compile_target_names: names from 'additional_compile_targets'.
  test_target_names: names from 'test_targets'."""
  def __init__(self):
    self.files = []
    self.targets = set()
    self.additional_compile_target_names = set()
    self.test_target_names = set()

  def Init(self, params):
    """Initializes Config. This is a separate method as it raises an exception
    if there is a parse error.

    Reads the json file named by the 'config_path' generator flag; a missing
    flag leaves the Config empty. Raises Exception on an unreadable or
    unparsable file."""
    generator_flags = params.get('generator_flags', {})
    config_path = generator_flags.get('config_path', None)
    if not config_path:
      return
    try:
      # Use a context manager so the handle is closed even when json.load
      # raises ValueError (the previous explicit close() leaked it then).
      with open(config_path, 'r') as f:
        config = json.load(f)
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    self.additional_compile_target_names = set(
        config.get('additional_compile_targets', []))
    self.test_target_names = set(config.get('test_targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
  """Returns true if the build file |build_file| is either in |files| or
  one of the files included by |build_file| is in |files|. |toplevel_dir| is
  the root of the source tree."""
  # Direct hit: the gyp file itself was modified.
  if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
    if debug:
      print 'gyp file modified', build_file
    return True
  # First element of included_files is the file itself.
  if len(data[build_file]['included_files']) <= 1:
    return False
  for include_file in data[build_file]['included_files'][1:]:
    # |included_files| are relative to the directory of the |build_file|.
    rel_include_file = \
        _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
    if _ToLocalPath(toplevel_dir, rel_include_file) in files:
      if debug:
        print 'included gyp file modified, gyp_file=', build_file, \
              'included file=', rel_include_file
      return True
  return False
def _GetOrCreateTargetByName(targets, target_name):
"""Creates or returns the Target at targets[target_name]. If there is no
Target for |target_name| one is created. Returns a tuple of whether a new
Target was created and the Target."""
if target_name in targets:
return False, targets[target_name]
target = Target(target_name)
targets[target_name] = target
return True, target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
  """Returns a tuple of the following:
  . A dictionary mapping from fully qualified name to Target.
  . A list of the targets that have a source file in |files|.
  . Targets that constitute the 'all' target. See description at top of file
    for details on the 'all' target.
  This sets the |match_status| of the targets that contain any of the source
  files in |files| to MATCH_STATUS_MATCHES.
  |toplevel_dir| is the root of the source tree."""
  # Maps from target name to Target.
  name_to_target = {}
  # Targets that matched.
  matching_targets = []
  # Queue of targets to visit.
  targets_to_visit = target_list[:]
  # Maps from build file to a boolean indicating whether the build file is in
  # |files|.
  build_file_in_files = {}
  # Root targets across all files.
  roots = set()
  # Set of Targets in |build_files|.
  build_file_targets = set()
  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    created_target, target = _GetOrCreateTargetByName(name_to_target,
                                                      target_name)
    if created_target:
      # Provisionally a root; demoted below if something depends on it.
      roots.add(target)
    elif target.visited:
      continue
    target.visited = True
    target.requires_build = _DoesTargetTypeRequireBuild(
        target_dicts[target_name])
    target_type = target_dicts[target_name]['type']
    target.is_executable = target_type == 'executable'
    target.is_static_library = target_type == 'static_library'
    target.is_or_has_linked_ancestor = (target_type == 'executable' or
                                        target_type == 'shared_library')
    build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
    # Cache the per-build-file modification check; it can be expensive.
    if not build_file in build_file_in_files:
      build_file_in_files[build_file] = \
          _WasBuildFileModified(build_file, data, files, toplevel_dir)
    if build_file in build_files:
      build_file_targets.add(target)
    # If a build file (or any of its included files) is modified we assume all
    # targets in the file are modified.
    if build_file_in_files[build_file]:
      print 'matching target from modified build file', target_name
      target.match_status = MATCH_STATUS_MATCHES
      matching_targets.append(target)
    else:
      sources = _ExtractSources(target_name, target_dicts[target_name],
                                toplevel_dir)
      for source in sources:
        if _ToGypPath(os.path.normpath(source)) in files:
          print 'target', target_name, 'matches', source
          target.match_status = MATCH_STATUS_MATCHES
          matching_targets.append(target)
          break
    # Add dependencies to visit as well as updating back pointers for deps.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets_to_visit.append(dep)
      created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target,
                                                                dep)
      if not created_dep_target:
        roots.discard(dep_target)
      target.deps.add(dep_target)
      dep_target.back_deps.add(target)
  return name_to_target, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
"""Returns a tuple of the following:
. mapping (dictionary) from unqualified name to Target for all the
Targets in |to_find|.
. any target names not found. If this is empty all targets were found."""
result = {}
if not to_find:
return {}, []
to_find = set(to_find)
for target_name in all_targets.keys():
extracted = gyp.common.ParseQualifiedTarget(target_name)
if len(extracted) > 1 and extracted[1] in to_find:
to_find.remove(extracted[1])
result[extracted[1]] = all_targets[target_name]
if not to_find:
return result, []
return result, [x for x in to_find]
def _DoesTargetDependOnMatchingTargets(target):
  """Returns true if |target| or any of its dependencies is one of the
  targets containing the files supplied as input to analyzer. This updates
  |matches| of the Targets as it recurses.
  target: the Target to look for."""
  # match_status doubles as a memo so each Target is resolved only once.
  if target.match_status == MATCH_STATUS_DOESNT_MATCH:
    return False
  if target.match_status == MATCH_STATUS_MATCHES or \
     target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
    return True
  for dep in target.deps:
    if _DoesTargetDependOnMatchingTargets(dep):
      target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
      print '\t', target.name, 'matches by dep', dep.name
      return True
  target.match_status = MATCH_STATUS_DOESNT_MATCH
  return False
def _GetTargetsDependingOnMatchingTargets(possible_targets):
  """Returns the list of Targets in |possible_targets| that depend (either
  directly or indirectly) on at least one of the targets containing the files
  supplied as input to analyzer.
  possible_targets: targets to search from."""
  found = []
  print 'Targets that matched by dependency:'
  for target in possible_targets:
    if _DoesTargetDependOnMatchingTargets(target):
      found.append(target)
  return found
def _AddCompileTargets(target, roots, add_if_no_ancestor, result):
  """Recurses through all targets that depend on |target|, adding all targets
  that need to be built (and are in |roots|) to |result|.
  roots: set of root targets.
  add_if_no_ancestor: If true and there are no ancestors of |target| then add
  |target| to |result|. |target| must still be in |roots|.
  result: targets that need to be built are added here."""
  if target.visited:
    return
  target.visited = True
  target.in_roots = target in roots
  # Visit ancestors first so their flags can be folded into this target.
  for back_dep_target in target.back_deps:
    _AddCompileTargets(back_dep_target, roots, False, result)
    target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
    target.in_roots |= back_dep_target.in_roots
    target.is_or_has_linked_ancestor |= (
      back_dep_target.is_or_has_linked_ancestor)
  # Always add 'executable' targets. Even though they may be built by other
  # targets that depend upon them it makes detection of what is going to be
  # built easier.
  # And always add static_libraries that have no dependencies on them from
  # linkables. This is necessary as the other dependencies on them may be
  # static libraries themselves, which are not compile time dependencies.
  if target.in_roots and \
     (target.is_executable or
      (not target.added_to_compile_targets and
       (add_if_no_ancestor or target.requires_build)) or
      (target.is_static_library and add_if_no_ancestor and
       not target.is_or_has_linked_ancestor)):
    print '\t\tadding to compile targets', target.name, 'executable', \
          target.is_executable, 'added_to_compile_targets', \
          target.added_to_compile_targets, 'add_if_no_ancestor', \
          add_if_no_ancestor, 'requires_build', target.requires_build, \
          'is_static_library', target.is_static_library, \
          'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
    result.add(target)
    target.added_to_compile_targets = True
def _GetCompileTargets(matching_targets, supplied_targets):
  """Returns the set of Targets that require a build.
  matching_targets: targets that changed and need to be built.
  supplied_targets: set of targets supplied to analyzer to search from."""
  result = set()
  for target in matching_targets:
    print 'finding compile targets for match', target.name
    _AddCompileTargets(target, supplied_targets, True, result)
  return result
def _WriteOutput(params, **values):
  """Writes the output, to stdout or to a file if one is specified.

  Human-readable summaries go to stdout; the machine-readable json goes to
  the 'analyzer_output_path' generator flag destination when set, otherwise
  to stdout as well."""
  if 'error' in values:
    print 'Error:', values['error']
  if 'status' in values:
    print values['status']
  if 'targets' in values:
    values['targets'].sort()
    print 'Supplied targets that depend on changed files:'
    for target in values['targets']:
      print '\t', target
  if 'invalid_targets' in values:
    values['invalid_targets'].sort()
    print 'The following targets were not found:'
    for target in values['invalid_targets']:
      print '\t', target
  if 'build_targets' in values:
    values['build_targets'].sort()
    print 'Targets that require a build:'
    for target in values['build_targets']:
      print '\t', target
  if 'compile_targets' in values:
    values['compile_targets'].sort()
    print 'Targets that need to be built:'
    for target in values['compile_targets']:
      print '\t', target
  if 'test_targets' in values:
    values['test_targets'].sort()
    print 'Test targets:'
    for target in values['test_targets']:
      print '\t', target
  output_path = params.get('generator_flags', {}).get(
    'analyzer_output_path', None)
  if not output_path:
    print json.dumps(values)
    return
  try:
    # NOTE(review): if write() raises, the handle is not closed — a 'with'
    # block would be safer; left as-is here.
    f = open(output_path, 'w')
    f.write(json.dumps(values) + '\n')
    f.close()
  except IOError as e:
    print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
  """Returns true if one of the files in |files| is in the set of included
  files (the -I/--include files passed to gyp)."""
  if params['options'].includes:
    for include in params['options'].includes:
      if _ToGypPath(os.path.normpath(include)) in files:
        print 'Include file modified, assuming all changed', include
        return True
  return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    return
  if flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
    return
  # Keep the legacy behavior of reporting android as linux.
  operating_system = 'linux' if flavor == 'android' else flavor
  default_variables.setdefault('OS', operating_system)
class TargetCalculator(object):
"""Calculates the matching test_targets and matching compile_targets."""
def __init__(self, files, additional_compile_target_names, test_target_names,
             data, target_list, target_dicts, toplevel_dir, build_files):
  """Builds the target graph for |files| and resolves the supplied
  unqualified target names against it."""
  self._additional_compile_target_names = set(additional_compile_target_names)
  self._test_target_names = set(test_target_names)
  generated = _GenerateTargets(data, target_list, target_dicts, toplevel_dir,
                               frozenset(files), build_files)
  self._name_to_target, self._changed_targets, self._root_targets = generated
  self._unqualified_mapping, self.invalid_targets = (
      _GetUnqualifiedToTargetMapping(self._name_to_target,
                                     self._supplied_target_names_no_all()))
def _supplied_target_names(self):
  """Returns all supplied names: compile targets plus test targets."""
  return self._test_target_names | self._additional_compile_target_names
def _supplied_target_names_no_all(self):
  """Returns the supplied test targets without 'all'."""
  return self._supplied_target_names() - set(['all'])
def is_build_impacted(self):
"""Returns true if the supplied files impact the build at all."""
return self._changed_targets
def find_matching_test_target_names(self):
"""Returns the set of output test targets."""
assert self.is_build_impacted()
# Find the test targets first. 'all' is special cased to mean all the
# root targets. To deal with all the supplied |test_targets| are expanded
# to include the root targets during lookup. If any of the root targets
# match, we remove it and replace it with 'all'.
test_target_names_no_all = set(self._test_target_names)
test_target_names_no_all.discard('all')
test_targets_no_all = _LookupTargets(test_target_names_no_all,
self._unqualified_mapping)
test_target_names_contains_all = 'all' in self._test_target_names
if test_target_names_contains_all:
test_targets = [x for x in (set(test_targets_no_all) |
set(self._root_targets))]
else:
test_targets = [x for x in test_targets_no_all]
print 'supplied test_targets'
for target_name in self._test_target_names:
print '\t', target_name
print 'found test_targets'
for target in test_targets:
print '\t', target.name
print 'searching for matching test targets'
matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets)
matching_test_targets_contains_all = (test_target_names_contains_all and
set(matching_test_targets) &
set(self._root_targets))
if matching_test_targets_contains_all:
# Remove any of the targets for all that were not explicitly supplied,
# 'all' is subsequentely added to the matching names below.
matching_test_targets = [x for x in (set(matching_test_targets) &
set(test_targets_no_all))]
print 'matched test_targets'
for target in matching_test_targets:
print '\t', target.name
matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in matching_test_targets]
if matching_test_targets_contains_all:
matching_target_names.append('all')
print '\tall'
return matching_target_names
def find_matching_compile_target_names(self):
"""Returns the set of output compile targets."""
assert self.is_build_impacted();
# Compile targets are found by searching up from changed targets.
# Reset the visited status for _GetBuildTargets.
for target in self._name_to_target.itervalues():
target.visited = False
supplied_targets = _LookupTargets(self._supplied_target_names_no_all(),
self._unqualified_mapping)
if 'all' in self._supplied_target_names():
supplied_targets = [x for x in (set(supplied_targets) |
set(self._root_targets))]
print 'Supplied test_targets & compile_targets'
for target in supplied_targets:
print '\t', target.name
print 'Finding compile targets'
compile_targets = _GetCompileTargets(self._changed_targets,
supplied_targets)
return [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in compile_targets]
def GenerateOutput(target_list, target_dicts, data, params):
"""Called by gyp as the final stage. Outputs results."""
config = Config()
try:
config.Init(params)
if not config.files:
raise Exception('Must specify files to analyze via config_path generator '
'flag')
toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
if debug:
print 'toplevel_dir', toplevel_dir
if _WasGypIncludeFileModified(params, config.files):
result_dict = { 'status': all_changed_string,
'test_targets': list(config.test_target_names),
'compile_targets': list(
config.additional_compile_target_names |
config.test_target_names) }
_WriteOutput(params, **result_dict)
return
calculator = TargetCalculator(config.files,
config.additional_compile_target_names,
config.test_target_names, data,
target_list, target_dicts, toplevel_dir,
params['build_files'])
if not calculator.is_build_impacted():
result_dict = { 'status': no_dependency_string,
'test_targets': [],
'compile_targets': [] }
if calculator.invalid_targets:
result_dict['invalid_targets'] = calculator.invalid_targets
_WriteOutput(params, **result_dict)
return
test_target_names = calculator.find_matching_test_target_names()
compile_target_names = calculator.find_matching_compile_target_names()
found_at_least_one_target = compile_target_names or test_target_names
result_dict = { 'test_targets': test_target_names,
'status': found_dependency_string if
found_at_least_one_target else no_dependency_string,
'compile_targets': list(
set(compile_target_names) |
set(test_target_names)) }
if calculator.invalid_targets:
result_dict['invalid_targets'] = calculator.invalid_targets
_WriteOutput(params, **result_dict)
except Exception as e:
_WriteOutput(params, error=str(e))
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.