| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
codeaudit/gpss-research | experiments/2014-04-16-class-liver-aic.py | 4 | 1443 | Experiment(description='Classification experiment',
data_dir='../data/add-class/r_liver',
max_depth=10,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=400,
verbose=False,
make_predictions=True,
skip_complete=True,
results_dir='../results/2014-04-16-class-liver-aic/',
iters=250,
base_kernels='SE,RQ,Const,Lin,Noise',
random_seed=1,
period_heuristic=3,
max_period_heuristic=5,
period_heuristic_type='min',
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=False,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoneKernel()', # Starting kernel
lik='ff.LikErf(inference=1)', # Starting likelihood - Laplace inference code
score='aic',
stopping_criteria=['no_improvement'],
improvement_tolerance=0.01,
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', 'B', {'A': 'kernel', 'B': 'base'}),
('A', ('None',), {'A': 'kernel'})])
| mit |
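The `search_operators` list above is a small rewrite grammar for compositional kernel search: a kernel subexpression `A` may be extended with a base kernel `B` by addition or multiplication (the latter excluding `Const`), replaced by a base kernel outright, or deleted. A minimal sketch of one expansion step follows; the string-based kernels and the `expand` helper are illustrative assumptions, not the actual gpss-research API.

```python
# Hypothetical sketch of one expansion step of the kernel-search grammar;
# kernels are plain strings here, unlike the real flexible_function objects.
BASE_KERNELS = ['SE', 'RQ', 'Const', 'Lin', 'Noise']

def expand(kernel):
    candidates = []
    for base in BASE_KERNELS:
        candidates.append('(%s + %s)' % (kernel, base))      # ('A', ('+', 'A', 'B'))
        if base != 'Const':                                  # 'base-not-const'
            candidates.append('(%s * %s)' % (kernel, base))  # ('A', ('*', 'A', 'B'))
        candidates.append(base)                              # ('A', 'B'): replace
    candidates.append('None')                                # ('A', ('None',)): delete
    return candidates

print(expand('SE'))  # every one-step rewrite of a squared-exponential kernel
```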
songmonit/CTTMSONLINE_V8 | addons/membership/wizard/__init__.py | 432 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import membership_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aisipos/django | django/db/models/lookups.py | 45 | 20247 | import math
import warnings
from copy import copy
from django.db.models.expressions import Func, Value
from django.db.models.fields import DateTimeField, Field, IntegerField
from django.db.models.query_utils import RegisterLookupMixin
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
from django.utils.six.moves import range
class Lookup(object):
lookup_name = None
prepare_rhs = True
def __init__(self, lhs, rhs):
self.lhs, self.rhs = lhs, rhs
self.rhs = self.get_prep_lookup()
if hasattr(self.lhs, 'get_bilateral_transforms'):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if bilateral_transforms:
# Warn the user as soon as possible if they are trying to apply
# a bilateral transformation on a nested QuerySet: that won't work.
# We need to import QuerySet here so as to avoid circular imports.
from django.db.models.query import QuerySet
if isinstance(rhs, QuerySet):
raise NotImplementedError("Bilateral transformations on nested querysets are not supported.")
self.bilateral_transforms = bilateral_transforms
def apply_bilateral_transforms(self, value):
for transform in self.bilateral_transforms:
value = transform(value)
return value
def batch_process_rhs(self, compiler, connection, rhs=None):
if rhs is None:
rhs = self.rhs
if self.bilateral_transforms:
sqls, sqls_params = [], []
for p in rhs:
value = Value(p, output_field=self.lhs.output_field)
value = self.apply_bilateral_transforms(value)
value = value.resolve_expression(compiler.query)
sql, sql_params = compiler.compile(value)
sqls.append(sql)
sqls_params.extend(sql_params)
else:
_, params = self.get_db_prep_lookup(rhs, connection)
sqls, sqls_params = ['%s'] * len(params), params
return sqls, sqls_params
def get_prep_lookup(self):
if hasattr(self.rhs, '_prepare'):
return self.rhs._prepare(self.lhs.output_field)
if self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'):
return self.lhs.output_field.get_prep_value(self.rhs)
return self.rhs
def get_db_prep_lookup(self, value, connection):
return ('%s', [value])
def process_lhs(self, compiler, connection, lhs=None):
lhs = lhs or self.lhs
return compiler.compile(lhs)
def process_rhs(self, compiler, connection):
value = self.rhs
if self.bilateral_transforms:
if self.rhs_is_direct_value():
# Do not call get_db_prep_lookup here as the value will be
# transformed before being used for lookup
value = Value(value, output_field=self.lhs.output_field)
value = self.apply_bilateral_transforms(value)
value = value.resolve_expression(compiler.query)
# For historical reasons there are a couple of different ways to
# produce SQL here: a value with get_compiler is likely a Query
# instance, one with _as_sql a QuerySet, and one with as_sql just
# something that knows how to compile itself. Finally, the value
# can of course also be a plain Python value.
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql'):
sql, params = compiler.compile(value)
return '(' + sql + ')', params
if hasattr(value, '_as_sql'):
sql, params = value._as_sql(connection=connection)
return '(' + sql + ')', params
else:
return self.get_db_prep_lookup(value, connection)
def rhs_is_direct_value(self):
return not(
hasattr(self.rhs, 'as_sql') or
hasattr(self.rhs, '_as_sql') or
hasattr(self.rhs, 'get_compiler'))
def relabeled_clone(self, relabels):
new = copy(self)
new.lhs = new.lhs.relabeled_clone(relabels)
if hasattr(new.rhs, 'relabeled_clone'):
new.rhs = new.rhs.relabeled_clone(relabels)
return new
def get_group_by_cols(self):
cols = self.lhs.get_group_by_cols()
if hasattr(self.rhs, 'get_group_by_cols'):
cols.extend(self.rhs.get_group_by_cols())
return cols
def as_sql(self, compiler, connection):
raise NotImplementedError
@cached_property
def contains_aggregate(self):
return self.lhs.contains_aggregate or getattr(self.rhs, 'contains_aggregate', False)
class Transform(RegisterLookupMixin, Func):
"""
RegisterLookupMixin() is first so that get_lookup() and get_transform()
first examine self and then check output_field.
"""
bilateral = False
arity = 1
@property
def lhs(self):
return self.get_source_expressions()[0]
def get_bilateral_transforms(self):
if hasattr(self.lhs, 'get_bilateral_transforms'):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append(self.__class__)
return bilateral_transforms
class BuiltinLookup(Lookup):
def process_lhs(self, compiler, connection, lhs=None):
lhs_sql, params = super(BuiltinLookup, self).process_lhs(
compiler, connection, lhs)
field_internal_type = self.lhs.output_field.get_internal_type()
db_type = self.lhs.output_field.db_type(connection=connection)
lhs_sql = connection.ops.field_cast_sql(
db_type, field_internal_type) % lhs_sql
lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
return lhs_sql, list(params)
def as_sql(self, compiler, connection):
lhs_sql, params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
return '%s %s' % (lhs_sql, rhs_sql), params
def get_rhs_op(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
class FieldGetDbPrepValueMixin(object):
"""
Some lookups require Field.get_db_prep_value() to be called on their
inputs.
"""
get_db_prep_lookup_value_is_iterable = False
def get_db_prep_lookup(self, value, connection):
# For relational fields, use the output_field of the 'field' attribute.
field = getattr(self.lhs.output_field, 'field', None)
get_db_prep_value = getattr(field, 'get_db_prep_value', None)
if not get_db_prep_value:
get_db_prep_value = self.lhs.output_field.get_db_prep_value
return (
'%s',
[get_db_prep_value(v, connection, prepared=True) for v in value]
if self.get_db_prep_lookup_value_is_iterable else
[get_db_prep_value(value, connection, prepared=True)]
)
class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin):
"""
Some lookups require Field.get_db_prep_value() to be called on each value
in an iterable.
"""
get_db_prep_lookup_value_is_iterable = True
class Exact(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'exact'
Field.register_lookup(Exact)
class IExact(BuiltinLookup):
lookup_name = 'iexact'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super(IExact, self).process_rhs(qn, connection)
if params:
params[0] = connection.ops.prep_for_iexact_query(params[0])
return rhs, params
Field.register_lookup(IExact)
class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'gt'
Field.register_lookup(GreaterThan)
class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'gte'
Field.register_lookup(GreaterThanOrEqual)
class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'lt'
Field.register_lookup(LessThan)
class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = 'lte'
Field.register_lookup(LessThanOrEqual)
class IntegerFieldFloatRounding(object):
"""
Allow floats to work as query values for IntegerField. Without this, the
decimal portion of the float would always be discarded.
"""
def get_prep_lookup(self):
if isinstance(self.rhs, float):
self.rhs = math.ceil(self.rhs)
return super(IntegerFieldFloatRounding, self).get_prep_lookup()
class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual):
pass
IntegerField.register_lookup(IntegerGreaterThanOrEqual)
class IntegerLessThan(IntegerFieldFloatRounding, LessThan):
pass
IntegerField.register_lookup(IntegerLessThan)
class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
lookup_name = 'in'
def get_prep_lookup(self):
if hasattr(self.rhs, '_prepare'):
return self.rhs._prepare(self.lhs.output_field)
if hasattr(self.lhs.output_field, 'get_prep_value'):
return [self.lhs.output_field.get_prep_value(v) for v in self.rhs]
return self.rhs
def process_rhs(self, compiler, connection):
db_rhs = getattr(self.rhs, '_db', None)
if db_rhs is not None and db_rhs != connection.alias:
raise ValueError(
"Subqueries aren't allowed across different databases. Force "
"the inner query to be evaluated using `list(inner_query)`."
)
if self.rhs_is_direct_value():
try:
rhs = set(self.rhs)
except TypeError: # Unhashable items in self.rhs
rhs = self.rhs
if not rhs:
from django.db.models.sql.datastructures import EmptyResultSet
raise EmptyResultSet
# rhs should be an iterable; use batch_process_rhs() to
# prepare/transform those values.
sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
placeholder = '(' + ', '.join(sqls) + ')'
return (placeholder, sqls_params)
else:
return super(In, self).process_rhs(compiler, connection)
def get_rhs_op(self, connection, rhs):
return 'IN %s' % rhs
def as_sql(self, compiler, connection):
max_in_list_size = connection.ops.max_in_list_size()
if self.rhs_is_direct_value() and max_in_list_size and len(self.rhs) > max_in_list_size:
return self.split_parameter_list_as_sql(compiler, connection)
return super(In, self).as_sql(compiler, connection)
def split_parameter_list_as_sql(self, compiler, connection):
# This is a special case for databases which limit the number of
# elements which can appear in an 'IN' clause.
max_in_list_size = connection.ops.max_in_list_size()
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.batch_process_rhs(compiler, connection)
in_clause_elements = ['(']
params = []
for offset in range(0, len(rhs_params), max_in_list_size):
if offset > 0:
in_clause_elements.append(' OR ')
in_clause_elements.append('%s IN (' % lhs)
params.extend(lhs_params)
sqls = rhs[offset: offset + max_in_list_size]
sqls_params = rhs_params[offset: offset + max_in_list_size]
param_group = ', '.join(sqls)
in_clause_elements.append(param_group)
in_clause_elements.append(')')
params.extend(sqls_params)
in_clause_elements.append(')')
return ''.join(in_clause_elements), params
Field.register_lookup(In)
class PatternLookup(BuiltinLookup):
def get_rhs_op(self, connection, rhs):
# Assume we are in startswith. We need to produce SQL like:
# col LIKE %s, ['thevalue%']
# For Python values we can (and should) append the pattern directly
# in Python, but if the value is, for example, a reference to another
# column, then we need to add the pattern match in SQL, with
# something like:
# col LIKE othercol || '%%'
# So, for Python values no special pattern is needed, but for SQL
# reference values or SQL transformations the correct pattern must
# be added here.
if (hasattr(self.rhs, 'get_compiler') or hasattr(self.rhs, 'as_sql') or
hasattr(self.rhs, '_as_sql') or self.bilateral_transforms):
pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc)
return pattern.format(rhs)
else:
return super(PatternLookup, self).get_rhs_op(connection, rhs)
class Contains(PatternLookup):
lookup_name = 'contains'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super(Contains, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
Field.register_lookup(Contains)
class IContains(Contains):
lookup_name = 'icontains'
prepare_rhs = False
Field.register_lookup(IContains)
class StartsWith(PatternLookup):
lookup_name = 'startswith'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super(StartsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
Field.register_lookup(StartsWith)
class IStartsWith(PatternLookup):
lookup_name = 'istartswith'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super(IStartsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
Field.register_lookup(IStartsWith)
class EndsWith(PatternLookup):
lookup_name = 'endswith'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super(EndsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
return rhs, params
Field.register_lookup(EndsWith)
class IEndsWith(PatternLookup):
lookup_name = 'iendswith'
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super(IEndsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
return rhs, params
Field.register_lookup(IEndsWith)
class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
lookup_name = 'range'
def get_prep_lookup(self):
if hasattr(self.rhs, '_prepare'):
return self.rhs._prepare(self.lhs.output_field)
return [self.lhs.output_field.get_prep_value(v) for v in self.rhs]
def get_rhs_op(self, connection, rhs):
return "BETWEEN %s AND %s" % (rhs[0], rhs[1])
def process_rhs(self, compiler, connection):
if self.rhs_is_direct_value():
# rhs should be an iterable of 2 values, we use batch_process_rhs
# to prepare/transform those values
return self.batch_process_rhs(compiler, connection)
else:
return super(Range, self).process_rhs(compiler, connection)
Field.register_lookup(Range)
class IsNull(BuiltinLookup):
lookup_name = 'isnull'
prepare_rhs = False
def as_sql(self, compiler, connection):
sql, params = compiler.compile(self.lhs)
if self.rhs:
return "%s IS NULL" % sql, params
else:
return "%s IS NOT NULL" % sql, params
Field.register_lookup(IsNull)
class Search(BuiltinLookup):
lookup_name = 'search'
prepare_rhs = False
def as_sql(self, compiler, connection):
warnings.warn(
'The `__search` lookup is deprecated. See the 1.10 release notes '
'for how to replace it.', RemovedInDjango20Warning, stacklevel=2
)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.fulltext_search_sql(field_name=lhs)
return sql_template, lhs_params + rhs_params
Field.register_lookup(Search)
class Regex(BuiltinLookup):
lookup_name = 'regex'
prepare_rhs = False
def as_sql(self, compiler, connection):
if self.lookup_name in connection.operators:
return super(Regex, self).as_sql(compiler, connection)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.regex_lookup(self.lookup_name)
return sql_template % (lhs, rhs), lhs_params + rhs_params
Field.register_lookup(Regex)
class IRegex(Regex):
lookup_name = 'iregex'
Field.register_lookup(IRegex)
class YearLookup(Lookup):
def year_lookup_bounds(self, connection, year):
output_field = self.lhs.lhs.output_field
if isinstance(output_field, DateTimeField):
bounds = connection.ops.year_lookup_bounds_for_datetime_field(year)
else:
bounds = connection.ops.year_lookup_bounds_for_date_field(year)
return bounds
class YearComparisonLookup(YearLookup):
def as_sql(self, compiler, connection):
# We will need to skip the extract part and instead go
# directly with the originating field, that is self.lhs.lhs.
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
start, finish = self.year_lookup_bounds(connection, rhs_params[0])
params.append(self.get_bound(start, finish))
return '%s %s' % (lhs_sql, rhs_sql), params
def get_rhs_op(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
def get_bound(self, start, finish):
raise NotImplementedError(
'subclasses of YearComparisonLookup must provide a get_bound() method'
)
class YearExact(YearLookup, Exact):
lookup_name = 'exact'
def as_sql(self, compiler, connection):
# We will need to skip the extract part and instead go
# directly with the originating field, that is self.lhs.lhs.
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
try:
# Check that rhs_params[0] exists (IndexError),
# it isn't None (TypeError), and is a number (ValueError)
int(rhs_params[0])
except (IndexError, TypeError, ValueError):
# Can't determine the bounds before executing the query, so skip
# optimizations by falling back to a standard exact comparison.
return super(Exact, self).as_sql(compiler, connection)
bounds = self.year_lookup_bounds(connection, rhs_params[0])
params.extend(bounds)
return '%s BETWEEN %%s AND %%s' % lhs_sql, params
class YearGt(YearComparisonLookup):
lookup_name = 'gt'
def get_bound(self, start, finish):
return finish
class YearGte(YearComparisonLookup):
lookup_name = 'gte'
def get_bound(self, start, finish):
return start
class YearLt(YearComparisonLookup):
lookup_name = 'lt'
def get_bound(self, start, finish):
return start
class YearLte(YearComparisonLookup):
lookup_name = 'lte'
def get_bound(self, start, finish):
return finish
| bsd-3-clause |
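As a usage note for the lookup machinery above: third-party code extends it by subclassing `Lookup`, implementing `as_sql()`, and registering on `Field`, exactly as the built-ins do. The sketch below follows the pattern from Django's own documentation for this era of the codebase; the `ne` lookup name is an example and is not registered by this module.

```python
# Minimal custom lookup registered the same way as the built-ins above.
from django.db.models import Field
from django.db.models.lookups import Lookup

class NotEqual(Lookup):
    lookup_name = 'ne'

    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        return '%s <> %s' % (lhs, rhs), lhs_params + rhs_params

Field.register_lookup(NotEqual)
# Usage: Author.objects.filter(name__ne='Jack')
```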
Jaesin/OctoPrint | tests/plugin/test_core.py | 1 | 12641 | import unittest
import mock
import ddt
import octoprint.plugin
import octoprint.plugin.core
##~~ Helpers for testing mixin type extraction
class A(object):
pass
class A_1(A):
pass
class A_2(A):
pass
class A_3(A):
pass
class A1_1(A_1):
pass
class B(object):
pass
class B_1(B):
pass
class C(object):
pass
class C_1(C):
pass
class D(object):
pass
@ddt.ddt
class PluginTestCase(unittest.TestCase):
def setUp(self):
import logging
logging.basicConfig(level=logging.DEBUG)
# TODO mock pkg_resources to return some defined entry_points
import os
self.plugin_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "_plugins")
plugin_folders = [self.plugin_folder]
plugin_bases = [octoprint.plugin.OctoPrintPlugin]
plugin_entry_points = None
self.plugin_manager = octoprint.plugin.core.PluginManager(plugin_folders,
plugin_bases,
plugin_entry_points,
plugin_disabled_list=[],
logging_prefix="logging_prefix.")
self.plugin_manager.reload_plugins(startup=True, initialize_implementations=False)
self.plugin_manager.initialize_implementations()
def test_plugin_loading(self):
self.assertEqual(7, len(self.plugin_manager.enabled_plugins))
self.assertEqual(2, len(self.plugin_manager.plugin_hooks))
self.assertEqual(4, len(self.plugin_manager.plugin_implementations))
self.assertEqual(3, len(self.plugin_manager.plugin_implementations_by_type))
# hook_plugin
self.assertTrue("octoprint.core.startup" in self.plugin_manager.plugin_hooks)
self.assertEqual(1, len(self.plugin_manager.plugin_hooks["octoprint.core.startup"]))
# ordered hook plugins
self.assertTrue("some.ordered.callback" in self.plugin_manager.plugin_hooks)
self.assertEqual(3, len(self.plugin_manager.plugin_hooks["some.ordered.callback"]))
# TestStartupPlugin & TestMixedPlugin
self.assertTrue(octoprint.plugin.StartupPlugin in self.plugin_manager.plugin_implementations_by_type)
self.assertEqual(2, len(self.plugin_manager.plugin_implementations_by_type[octoprint.plugin.StartupPlugin]))
# TestSettingsPlugin & TestMixedPlugin
self.assertTrue(octoprint.plugin.SettingsPlugin in self.plugin_manager.plugin_implementations_by_type)
self.assertEqual(2, len(self.plugin_manager.plugin_implementations_by_type[octoprint.plugin.SettingsPlugin]))
# TestDeprecatedAssetPlugin, NOT TestSecondaryDeprecatedAssetPlugin
self.assertTrue(octoprint.plugin.AssetPlugin in self.plugin_manager.plugin_implementations_by_type)
self.assertEqual(1, len(self.plugin_manager.plugin_implementations_by_type[octoprint.plugin.AssetPlugin]))
def test_plugin_initializing(self):
def test_factory(name, implementation):
return dict(test_factory="test_factory_%s" % name)
def verify_injection_order(name, implementation):
self.assertTrue(hasattr(implementation, "_basefolder"))
return dict()
additional_injects = dict(
additional_inject="additional_inject"
)
additional_inject_factories = [test_factory, verify_injection_order]
self.plugin_manager.initialize_implementations(
additional_injects=additional_injects,
additional_inject_factories=additional_inject_factories
)
all_implementations = self.plugin_manager.plugin_implementations
self.assertEqual(4, len(all_implementations))
for name, impl in all_implementations.items():
self.assertTrue(name in self.plugin_manager.enabled_plugins)
plugin = self.plugin_manager.enabled_plugins[name]
# test that the standard fields were properly initialized
self.assertTrue(hasattr(impl, "_identifier"))
self.assertEqual(name, impl._identifier)
self.assertTrue(hasattr(impl, "_plugin_name"))
self.assertEqual(plugin.name, impl._plugin_name)
self.assertTrue(hasattr(impl, "_plugin_version"))
self.assertEqual(plugin.version, impl._plugin_version)
self.assertTrue(hasattr(impl, "_logger"))
self.assertIsNotNone(impl._logger)
self.assertEqual("logging_prefix.%s" % name, impl._logger.name)
self.assertTrue(hasattr(impl, "_basefolder"))
self.assertTrue(impl._basefolder.startswith(self.plugin_folder))
# test that the additional injects were properly injected
self.assertTrue(hasattr(impl, "_additional_inject"))
self.assertEqual("additional_inject", impl._additional_inject)
# test that the injection factory was properly executed and the result injected
self.assertTrue(hasattr(impl, "_test_factory"))
self.assertEqual("test_factory_%s" % name, impl._test_factory)
def test_get_plugin(self):
plugin = self.plugin_manager.get_plugin("hook_plugin")
self.assertIsNotNone(plugin)
self.assertEqual("Hook Plugin", plugin.__plugin_name__)
plugin = self.plugin_manager.get_plugin("mixed_plugin")
self.assertIsNotNone(plugin)
self.assertEqual("Mixed Plugin", plugin.__plugin_name__)
plugin = self.plugin_manager.get_plugin("unknown_plugin")
self.assertIsNone(plugin)
def test_get_plugin_info(self):
plugin_info = self.plugin_manager.get_plugin_info("hook_plugin")
self.assertIsNotNone(plugin_info)
self.assertEqual("Hook Plugin", plugin_info.name)
plugin_info = self.plugin_manager.get_plugin_info("unknown_plugin")
self.assertIsNone(plugin_info)
def test_get_hooks(self):
hooks = self.plugin_manager.get_hooks("octoprint.core.startup")
self.assertEqual(1, len(hooks))
self.assertTrue("hook_plugin" in hooks)
self.assertEqual("success", hooks["hook_plugin"]())
hooks = self.plugin_manager.get_hooks("octoprint.printing.print")
self.assertEqual(0, len(hooks))
def test_sorted_hooks(self):
hooks = self.plugin_manager.get_hooks("some.ordered.callback")
self.assertEqual(3, len(hooks))
self.assertListEqual(["one_ordered_hook_plugin", "another_ordered_hook_plugin", "hook_plugin"], hooks.keys())
def test_get_implementations(self):
implementations = self.plugin_manager.get_implementations(octoprint.plugin.StartupPlugin)
self.assertListEqual(["mixed_plugin", "startup_plugin"], map(lambda x: x._identifier, implementations))
implementations = self.plugin_manager.get_implementations(octoprint.plugin.SettingsPlugin)
self.assertListEqual(["mixed_plugin", "settings_plugin"], map(lambda x: x._identifier, implementations))
implementations = self.plugin_manager.get_implementations(octoprint.plugin.StartupPlugin, octoprint.plugin.SettingsPlugin)
self.assertListEqual(["mixed_plugin"], map(lambda x: x._identifier, implementations))
implementations = self.plugin_manager.get_implementations(octoprint.plugin.AssetPlugin)
self.assertListEqual(["deprecated_plugin"], map(lambda x: x._identifier, implementations))
def test_get_filtered_implementations(self):
implementations = self.plugin_manager.get_filtered_implementations(lambda x: x._identifier.startswith("startup"), octoprint.plugin.StartupPlugin)
self.assertEqual(1, len(implementations))
def test_get_sorted_implementations(self):
implementations = self.plugin_manager.get_implementations(octoprint.plugin.StartupPlugin, sorting_context="sorting_test")
self.assertListEqual(["startup_plugin", "mixed_plugin"], map(lambda x: x._identifier, implementations))
def test_client_registration(self):
def test_client(*args, **kwargs):
pass
self.assertEqual(0, len(self.plugin_manager.registered_clients))
self.plugin_manager.register_message_receiver(test_client)
self.assertEqual(1, len(self.plugin_manager.registered_clients))
self.assertIn(test_client, self.plugin_manager.registered_clients)
self.plugin_manager.unregister_message_receiver(test_client)
self.assertEqual(0, len(self.plugin_manager.registered_clients))
self.assertNotIn(test_client, self.plugin_manager.registered_clients)
def test_send_plugin_message(self):
client1 = mock.Mock()
client2 = mock.Mock()
self.plugin_manager.register_message_receiver(client1.on_plugin_message)
self.plugin_manager.register_message_receiver(client2.on_plugin_message)
plugin = "some plugin"
data = "some data"
self.plugin_manager.send_plugin_message(plugin, data)
client1.on_plugin_message.assert_called_once_with(plugin, data)
client2.on_plugin_message.assert_called_once_with(plugin, data)
def test_validate_plugin(self):
self.assertTrue("deprecated_plugin" in self.plugin_manager.enabled_plugins)
plugin = self.plugin_manager.enabled_plugins["deprecated_plugin"]
self.assertTrue(hasattr(plugin.instance, plugin.__class__.attr_implementation))
self.assertFalse(hasattr(plugin.instance, plugin.__class__.attr_implementations))
@ddt.data(
(["octoprint.some_hook"], ["octoprint.some_hook", "octoprint.another_hook"], True),
(["octoprint.*"], ["octoprint.some_hook", "octoprint.another_hook"], True),
(["octoprint.some_hook"], ["octoprint.another_hook"], False),
(["octoprint.some_hook"], [], False),
([], ["octoprint.some_hook"], False)
)
@ddt.unpack
def test_has_any_of_hooks(self, hooks_to_test_for, plugin_hooks, expected):
plugin = mock.MagicMock()
plugin.hooks = dict((hook, hook) for hook in plugin_hooks)
actual = octoprint.plugin.core.PluginManager.has_any_of_hooks(plugin, hooks_to_test_for)
self.assertEqual(actual, expected)
def test_has_any_of_hooks_varargs(self):
plugin = mock.MagicMock()
plugin.hooks = dict((hook, hook) for hook in ["octoprint.some_hook", "octoprint.another_hook"])
result = octoprint.plugin.core.PluginManager.has_any_of_hooks(plugin, "octoprint.some_hook", "octoprint.some_other_hook")
self.assertTrue(result)
def test_has_any_of_hooks_nohooks(self):
plugin = mock.MagicMock()
result = octoprint.plugin.core.PluginManager.has_any_of_hooks(plugin, "octoprint.some_hook", "octoprint.some_other_hook")
self.assertFalse(result)
@ddt.data(
("octoprint.some_hook", ["octoprint.another_hook", "octoprint.some_hook"], True),
("octoprint.some_hook", ["octoprint.*"], True),
("octoprint.some_hook", ["octoprint.some_hook*"], True),
("octoprint.some_hook", ["octoprint.*_hook"], True),
("octoprint.some_hook", ["octoprint.another_hook.*"], False),
("", ["octoprint.some_hook"], False),
(None, ["octoprint.some_hook"], False),
("octoprint.some_hook", [], False),
("octoprint.some_hook", None, False),
("octoprint.some_hook", [None], False)
)
@ddt.unpack
def test_hook_matches_hooks(self, hook, hooks, expected):
actual = octoprint.plugin.core.PluginManager.hook_matches_hooks(hook, hooks)
self.assertEqual(actual, expected)
def test_hook_matches_hooks_varargs(self):
result = octoprint.plugin.core.PluginManager.hook_matches_hooks("octoprint.some_hook",
"octoprint.another_hook", "octoprint.some_hook")
self.assertTrue(result)
@ddt.data(
([octoprint.plugin.RestartNeedingPlugin], [octoprint.plugin.Plugin, octoprint.plugin.RestartNeedingPlugin], True),
([octoprint.plugin.RestartNeedingPlugin], [octoprint.plugin.Plugin], False),
([], [octoprint.plugin.Plugin], False),
([octoprint.plugin.RestartNeedingPlugin], [], False)
)
@ddt.unpack
def test_has_any_of_mixins(self, mixins_to_test_for, plugin_mixins, expected):
plugin = mock.MagicMock()
plugin.implementation = mock.MagicMock()
for mixin in plugin_mixins:
plugin.implementation.mock_add_spec(mixin)
actual = octoprint.plugin.core.PluginManager.has_any_of_mixins(plugin, mixins_to_test_for)
self.assertEqual(actual, expected)
def test_has_any_of_mixins_varargs(self):
plugin = mock.MagicMock()
plugin.implementation = mock.MagicMock()
plugin.implementation.mock_add_spec(octoprint.plugin.Plugin)
plugin.implementation.mock_add_spec(octoprint.plugin.RestartNeedingPlugin)
result = octoprint.plugin.core.PluginManager.has_any_of_mixins(plugin, octoprint.plugin.RestartNeedingPlugin)
self.assertTrue(result)
def test_has_any_of_mixins_noimplementation(self):
plugin = mock.MagicMock()
result = octoprint.plugin.core.PluginManager.has_any_of_mixins(plugin, octoprint.plugin.RestartNeedingPlugin)
self.assertFalse(result)
@ddt.data(
((A1_1, A_2, B_1, C_1), (A, C), (A_1, A1_1, A_2, C_1)),
((A1_1, A_2, B_1, C_1), (B,), (B_1,)),
# not a subclass
((A1_1, A_2, B_1, C_1), (D,), ()),
# subclass only of base
((A,), (A,), ())
)
@ddt.unpack
def test_mixins_matching_bases(self, bases_to_set, bases_to_check, expected):
Foo = type("Foo", bases_to_set, dict())
actual = octoprint.plugin.core.PluginManager.mixins_matching_bases(Foo, *bases_to_check)
self.assertSetEqual(actual, set(expected))
| agpl-3.0 |
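The `A`/`B`/`C` helper hierarchy and the `test_mixins_matching_bases` data above pin down a behavior worth spelling out: the matcher walks a class's MRO and keeps every ancestor that subclasses one of the given bases, excluding the class itself and the bases themselves (hence `((A,), (A,), ())` expecting an empty result). Below is a simplified, self-contained re-implementation, not OctoPrint's actual code.

```python
# Simplified sketch of MRO-based mixin matching, assuming only new-style
# classes (which expose __mro__).
def mixins_matching_bases(klass, *bases):
    result = set()
    for c in klass.__mro__:
        if c is klass or c in bases or c is object:
            continue  # skip the class itself and the abstract bases
        if issubclass(c, bases):
            result.add(c)
    return result

class A(object): pass
class A_1(A): pass
class B(object): pass

Foo = type("Foo", (A_1, B), dict())
print(mixins_matching_bases(Foo, A))  # {A_1}: B filtered out, A excluded
```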
ironmagma/pumpkinpy | gentests.py | 1 | 6418 | # Legal boring crap follows. In simple English: you can use this
# code in your own project, whether your project is commercial or free.
# Just be sure to include the license and stuff. The "copyright"
# here is just for technical reasons.
#
# Copyright 2011, Philip Peterson.
#
# This file is part of Pumpkinpy.
#
# Pumpkinpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pumpkinpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pumpkinpy. If not, see <http://www.gnu.org/licenses/>.
import sys, os, subprocess
from itertools import ifilter as filtered, imap as mapped, ifilterfalse, chain
path = os.path.dirname(os.path.abspath(__file__))
langs = {
"php": "PHP",
"js": "JavaScript"
}
funcs = [
[
"partition",
"stringfunc",
[
["Hello world! How are you doing today?", " "],
["Hello world! How are you doing today?", ","],
["Hello world! How are you doing today?", ""]
]
],
[
"rpartition",
"stringfunc",
[
["Hello world! How are you doing today?", " "],
["Hello world! How are you doing today?", ","],
["Hello world! How are you doing today?", ""]
]
],
[
"startswith",
"stringfunc",
[
["abcdefgh", ""],
["abcdefgh", "abc"],
["abcdefgh", "a"],
["abcdefgh", "abcdefghi"],
["abcdefgh", "bcdefgh"]
]
],
[
"endswith",
"stringfunc",
[
["abcdefgh", ""],
["abcdefgh", "fgh"],
["abcdefgh", "h"],
["abcdefgh", "abcdefg"],
["abcdefgh", "abcdefghi"],
]
],
[
"rstrip",
"stringfunc",
[
[" Johann went to the store today. "],
["Johann went to the store today. "],
[" Johann went to the store today."],
[" Johann went to the store today. \0"]
]
]
]
####
def itercat(*iterators):
"""Concatenate several iterators into one."""
for i in iterators:
for x in i:
yield x
allfuncs = iter([]) # Find functions for which there are no tests
for lang in langs.keys():
myfuncs = filtered(lambda x: not x.startswith("$"), os.listdir(os.path.join(path, lang, "src"))) # filter out $preamble, etc.
myfuncs = mapped(lambda x: x.rpartition(".")[0], myfuncs)
allfuncs = itercat(myfuncs, allfuncs)
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in ifilterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
allfuncs = unique_everseen(allfuncs)
funcnames = [i[0] for i in funcs]
allfuncs = filtered(lambda fun: not fun in funcnames, allfuncs) # Filter out unsupported items
for unsupportedfunc in allfuncs:
print "[!] No test for", unsupportedfunc
####
results = []
veryverbose_on = "-vv" in sys.argv[1:] or "--very-verbose" in sys.argv[1:]
verbose_on = "-v" in sys.argv[1:] or "--verbose" in sys.argv[1:] or veryverbose_on
def vp(*args):
global verbose_on
if verbose_on:
print " ".join(map(str, args))
def vvp(*args):
global veryverbose_on
if veryverbose_on:
print " ".join(map(str, args))
print "Building all..."
for x, y in langs.items():
vp("Building "+y)
subprocess.check_call([os.path.join(".", x, "build.py")])
for function, kind, tests in funcs:
for test in tests:
if kind == "stringfunc":
string_obj = test[0]
args = test[1:]
success = True
try:
result = string_obj.__getattribute__(function)(*args)
except:
success = False
result = None
else:
raise Exception("Unknown function type `%s`." % kind)
test.append([success, result])
all_results = [] # list of all test results, for generating table in readme, etc.
for lang in langs.items():
vp("\nBeginning unit tests on", lang[1])
execfile(os.path.join(path,"_helpers",lang[0]+'.py'))
thislangsresults = [lang[0], lang[1], {}]
mysupport = thislangsresults[2] # This is a dict that will describe support of each function.
all_results.append(thislangsresults)
for function, kind, tests in funcs:
num_tests = len(tests)
num_passed = 0
for test in tests:
if isSupported(function):
args = test[:-1]
code = genTest(function, kind, args)
result = genResult(code)
passedTest = False
expected = json.dumps(test[-1])
try:
actual = json.dumps(json.loads(result))
passedTest = True
except Exception:
actual = "(parse fail)"
print "Could not parse JSON Output of function "+function+"."
vvp("\tJSON: "+result)
if actual!=expected:
passedTest = False
vp(lang[1]+" failed test in "+function+".")
vvp("\tExpected: "+expected+"\n\tActual: "+actual+"\n\tArgs: "+json.dumps(args))
if passedTest:
num_passed += 1
else:
vp(lang[1], "does not support", function+".", "Skipping.")
mysupport[function] = [num_passed, num_tests]
# Display overall results of the tests
print "\nTest results: "
allsuccess = True
for result in all_results:
support = result[2]
for func, fract in support.items():
if fract[0] != fract[1]:
allsuccess = False
print result[0], func, "(", fract[0], "/", fract[1], ")"
if allsuccess:
print "All tests successful."
execfile("_helpers/_gentable.py")
| gpl-3.0 |
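The heart of the harness above is small: run the reference CPython string method (the `stringfunc` branch), JSON-serialize the `[success, result]` pair, and compare it with the JSON a language port prints. Here is a stripped-down sketch of that round-trip, with the port's output faked inline where the real harness would execute generated PHP or JavaScript.

```python
# Stripped-down version of the comparison loop from gentests.py.
import json

def reference(func_name, string_obj, *args):
    try:
        return [True, getattr(string_obj, func_name)(*args)]
    except Exception:
        return [False, None]

expected = json.dumps(reference("partition", "Hello world!", " "))
actual = json.dumps([True, ["Hello", " ", "world!"]])  # pretend port output
print(expected == actual)  # True: the port matches CPython's behavior
```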
genialis/resolwe | resolwe/permissions/tests/test_data.py | 1 | 16467 | # pylint: disable=missing-docstring
import shutil
import unittest
from datetime import timedelta
from django.utils.timezone import now
from guardian.shortcuts import remove_perm
from rest_framework import exceptions, status
from resolwe.flow.models import Collection, Data
from resolwe.flow.serializers import ContributorSerializer
from resolwe.flow.views import DataViewSet
from resolwe.test import ResolweAPITestCase
DATE_FORMAT = r"%Y-%m-%dT%H:%M:%S.%f"
MESSAGES = {
"NOT_FOUND": "Not found.",
# 'NO_PERMISSION': 'You do not have permission to perform this action.',
"ONE_ID_REQUIRED": "Exactly one id required on create.",
}
class DataTestCase(ResolweAPITestCase):
fixtures = [
"users.yaml",
"collections.yaml",
"processes.yaml",
"data.yaml",
"permissions.yaml",
]
def setUp(self):
self.data1 = Data.objects.get(pk=1)
self.resource_name = "data"
self.viewset = DataViewSet
self.data = {
"name": "New data",
"slug": "new_data",
"collection": {"id": 1},
"process": {"slug": "test_process"},
}
super().setUp()
def tearDown(self):
for data in Data.objects.all():
if data.location:
shutil.rmtree(data.location.get_path(), ignore_errors=True)
super().tearDown()
def test_get_list(self):
resp = self._get_list(self.user1)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(len(resp.data), 2)
def test_get_list_public_user(self):
resp = self._get_list()
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(len(resp.data), 1)
def test_get_list_admin(self):
resp = self._get_list(self.admin)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(len(resp.data), 2)
@unittest.skipIf(
True,
"since PR308: this test uses transactions, incompatible with the separated manager",
)
def test_post(self):
# logged-in user w/ perms
collection_n = Data.objects.count()
resp = self._post(self.data, self.user1)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
self.assertEqual(Data.objects.count(), collection_n + 1)
d = Data.objects.get(pk=resp.data["id"])
self.assertTrue(now() - d.modified < timedelta(seconds=1))
self.assertTrue(now() - d.created < timedelta(seconds=1))
self.assertEqual(d.status, "OK")
self.assertTrue(now() - d.started < timedelta(seconds=1))
self.assertTrue(now() - d.finished < timedelta(seconds=1))
self.assertEqual(d.contributor_id, self.user1.pk)
def test_post_invalid_fields(self):
data_n = Data.objects.count()
self.data["collection"] = {"id": 42}
resp = self._post(self.data, self.user1)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
resp.data["collection"][0],
"Invalid collection value: {'id': 42} - object does not exist.",
)
self.data["collection"] = {"id": 1}
self.data["process"] = {"id": 42}
resp = self._post(self.data, self.user1)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
str(resp.data["process"][0]),
"Invalid process value: {'id': 42} - object does not exist.",
)
self.assertEqual(Data.objects.count(), data_n)
def test_post_no_perms(self):
collection = Collection.objects.get(pk=1)
remove_perm("edit_collection", self.user2, collection)
data_count = Data.objects.count()
resp = self._post(self.data, self.user2)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(Data.objects.count(), data_count)
def test_post_public_user(self):
data_count = Data.objects.count()
resp = self._post(self.data)
# The user has no permission to add a Data object to the collection.
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(Data.objects.count(), data_count)
def test_post_protected_fields(self):
date_now = now()
self.data["created"] = date_now - timedelta(days=360)
self.data["modified"] = date_now - timedelta(days=180)
self.data["started"] = date_now - timedelta(days=180)
self.data["finished"] = date_now - timedelta(days=90)
self.data["checksum"] = "fake"
self.data["status"] = "DE"
self.data["process_progress"] = 2
self.data["process_rc"] = 18
self.data["process_info"] = "Spam"
self.data["process_warning"] = "More spam"
self.data["process_error"] = "Even more spam"
self.data["contributor_id"] = 2
resp = self._post(self.data, self.user1)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
self.assertEqual(resp.data["started"], None)
self.assertEqual(resp.data["finished"], None)
self.assertEqual(
resp.data["checksum"],
"05bc76611c382a88817389019679f35cdb32ac65fe6662210805b588c30f71e6",
)
self.assertEqual(resp.data["status"], "RE")
self.assertEqual(resp.data["process_progress"], 0)
self.assertEqual(resp.data["process_rc"], None)
self.assertEqual(resp.data["process_info"], [])
self.assertEqual(resp.data["process_warning"], [])
self.assertEqual(resp.data["process_error"], [])
self.assertEqual(
resp.data["contributor"],
{
"id": self.user1.pk,
"username": self.user1.username,
"first_name": self.user1.first_name,
"last_name": self.user1.last_name,
},
)
def test_post_contributor_numeric(self):
response = ContributorSerializer(
ContributorSerializer().to_internal_value(self.user1.pk)
).data
self.assertEqual(
response,
{
"id": self.user1.pk,
"username": self.user1.username,
"first_name": self.user1.first_name,
"last_name": self.user1.last_name,
},
)
def test_post_contributor_dict(self):
response = ContributorSerializer(
ContributorSerializer().to_internal_value({"id": self.user1.pk})
).data
self.assertEqual(
response,
{
"id": self.user1.pk,
"username": self.user1.username,
"first_name": self.user1.first_name,
"last_name": self.user1.last_name,
},
)
def test_post_contributor_dict_extra_data(self):
response = ContributorSerializer(
ContributorSerializer().to_internal_value(
{"id": self.user1.pk, "username": "ignored", "first_name": "ignored"}
)
).data
self.assertEqual(
response,
{
"id": self.user1.pk,
"username": self.user1.username,
"first_name": self.user1.first_name,
"last_name": self.user1.last_name,
},
)
def test_post_contributor_dict_invalid(self):
with self.assertRaises(exceptions.ValidationError):
ContributorSerializer().to_internal_value(
{
"invalid-dictionary": True,
}
)
def test_get_detail(self):
# public user w/ perms
resp = self._get_detail(1)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data["id"], 1)
# user w/ permissions
resp = self._get_detail(1, self.user1)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertKeys(
resp.data,
[
"slug",
"name",
"created",
"modified",
"contributor",
"started",
"finished",
"checksum",
"status",
"process",
"process_progress",
"process_rc",
"process_info",
"process_warning",
"process_error",
"input",
"output",
"descriptor_schema",
"descriptor",
"id",
"size",
"scheduled",
"current_user_permissions",
"descriptor_dirty",
"tags",
"process_memory",
"process_cores",
"collection",
"entity",
"duplicated",
],
)
# user w/ public permissions
resp = self._get_detail(1, self.user2)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertKeys(
resp.data,
[
"slug",
"name",
"created",
"modified",
"contributor",
"started",
"finished",
"checksum",
"status",
"process",
"process_progress",
"process_rc",
"process_info",
"process_warning",
"process_error",
"input",
"output",
"descriptor_schema",
"descriptor",
"id",
"size",
"scheduled",
"current_user_permissions",
"descriptor_dirty",
"tags",
"process_memory",
"process_cores",
"collection",
"entity",
"duplicated",
],
)
def test_get_detail_no_perms(self):
# public user w/o permissions
resp = self._get_detail(2)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(resp.data["detail"], MESSAGES["NOT_FOUND"])
# user w/o permissions
resp = self._get_detail(2, self.user2)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(resp.data["detail"], MESSAGES["NOT_FOUND"])
def test_patch(self):
data = {"name": "New data"}
resp = self._patch(1, data, self.user1)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
d = Data.objects.get(pk=1)
self.assertEqual(d.name, "New data")
def test_patch_no_perms(self):
data = {"name": "New data"}
resp = self._patch(2, data, self.user2)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
d = Data.objects.get(pk=2)
self.assertEqual(d.name, "Test data 2")
def test_patch_public_user(self):
data = {"name": "New data"}
resp = self._patch(2, data)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
d = Data.objects.get(pk=2)
self.assertEqual(d.name, "Test data 2")
def test_patch_protected(self):
date_now = now()
# `created`
resp = self._patch(1, {"created": date_now}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.created.isoformat(), self.data1.created.isoformat())
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `modified`
resp = self._patch(1, {"modified": date_now - timedelta(days=180)}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `started`
resp = self._patch(1, {"started": date_now}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.started.isoformat(), self.data1.started.isoformat())
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `finished`
resp = self._patch(1, {"finished": date_now}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.finished.isoformat(), self.data1.finished.isoformat())
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `checksum`
resp = self._patch(1, {"checksum": "fake"}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(
d.checksum,
"05bc76611c382a88817389019679f35cdb32ac65fe6662210805b588c30f71e6",
)
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `status`
resp = self._patch(1, {"status": "DE"}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.status, "OK")
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `process_progress`
resp = self._patch(1, {"process_progress": 2}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.process_progress, 0)
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `process_rc`
resp = self._patch(1, {"process_rc": 18}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.process_rc, None)
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `process_info`
resp = self._patch(1, {"process_info": "Spam"}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.process_info, [])
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `process_warning`
resp = self._patch(1, {"process_warning": "More spam"}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.process_warning, [])
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `process_error`
resp = self._patch(1, {"process_error": "Even more spam"}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.process_error, [])
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `contributor`
resp = self._patch(1, {"contributor": 2}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.contributor_id, 1)
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
# `process`
resp = self._patch(1, {"process": 2}, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
d = Data.objects.get(pk=1)
self.assertEqual(d.process_id, 1)
self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
def test_delete(self):
resp = self._delete(1, self.user1)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
query = Data.objects.filter(pk=1).exists()
self.assertFalse(query)
def test_delete_no_perms(self):
resp = self._delete(2, self.user2)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
query = Data.objects.filter(pk=2).exists()
self.assertTrue(query)
def test_delete_public_user(self):
resp = self._delete(2)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
query = Data.objects.filter(pk=2).exists()
self.assertTrue(query)
| apache-2.0 |
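The contributor tests above exercise a serializer that accepts either a bare primary key or a dict carrying an `id` key (ignoring extra keys), and rejects anything else with a `ValidationError`. A hedged sketch of that `to_internal_value` shape follows; the class name is illustrative, not resolwe's actual implementation.

```python
# Sketch of pk-or-dict coercion as exercised by the contributor tests.
from rest_framework import serializers

class PkOrDictField(serializers.Field):
    def to_internal_value(self, data):
        if isinstance(data, dict):
            if "id" not in data:
                raise serializers.ValidationError('Expected a pk or {"id": pk}.')
            return data["id"]  # extra keys such as "username" are ignored
        return data
```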
yati-sagade/RyDyrect | django/views/i18n.py | 188 | 9673 | import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.utils import importlib
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.utils.text import javascript_quote
from django.utils.encoding import smart_unicode
from django.utils.formats import get_format_modules, get_format
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.REQUEST.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', None)
if not next:
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['django_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
return response
def get_formats():
"""
Returns all formats strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
src = []
for k, v in result.items():
if isinstance(v, (basestring, int)):
src.append("formats['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(smart_unicode(v))))
elif isinstance(v, (tuple, list)):
v = [javascript_quote(smart_unicode(value)) for value in v]
src.append("formats['%s'] = ['%s'];\n" % (javascript_quote(k), "', '".join(v)))
return ''.join(src)
NullSource = """
/* gettext identity library */
function gettext(msgid) { return msgid; }
function ngettext(singular, plural, count) { return (count == 1) ? singular : plural; }
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) { return msgid; }
function npgettext(context, singular, plural, count) { return (count == 1) ? singular : plural; }
"""
LibHead = """
/* gettext library */
var catalog = new Array();
"""
LibFoot = """
function gettext(msgid) {
var value = catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
}
function ngettext(singular, plural, count) {
var value = catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[pluralidx(count)];
}
}
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) {
var value = gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
}
function npgettext(context, singular, plural, count) {
var value = ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = ngettext(singular, plural, count);
}
return value;
}
"""
LibFormatHead = """
/* formatting library */
var formats = new Array();
"""
LibFormatFoot = """
function get_format(format_type) {
var value = formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
}
"""
SimplePlural = """
function pluralidx(count) { return (count == 1) ? 0 : 1; }
"""
InterPolate = r"""
function interpolate(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
}
"""
PluralIdx = r"""
function pluralidx(n) {
var v=%s;
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
}
"""
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
src = [NullSource, InterPolate, LibFormatHead, get_formats(), LibFormatFoot]
return http.HttpResponse(''.join(src), 'text/javascript')
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
if request.GET:
if 'language' in request.GET:
if check_for_language(request.GET['language']):
activate(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, basestring):
packages = packages.split('+')
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
default_locale = to_locale(settings.LANGUAGE_CODE)
locale = to_locale(get_language())
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(p.__file__), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(list(reversed(settings.LOCALE_PATHS)))
# first load all English language files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't English
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
src = [LibHead]
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':',1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=',1)[1]
src.append(PluralIdx % plural)
else:
src.append(SimplePlural)
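    # Worked example of the extraction above: the sample header reduces to the
    # bare C-style expression
    #   "n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2"
    # which PluralIdx then embeds verbatim as `var v=<expr>;`.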
csrc = []
pdict = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, basestring):
csrc.append("catalog['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(v)))
elif isinstance(k, tuple):
if k[0] not in pdict:
pdict[k[0]] = k[1]
else:
pdict[k[0]] = max(k[1], pdict[k[0]])
csrc.append("catalog['%s'][%d] = '%s';\n" % (javascript_quote(k[0]), k[1], javascript_quote(v)))
else:
raise TypeError(k)
csrc.sort()
for k, v in pdict.items():
src.append("catalog['%s'] = [%s];\n" % (javascript_quote(k), ','.join(["''"]*(v+1))))
src.extend(csrc)
src.append(LibFoot)
src.append(InterPolate)
src.append(LibFormatHead)
src.append(get_formats())
src.append(LibFormatFoot)
src = ''.join(src)
return http.HttpResponse(src, 'text/javascript')
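# A minimal wiring sketch for this view (old-style string-based URLconf of
# this Django era; the package name is illustrative):
#
#     js_info_dict = {'packages': ('django.conf',)}
#     urlpatterns = patterns('',
#         (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
#     )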
| bsd-3-clause |
mzp/iTerm2 | tests/esctest/tests/ech.py | 31 | 2426 | from esc import NUL, blank
import esccmd
import escio
from esctypes import Point, Rect
from escutil import AssertScreenCharsInRectEqual, knownBug
class ECHTests(object):
def test_ECH_DefaultParam(self):
"""Should erase the character under the cursor."""
escio.Write("abc")
esccmd.CUP(Point(1, 1))
esccmd.ECH()
    AssertScreenCharsInRectEqual(Rect(1, 1, 3, 1), [blank() + "bc"])
def test_ECH_ExplicitParam(self):
"""Should erase N characters starting at the cursor."""
escio.Write("abc")
esccmd.CUP(Point(1, 1))
esccmd.ECH(2)
    AssertScreenCharsInRectEqual(Rect(1, 1, 3, 1), [blank() * 2 + "c"])
def test_ECH_IgnoresScrollRegion(self):
"""ECH ignores the scroll region when the cursor is inside it"""
escio.Write("abcdefg")
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(2, 4)
esccmd.CUP(Point(3, 1))
esccmd.ECH(4)
esccmd.DECRESET(esccmd.DECLRMM)
AssertScreenCharsInRectEqual(Rect(1, 1, 7, 1), [ "ab" + blank() * 4 + "g" ]);
def test_ECH_OutsideScrollRegion(self):
"""ECH ignores the scroll region when the cursor is outside it"""
escio.Write("abcdefg")
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(2, 4)
esccmd.CUP(Point(1, 1))
esccmd.ECH(4)
esccmd.DECRESET(esccmd.DECLRMM)
    AssertScreenCharsInRectEqual(Rect(1, 1, 7, 1), [blank() * 4 + "efg"])
@knownBug(terminal="xterm",
reason="ECH respects DEC protection, which is questionable at best given the description of DECSCA 'The selective erase control functions (DECSED and DECSEL) can only erase characters defined as erasable'.")
  def test_ECH_doesNotRespectDECProtection(self):
"""ECH should not respect DECSCA."""
escio.Write("a")
escio.Write("b")
esccmd.DECSCA(1)
escio.Write("c")
esccmd.DECSCA(0)
esccmd.CUP(Point(1, 1))
esccmd.ECH(3)
AssertScreenCharsInRectEqual(Rect(1, 1, 3, 1),
[ blank() * 3 ])
@knownBug(terminal="iTerm2",
reason="Protection not implemented.")
def test_ECH_respectsISOProtection(self):
"""ECH respects SPA/EPA."""
escio.Write("a")
escio.Write("b")
esccmd.SPA()
escio.Write("c")
esccmd.EPA()
esccmd.CUP(Point(1, 1))
esccmd.ECH(3)
AssertScreenCharsInRectEqual(Rect(1, 1, 3, 1),
[ blank() * 2 + "c" ])
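# A standalone sketch (not part of the esctest harness) of the ECH semantics
# asserted above: blank up to Pn cells starting at the cursor, ignoring any
# scroll region. `blank_char` stands in for whatever blank() returns.
def ech_reference(line, cursor_col, n=1, blank_char=" "):
  """Return `line` with up to `n` cells blanked from 1-based `cursor_col`."""
  i = cursor_col - 1
  return line[:i] + blank_char * min(n, len(line) - i) + line[i + n:]
# e.g. ech_reference("abcdefg", 3, 4) == "ab    g" (cf. test_ECH_IgnoresScrollRegion)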
| gpl-2.0 |
ningchi/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigendecomposition when built on
    # both OS X and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
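# A minimal, self-contained usage sketch of the estimator exercised above;
# the data and parameters mirror test_affinities, which asserts a perfect
# adjusted Rand score on this well-separated toy dataset:
def _example_spectral_usage():
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)
    labels = SpectralClustering(n_clusters=2, gamma=2,
                                random_state=0).fit(X).labels_
    return adjusted_rand_score(y, labels)  # 1.0, as in test_affinities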
| bsd-3-clause |
mccheung/kbengine | kbe/res/scripts/common/Lib/test/test_userlist.py | 116 | 1896 | # Check every path through every method of UserList
from collections import UserList
from test import support, list_tests
class UserListTest(list_tests.CommonTest):
type2test = UserList
def test_getslice(self):
super().test_getslice()
l = [0, 1, 2, 3, 4]
u = self.type2test(l)
for i in range(-3, 6):
self.assertEqual(u[:i], l[:i])
self.assertEqual(u[i:], l[i:])
for j in range(-3, 6):
self.assertEqual(u[i:j], l[i:j])
def test_add_specials(self):
u = UserList("spam")
u2 = u + "eggs"
self.assertEqual(u2, list("spameggs"))
def test_radd_specials(self):
u = UserList("eggs")
u2 = "spam" + u
self.assertEqual(u2, list("spameggs"))
u2 = u.__radd__(UserList("spam"))
self.assertEqual(u2, list("spameggs"))
def test_iadd(self):
super().test_iadd()
u = [0, 1]
u += UserList([0, 1])
self.assertEqual(u, [0, 1, 0, 1])
def test_mixedcmp(self):
u = self.type2test([0, 1])
self.assertEqual(u, [0, 1])
self.assertNotEqual(u, [0])
self.assertNotEqual(u, [0, 2])
def test_mixedadd(self):
u = self.type2test([0, 1])
self.assertEqual(u + [], u)
self.assertEqual(u + [2], [0, 1, 2])
def test_getitemoverwriteiter(self):
# Verify that __getitem__ overrides *are* recognized by __iter__
class T(self.type2test):
def __getitem__(self, key):
return str(key) + '!!!'
self.assertEqual(next(iter(T((1,2)))), "0!!!")
def test_userlist_copy(self):
u = self.type2test([6, 8, 1, 9, 1])
v = u.copy()
self.assertEqual(u, v)
self.assertEqual(type(u), type(v))
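# An illustrative subclass (not part of the suite) of the pattern these tests
# protect: UserList keeps its payload in self.data, so overrides compose
# cleanly with the rest of the list protocol.
class _ShoutingList(UserList):
    def __getitem__(self, index):
        return str(self.data[index]).upper()
# e.g. _ShoutingList(['a', 'b'])[0] == 'A'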
def test_main():
support.run_unittest(UserListTest)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
lesina/Hack70 | env/lib/python3.5/site-packages/pip/_vendor/lockfile/sqlitelockfile.py | 536 | 5506 | from __future__ import absolute_import, division
import time
import os
try:
unicode
except NameError:
unicode = str
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
"Demonstrate SQL-based locking."
testdb = None
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = SQLiteLockFile('somefile')
>>> lock = SQLiteLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
if SQLiteLockFile.testdb is None:
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
SQLiteLockFile.testdb = testdb
import sqlite3
self.connection = sqlite3.connect(SQLiteLockFile.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteLockFile.testdb)
def acquire(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
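        # In effect: timeout=10 polls roughly once per second; timeout=None
        # polls every 0.1s indefinitely; timeout <= 0 is a single
        # non-blocking attempt (AlreadyLocked is raised on the first miss).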
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
                    # Someone else has the lock and we are impatient.
raise AlreadyLocked("%s is already locked" % self.path)
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me (by %s)" %
(self.unique_name, self._who_is_locking()))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
        return bool(rows)
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
        return bool(cursor.fetchall())
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
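# A minimal usage sketch (the API mirrors the other lockfile backends; the
# path below is illustrative):
def _example_usage(path='somefile'):
    lock = SQLiteLockFile(path, threaded=True)
    lock.acquire(timeout=5)  # raises LockTimeout if not acquired within 5s
    try:
        pass  # critical section goes here
    finally:
        lock.release()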
| gpl-3.0 |
looker/sentry | src/sentry/south_migrations/0250_auto__add_unique_userreport_project_event_id.py | 2 | 74893 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
if not db.dry_run:
UserReport = orm['sentry.UserReport']
# Clean up any potential duplicates and preserve the most recent one
# before adding in the unique constraint. This isn't the most efficient
            # way to do this, but the volume of data is very low, so it's negligible
seen = set()
dupe_ids = UserReport.objects.values_list('event_id').annotate(
models.Count('id'),
).values_list('event_id').filter(
id__count__gt=1
)
for report in UserReport.objects.filter(event_id__in=dupe_ids).order_by('-date_added'):
event_id = report.event_id
if event_id in seen:
report.delete()
else:
seen.add(event_id)
# Adding unique constraint on 'UserReport', fields ['project', 'event_id']
db.create_unique('sentry_userreport', ['project_id', 'event_id'])
def backwards(self, orm):
# Removing unique constraint on 'UserReport', fields ['project', 'event_id']
db.delete_unique('sentry_userreport', ['project_id', 'event_id'])
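    # The dedup pass in forwards() boils down to "keep the newest row per
    # event_id". A plain-Python sketch of the pattern (hypothetical
    # (event_id, date_added) tuples, newest-first, mirroring the
    # order_by('-date_added') above):
    #
    #     seen, kept = set(), []
    #     for event_id, date_added in reports:
    #         if event_id not in seen:
    #             seen.add(event_id)
    #             kept.append((event_id, date_added))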
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 5, 16, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together': "(('event_id', 'key_id', 'value_id'),)",
'object_name': 'EventTag',
'index_together': "(('project_id', 'key_id', 'value_id'),)"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color':
('django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
| bsd-3-clause |
mmpagani/oq-hazardlib | openquake/hazardlib/mfd/truncated_gr.py | 2 | 9984 | # The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.mfd.truncated_gr` defines a Truncated
Gutenberg-Richter MFD.
"""
import math
from openquake.hazardlib.mfd.base import BaseMFD
from openquake.hazardlib.slots import with_slots
@with_slots
class TruncatedGRMFD(BaseMFD):
"""
Truncated Gutenberg-Richter MFD is defined in a functional form.
The annual occurrence rate for a specific bin (magnitude band)
is defined as ::
rate = 10 ** (a_val - b_val * mag_lo) - 10 ** (a_val - b_val * mag_hi)
where
* ``a_val`` is the cumulative ``a`` value (``10 ** a`` is the number
of earthquakes per year with magnitude greater than or equal to 0),
* ``b_val`` is Gutenberg-Richter ``b`` value -- the decay rate
of exponential distribution. It describes the relative size distribution
of earthquakes: a higher ``b`` value indicates a relatively larger
proportion of small events and vice versa.
* ``mag_lo`` and ``mag_hi`` are lower and upper magnitudes of a specific
bin respectively.
:param min_mag:
The lowest possible magnitude for this MFD. The first bin in the
:meth:`result histogram <get_annual_occurrence_rates>` will be aligned
to make its left border match this value.
:param max_mag:
The highest possible magnitude. The same as for ``min_mag``: the last
bin in the histogram will correspond to the magnitude value equal to
``max_mag - bin_width / 2``.
:param bin_width:
A positive float value -- the width of a single histogram bin.
Values for ``min_mag`` and ``max_mag`` don't have to be aligned with
respect to ``bin_width``. They get rounded accordingly anyway so that
both are divisible by ``bin_width`` just before converting a function
to a histogram. See :meth:`_get_min_mag_and_num_bins`.
"""
MODIFICATIONS = set(('increment_max_mag',
'set_max_mag',
'increment_b',
'set_ab'))
__slots__ = 'min_mag max_mag bin_width a_val b_val'.split()
def __init__(self, min_mag, max_mag, bin_width, a_val, b_val):
self.min_mag = min_mag
self.max_mag = max_mag
self.bin_width = bin_width
self.a_val = a_val
self.b_val = b_val
self.check_constraints()
def check_constraints(self):
"""
Checks the following constraints:
* Bin width is greater than 0.
* Minimum magnitude is non-negative.
* Maximum magnitude is greater than minimum magnitude
by at least one bin width (or equal to that value).
* ``b`` value is positive.
"""
if not self.bin_width > 0:
raise ValueError('bin width must be positive')
if not self.min_mag >= 0:
raise ValueError('minimum magnitude must be non-negative')
if not self.max_mag >= self.min_mag + self.bin_width:
raise ValueError('maximum magnitude must be higher than minimum '
'magnitude by at least one bin width')
if not 0 < self.b_val:
raise ValueError('b value must be positive')
def _get_rate(self, mag):
"""
Calculate and return an annual occurrence rate for a specific bin.
:param mag:
Magnitude value corresponding to the center of the bin of interest.
:returns:
Float number, the annual occurrence rate calculated using formula
described in :class:`TruncatedGRMFD`.
"""
mag_lo = mag - self.bin_width / 2.0
mag_hi = mag + self.bin_width / 2.0
return (10 ** (self.a_val - self.b_val * mag_lo)
- 10 ** (self.a_val - self.b_val * mag_hi))
def _get_min_mag_and_num_bins(self):
"""
Estimate the number of bins in the histogram and return it
along with the first bin center abscissa (magnitude) value.
Rounds ``min_mag`` and ``max_mag`` with respect to ``bin_width``
to make the distance between them include integer number of bins.
:returns:
A tuple of two items: first bin center and total number of bins.
"""
min_mag = round(self.min_mag / self.bin_width) * self.bin_width
max_mag = round(self.max_mag / self.bin_width) * self.bin_width
if min_mag != max_mag:
min_mag += self.bin_width / 2.0
max_mag -= self.bin_width / 2.0
# here we use math round on the result of division and not just
# cast it to integer because for some magnitude values that can't
# be represented as an IEEE 754 double precisely the result can
# look like 7.999999999999 which would become 7 instead of 8
# when naively cast to int, so we would lose the last bin.
num_bins = int(round((max_mag - min_mag) / self.bin_width)) + 1
return min_mag, num_bins
def get_min_max_mag(self):
"""
Return the minimum and maximum magnitudes.
"""
min_mag, num_bins = self._get_min_mag_and_num_bins()
return min_mag, min_mag + self.bin_width * (num_bins - 1)
def get_annual_occurrence_rates(self):
"""
Calculate and return the annual occurrence rates histogram.
The result histogram has only one bin if minimum and maximum magnitude
values appear equal after rounding.
:returns:
See :meth:
`openquake.hazardlib.mfd.base.BaseMFD.get_annual_occurrence_rates`.
"""
mag, num_bins = self._get_min_mag_and_num_bins()
rates = []
for i in xrange(num_bins):
rate = self._get_rate(mag)
rates.append((mag, rate))
mag += self.bin_width
return rates
def _get_total_moment_rate(self):
"""
Calculate total moment rate (total energy released per unit time) ::
TMR = ((10**ai) / bi) * (10 ** (bi*max_mag) - 10 ** (bi*min_mag))
where ``ai = a + log10(b) + 9.05`` and ``bi = 1.5 - b``.
In case of ``bi == 0`` the following formula is applied::
TMR = (10 ** ai) * (max_mag - min_mag)
:returns:
Float, calculated TMR value in ``N * m / year``
(Newton-meter per year).
"""
ai = 9.05 + self.a_val + math.log10(self.b_val)
bi = 1.5 - self.b_val
if bi == 0.0:
return (10 ** ai) * (self.max_mag - self.min_mag)
else:
return (((10 ** ai) / bi) *
(10 ** (bi * self.max_mag) - 10 ** (bi * self.min_mag)))
def _set_a(self, tmr):
"""
Recalculate an ``a`` value preserving a total moment rate ``tmr`` ::
a = (log10((tmr * bi) / (10 ** (bi*max_mag) - 10 ** (bi*min_mag)))
- 9.05 - log10(b))
where ``bi = 1.5 - b``. If ``bi == 0`` the following formula is used:
a = log10(tmr / (max_mag - min_mag)) - 9.05 - log10(b)
"""
bi = 1.5 - self.b_val
if bi == 0.0:
self.a_val = (math.log10(tmr / (self.max_mag - self.min_mag))
- 9.05
- math.log10(self.b_val))
else:
self.a_val = (math.log10(tmr * bi / (10 ** (bi * self.max_mag)
- 10 ** (bi * self.min_mag)))
- 9.05
- math.log10(self.b_val))
def modify_increment_max_mag(self, value):
"""
Apply relative maximum magnitude modification.
:param value:
A float value to add to ``max_mag``.
The Gutenberg-Richter ``a`` value is :meth:`recalculated <_set_a>`
with respect to old :meth:`total moment rate <_get_total_moment_rate>`.
"""
tmr = self._get_total_moment_rate()
self.max_mag += value
# need to check constraints here because _set_a() would die
# if new max_mag <= min_mag.
self.check_constraints()
self._set_a(tmr)
def modify_set_max_mag(self, value):
"""
Apply absolute maximum magnitude modification.
:param value:
A float value to assign to ``max_mag``.
No specific recalculation of other Gutenberg-Richter parameters
is done after assigning a new value to ``max_mag``.
"""
self.max_mag = value
def modify_increment_b(self, value):
"""
Apply relative ``b``-value modification.
:param value:
A float value to add to ``b_val``.
After changing ``b_val`` the ``a_val`` is recalculated the same
way as for :meth:`modify_increment_max_mag` (with
respect to TMR).
"""
tmr = self._get_total_moment_rate()
self.b_val += value
self.check_constraints()
self._set_a(tmr)
def modify_set_ab(self, a_val, b_val):
"""
Apply absolute ``a`` and ``b`` values modification.
:param a_val:
A float value to use as a new ``a_val``.
:param b_val:
A float value to use as a new ``b_val``.
No recalculation of other Gutenberg-Richter parameters is done.
"""
self.b_val = b_val
self.a_val = a_val
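# Illustrative sketch, not part of the original module: exercise the MFD
# with arbitrary example parameters (a_val=4.0, b_val=1.0 are made-up
# values, not calibrated ones) and check that incrementing max_mag
# preserves the total moment rate, as the docstrings above describe.
if __name__ == '__main__':
mfd = TruncatedGRMFD(min_mag=5.0, max_mag=7.0, bin_width=0.1,
a_val=4.0, b_val=1.0)
for mag, rate in mfd.get_annual_occurrence_rates():
# each pair is a bin centre and its annual rate from
# 10 ** (a - b * mag_lo) - 10 ** (a - b * mag_hi)
print('M=%.2f rate=%.6f' % (mag, rate))
tmr = mfd._get_total_moment_rate()
mfd.modify_increment_max_mag(0.5)
# a_val was rebalanced by _set_a(), so TMR is unchanged
assert abs(mfd._get_total_moment_rate() - tmr) < 1e-6 * tmr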
| agpl-3.0 |
fluentstream/asterisk-p2p | res/pjproject/tests/pjsua/scripts-sendto/252_multipart_ok_clutter.py | 3 | 1121 | # $Id: 252_multipart_ok_clutter.py 369517 2012-07-01 17:28:57Z file $
import inc_sip as sip
import inc_sdp as sdp
body = \
"""
This is the preamble. It is to be ignored, though it
is a handy place for composition agents to include an
explanatory note to non-MIME conformant readers.
--123:45
Content-Type: text/plain
The first part is definitely not SDP
--123:45
This is implicitly typed plain US-ASCII text.
It does NOT end with a linebreak.
--123:45
Content-Type: application/sdp
v=0
o=- 0 0 IN IP4 127.0.0.1
s=pjmedia
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
--123:45--
This is the epilogue. It is also to be ignored.
"""
args = "--null-audio --auto-answer 200 --max-calls 1"
extra_headers = "Content-Type: multipart/mixed; boundary=\"123:45\""
include = ["v=0", "m=audio"]
exclude = []
sendto_cfg = sip.SendtoCfg( "Valid but cluttered multipart/mixed body containing SDP",
pjsua_args=args, sdp="", resp_code=200,
extra_headers=extra_headers, body=body,
resp_inc=include, resp_exc=exclude)
| gpl-2.0 |
jspargo/AneMo | django/lib/python2.7/site-packages/django/contrib/gis/db/models/lookups.py | 48 | 1423 | from django.db.models.lookups import Lookup
from django.db.models.sql.expressions import SQLEvaluator
class GISLookup(Lookup):
def as_sql(self, qn, connection):
from django.contrib.gis.db.models.sql import GeoWhereNode
# We use the same approach as was used by GeoWhereNode. It would
# be a good idea to upgrade GIS to use similar code that is used
# for other lookups.
if isinstance(self.rhs, SQLEvaluator):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = GeoWhereNode._check_geo_field(self.rhs.opts, self.rhs.expression.name)
if not geo_fld:
raise ValueError('No geographic field found in expression.')
self.rhs.srid = geo_fld.srid
db_type = self.lhs.output_field.db_type(connection=connection)
params = self.lhs.output_field.get_db_prep_lookup(
self.lookup_name, self.rhs, connection=connection)
lhs_sql, lhs_params = self.process_lhs(qn, connection)
# lhs_params not currently supported.
assert not lhs_params
data = (lhs_sql, db_type)
spatial_sql, spatial_params = connection.ops.spatial_lookup_sql(
data, self.lookup_name, self.rhs, self.lhs.output_field, qn)
return spatial_sql, spatial_params + params
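# Illustrative sketch, not part of the original module: GISLookup is not
# called directly; it is registered on geometry fields and invoked by the
# SQL compiler when a spatial lookup keyword appears in a queryset, e.g.
# (model and field names hypothetical):
#
# from django.contrib.gis.geos import Point
# Zipcode.objects.filter(poly__contains=Point(-95.36, 29.76))
#
# as_sql() above then renders the backend-specific spatial predicate.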
| gpl-2.0 |
chamikaramj/incubator-beam | sdks/python/apache_beam/runners/dataflow/internal/clients/dataflow/dataflow_v1b3_client.py | 15 | 28548 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated client library for dataflow version v1b3."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from apache_beam.runners.dataflow.internal.clients.dataflow import dataflow_v1b3_messages as messages
class DataflowV1b3(base_api.BaseApiClient):
"""Generated client library for service dataflow version v1b3."""
MESSAGES_MODULE = messages
BASE_URL = u'https://dataflow.googleapis.com/'
_PACKAGE = u'dataflow'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/userinfo.email']
_VERSION = u'v1b3'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'DataflowV1b3'
_URL_VERSION = u'v1b3'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new dataflow handle."""
url = url or self.BASE_URL
super(DataflowV1b3, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.projects_jobs_debug = self.ProjectsJobsDebugService(self)
self.projects_jobs_messages = self.ProjectsJobsMessagesService(self)
self.projects_jobs_workItems = self.ProjectsJobsWorkItemsService(self)
self.projects_jobs = self.ProjectsJobsService(self)
self.projects_locations_jobs_messages = self.ProjectsLocationsJobsMessagesService(self)
self.projects_locations_jobs_workItems = self.ProjectsLocationsJobsWorkItemsService(self)
self.projects_locations_jobs = self.ProjectsLocationsJobsService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects_templates = self.ProjectsTemplatesService(self)
self.projects = self.ProjectsService(self)
class ProjectsJobsDebugService(base_api.BaseApiService):
"""Service class for the projects_jobs_debug resource."""
_NAME = u'projects_jobs_debug'
def __init__(self, client):
super(DataflowV1b3.ProjectsJobsDebugService, self).__init__(client)
self._method_configs = {
'GetConfig': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.jobs.debug.getConfig',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/jobs/{jobId}/debug/getConfig',
request_field=u'getDebugConfigRequest',
request_type_name=u'DataflowProjectsJobsDebugGetConfigRequest',
response_type_name=u'GetDebugConfigResponse',
supports_download=False,
),
'SendCapture': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.jobs.debug.sendCapture',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/jobs/{jobId}/debug/sendCapture',
request_field=u'sendDebugCaptureRequest',
request_type_name=u'DataflowProjectsJobsDebugSendCaptureRequest',
response_type_name=u'SendDebugCaptureResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def GetConfig(self, request, global_params=None):
"""Get encoded debug configuration for component. Not cacheable.
Args:
request: (DataflowProjectsJobsDebugGetConfigRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GetDebugConfigResponse) The response message.
"""
config = self.GetMethodConfig('GetConfig')
return self._RunMethod(
config, request, global_params=global_params)
def SendCapture(self, request, global_params=None):
"""Send encoded debug capture data for component.
Args:
request: (DataflowProjectsJobsDebugSendCaptureRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(SendDebugCaptureResponse) The response message.
"""
config = self.GetMethodConfig('SendCapture')
return self._RunMethod(
config, request, global_params=global_params)
class ProjectsJobsMessagesService(base_api.BaseApiService):
"""Service class for the projects_jobs_messages resource."""
_NAME = u'projects_jobs_messages'
def __init__(self, client):
super(DataflowV1b3.ProjectsJobsMessagesService, self).__init__(client)
self._method_configs = {
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'dataflow.projects.jobs.messages.list',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[u'endTime', u'location', u'minimumImportance', u'pageSize', u'pageToken', u'startTime'],
relative_path=u'v1b3/projects/{projectId}/jobs/{jobId}/messages',
request_field='',
request_type_name=u'DataflowProjectsJobsMessagesListRequest',
response_type_name=u'ListJobMessagesResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Request the job status.
Args:
request: (DataflowProjectsJobsMessagesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListJobMessagesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
class ProjectsJobsWorkItemsService(base_api.BaseApiService):
"""Service class for the projects_jobs_workItems resource."""
_NAME = u'projects_jobs_workItems'
def __init__(self, client):
super(DataflowV1b3.ProjectsJobsWorkItemsService, self).__init__(client)
self._method_configs = {
'Lease': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.jobs.workItems.lease',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/jobs/{jobId}/workItems:lease',
request_field=u'leaseWorkItemRequest',
request_type_name=u'DataflowProjectsJobsWorkItemsLeaseRequest',
response_type_name=u'LeaseWorkItemResponse',
supports_download=False,
),
'ReportStatus': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.jobs.workItems.reportStatus',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/jobs/{jobId}/workItems:reportStatus',
request_field=u'reportWorkItemStatusRequest',
request_type_name=u'DataflowProjectsJobsWorkItemsReportStatusRequest',
response_type_name=u'ReportWorkItemStatusResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Lease(self, request, global_params=None):
"""Leases a dataflow WorkItem to run.
Args:
request: (DataflowProjectsJobsWorkItemsLeaseRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(LeaseWorkItemResponse) The response message.
"""
config = self.GetMethodConfig('Lease')
return self._RunMethod(
config, request, global_params=global_params)
def ReportStatus(self, request, global_params=None):
"""Reports the status of dataflow WorkItems leased by a worker.
Args:
request: (DataflowProjectsJobsWorkItemsReportStatusRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ReportWorkItemStatusResponse) The response message.
"""
config = self.GetMethodConfig('ReportStatus')
return self._RunMethod(
config, request, global_params=global_params)
class ProjectsJobsService(base_api.BaseApiService):
"""Service class for the projects_jobs resource."""
_NAME = u'projects_jobs'
def __init__(self, client):
super(DataflowV1b3.ProjectsJobsService, self).__init__(client)
self._method_configs = {
'Create': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.jobs.create',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[u'location', u'replaceJobId', u'view'],
relative_path=u'v1b3/projects/{projectId}/jobs',
request_field=u'job',
request_type_name=u'DataflowProjectsJobsCreateRequest',
response_type_name=u'Job',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'dataflow.projects.jobs.get',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[u'location', u'view'],
relative_path=u'v1b3/projects/{projectId}/jobs/{jobId}',
request_field='',
request_type_name=u'DataflowProjectsJobsGetRequest',
response_type_name=u'Job',
supports_download=False,
),
'GetMetrics': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'dataflow.projects.jobs.getMetrics',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[u'location', u'startTime'],
relative_path=u'v1b3/projects/{projectId}/jobs/{jobId}/metrics',
request_field='',
request_type_name=u'DataflowProjectsJobsGetMetricsRequest',
response_type_name=u'JobMetrics',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'dataflow.projects.jobs.list',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[u'filter', u'location', u'pageSize', u'pageToken', u'view'],
relative_path=u'v1b3/projects/{projectId}/jobs',
request_field='',
request_type_name=u'DataflowProjectsJobsListRequest',
response_type_name=u'ListJobsResponse',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'dataflow.projects.jobs.update',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[u'location'],
relative_path=u'v1b3/projects/{projectId}/jobs/{jobId}',
request_field=u'job',
request_type_name=u'DataflowProjectsJobsUpdateRequest',
response_type_name=u'Job',
supports_download=False,
),
}
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a Cloud Dataflow job.
Args:
request: (DataflowProjectsJobsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Gets the state of the specified Cloud Dataflow job.
Args:
request: (DataflowProjectsJobsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def GetMetrics(self, request, global_params=None):
"""Request the job status.
Args:
request: (DataflowProjectsJobsGetMetricsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(JobMetrics) The response message.
"""
config = self.GetMethodConfig('GetMetrics')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""List the jobs of a project.
Args:
request: (DataflowProjectsJobsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListJobsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates the state of an existing Cloud Dataflow job.
Args:
request: (DataflowProjectsJobsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class ProjectsLocationsJobsMessagesService(base_api.BaseApiService):
"""Service class for the projects_locations_jobs_messages resource."""
_NAME = u'projects_locations_jobs_messages'
def __init__(self, client):
super(DataflowV1b3.ProjectsLocationsJobsMessagesService, self).__init__(client)
self._method_configs = {
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'dataflow.projects.locations.jobs.messages.list',
ordered_params=[u'projectId', u'location', u'jobId'],
path_params=[u'jobId', u'location', u'projectId'],
query_params=[u'endTime', u'minimumImportance', u'pageSize', u'pageToken', u'startTime'],
relative_path=u'v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/messages',
request_field='',
request_type_name=u'DataflowProjectsLocationsJobsMessagesListRequest',
response_type_name=u'ListJobMessagesResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Request the job status.
Args:
request: (DataflowProjectsLocationsJobsMessagesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListJobMessagesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
class ProjectsLocationsJobsWorkItemsService(base_api.BaseApiService):
"""Service class for the projects_locations_jobs_workItems resource."""
_NAME = u'projects_locations_jobs_workItems'
def __init__(self, client):
super(DataflowV1b3.ProjectsLocationsJobsWorkItemsService, self).__init__(client)
self._method_configs = {
'Lease': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.locations.jobs.workItems.lease',
ordered_params=[u'projectId', u'location', u'jobId'],
path_params=[u'jobId', u'location', u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/workItems:lease',
request_field=u'leaseWorkItemRequest',
request_type_name=u'DataflowProjectsLocationsJobsWorkItemsLeaseRequest',
response_type_name=u'LeaseWorkItemResponse',
supports_download=False,
),
'ReportStatus': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.locations.jobs.workItems.reportStatus',
ordered_params=[u'projectId', u'location', u'jobId'],
path_params=[u'jobId', u'location', u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/workItems:reportStatus',
request_field=u'reportWorkItemStatusRequest',
request_type_name=u'DataflowProjectsLocationsJobsWorkItemsReportStatusRequest',
response_type_name=u'ReportWorkItemStatusResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Lease(self, request, global_params=None):
"""Leases a dataflow WorkItem to run.
Args:
request: (DataflowProjectsLocationsJobsWorkItemsLeaseRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(LeaseWorkItemResponse) The response message.
"""
config = self.GetMethodConfig('Lease')
return self._RunMethod(
config, request, global_params=global_params)
def ReportStatus(self, request, global_params=None):
"""Reports the status of dataflow WorkItems leased by a worker.
Args:
request: (DataflowProjectsLocationsJobsWorkItemsReportStatusRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ReportWorkItemStatusResponse) The response message.
"""
config = self.GetMethodConfig('ReportStatus')
return self._RunMethod(
config, request, global_params=global_params)
class ProjectsLocationsJobsService(base_api.BaseApiService):
"""Service class for the projects_locations_jobs resource."""
_NAME = u'projects_locations_jobs'
def __init__(self, client):
super(DataflowV1b3.ProjectsLocationsJobsService, self).__init__(client)
self._method_configs = {
'Create': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.locations.jobs.create',
ordered_params=[u'projectId', u'location'],
path_params=[u'location', u'projectId'],
query_params=[u'replaceJobId', u'view'],
relative_path=u'v1b3/projects/{projectId}/locations/{location}/jobs',
request_field=u'job',
request_type_name=u'DataflowProjectsLocationsJobsCreateRequest',
response_type_name=u'Job',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'dataflow.projects.locations.jobs.get',
ordered_params=[u'projectId', u'location', u'jobId'],
path_params=[u'jobId', u'location', u'projectId'],
query_params=[u'view'],
relative_path=u'v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}',
request_field='',
request_type_name=u'DataflowProjectsLocationsJobsGetRequest',
response_type_name=u'Job',
supports_download=False,
),
'GetMetrics': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'dataflow.projects.locations.jobs.getMetrics',
ordered_params=[u'projectId', u'location', u'jobId'],
path_params=[u'jobId', u'location', u'projectId'],
query_params=[u'startTime'],
relative_path=u'v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}/metrics',
request_field='',
request_type_name=u'DataflowProjectsLocationsJobsGetMetricsRequest',
response_type_name=u'JobMetrics',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'dataflow.projects.locations.jobs.list',
ordered_params=[u'projectId', u'location'],
path_params=[u'location', u'projectId'],
query_params=[u'filter', u'pageSize', u'pageToken', u'view'],
relative_path=u'v1b3/projects/{projectId}/locations/{location}/jobs',
request_field='',
request_type_name=u'DataflowProjectsLocationsJobsListRequest',
response_type_name=u'ListJobsResponse',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'dataflow.projects.locations.jobs.update',
ordered_params=[u'projectId', u'location', u'jobId'],
path_params=[u'jobId', u'location', u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}',
request_field=u'job',
request_type_name=u'DataflowProjectsLocationsJobsUpdateRequest',
response_type_name=u'Job',
supports_download=False,
),
}
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a Cloud Dataflow job.
Args:
request: (DataflowProjectsLocationsJobsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Gets the state of the specified Cloud Dataflow job.
Args:
request: (DataflowProjectsLocationsJobsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def GetMetrics(self, request, global_params=None):
"""Request the job status.
Args:
request: (DataflowProjectsLocationsJobsGetMetricsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(JobMetrics) The response message.
"""
config = self.GetMethodConfig('GetMetrics')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""List the jobs of a project.
Args:
request: (DataflowProjectsLocationsJobsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListJobsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates the state of an existing Cloud Dataflow job.
Args:
request: (DataflowProjectsLocationsJobsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = u'projects_locations'
def __init__(self, client):
super(DataflowV1b3.ProjectsLocationsService, self).__init__(client)
self._method_configs = {
}
self._upload_configs = {
}
class ProjectsTemplatesService(base_api.BaseApiService):
"""Service class for the projects_templates resource."""
_NAME = u'projects_templates'
def __init__(self, client):
super(DataflowV1b3.ProjectsTemplatesService, self).__init__(client)
self._method_configs = {
'Create': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.templates.create',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/templates',
request_field=u'createJobFromTemplateRequest',
request_type_name=u'DataflowProjectsTemplatesCreateRequest',
response_type_name=u'Job',
supports_download=False,
),
}
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a Cloud Dataflow job from a template.
Args:
request: (DataflowProjectsTemplatesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(DataflowV1b3.ProjectsService, self).__init__(client)
self._method_configs = {
'WorkerMessages': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'dataflow.projects.workerMessages',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[],
relative_path=u'v1b3/projects/{projectId}/WorkerMessages',
request_field=u'sendWorkerMessagesRequest',
request_type_name=u'DataflowProjectsWorkerMessagesRequest',
response_type_name=u'SendWorkerMessagesResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def WorkerMessages(self, request, global_params=None):
"""Send a worker_message to the service.
Args:
request: (DataflowProjectsWorkerMessagesRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(SendWorkerMessagesResponse) The response message.
"""
config = self.GetMethodConfig('WorkerMessages')
return self._RunMethod(
config, request, global_params=global_params)
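# Illustrative sketch, not part of the generated module: a hypothetical
# helper showing how the generated services are called. It assumes default
# application credentials are discoverable; the message and field names are
# taken from the method configs above.
def _example_list_jobs(project_id):
client = DataflowV1b3()
request = messages.DataflowProjectsJobsListRequest(projectId=project_id)
response = client.projects_jobs.List(request)
return [job.name for job in (response.jobs or [])]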
| apache-2.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py | 1730 | 2746 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def __init__(self, source, encoding):
_base.Filter.__init__(self, source)
self.encoding = encoding
def __iter__(self):
state = "pre_head"
meta_found = (self.encoding is None)
pending = []
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag":
if token["name"].lower() == "head":
state = "in_head"
elif type == "EmptyTag":
if token["name"].lower() == "meta":
# replace charset with actual encoding
has_http_equiv_content_type = False
for (namespace, name), value in token["data"].items():
if namespace is not None:
continue
elif name.lower() == 'charset':
token["data"][(namespace, name)] = self.encoding
meta_found = True
break
elif name == 'http-equiv' and value.lower() == 'content-type':
has_http_equiv_content_type = True
else:
if has_http_equiv_content_type and (None, "content") in token["data"]:
token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
meta_found = True
elif token["name"].lower() == "head" and not meta_found:
# insert meta into empty head
yield {"type": "StartTag", "name": "head",
"data": token["data"]}
yield {"type": "EmptyTag", "name": "meta",
"data": {(None, "charset"): self.encoding}}
yield {"type": "EndTag", "name": "head"}
meta_found = True
continue
elif type == "EndTag":
if token["name"].lower() == "head" and pending:
# insert meta into head (if necessary) and flush pending queue
yield pending.pop(0)
if not meta_found:
yield {"type": "EmptyTag", "name": "meta",
"data": {(None, "charset"): self.encoding}}
while pending:
yield pending.pop(0)
meta_found = True
state = "post_head"
if state == "in_head":
pending.append(token)
else:
yield token
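# Illustrative sketch, not part of the original module: this filter sits in
# an html5lib serialization pipeline between a tree walker and a serializer,
# e.g. (standard html5lib API names):
#
# import html5lib
# from html5lib.serializer import HTMLSerializer
# dom = html5lib.parse('<html><head></head><body>hi</body></html>')
# walker = html5lib.getTreeWalker('etree')
# stream = Filter(walker(dom), 'utf-8')
# html = ''.join(HTMLSerializer().serialize(stream))
# # a <meta charset="utf-8"> tag is injected into the empty <head>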
| gpl-2.0 |
SebasSBM/django | tests/lookup/tests.py | 89 | 37440 | from __future__ import unicode_literals
import collections
from datetime import datetime
from operator import attrgetter
from unittest import skipUnless
from django.core.exceptions import FieldError
from django.db import connection
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import Article, Author, Game, MyISAMArticle, Player, Season, Tag
class LookupTests(TestCase):
def setUp(self):
# Create a few Authors.
self.au1 = Author(name='Author 1')
self.au1.save()
self.au2 = Author(name='Author 2')
self.au2.save()
# Create a couple of Articles.
self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
self.a1.save()
self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a2.save()
self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a3.save()
self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
self.a4.save()
self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
self.a5.save()
self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
self.a6.save()
self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
self.a7.save()
# Create a few Tags.
self.t1 = Tag(name='Tag 1')
self.t1.save()
self.t1.articles.add(self.a1, self.a2, self.a3)
self.t2 = Tag(name='Tag 2')
self.t2.save()
self.t2.articles.add(self.a3, self.a4, self.a5)
self.t3 = Tag(name='Tag 3')
self.t3.save()
self.t3.articles.add(self.a5, self.a6, self.a7)
def test_exists(self):
# We can use .exists() to check that there are some
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.Iterator)
self.assertQuerysetEqual(Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline'))
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
self.assertRaises(TypeError, Article.objects.in_bulk)
self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah')
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
transform=identity)
self.assertQuerysetEqual(Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
transform=identity)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
transform=identity)
# The values() method works with "extra" fields specified in extra(select).
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
transform=identity)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertQuerysetEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}], transform=identity)
# You can specify fields from forward and reverse relations, just like filter().
self.assertQuerysetEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
], transform=identity)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
], transform=identity)
self.assertQuerysetEqual(
(
Author.objects
.values('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
], transform=identity)
# However, an exception FieldDoesNotExist will be thrown if you specify
# a non-existent field name in values() (a field that is neither in the
# model nor in extra(select)).
self.assertRaises(FieldError,
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values,
'id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0)
}], transform=identity)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
], transform=identity)
self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity)
self.assertQuerysetEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
transform=identity)
self.assertQuerysetEqual(
(
Author.objects
.values_list('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
], transform=identity)
self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()),
'<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()),
'<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')),
'<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()),
'<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()),
'<Article: Article 6>')
self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date)
self.assertEqual(repr(self.a6.get_next_by_pub_date()),
'<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()),
'<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()),
'<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()),
'<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()),
'<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()),
'<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()),
'<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()),
'<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
a8 = Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
a8.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>'])
a9 = Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
a9.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>'])
a10 = Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
a10.save()
self.assertQuerysetEqual(Article.objects.filter(headline__contains='\\'),
['<Article: Article with \ backslash>'])
def test_exclude(self):
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
# exclude() is the opposite of filter() when doing lookups:
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.exclude(headline__startswith="Article_"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.exclude(headline="Article 7"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 1>',
])
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(
Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(
Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual(
[article for article in Article.objects.none().iterator()],
[])
def test_in(self):
# using __in with an empty list should return an empty query set
self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
self.assertQuerysetEqual(Article.objects.exclude(id__in=[]),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
try:
Article.objects.filter(pub_date_year='2005').count()
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' "
"into field. Choices are: author, author_id, headline, "
"id, pub_date, tag")
try:
Article.objects.filter(headline__starts='Article')
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(
str(ex), "Unsupported lookup 'starts' for CharField "
"or join on the field not permitted.")
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
a1 = Article(pub_date=now, headline='f')
a1.save()
a2 = Article(pub_date=now, headline='fo')
a2.save()
a3 = Article(pub_date=now, headline='foo')
a3.save()
a4 = Article(pub_date=now, headline='fooo')
a4.save()
a5 = Article(pub_date=now, headline='hey-Foo')
a5.save()
a6 = Article(pub_date=now, headline='bar')
a6.save()
a7 = Article(pub_date=now, headline='AbBa')
a7.save()
a8 = Article(pub_date=now, headline='baz')
a8.save()
a9 = Article(pub_date=now, headline='baxZ')
a9.save()
# zero-or-more
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo*'),
['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'fo*'),
[
'<Article: f>',
'<Article: fo>',
'<Article: foo>',
'<Article: fooo>',
'<Article: hey-Foo>',
])
# one-or-more
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo+'),
['<Article: fo>', '<Article: foo>', '<Article: fooo>'])
# wildcard
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fooo?'),
['<Article: foo>', '<Article: fooo>'])
# leading anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^b'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'),
['<Article: AbBa>'])
# trailing anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'),
['<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'z$'),
['<Article: baxZ>', '<Article: baz>'])
# character sets
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba[rz]'),
['<Article: bar>', '<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'),
['<Article: baxZ>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'ba[RxZ]'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
# and more articles:
a10 = Article(pub_date=now, headline='foobar')
a10.save()
a11 = Article(pub_date=now, headline='foobaz')
a11.save()
a12 = Article(pub_date=now, headline='ooF')
a12.save()
a13 = Article(pub_date=now, headline='foobarbaz')
a13.save()
a14 = Article(pub_date=now, headline='zoocarfaz')
a14.save()
a15 = Article(pub_date=now, headline='barfoobaz')
a15.save()
a16 = Article(pub_date=now, headline='bazbaRFOO')
a16.save()
# alternation
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
'<Article: ooF>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^foo(f|b)'),
['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'])
# greedy matching
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b.*az'),
[
'<Article: barfoobaz>',
'<Article: baz>',
'<Article: bazbaRFOO>',
'<Article: foobarbaz>',
'<Article: foobaz>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'b.*ar'),
[
'<Article: bar>',
'<Article: barfoobaz>',
'<Article: bazbaRFOO>',
'<Article: foobar>',
'<Article: foobarbaz>',
])
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
a10 = Article(pub_date=now, headline='foobar')
a10.save()
a11 = Article(pub_date=now, headline='foobaz')
a11.save()
a12 = Article(pub_date=now, headline='ooF')
a12.save()
a13 = Article(pub_date=now, headline='foobarbaz')
a13.save()
a14 = Article(pub_date=now, headline='zoocarfaz')
a14.save()
a15 = Article(pub_date=now, headline='barfoobaz')
a15.save()
a16 = Article(pub_date=now, headline='bazbaRFOO')
a16.save()
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b(.).*b\1'),
['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'])
def test_regex_null(self):
"""
Ensure that a regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
Ensure that a regex lookup does not fail on non-string fields
"""
Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'),
['<Season: 2013>'])
def test_regex_non_ascii(self):
"""
Ensure that a regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
Ensure that a lookup query containing non-fields raises the proper
exception.
"""
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah=99)
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah__exact=99)
with self.assertRaises(FieldError):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Ensure that genuine field names don't collide with built-in lookup
types ('year', 'gt', 'range', 'in' etc.).
Refs #11670.
"""
# Here we're using 'gt' as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games = Game.objects.filter(season__year__in=[2009, 2010])
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games = Game.objects.filter(season__year=2009)
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games = Game.objects.filter(season__year__in=[2011])
johnson = Player.objects.create(name="Johnson")
johnson.games = Game.objects.filter(season__year__in=[2011])
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
def test_chain_date_time_lookups(self):
self.assertQuerysetEqual(
Article.objects.filter(pub_date__month__gt=7),
['<Article: Article 5>', '<Article: Article 6>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__day__gte=27),
['<Article: Article 2>', '<Article: Article 3>',
'<Article: Article 4>', '<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__hour__lt=8),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__minute__lte=0),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 5>', '<Article: Article 6>',
'<Article: Article 7>'],
ordered=False
)
class LookupTransactionTests(TransactionTestCase):
available_apps = ['lookup']
@skipUnless(connection.vendor == 'mysql', 'requires MySQL')
def test_mysql_lookup_search(self):
        # To use fulltext indexes on MySQL, either version 5.6+ is needed or
        # the table must use MyISAM. Neither combination is currently
        # available on CI, so let's manually create a MyISAM table for the
        # Article model.
with connection.cursor() as cursor:
cursor.execute(
"CREATE TEMPORARY TABLE myisam_article ("
" id INTEGER PRIMARY KEY AUTO_INCREMENT, "
" headline VARCHAR(100) NOT NULL "
") ENGINE MYISAM")
dr = MyISAMArticle.objects.create(headline='Django Reinhardt')
MyISAMArticle.objects.create(headline='Ringo Star')
        # NOTE: The index needs to be created after the articles have been saved.
cursor.execute(
'CREATE FULLTEXT INDEX myisam_article_ft ON myisam_article (headline)')
self.assertQuerysetEqual(
MyISAMArticle.objects.filter(headline__search='Reinhardt'),
[dr], lambda x: x)
| bsd-3-clause |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_site.py | 1 | 10065 | """Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.test_support import TestSkipped, run_unittest, TESTFN, EnvironmentVarGuard
import __builtin__
import os
import sys
import encodings
import subprocess
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
if "site" in sys.modules:
import site
else:
raise TestSkipped("importation of site.py suppressed")
if not os.path.isdir(site.USER_SITE):
# need to add user site directory for tests
os.makedirs(site.USER_SITE)
site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
"""Tests for helper functions.
The setting of the encoding (set using sys.setdefaultencoding) used by
the Unicode implementation is not tested.
"""
def setUp(self):
"""Save a copy of sys.path"""
self.sys_path = sys.path[:]
def tearDown(self):
"""Restore sys.path"""
sys.path = self.sys_path
def test_makepath(self):
        # Test that makepath() returns an absolute path as its first return
        # value and a case-normalized version of that absolute path as its
        # second value.
path_parts = ("Beginning", "End")
original_dir = os.path.join(*path_parts)
abs_dir, norm_dir = site.makepath(*path_parts)
self.failUnlessEqual(os.path.abspath(original_dir), abs_dir)
if original_dir == os.path.normcase(original_dir):
self.failUnlessEqual(abs_dir, norm_dir)
else:
self.failUnlessEqual(os.path.normcase(abs_dir), norm_dir)
def test_init_pathinfo(self):
dir_set = site._init_pathinfo()
for entry in [site.makepath(path)[1] for path in sys.path
if path and os.path.isdir(path)]:
self.failUnless(entry in dir_set,
"%s from sys.path not found in set returned "
"by _init_pathinfo(): %s" % (entry, dir_set))
def pth_file_tests(self, pth_file):
"""Contain common code for testing results of reading a .pth file"""
self.failUnless(pth_file.imported in sys.modules,
"%s not in sys.path" % pth_file.imported)
self.failUnless(site.makepath(pth_file.good_dir_path)[0] in sys.path)
self.failUnless(not os.path.exists(pth_file.bad_dir_path))
def test_addpackage(self):
        # Make sure addpackage() executes lines that start with 'import' and
        # adds to sys.path any other non-comment line that names a valid
        # directory relative to where the .pth file resides; invalid
        # directories are not added
pth_file = PthFile()
pth_file.cleanup(prep=True) # to make sure that nothing is
# pre-existing that shouldn't be
try:
pth_file.create()
site.addpackage(pth_file.base_dir, pth_file.filename, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
def test_addsitedir(self):
        # Same tests as for test_addpackage, since addsitedir() essentially just
# calls addpackage() for every .pth file in the directory
pth_file = PthFile()
pth_file.cleanup(prep=True) # Make sure that nothing is pre-existing
# that is tested for
try:
pth_file.create()
site.addsitedir(pth_file.base_dir, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
def test_s_option(self):
usersite = site.USER_SITE
self.assert_(usersite in sys.path)
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite])
self.assertEqual(rc, 1)
rc = subprocess.call([sys.executable, '-s', '-c',
'import sys; sys.exit(%r in sys.path)' % usersite])
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONNOUSERSITE"] = "1"
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONUSERBASE"] = "/tmp"
rc = subprocess.call([sys.executable, '-c',
'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
env=env)
self.assertEqual(rc, 1)
class PthFile(object):
"""Helper class for handling testing of .pth files"""
def __init__(self, filename_base=TESTFN, imported="time",
good_dirname="__testdir__", bad_dirname="__bad"):
"""Initialize instance variables"""
self.filename = filename_base + ".pth"
self.base_dir = os.path.abspath('')
self.file_path = os.path.join(self.base_dir, self.filename)
self.imported = imported
self.good_dirname = good_dirname
self.bad_dirname = bad_dirname
self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)
def create(self):
"""Create a .pth file with a comment, blank lines, an ``import
<self.imported>``, a line with self.good_dirname, and a line with
self.bad_dirname.
Creation of the directory for self.good_dir_path (based off of
self.good_dirname) is also performed.
Make sure to call self.cleanup() to undo anything done by this method.
"""
FILE = open(self.file_path, 'w')
try:
print>>FILE, "#import @bad module name"
print>>FILE, "\n"
print>>FILE, "import %s" % self.imported
print>>FILE, self.good_dirname
print>>FILE, self.bad_dirname
finally:
FILE.close()
os.mkdir(self.good_dir_path)
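        # The generated .pth file then looks roughly like this (default
        # arguments assumed: imported='time', good_dirname='__testdir__',
        # bad_dirname='__bad'):
        #
        #     #import @bad module name
        #     <two blank lines, from printing "\n">
        #     import time
        #     __testdir__
        #     __bad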
def cleanup(self, prep=False):
"""Make sure that the .pth file is deleted, self.imported is not in
sys.modules, and that both self.good_dirname and self.bad_dirname are
not existing directories."""
if os.path.exists(self.file_path):
os.remove(self.file_path)
if prep:
self.imported_module = sys.modules.get(self.imported)
if self.imported_module:
del sys.modules[self.imported]
else:
if self.imported_module:
sys.modules[self.imported] = self.imported_module
if os.path.exists(self.good_dir_path):
os.rmdir(self.good_dir_path)
if os.path.exists(self.bad_dir_path):
os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
"""Test side-effects from importing 'site'."""
def setUp(self):
"""Make a copy of sys.path"""
self.sys_path = sys.path[:]
def tearDown(self):
"""Restore sys.path"""
sys.path = self.sys_path
def test_abs__file__(self):
# Make sure all imported modules have their __file__ attribute
# as an absolute path.
# Handled by abs__file__()
site.abs__file__()
for module in (sys, os, __builtin__):
try:
self.assertTrue(os.path.isabs(module.__file__), repr(module))
except AttributeError:
continue
# We could try everything in sys.modules; however, when regrtest.py
# runs something like test_frozen before test_site, then we will
# be testing things loaded *after* test_site did path normalization
def test_no_duplicate_paths(self):
# No duplicate paths should exist in sys.path
# Handled by removeduppaths()
site.removeduppaths()
seen_paths = set()
for path in sys.path:
self.failUnless(path not in seen_paths)
seen_paths.add(path)
def test_add_build_dir(self):
# Test that the build directory's Modules directory is used when it
# should be.
# XXX: implement
pass
def test_setting_quit(self):
# 'quit' and 'exit' should be injected into __builtin__
self.failUnless(hasattr(__builtin__, "quit"))
self.failUnless(hasattr(__builtin__, "exit"))
def test_setting_copyright(self):
# 'copyright' and 'credits' should be in __builtin__
self.failUnless(hasattr(__builtin__, "copyright"))
self.failUnless(hasattr(__builtin__, "credits"))
def test_setting_help(self):
# 'help' should be set in __builtin__
self.failUnless(hasattr(__builtin__, "help"))
def test_aliasing_mbcs(self):
if sys.platform == "win32":
import locale
if locale.getdefaultlocale()[1].startswith('cp'):
for value in encodings.aliases.aliases.itervalues():
if value == "mbcs":
break
else:
self.fail("did not alias mbcs")
def test_setdefaultencoding_removed(self):
# Make sure sys.setdefaultencoding is gone
self.failUnless(not hasattr(sys, "setdefaultencoding"))
def test_sitecustomize_executed(self):
# If sitecustomize is available, it should have been imported.
if "sitecustomize" not in sys.modules:
try:
import sitecustomize
except ImportError:
pass
else:
self.fail("sitecustomize not imported automatically")
def test_main():
run_unittest(HelperFunctionsTests, ImportSideEffectTests)
if __name__ == "__main__":
test_main()
| mit |
disqus/graphite-web | docs/conf.py | 27 | 7754 | # -*- coding: utf-8 -*-
#
# Graphite documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 21 12:31:35 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../webapp'))
sys.path.append(os.path.abspath('../whisper'))
sys.path.append(os.path.abspath('../carbon'))
os.environ['DJANGO_SETTINGS_MODULE'] = "graphite.settings"
# Prevent graphite logger from complaining about missing log dir.
from graphite import settings
settings.LOG_DIR = os.path.abspath('.')
# Bring in the new ReadTheDocs sphinx theme
import sphinx_rtd_theme
# Define a custom autodoc documenter for the render.functions module
# This removes the requestContext parameter, which doesn't make sense in the context of the docs
import re
from sphinx.ext import autodoc
class RenderFunctionDocumenter(autodoc.FunctionDocumenter):
priority = 10 # Override FunctionDocumenter
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return autodoc.FunctionDocumenter.can_document_member(member, membername, isattr, parent) and \
parent.name == 'graphite.render.functions'
def format_args(self):
args = autodoc.FunctionDocumenter.format_args(self)
if args is not None:
# Really, a regex sub here is by far the easiest way
return re.sub('requestContext, ','',args)
def setup(app):
app.add_autodocumenter(RenderFunctionDocumenter)
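# For reference, the substitution above behaves like this on a typical
# autodoc-formatted argument string (the example args are made up):
#
#   re.sub('requestContext, ', '', '(requestContext, seriesList, n=10)')
#   # -> '(seriesList, n=10)'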
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Mapping for external links such as Python standard lib
intersphinx_mapping = {
'python': ('http://docs.python.org/', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Graphite'
copyright = u'2008-2012, Chris Davis; 2011-2015 The Graphite Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.10.0'
# The full version, including alpha/beta/rc tags.
release = '0.10.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Graphitedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Graphite.tex', u'Graphite Documentation',
u'Chris Davis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| apache-2.0 |
styxit/CouchPotatoServer | libs/subliminal/services/subswiki.py | 105 | 5182 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords, split_keyword
from ..videos import Episode, Movie
from bs4 import BeautifulSoup
import logging
import urllib
logger = logging.getLogger(__name__)
class SubsWiki(ServiceBase):
server_url = 'http://www.subswiki.com'
api_based = False
languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'),
u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'),
u'English (UK)': Language('eng-GB')}
language_code = 'name'
videos = [Episode, Movie]
require_video = False
required_features = ['permissive']
def list_checked(self, video, languages):
results = []
if isinstance(video, Episode):
results = self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode)
elif isinstance(video, Movie) and video.year:
results = self.query(video.path or video.release, languages, get_keywords(video.guess), movie=video.title, year=video.year)
return results
def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None, movie=None, year=None):
if series and season and episode:
request_series = series.lower().replace(' ', '_')
if isinstance(request_series, unicode):
request_series = request_series.encode('utf-8')
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
r = self.session.get('%s/serie/%s/%s/%s/' % (self.server_url, urllib.quote(request_series), season, episode))
if r.status_code == 404:
logger.debug(u'Could not find subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
return []
elif movie and year:
request_movie = movie.title().replace(' ', '_')
if isinstance(request_movie, unicode):
request_movie = request_movie.encode('utf-8')
logger.debug(u'Getting subtitles for %s (%d) with languages %r' % (movie, year, languages))
r = self.session.get('%s/film/%s_(%d)' % (self.server_url, urllib.quote(request_movie), year))
if r.status_code == 404:
logger.debug(u'Could not find subtitles for %s (%d) with languages %r' % (movie, year, languages))
return []
else:
            raise ServiceError('One or more parameters missing')
if r.status_code != 200:
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
return []
soup = BeautifulSoup(r.content, self.required_features)
subtitles = []
for sub in soup('td', {'class': 'NewsTitle'}):
sub_keywords = split_keyword(sub.b.string.lower())
if not keywords & sub_keywords:
logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
continue
for html_language in sub.parent.parent.find_all('td', {'class': 'language'}):
language = self.get_language(html_language.string.strip())
if language not in languages:
logger.debug(u'Language %r not in wanted languages %r' % (language, languages))
continue
html_status = html_language.find_next_sibling('td')
status = html_status.strong.string.strip()
if status != 'Completado':
logger.debug(u'Wrong subtitle status %s' % status)
continue
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s%s' % (self.server_url, html_status.find_next('td').find('a')['href']))
subtitles.append(subtitle)
return subtitles
Service = SubsWiki
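# Hypothetical use (the service object must be set up through subliminal's
# normal machinery first; the path, keywords and show name are made up):
#
#   subs = service.query('/tmp/Show.S01E01.mkv', service.languages,
#                        keywords=set(['hdtv']), series='Show',
#                        season=1, episode=1)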
| gpl-3.0 |
ykim362/mxnet | example/ssd/symbol/legacy_vgg16_ssd_300.py | 41 | 10127 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from common import legacy_conv_act_layer
from common import multibox_layer
def get_symbol_train(num_classes=20, nms_thresh=0.5, force_suppress=False,
nms_topk=400, **kwargs):
"""
    Single-shot multi-box detection based on the 16-layer VGG ConvNet.
    This is a modified version, with the fc6/fc7 layers replaced by conv
    layers, and the network is slightly smaller than the original VGG-16.
    This is a training network with losses.
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
data = mx.symbol.Variable(name="data")
label = mx.symbol.Variable(name="label")
# group 1
conv1_1 = mx.symbol.Convolution(
data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(
data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2, 2), \
pooling_convention="full", name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
pool5 = mx.symbol.Pooling(
data=relu5_3, pool_type="max", kernel=(3, 3), stride=(1, 1),
pad=(1,1), name="pool5")
# group 6
conv6 = mx.symbol.Convolution(
data=pool5, kernel=(3, 3), pad=(6, 6), dilate=(6, 6),
num_filter=1024, name="conv6")
relu6 = mx.symbol.Activation(data=conv6, act_type="relu", name="relu6")
# drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
conv7 = mx.symbol.Convolution(
data=relu6, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="conv7")
relu7 = mx.symbol.Activation(data=conv7, act_type="relu", name="relu7")
# drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
### ssd extra layers ###
conv8_1, relu8_1 = legacy_conv_act_layer(relu7, "8_1", 256, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv8_2, relu8_2 = legacy_conv_act_layer(relu8_1, "8_2", 512, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv9_1, relu9_1 = legacy_conv_act_layer(relu8_2, "9_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv9_2, relu9_2 = legacy_conv_act_layer(relu9_1, "9_2", 256, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv10_1, relu10_1 = legacy_conv_act_layer(relu9_2, "10_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv10_2, relu10_2 = legacy_conv_act_layer(relu10_1, "10_2", 256, kernel=(3,3), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv11_1, relu11_1 = legacy_conv_act_layer(relu10_2, "11_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv11_2, relu11_2 = legacy_conv_act_layer(relu11_1, "11_2", 256, kernel=(3,3), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
# specific parameters for VGG16 network
from_layers = [relu4_3, relu7, relu8_2, relu9_2, relu10_2, relu11_2]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = [20, -1, -1, -1, -1, -1]
    steps = [x / 300.0 for x in [8, 16, 32, 64, 100, 300]]
num_channels = [512]
loc_preds, cls_preds, anchor_boxes = multibox_layer(from_layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_channels, clip=False, interm_layer=0, steps=steps)
tmp = mx.symbol.contrib.MultiBoxTarget(
*[anchor_boxes, label, cls_preds], overlap_threshold=.5, \
ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \
negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),
name="multibox_target")
loc_target = tmp[0]
loc_target_mask = tmp[1]
cls_target = tmp[2]
cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \
ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \
normalization='valid', name="cls_prob")
loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \
data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)
loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \
normalization='valid', name="loc_loss")
# monitoring training status
cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label")
det = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out")
# group output
out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det])
return out
def get_symbol(num_classes=20, nms_thresh=0.5, force_suppress=False,
nms_topk=400, **kwargs):
"""
    Single-shot multi-box detection based on the 16-layer VGG ConvNet.
    This is a modified version, with the fc6/fc7 layers replaced by conv
    layers, and the network is slightly smaller than the original VGG-16.
    This is the detection (inference) network.
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
threshold of overlap for non-maximum suppression
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
net = get_symbol_train(num_classes)
cls_preds = net.get_internals()["multibox_cls_pred_output"]
loc_preds = net.get_internals()["multibox_loc_pred_output"]
anchor_boxes = net.get_internals()["multibox_anchors_output"]
cls_prob = mx.symbol.SoftmaxActivation(data=cls_preds, mode='channel', \
name='cls_prob')
out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out
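# Usage sketch (assumes an MXNet build with the contrib multibox operators;
# the anchor configuration above fixes the input resolution at 300x300):
#
#   train_sym = get_symbol_train(num_classes=20)
#   deploy_sym = get_symbol(num_classes=20, nms_thresh=0.45, nms_topk=400)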
| apache-2.0 |
blademainer/intellij-community | plugins/hg4idea/testData/bin/mercurial/store.py | 90 | 16865 | # store.py - repository store handling for Mercurial
#
# Copyright 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import scmutil, util, parsers
import os, stat, errno
_sha = util.sha1
# This avoids a collision between a file named foo and a dir named
# foo.i or foo.d
def _encodedir(path):
'''
>>> _encodedir('data/foo.i')
'data/foo.i'
>>> _encodedir('data/foo.i/bla.i')
'data/foo.i.hg/bla.i'
>>> _encodedir('data/foo.i.hg/bla.i')
'data/foo.i.hg.hg/bla.i'
>>> _encodedir('data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
'''
return (path
.replace(".hg/", ".hg.hg/")
.replace(".i/", ".i.hg/")
.replace(".d/", ".d.hg/"))
encodedir = getattr(parsers, 'encodedir', _encodedir)
def decodedir(path):
'''
>>> decodedir('data/foo.i')
'data/foo.i'
>>> decodedir('data/foo.i.hg/bla.i')
'data/foo.i/bla.i'
>>> decodedir('data/foo.i.hg.hg/bla.i')
'data/foo.i.hg/bla.i'
'''
if ".hg/" not in path:
return path
return (path
.replace(".d.hg/", ".d/")
.replace(".i.hg/", ".i/")
.replace(".hg.hg/", ".hg/"))
def _buildencodefun():
'''
>>> enc, dec = _buildencodefun()
>>> enc('nothing/special.txt')
'nothing/special.txt'
>>> dec('nothing/special.txt')
'nothing/special.txt'
>>> enc('HELLO')
'_h_e_l_l_o'
>>> dec('_h_e_l_l_o')
'HELLO'
>>> enc('hello:world?')
'hello~3aworld~3f'
>>> dec('hello~3aworld~3f')
'hello:world?'
>>> enc('the\x07quick\xADshot')
'the~07quick~adshot'
>>> dec('the~07quick~adshot')
'the\\x07quick\\xadshot'
'''
e = '_'
winreserved = [ord(x) for x in '\\:*?"<>|']
cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
for x in (range(32) + range(126, 256) + winreserved):
cmap[chr(x)] = "~%02x" % x
for x in range(ord("A"), ord("Z") + 1) + [ord(e)]:
cmap[chr(x)] = e + chr(x).lower()
dmap = {}
for k, v in cmap.iteritems():
dmap[v] = k
def decode(s):
i = 0
while i < len(s):
for l in xrange(1, 4):
try:
yield dmap[s[i:i + l]]
i += l
break
except KeyError:
pass
else:
raise KeyError
return (lambda s: ''.join([cmap[c] for c in s]),
lambda s: ''.join(list(decode(s))))
_encodefname, _decodefname = _buildencodefun()
def encodefilename(s):
'''
>>> encodefilename('foo.i/bar.d/bla.hg/hi:world?/HELLO')
'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
'''
return _encodefname(encodedir(s))
def decodefilename(s):
'''
>>> decodefilename('foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
'foo.i/bar.d/bla.hg/hi:world?/HELLO'
'''
return decodedir(_decodefname(s))
def _buildlowerencodefun():
'''
>>> f = _buildlowerencodefun()
>>> f('nothing/special.txt')
'nothing/special.txt'
>>> f('HELLO')
'hello'
>>> f('hello:world?')
'hello~3aworld~3f'
>>> f('the\x07quick\xADshot')
'the~07quick~adshot'
'''
winreserved = [ord(x) for x in '\\:*?"<>|']
cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
for x in (range(32) + range(126, 256) + winreserved):
cmap[chr(x)] = "~%02x" % x
for x in range(ord("A"), ord("Z") + 1):
cmap[chr(x)] = chr(x).lower()
return lambda s: "".join([cmap[c] for c in s])
lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
# Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
_winres3 = ('aux', 'con', 'prn', 'nul') # length 3
_winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
def _auxencode(path, dotencode):
'''
Encodes filenames containing names reserved by Windows or which end in
period or space. Does not touch other single reserved characters c.
Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
Additionally encodes space or period at the beginning, if dotencode is
True. Parameter path is assumed to be all lowercase.
A segment only needs encoding if a reserved name appears as a
basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
doesn't need encoding.
>>> s = '.foo/aux.txt/txt.aux/con/prn/nul/foo.'
>>> _auxencode(s.split('/'), True)
['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
>>> s = '.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
>>> _auxencode(s.split('/'), False)
['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
>>> _auxencode(['foo. '], True)
['foo.~20']
>>> _auxencode([' .foo'], True)
['~20.foo']
'''
for i, n in enumerate(path):
if not n:
continue
if dotencode and n[0] in '. ':
n = "~%02x" % ord(n[0]) + n[1:]
path[i] = n
else:
l = n.find('.')
if l == -1:
l = len(n)
if ((l == 3 and n[:3] in _winres3) or
(l == 4 and n[3] <= '9' and n[3] >= '1'
and n[:3] in _winres4)):
# encode third letter ('aux' -> 'au~78')
ec = "~%02x" % ord(n[2])
n = n[0:2] + ec + n[3:]
path[i] = n
if n[-1] in '. ':
# encode last period or space ('foo...' -> 'foo..~2e')
path[i] = n[:-1] + "~%02x" % ord(n[-1])
return path
_maxstorepathlen = 120
_dirprefixlen = 8
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
def _hashencode(path, dotencode):
digest = _sha(path).hexdigest()
le = lowerencode(path).split('/')[1:]
parts = _auxencode(le, dotencode)
basename = parts[-1]
_root, ext = os.path.splitext(basename)
sdirs = []
sdirslen = 0
for p in parts[:-1]:
d = p[:_dirprefixlen]
if d[-1] in '. ':
# Windows can't access dirs ending in period or space
d = d[:-1] + '_'
if sdirslen == 0:
t = len(d)
else:
t = sdirslen + 1 + len(d)
if t > _maxshortdirslen:
break
sdirs.append(d)
sdirslen = t
dirs = '/'.join(sdirs)
if len(dirs) > 0:
dirs += '/'
res = 'dh/' + dirs + digest + ext
spaceleft = _maxstorepathlen - len(res)
if spaceleft > 0:
filler = basename[:spaceleft]
res = 'dh/' + dirs + filler + digest + ext
return res
def _hybridencode(path, dotencode):
'''encodes path with a length limit
Encodes all paths that begin with 'data/', according to the following.
Default encoding (reversible):
Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
characters are encoded as '~xx', where xx is the two digit hex code
of the character (see encodefilename).
Relevant path components consisting of Windows reserved filenames are
masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
Hashed encoding (not reversible):
If the default-encoded path is longer than _maxstorepathlen, a
non-reversible hybrid hashing of the path is done instead.
This encoding uses up to _dirprefixlen characters of all directory
levels of the lowerencoded path, but not more levels than can fit into
_maxshortdirslen.
Then follows the filler followed by the sha digest of the full path.
The filler is the beginning of the basename of the lowerencoded path
(the basename is everything after the last path separator). The filler
is as long as possible, filling in characters from the basename until
the encoded path has _maxstorepathlen characters (or all chars of the
basename have been taken).
The extension (e.g. '.i' or '.d') is preserved.
The string 'data/' at the beginning is replaced with 'dh/', if the hashed
encoding was used.
'''
path = encodedir(path)
ef = _encodefname(path).split('/')
res = '/'.join(_auxencode(ef, dotencode))
if len(res) > _maxstorepathlen:
res = _hashencode(path, dotencode)
return res
def _pathencode(path):
de = encodedir(path)
if len(path) > _maxstorepathlen:
return _hashencode(de, True)
ef = _encodefname(de).split('/')
res = '/'.join(_auxencode(ef, True))
if len(res) > _maxstorepathlen:
return _hashencode(de, True)
return res
_pathencode = getattr(parsers, 'pathencode', _pathencode)
def _plainhybridencode(f):
return _hybridencode(f, False)
def _calcmode(vfs):
try:
# files in .hg/ will be created using this mode
mode = vfs.stat().st_mode
# avoid some useless chmods
if (0777 & ~util.umask) == (0777 & mode):
mode = None
except OSError:
mode = None
return mode
_data = ('data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
' phaseroots obsstore')
class basicstore(object):
'''base class for local repository stores'''
def __init__(self, path, vfstype):
vfs = vfstype(path)
self.path = vfs.base
self.createmode = _calcmode(vfs)
vfs.createmode = self.createmode
self.rawvfs = vfs
self.vfs = scmutil.filtervfs(vfs, encodedir)
self.opener = self.vfs
def join(self, f):
return self.path + '/' + encodedir(f)
def _walk(self, relpath, recurse):
'''yields (unencoded, encoded, size)'''
path = self.path
if relpath:
path += '/' + relpath
striplen = len(self.path) + 1
l = []
if self.rawvfs.isdir(path):
visit = [path]
readdir = self.rawvfs.readdir
while visit:
p = visit.pop()
for f, kind, st in readdir(p, stat=True):
fp = p + '/' + f
if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
n = util.pconvert(fp[striplen:])
l.append((decodedir(n), n, st.st_size))
elif kind == stat.S_IFDIR and recurse:
visit.append(fp)
l.sort()
return l
def datafiles(self):
return self._walk('data', True)
def walk(self):
'''yields (unencoded, encoded, size)'''
# yield data files first
for x in self.datafiles():
yield x
# yield manifest before changelog
for x in reversed(self._walk('', False)):
yield x
def copylist(self):
return ['requires'] + _data.split()
def write(self):
pass
def __contains__(self, path):
'''Checks if the store contains path'''
path = "/".join(("data", path))
# file?
if os.path.exists(self.join(path + ".i")):
return True
# dir?
if not path.endswith("/"):
path = path + "/"
return os.path.exists(self.join(path))
class encodedstore(basicstore):
def __init__(self, path, vfstype):
vfs = vfstype(path + '/store')
self.path = vfs.base
self.createmode = _calcmode(vfs)
vfs.createmode = self.createmode
self.rawvfs = vfs
self.vfs = scmutil.filtervfs(vfs, encodefilename)
self.opener = self.vfs
def datafiles(self):
for a, b, size in self._walk('data', True):
try:
a = decodefilename(a)
except KeyError:
a = None
yield a, b, size
def join(self, f):
return self.path + '/' + encodefilename(f)
def copylist(self):
return (['requires', '00changelog.i'] +
['store/' + f for f in _data.split()])
class fncache(object):
# the filename used to be partially encoded
# hence the encodedir/decodedir dance
def __init__(self, vfs):
self.vfs = vfs
self.entries = None
self._dirty = False
def _load(self):
'''fill the entries from the fncache file'''
self._dirty = False
try:
fp = self.vfs('fncache', mode='rb')
except IOError:
# skip nonexistent file
self.entries = set()
return
self.entries = set(decodedir(fp.read()).splitlines())
if '' in self.entries:
fp.seek(0)
for n, line in enumerate(fp):
if not line.rstrip('\n'):
t = _('invalid entry in fncache, line %s') % (n + 1)
raise util.Abort(t)
fp.close()
def _write(self, files, atomictemp):
fp = self.vfs('fncache', mode='wb', atomictemp=atomictemp)
if files:
fp.write(encodedir('\n'.join(files) + '\n'))
fp.close()
self._dirty = False
def rewrite(self, files):
self._write(files, False)
self.entries = set(files)
def write(self):
if self._dirty:
self._write(self.entries, True)
def add(self, fn):
if self.entries is None:
self._load()
if fn not in self.entries:
self._dirty = True
self.entries.add(fn)
def __contains__(self, fn):
if self.entries is None:
self._load()
return fn in self.entries
def __iter__(self):
if self.entries is None:
self._load()
return iter(self.entries)
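    # On disk the fncache is plain text, one entry per line, written through
    # encodedir (illustrative contents):
    #
    #   data/foo.i
    #   data/bar.d.hg/baz.i   <- on-disk form of 'data/bar.d/baz.i'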
class _fncachevfs(scmutil.abstractvfs, scmutil.auditvfs):
def __init__(self, vfs, fnc, encode):
scmutil.auditvfs.__init__(self, vfs)
self.fncache = fnc
self.encode = encode
def __call__(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rb') and path.startswith('data/'):
self.fncache.add(path)
return self.vfs(self.encode(path), mode, *args, **kw)
def join(self, path):
if path:
return self.vfs.join(self.encode(path))
else:
return self.vfs.join(path)
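    # Sketch of the tracking behaviour (vfs, fnc and encode are assumed to be
    # wired up as in fncachestore below):
    #
    #   fcvfs = _fncachevfs(vfs, fnc, _plainhybridencode)
    #   fp = fcvfs('data/foo.i', 'wb')  # write: 'data/foo.i' added to fnc
    #   fp = fcvfs('data/foo.i', 'rb')  # read: fncache left untouched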
class fncachestore(basicstore):
def __init__(self, path, vfstype, dotencode):
if dotencode:
encode = _pathencode
else:
encode = _plainhybridencode
self.encode = encode
vfs = vfstype(path + '/store')
self.path = vfs.base
self.pathsep = self.path + '/'
self.createmode = _calcmode(vfs)
vfs.createmode = self.createmode
self.rawvfs = vfs
fnc = fncache(vfs)
self.fncache = fnc
self.vfs = _fncachevfs(vfs, fnc, encode)
self.opener = self.vfs
def join(self, f):
return self.pathsep + self.encode(f)
def getsize(self, path):
return self.rawvfs.stat(path).st_size
def datafiles(self):
rewrite = False
existing = []
for f in sorted(self.fncache):
ef = self.encode(f)
try:
yield f, ef, self.getsize(ef)
existing.append(f)
except OSError, err:
if err.errno != errno.ENOENT:
raise
# nonexistent entry
rewrite = True
if rewrite:
# rewrite fncache to remove nonexistent entries
# (may be caused by rollback / strip)
self.fncache.rewrite(existing)
def copylist(self):
d = ('data dh fncache phaseroots obsstore'
' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
return (['requires', '00changelog.i'] +
['store/' + f for f in d.split()])
def write(self):
self.fncache.write()
def _exists(self, f):
ef = self.encode(f)
try:
self.getsize(ef)
return True
except OSError, err:
if err.errno != errno.ENOENT:
raise
# nonexistent entry
return False
def __contains__(self, path):
'''Checks if the store contains path'''
path = "/".join(("data", path))
# check for files (exact match)
e = path + '.i'
if e in self.fncache and self._exists(e):
return True
# now check for directories (prefix match)
if not path.endswith('/'):
path += '/'
for e in self.fncache:
if e.startswith(path) and self._exists(e):
return True
return False
def store(requirements, path, vfstype):
if 'store' in requirements:
if 'fncache' in requirements:
return fncachestore(path, vfstype, 'dotencode' in requirements)
return encodedstore(path, vfstype)
return basicstore(path, vfstype)
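# For reference, the selection implemented above (vfstype is any vfs
# factory, e.g. scmutil.vfs):
#
#   store(set(), path, vfstype)                      # -> basicstore
#   store(set(['store']), path, vfstype)             # -> encodedstore
#   store(set(['store', 'fncache']), path, vfstype)  # -> fncachestore
#   # adding 'dotencode' turns on dot/space encoding in fncachestore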
| apache-2.0 |
splbio/openobject-server | openerp/addons/base/tests/test_ir_attachment.py | 68 | 3224 | import hashlib
import os
import unittest2
import openerp
import openerp.tests.common
class test_ir_attachment(openerp.tests.common.TransactionCase):
def test_00_attachment_flow(self):
registry, cr, uid = self.registry, self.cr, self.uid
root_path = openerp.tools.config['root_path']
ira = registry('ir.attachment')
# Blob1
blob1 = 'blob1'
blob1_b64 = blob1.encode('base64')
blob1_hash = hashlib.sha1(blob1).hexdigest()
blob1_fname = blob1_hash[:3] + '/' + blob1_hash
# Blob2
blob2 = 'blob2'
blob2_b64 = blob2.encode('base64')
blob2_hash = hashlib.sha1(blob2).hexdigest()
blob2_fname = blob2_hash[:3] + '/' + blob2_hash
# 'ir_attachment.location' is undefined test database storage
a1 = ira.create(cr, uid, {'name': 'a1', 'datas': blob1_b64})
a1_read = ira.read(cr, uid, [a1], ['datas'])
self.assertEqual(a1_read[0]['datas'], blob1_b64)
cr.execute("select id,db_datas from ir_attachment where id = %s", (a1,) )
a1_db_datas = str(cr.fetchall()[0][1])
self.assertEqual(a1_db_datas, blob1_b64)
# define a location for filestore
registry('ir.config_parameter').set_param(cr, uid, 'ir_attachment.location', 'file:///filestore')
# Test file storage
a2 = ira.create(cr, uid, {'name': 'a2', 'datas': blob1_b64})
a2_read = ira.read(cr, uid, [a2], ['datas'])
self.assertEqual(a2_read[0]['datas'], blob1_b64)
cr.execute("select id,store_fname from ir_attachment where id = %s", (a2,) )
a2_store_fname = cr.fetchall()[0][1]
self.assertEqual(a2_store_fname, blob1_fname)
a2_fn = os.path.join(root_path, 'filestore', cr.dbname, blob1_hash[:3], blob1_hash)
fc = file(a2_fn).read()
self.assertEqual(fc, blob1)
# create a3 with same blob
a3 = ira.create(cr, uid, {'name': 'a3', 'datas': blob1_b64})
a3_read = ira.read(cr, uid, [a3], ['datas'])
self.assertEqual(a3_read[0]['datas'], blob1_b64)
cr.execute("select id,store_fname from ir_attachment where id = %s", (a3,) )
a3_store_fname = cr.fetchall()[0][1]
self.assertEqual(a3_store_fname, a2_store_fname)
# create a4 blob2
a4 = ira.create(cr, uid, {'name': 'a4', 'datas': blob2_b64})
a4_read = ira.read(cr, uid, [a4], ['datas'])
self.assertEqual(a4_read[0]['datas'], blob2_b64)
a4_fn = os.path.join(root_path, 'filestore', cr.dbname, blob2_hash[:3], blob2_hash)
self.assertTrue(os.path.isfile(a4_fn))
# delete a3 but file stays
ira.unlink(cr, uid, [a3])
self.assertTrue(os.path.isfile(a2_fn))
# delete a2; its file is unlinked from the filestore
ira.unlink(cr, uid, [a2])
self.assertFalse(os.path.isfile(a2_fn))
# update a4 blob2 by blob1
ira.write(cr, uid, [a4], {'datas': blob1_b64})
a4_read = ira.read(cr, uid, [a4], ['datas'])
self.assertEqual(a4_read[0]['datas'], blob1_b64)
# the file for a4 disappears and the file for a2 reappears
self.assertFalse(os.path.isfile(a4_fn))
self.assertTrue(os.path.isfile(a2_fn))
# everybody applauds
| agpl-3.0 |
ssgeejr/mitropm | emailer/common/sendmail.py | 24 | 3169 | import cStringIO
import smtplib
import email.Charset
import email.generator
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from tornado import options
options.define('smtp_user', '', help='User for SMTP (currently unused)')
options.define('smtp_password', '', help='Password for SMTP (currently unused)')
options.define('smtp_host', 'email-smtp.us-east-1.amazonaws.com', help='Host for SMTP (currently unused)')
options.define('smtp_port', '2587', help='Port for SMTP (currently unused)')
# For Mandrill: host = 'smtp.mandrillapp.com' port = 587
# From: http://radix.twistedmatrix.com/2010/07/how-to-send-good-unicode-email-with.html
# Override python's weird assumption that utf-8 text should be encoded with
# base64, and instead use quoted-printable (for both subject and body).
email.Charset.add_charset('utf-8', email.Charset.QP, email.Charset.QP, 'utf-8')
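# Minimal sketch of the override's effect (hypothetical helper, stdlib only):
# with quoted-printable instead of base64, a mostly-ASCII UTF-8 body stays
# human-readable in the raw message.
def _demo_qp_override():
    msg = MIMEText(u'h\xe9llo'.encode('utf-8'), 'plain', 'UTF-8')
    assert msg['Content-Transfer-Encoding'] == 'quoted-printable'
    assert 'h=C3=A9llo' in msg.as_string()  # '\xe9' -> UTF-8 C3 A9 -> =C3=A9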
def make_email_message(html_string, subject, to, frm,
text_string='It\'s 2012! Get a mail client that displays HTML: http://www.gmail.com'):
message = MIMEMultipart('alternative')
text_part = MIMEText(text_string, 'plain', 'UTF-8')
if not html_string:
# No HTML: Create a single part message with the text body
message = text_part
message['Subject'] = subject
# Encode as us-ascii: ensures unicode strings do not contain special characters
# TODO: Support senders/recipients with unicode names
frm = frm.encode('us-ascii')
assert type(frm) == str, type(frm)
message['From'] = frm
to = to.encode('us-ascii')
assert type(to) == str
message['To'] = to
if html_string:
# The message is multi-part, with HTML and text
html_part = MIMEText(html_string, 'html', 'UTF-8')
# most important at end
message.attach(text_part)
message.attach(html_part)
return message
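# Usage sketch (addresses and body are made up):
# msg = make_email_message('<b>hi</b>', u'Subject', u'to@example.com',
#                          u'from@example.com', text_string='hi')
# send_message_via_smtp(msg, host='localhost', port=25)
# produces a multipart/alternative message with text and HTML parts.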
def to_string(message):
# The default email Message as_string escapes From lines, in case it is
# used in a Unix mbox format:
# http://homepage.ntlworld.com./jonathan.deboynepollard/FGA/mail-mbox-formats.html
io = cStringIO.StringIO()
g = email.generator.Generator(io, False) # second arg: "should I mangle From?"
g.flatten(message)
return io.getvalue()
def send_message_via_smtp(message, host='localhost', port=25,
user=None, pwd=None):
if (user or pwd): assert (user and pwd)
if type(message) != list:
messages = [message]
else:
messages = message
if user:
s = smtplib.SMTP(host, port)
s.starttls()
s.login(user,pwd)
else:
s = smtplib.SMTP(host, port)
for message in messages:
s.sendmail(message['from'], message['to'], to_string(message))
s.quit()
def send_message_via_smtp_options(message):
''' Send a message via SMTP using settings in command line flags.
message can be a child of email.Message or a list
of those which will all be sent
'''
return send_message_via_smtp(message, options.options.smtp_host, options.options.smtp_port,
options.options.smtp_user, options.options.smtp_password)
| gpl-3.0 |
hamonikr-root/blueman | blueman/plugins/applet/NMDUNSupport.py | 3 | 3781 | # Copyright (C) 2009 Valmantas Paliksa <walmis at balticum-tv dot lt>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from blueman.plugins.AppletPlugin import AppletPlugin
import dbus
import gobject
from blueman.main.SignalTracker import SignalTracker
from blueman.gui.Notification import Notification
from blueman.Sdp import *
from blueman.Functions import get_icon, composite_icon
import weakref
class ConnectionHandler:
def __init__(self, parent, device, uuid, reply, err):
self.parent = parent
self.device = device
self.uuid = uuid
self.reply = reply
self.err = err
self.rfcomm_dev = None
self.timeout = None
self.signals = SignalTracker()
self.signals.Handle("dbus", self.parent.bus,
self.on_mm_device_added,
"DeviceAdded",
"org.freedesktop.ModemManager")
# for some reason these handlers take a reference and don't give it back,
# so I have to work around it :(
w = weakref.ref(self)
device.Services["serial"].Connect(uuid,
reply_handler=lambda *args: w() and w().on_connect_reply(*args),
error_handler=lambda *args: w() and w().on_connect_error(*args))
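# Sketch of the weakref pattern above: once this handler is garbage
# collected, w() returns None and the lambdas short-circuit, so late
# D-Bus replies are dropped instead of keeping the object alive:
# w = weakref.ref(obj); callback = lambda *a: w() and w().on_reply(*a)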
def __del__(self):
dprint("deleting")
def on_connect_reply(self, rfcomm):
self.rfcomm_dev = rfcomm
self.timeout = gobject.timeout_add(10000, self.on_timeout)
def on_connect_error(self, *args):
self.err(*args)
self.cleanup()
def cleanup(self):
if self.timeout:
gobject.source_remove(self.timeout)
self.signals.DisconnectAll()
del self.device
def on_mm_device_added(self, path):
dprint(path)
props = self.parent.bus.call_blocking("org.freedesktop.ModemManager",
path,
"org.freedesktop.DBus.Properties",
"GetAll",
"s",
["org.freedesktop.ModemManager.Modem"])
if self.rfcomm_dev and props["Driver"] == "bluetooth" and props["Device"] in self.rfcomm_dev:
dprint("It's our bluetooth modem!")
modem = get_icon("modem", 24)
blueman = get_icon("blueman", 48)
icon = composite_icon(blueman, [(modem, 24, 24, 255)])
Notification(_("Bluetooth Dialup"),
_("DUN connection on %s will now be available in Network Manager") % self.device.Alias,
pixbuf=icon,
status_icon=self.parent.Applet.Plugins.StatusIcon)
self.reply(self.rfcomm_dev)
self.cleanup()
def on_timeout(self):
self.timeout = None
self.err(dbus.DBusException(_("Modem Manager did not support the connection")))
self.cleanup()
class NMDUNSupport(AppletPlugin):
__depends__ = ["StatusIcon", "DBusService"]
__conflicts__ = ["PPPSupport", "NMIntegration"]
__icon__ = "modem"
__author__ = "Walmis"
__description__ = _("Provides support for Dial Up Networking (DUN) with ModemManager and NetworkManager 0.8")
__priority__ = 1
def on_load(self, applet):
self.bus = dbus.SystemBus()
def on_unload(self):
pass
def rfcomm_connect_handler(self, device, uuid, reply, err):
uuid16 = sdp_get_serial_type(device.Address, uuid)
if DIALUP_NET_SVCLASS_ID in uuid16:
ConnectionHandler(self, device, uuid, reply, err)
return True
else:
return False
| gpl-3.0 |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following results for Thai were collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222, # 80
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235, # 90
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57, # a0
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54, # b0
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63, # c0
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244, # d0
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247, # e0
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
| mit |
supertask/UnitX | doc/tools/extensions/patchlevel.py | 50 | 1942 | # -*- coding: utf-8 -*-
"""
patchlevel.py
~~~~~~~~~~~~~
Extract version info from Include/patchlevel.h.
Adapted from Doc/tools/getversioninfo.
:copyright: 2007-2008 by Georg Brandl.
:license: Python license.
"""
import os
import re
import sys
def get_header_version_info(srcdir):
patchlevel_h = os.path.join(srcdir, '..', 'Include', 'patchlevel.h')
# This won't pick out all #defines, but it will pick up the ones we
# care about.
rx = re.compile(r'\s*#define\s+([a-zA-Z][a-zA-Z_0-9]*)\s+([a-zA-Z_0-9]+)')
d = {}
f = open(patchlevel_h)
try:
for line in f:
m = rx.match(line)
if m is not None:
name, value = m.group(1, 2)
d[name] = value
finally:
f.close()
release = version = '%s.%s' % (d['PY_MAJOR_VERSION'], d['PY_MINOR_VERSION'])
micro = int(d['PY_MICRO_VERSION'])
release += '.' + str(micro)
level = d['PY_RELEASE_LEVEL']
suffixes = {
'PY_RELEASE_LEVEL_ALPHA': 'a',
'PY_RELEASE_LEVEL_BETA': 'b',
'PY_RELEASE_LEVEL_GAMMA': 'rc',
}
if level != 'PY_RELEASE_LEVEL_FINAL':
release += suffixes[level] + str(int(d['PY_RELEASE_SERIAL']))
return version, release
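# Worked example (made-up header values):
# PY_MAJOR_VERSION=2, PY_MINOR_VERSION=7, PY_MICRO_VERSION=3,
# PY_RELEASE_LEVEL=PY_RELEASE_LEVEL_GAMMA, PY_RELEASE_SERIAL=1
# -> version '2.7', release '2.7.3rc1'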
def get_sys_version_info():
major, minor, micro, level, serial = sys.version_info
release = version = '%s.%s' % (major, minor)
release += '.%s' % micro
if level != 'final':
release += '%s%s' % (level[0], serial)
return version, release
def get_version_info():
try:
return get_header_version_info('.')
except (IOError, OSError):
version, release = get_sys_version_info()
print >>sys.stderr, 'Can\'t get version info from Include/patchlevel.h, ' \
'using version of this interpreter (%s).' % release
return version, release
if __name__ == '__main__':
print(get_header_version_info('.')[1])
| mit |
Hybrid-Cloud/badam | patches_tool/aws_patch/aws_deps/libcloud/test/compute/test_ecp.py | 1 | 5290 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.ecp import ECPNodeDriver
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import ECP_PARAMS
class ECPTests(unittest.TestCase, TestCaseMixin):
def setUp(self):
ECPNodeDriver.connectionCls.conn_classes = (None,
ECPMockHttp)
self.driver = ECPNodeDriver(*ECP_PARAMS)
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 2)
node = nodes[0]
self.assertEqual(node.id, '1')
self.assertEqual(node.name, 'dummy-1')
self.assertEqual(node.public_ips[0], "42.78.124.75")
self.assertEqual(node.state, NodeState.RUNNING)
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 3)
size = sizes[0]
self.assertEqual(size.id, '1')
self.assertEqual(size.ram, 512)
self.assertEqual(size.disk, 0)
self.assertEqual(size.bandwidth, 0)
self.assertEqual(size.price, 0)
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 2)
self.assertEqual(
images[0].name, "centos54: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2")
self.assertEqual(images[0].id, "1")
name = "centos54 two: AUTO import from /opt/enomalism2/repo/5d407a68-c76c-11de-86e5-000475cb7577.xvm2"
self.assertEqual(images[1].name, name)
self.assertEqual(images[1].id, "2")
def test_reboot_node(self):
# Raises exception on failure
node = self.driver.list_nodes()[0]
self.driver.reboot_node(node)
def test_destroy_node(self):
# Raises exception on failure
node = self.driver.list_nodes()[0]
self.driver.destroy_node(node)
def test_create_node(self):
# Raises exception on failure
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="api.ivan.net.nz", image=image, size=size)
self.assertEqual(node.name, "api.ivan.net.nz")
self.assertEqual(node.id, "1234")
class ECPMockHttp(MockHttp):
fixtures = ComputeFileFixtures('ecp')
def _modules_hosting(self, method, url, body, headers):
headers = {}
headers['set-cookie'] = 'vcloud-token=testtoken'
body = 'Anything'
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _rest_hosting_vm_1(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('vm_1_get.json')
if method == 'POST':
# str.find() returns -1 (truthy) when the substring is absent, so
# membership must be tested with 'in'
if 'delete' in body:
body = self.fixtures.load('vm_1_action_delete.json')
if 'stop' in body:
body = self.fixtures.load('vm_1_action_stop.json')
if 'start' in body:
body = self.fixtures.load('vm_1_action_start.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_hosting_vm(self, method, url, body, headers):
if method == 'PUT':
body = self.fixtures.load('vm_put.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_hosting_vm_list(self, method, url, body, headers):
body = self.fixtures.load('vm_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_hosting_htemplate_list(self, method, url, body, headers):
body = self.fixtures.load('htemplate_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_hosting_network_list(self, method, url, body, headers):
body = self.fixtures.load('network_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_hosting_ptemplate_list(self, method, url, body, headers):
body = self.fixtures.load('ptemplate_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
JamesDickenson/aima-python | submissions/aartiste/myNN.py | 4 | 3659 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The multi-layer perceptron is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
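# e.g. trumpTarget(52.3) -> 1 ('Trump > 45%'); trumpTarget(30.1) -> 0
# ('Trump <= 45%'); the cut at 45% is arbitrary, as noted above.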
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
mlpc = MLPClassifier(
solver='sgd',
learning_rate='adaptive',
)
Examples = {
'TrumpDefault': {
'frame': trumpECHP,
},
'TrumpSGD': {
'frame': trumpECHP,
'mlpc': mlpc
},
} | mit |
frbapolkosnik/jsunpack-n | exampleImport.py | 23 | 4525 | #!/usr/bin/python
'''
Jsunpackn - A generic JavaScript Unpacker Network Edition
Copyright (C) 2010 Blake Hartstein
http://jsunpack.jeek.org/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
from hashlib import sha1
import datetime
import socket
import jsunpackn
# Error Reporting to /tmp
socket.setdefaulttimeout(10)
class cmdline_filler:
options = {
'timeout':30,
'redoevaltime':1,
'maxruntime':0,
'urlfetch':'',
'configfile':'options.config',
'saveallfiles':True, # for pcaps?
'saveallexes':False,
'quiet':True,
'verbose':True,
'veryverbose':True,
'graphfile':'',
'debug':False,
'active':True,
'interface':'',
'nojs':False,
'log_ips':'./maliciousips.txt',
'pre':'./pre.js',
'post':'./post.js',
'htmlparse':'./htmlparse.config',
'fasteval':False,
'proxy': '',
'currentproxy': '',
}
def __init__(self, inhash):
self.tmpdir = '/tmp' # these temporary files are necessary for decoding, but you can use any path and they will be deleted afterwards
self.logdir = self.outdir = '' # an empty storage filepath means no directory of output files will be created
self.decoded = '' #NO decoding logfile, otherwise = self.outdir + '/decoded.log'
for item in self.options:
setattr(self, item, self.options[item])
#Feel free to hard code all your files "rules", "rules.ascii", and "htmlparse.config" in this file instead, only problem is updating them
fin = open('rules', 'r')
if fin:
self.rules = fin.read()
fin.close()
fin = open('rules.ascii', 'r')
if fin:
self.rulesAscii = fin.read()
fin.close()
if self.options['htmlparse']:
fin = open(self.options['htmlparse'], 'r')
self.htmlparseconfig = fin.read()
fin.close()
def main(userdata):
'''userdata contains the javascript, html, or pdf data to decode'''
'''if you'd like to do other things with the results, then modify this function'''
HASH = sha1(str(datetime.datetime.now()) + userdata).hexdigest()
options = cmdline_filler(HASH)
root_of_tree = '' # This can be empty but its sometimes useful to specify a filename here
url_or_name = '/' # This can also be empty but if you have the URL, you'd want to set that here
prevRooturl = {} # This can also be empty but if you want to decode something with more context its useful to keep state
js = jsunpackn.jsunpack(root_of_tree, [url_or_name, userdata, root_of_tree], options, prevRooturl)
for url in js.rooturl: # set all the state variables for printing
js.rooturl[url].seen = {}
results = ''
for url in [js.start]: #recursive
print 'The key %s has the following output in recursive mode' % (url)
results = js.rooturl[url].tostring('', True)[0] + '\n'
print results
print 'Note that none of the files are actually created since self.outdir is empty.'
print 'Instead, you could go through each url and look at the decodings that it creates'
for url in js.rooturl:
print 'Looking at key %s, has %d files and %d messages, that follow:' % (url, len(js.rooturl[url].files), len(js.rooturl[url].msg))
for type, hash, data in js.rooturl[url].files:
print 'file type=%s, hash=%s, data=%d bytes' % (type, hash, len(data))
for printable, impact, msg in js.rooturl[url].msg:
print 'output message printable=%d, impact=%d, msg=%s' % (printable, impact, msg)
if __name__ == "__main__":
main('eval("var a=123;");')
| gpl-2.0 |
mibanescu/pulp | playpen/deploy/deploy-environment.py | 10 | 2701 | #!/usr/bin/env python
import argparse
import sys
import time
import traceback
from utils import os1_utils, setup_utils, config_utils
# Setup the CLI
description = 'Deploy a Pulp environment; this can be used in conjunction with the run-integrations-tests.py script'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--config', help='path to the configuration file to use to deploy the environment', nargs='+',
required=True)
parser.add_argument('--deployed-config', help='path to save the deployed instance configuration to; defaults to the'
' given config file with a json file extension.')
parser.add_argument('--test-branch', help='test suite branch to checkout on the tester instance')
parser.add_argument('--repo', help='path to the repository; will override repositories set in the configuration')
parser.add_argument('--no-teardown', action='store_true', help='do not clean up instances if an error occurs')
args = parser.parse_args()
print 'Parsing and validating the configuration file(s)...'
config = config_utils.parse_and_validate_config_files(args.config, args.repo, args.test_branch)
os1_auth = config.get(config_utils.CONFIG_OS1_CREDENTIALS, {})
print 'Done. \n\nAuthenticating with OS1...'
os1 = os1_utils.OS1Manager(**os1_auth)
print 'Done.\n'
try:
# This metadata is attached to all instances to allow cleanup to find
# stale instances made by this utility
instance_metadata = {
'pulp_instance': 'True',
'build_time': str(time.time()),
}
print 'Deploying instances...'
os1.build_instances(config, instance_metadata)
print 'Applying role-specific configurations...'
setup_utils.configure_instances(config)
# Save the configuration for later cleanup
if args.deployed_config is None:
args.deployed_config = args.config[0] + '.json'
config_utils.save_config(config, args.deployed_config)
# Print out machine information and configuration
print '\nThe following instances have been built:'
for instance in config_utils.config_generator(config):
print """
Instance name: %(instance_name)s
Role: %(role)s
SSH: %(host_string)s
""" % instance
print 'The configuration file has been written to ' + args.deployed_config
except (Exception, KeyboardInterrupt), e:
# Print exception message and quit
exception_type, exception_value, exception_tb = sys.exc_info()
print 'Error: %s - %s' % (exception_type, exception_value)
traceback.print_tb(exception_tb)
if not args.no_teardown:
os1.teardown_instances(config)
sys.exit(1)
| gpl-2.0 |
mluo613/osf.io | addons/box/tests/test_models.py | 32 | 1889 | import mock
import unittest
import pytest
from addons.base.tests.models import OAuthAddonNodeSettingsTestSuiteMixin
from addons.base.tests.models import OAuthAddonUserSettingTestSuiteMixin
from addons.box.models import NodeSettings
from addons.box.tests import factories
pytestmark = pytest.mark.django_db
class TestBoxNodeSettings(OAuthAddonNodeSettingsTestSuiteMixin, unittest.TestCase):
full_name = 'Box'
short_name = 'box'
ExternalAccountFactory = factories.BoxAccountFactory
NodeSettingsClass = NodeSettings
NodeSettingsFactory = factories.BoxNodeSettingsFactory
UserSettingsFactory = factories.BoxUserSettingsFactory
def setUp(self):
self.mock_data = mock.patch.object(
NodeSettings,
'_folder_data',
return_value=('12235', '/Foo')
)
self.mock_data.start()
super(TestBoxNodeSettings, self).setUp()
def tearDown(self):
self.mock_data.stop()
super(TestBoxNodeSettings, self).tearDown()
def test_folder_defaults_to_none(self):
node_settings = NodeSettings(user_settings=self.user_settings, owner=factories.ProjectFactory())
node_settings.save()
assert node_settings.folder_id is None
@mock.patch('addons.box.models.Provider.refresh_oauth_key')
def test_serialize_credentials(self, mock_refresh):
mock_refresh.return_value = True
super(TestBoxNodeSettings, self).test_serialize_credentials()
@mock.patch('addons.box.models.UserSettings.revoke_remote_oauth_access', mock.PropertyMock())
def test_complete_has_auth_not_verified(self):
super(TestBoxNodeSettings, self).test_complete_has_auth_not_verified()
class TestBoxUserSettings(OAuthAddonUserSettingTestSuiteMixin, unittest.TestCase):
full_name = 'Box'
short_name = 'box'
ExternalAccountFactory = factories.BoxAccountFactory
| apache-2.0 |
onitake/ansible | lib/ansible/modules/cloud/vultr/vultr_startup_script_facts.py | 27 | 3440 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_startup_script_facts
short_description: Gather facts about the Vultr startup scripts available.
description:
- Gather facts about vultr_startup_scripts available.
version_added: "2.7"
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr startup scripts facts
local_action:
module: vultr_startup_script_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_startup_script_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: string
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: string
sample: "https://api.vultr.com"
vultr_startup_script_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
"vultr_startup_script_facts": [
{
"date_created": "2018-07-19 08:38:36",
"date_modified": "2018-07-19 08:38:36",
"id": 327133,
"name": "lolo",
"script": "#!/bin/bash\necho Hello World > /root/hello",
"type": "boot"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrStartupScriptFacts(Vultr):
def __init__(self, module):
super(AnsibleVultrStartupScriptFacts, self).__init__(module, "vultr_startup_script_facts")
self.returns = {
"SCRIPTID": dict(key='id', convert_to='int'),
"date_created": dict(),
"date_modified": dict(),
"name": dict(),
"script": dict(),
"type": dict(),
}
def get_startupscripts(self):
return self.api_query(path="/v1/startupscript/list")
def parse_startupscript_list(startupscripts_list):
if not startupscripts_list:
return []
return [startupscript for _id, startupscript in startupscripts_list.items()]
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
startupscript_facts = AnsibleVultrStartupScriptFacts(module)
result = startupscript_facts.get_result(parse_startupscript_list(startupscript_facts.get_startupscripts()))
ansible_facts = {
'vultr_startup_script_facts': result['vultr_startup_script_facts']
}
module.exit_json(ansible_facts=ansible_facts, **result)
if __name__ == '__main__':
main()
| gpl-3.0 |
breznak/NAB | nab/labeler.py | 8 | 16181 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import itertools
import numpy
import os
import pandas
try:
import simplejson as json
except ImportError:
import json
from nab.util import (absoluteFilePaths,
getProbationPeriod,
strf,
strp,
deepmap,
createPath,
writeJSON)
def bucket(rawTimes, buffer):
"""
Buckets (groups) timestamps that are within the amount of time specified by
buffer.
"""
bucket = []
rawBuckets = []
current = None
for t in rawTimes:
if current is None:
current = t
bucket = [current]
continue
if (t - current) <= buffer:
bucket.append(t)
else:
rawBuckets.append(bucket)
current = t
bucket = [current]
if bucket:
rawBuckets.append(bucket)
return rawBuckets
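# Grouping sketch (plain integers stand in for the sorted datetimes the
# real callers pass, with a timedelta buffer):
# bucket([1, 2, 10, 11, 30], 3) -> [[1, 2], [10, 11], [30]]
# each timestamp is compared against the first element of the open bucket.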
def merge(rawBuckets, threshold):
"""
Merges bucketed timestamps into one timestamp (most frequent, or earliest).
"""
truths = []
passed = []
for bucket in rawBuckets:
if len(bucket) >= threshold:
truths.append(max(bucket, key=bucket.count))
else:
passed.append(bucket)
return truths, passed
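# Merging sketch: with threshold=2,
# merge([[5, 5, 6], [9]], 2) -> ([5], [[9]])
# the first bucket passes (3 labels >= 2) and collapses to its most
# frequent timestamp; the lone [9] bucket is returned in `passed`.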
def checkForOverlap(labels, buffer, labelsFileName, dataFileName):
"""
Raise a ValueError if the difference between any consecutive labels is smaller
than the buffer.
"""
for i in xrange(len(labels)-1):
if labels[i+1] - labels[i] <= buffer:
# import pdb; pdb.set_trace()
raise ValueError("The labels {} and {} in \'{}\' labels for data file "
"\'{}\' are too close to each other to be considered distinct "
"anomalies. Please relabel."
.format(labels[i], labels[i+1], labelsFileName, dataFileName))
class CorpusLabel(object):
"""
Class to store and manipulate a single set of labels for the whole
benchmark corpus.
"""
def __init__(self, path, corpus):
"""
Initializes a CorpusLabel object by getting the anomaly windows and labels.
When this is done for combining raw user labels, we skip getLabels()
because labels are not yet created.
@param path (string) Name of file containing the set of labels.
@param corpus (nab.Corpus) Corpus object.
"""
self.path = path
self.windows = None
self.labels = None
self.corpus = corpus
self.getWindows()
if "raw" not in self.path:
# Do not get labels from files in the path nab/labels/raw
self.getLabels()
def getWindows(self):
"""
Read JSON label file. Get timestamps as dictionaries with key:value pairs of
a relative path and its corresponding list of windows.
"""
def found(t, data):
f = data["timestamp"][data["timestamp"] == pandas.tslib.Timestamp(t)]
exists = (len(f) == 1)
return exists
with open(os.path.join(self.path)) as windowFile:
windows = json.load(windowFile)
self.windows = {}
for relativePath in windows.keys():
self.windows[relativePath] = deepmap(strp, windows[relativePath])
if len(self.windows[relativePath]) == 0:
continue
data = self.corpus.dataFiles[relativePath].data
if "raw" in self.path:
timestamps = windows[relativePath]
else:
timestamps = list(itertools.chain.from_iterable(windows[relativePath]))
# Check that timestamps are present in dataset
if not all([found(t,data) for t in timestamps]):
raise ValueError("In the label file %s, one of the timestamps used for "
"the datafile %s doesn't match; it does not exist in "
"the file. Timestamps in json label files have to "
"exactly match timestamps in corresponding datafiles."
% (self.path, relativePath))
def validateLabels(self):
"""
This is run at the end of the label combining process (see
scripts/combine_labels.py) to validate the resulting ground truth windows,
specifically that they are distinct (unique, non-overlapping).
"""
with open(os.path.join(self.path)) as windowFile:
windows = json.load(windowFile)
self.windows = {}
for relativePath in windows.keys():
self.windows[relativePath] = deepmap(strp, windows[relativePath])
if len(self.windows[relativePath]) == 0:
continue
num_windows = len(self.windows[relativePath])
if num_windows > 1:
if not all([(self.windows[relativePath][i+1][0]
- self.windows[relativePath][i][1]).total_seconds() >= 0
for i in xrange(num_windows-1)]):
raise ValueError("In the label file %s, windows overlap." % self.path)
def getLabels(self):
"""
Get Labels as a dictionary of key-value pairs of a relative path and its
corresponding binary vector of anomaly labels. Labels are simply a more
verbose version of the windows.
"""
self.labels = {}
for relativePath, dataSet in self.corpus.dataFiles.iteritems():
if self.windows.has_key(relativePath):
windows = self.windows[relativePath]
labels = pandas.DataFrame({"timestamp": dataSet.data["timestamp"]})
labels['label'] = 0
for t1, t2 in windows:
moreThanT1 = labels[labels["timestamp"] >= t1]
betweenT1AndT2 = moreThanT1[moreThanT1["timestamp"] <= t2]
indices = betweenT1AndT2.loc[:,"label"].index
labels["label"].values[indices.values] = 1
self.labels[relativePath] = labels
else:
print "Warning: no label for datafile",relativePath
class LabelCombiner(object):
"""
This class is used to combine labels from multiple human labelers, and the set
of manual labels (known anomalies).
The output is a single ground truth label file containing anomalies where
there is enough human agreement. The class also computes the window around
each anomaly. The exact logic is described elsewhere in the NAB
documentation.
"""
def __init__(self, labelDir, corpus,
threshold, windowSize,
probationaryPercent, verbosity):
"""
@param labelDir (string) A directory name containing user label files.
This directory should contain one label file
per human labeler.
@param corpus (Corpus) Instance of Corpus class.
@param threshold (float) A percentage between 0 and 1, specifying the
agreement threshold. It describes the level
of agreement needed between individual
labelers before a particular point in a
data file is labeled as anomalous in the
combined file.
@param windowSize (float) Estimated size of an anomaly window, as a
ratio the dataset length.
@param verbosity (int) 0, 1, or 2 to print out select labeling
metrics; 0 is none, 2 is the most.
"""
self.labelDir = labelDir
self.corpus = corpus
self.threshold = threshold
self.windowSize = windowSize
self.probationaryPercent = probationaryPercent
self.verbosity = verbosity
self.userLabels = None
self.nLabelers = None
self.knownLabels = None
self.combinedWindows = None
def __str__(self):
ans = ""
ans += "labelDir: %s\n" % self.labelDir
ans += "corpus: %s\n" % self.corpus
ans += "number of labelers: %d\n" % self.nLabelers
ans += "agreement threshold: %d\n" % self.threshold
return ans
def write(self, labelsPath, windowsPath):
"""Write the combined labels and windows to destination directories."""
if not os.path.isdir(labelsPath):
createPath(labelsPath)
if not os.path.isdir(windowsPath):
createPath(windowsPath)
writeJSON(labelsPath, self.labelTimestamps)
writeJSON(windowsPath, self.combinedWindows)
def combine(self):
"""Combine raw and known labels in anomaly windows."""
self.getRawLabels()
self.combineLabels()
self.editPoorLabels()
self.applyWindows()
self.checkWindows()
def getRawLabels(self):
"""Collect the raw user labels from specified directory."""
labelPaths = absoluteFilePaths(self.labelDir)
self.userLabels = []
self.knownLabels = []
for path in labelPaths:
if "known" in path:
self.knownLabels.append(CorpusLabel(path, self.corpus))
else:
self.userLabels.append(CorpusLabel(path, self.corpus))
self.nLabelers = len(self.userLabels)
if self.nLabelers == 0:
raise ValueError("No users labels found")
def combineLabels(self):
"""
Combines raw user labels to create set of true anomaly labels.
A buffer is used to bucket labels that identify the same anomaly. The buffer
is half the estimated window size of an anomaly -- approximates an average
of two anomalies per dataset, and no window can have > 1 anomaly.
After bucketing, a label becomes a true anomaly if it was labeled by a
proportion of the users greater than the defined threshold. Then the bucket
is merged into one timestamp -- the ground truth label.
The set of known anomaly labels are added as well. These have been manually
labeled because we know the direct causes of the anomalies. They are added
as if they are the result of the bucket-merge process.
If verbosity > 0, the dictionary passedLabels -- the raw labels that did not
pass the threshold qualification -- is printed to the console.
"""
def setTruthLabels(dataSet, trueAnomalies):
"""Returns the indices of the ground truth anomalies for a data file."""
timestamps = dataSet.data["timestamp"]
labels = numpy.array(timestamps.isin(trueAnomalies), dtype=int)
return [i for i in range(len(labels)) if labels[i]==1]
self.labelTimestamps = {}
self.labelIndices = {}
for relativePath, dataSet in self.corpus.dataFiles.iteritems():
if ("Known" in relativePath) or ("artificial" in relativePath):
knownAnomalies = self.knownLabels[0].windows[relativePath]
self.labelTimestamps[relativePath] = [str(t) for t in knownAnomalies]
self.labelIndices[relativePath] = setTruthLabels(dataSet, knownAnomalies)
continue
# Calculate the window buffer -- used for bucketing labels identifying
# the same anomaly.
granularity = dataSet.data["timestamp"][1] - dataSet.data["timestamp"][0]
buffer = datetime.timedelta(minutes=
granularity.total_seconds()/60 * len(dataSet.data) * self.windowSize/10)
rawTimesLists = []
userCount = 0
for user in self.userLabels:
if relativePath in user.windows:
# the user has labels for this file
checkForOverlap(
user.windows[relativePath], buffer, user.path, relativePath)
rawTimesLists.append(user.windows[relativePath])
userCount += 1
if not rawTimesLists:
# no labeled anomalies for this data file
self.labelTimestamps[relativePath] = []
self.labelIndices[relativePath] = setTruthLabels(dataSet, [])
continue
else:
rawTimes = list(itertools.chain.from_iterable(rawTimesLists))
rawTimes.sort()
# Bucket and merge the anomaly timestamps.
threshold = userCount * self.threshold
trueAnomalies, passedAnomalies = merge(
bucket(rawTimes, buffer), threshold)
self.labelTimestamps[relativePath] = [str(t) for t in trueAnomalies]
self.labelIndices[relativePath] = setTruthLabels(dataSet, trueAnomalies)
if self.verbosity>0:
print "----"
print "For %s the passed raw labels and qualified true labels are,"\
" respectively:" % relativePath
print passedAnomalies
print trueAnomalies
return self.labelTimestamps, self.labelIndices
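# Worked example of the agreement rule (numbers are made up): with 5
# labelers and self.threshold=0.5, threshold = 5 * 0.5 = 2.5, so a
# bucket needs at least 3 matching raw labels before its modal
# timestamp is promoted to ground truth.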
def editPoorLabels(self):
"""
This edits labels that have been flagged for manual revision. From
inspecting the data and anomaly windows, we have determined some combined
labels should be revised, or not included in the ground truth labels.
"""
count = 0
for relativePath, indices in self.labelIndices.iteritems():
if "iio_us-east-1_i-a2eb1cd9_NetworkIn" in relativePath:
self.labelIndices[relativePath] = [249, 339]
count += len(indices)
if self.verbosity > 0:
print "============================================================="
print "Total ground truth anomalies in benchmark dataset =", count
def applyWindows(self):
"""
This takes all the true anomalies, as calculated by combineLabels(), and
adds a standard window. The window length is the class variable windowSize,
and the location is centered on the anomaly timestamp.
If verbosity = 2, the window metrics are printed to the console.
"""
allWindows = {}
for relativePath, anomalies in self.labelIndices.iteritems():
data = self.corpus.dataFiles[relativePath].data
length = len(data)
num = len(anomalies)
if num:
windowLength = int(self.windowSize * length / len(anomalies))
else:
windowLength = int(self.windowSize * length)
if self.verbosity==2:
print "----"
print "Window metrics for file", relativePath
print "file length =", length, ";" \
"number of windows =", num, ";" \
"window length =", windowLength
windows = []
for a in anomalies:
front = max(a - windowLength/2, 0)
back = min(a + windowLength/2, length-1)
windowLimit = [strf(data["timestamp"][front]),
strf(data["timestamp"][back])]
windows.append(windowLimit)
allWindows[relativePath] = windows
self.combinedWindows = allWindows
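# Window sizing sketch (made-up numbers): a 4000-row file with
# windowSize=0.10 and 2 anomalies gives
# windowLength = int(0.10 * 4000 / 2) = 200 rows,
# centred on each anomaly index and clipped to [0, length - 1].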
def checkWindows(self):
"""
This takes the anomaly windows and checks for overlap with both each other
and with the probationary period. Overlapping windows are merged into a
single window. Windows overlapping with the probationary period are deleted.
"""
for relativePath, windows in self.combinedWindows.iteritems():
numWindows = len(windows)
if numWindows > 0:
fileLength = self.corpus.dataFiles[relativePath].data.shape[0]
probationIndex = getProbationPeriod(
self.probationaryPercent, fileLength)
probationTimestamp = self.corpus.dataFiles[relativePath].data[
"timestamp"][probationIndex]
if (pandas.to_datetime(windows[0][0])
-probationTimestamp).total_seconds() < 0:
del windows[0]
print ("The first window in {} overlaps with the probationary period "
", so we're deleting it.".format(relativePath))
i = 0
while len(windows)-1 > i:
if (pandas.to_datetime(windows[i+1][0])
- pandas.to_datetime(windows[i][1])).total_seconds() <= 0:
# merge windows
windows[i] = [windows[i][0], windows[i+1][1]]
del windows[i+1]
i += 1
| agpl-3.0 |
yishayv/lyacorr | bins_3d.py | 1 | 6348 | import numpy as np
from flux_accumulator import AccumulatorBase
class Bins3D(AccumulatorBase):
def __init__(self, dims, ranges, ar_existing_data=None, filename=''):
"""
:param dims: the shape of the bins
:param ranges: a 2-by-3 array representing the minimum and maximum ranges of the 3 coordinates
:type dims: np.ndarray
:type ranges np.ndarray
:type ar_existing_data np.ndarray
:type filename str
"""
if ar_existing_data is not None:
expected_shape = (dims[0], dims[1], dims[2], 3)
ravelled_shape = (dims[0] * dims[1] * dims[2] * 3,)
if ar_existing_data.shape == ravelled_shape:
self.ar_data = ar_existing_data.reshape(expected_shape)
else:
assert ar_existing_data.shape == expected_shape, "incompatible shape:{0}".format(
ar_existing_data.shape)
self.ar_data = ar_existing_data
else:
self.ar_data = np.zeros((dims[0], dims[1], dims[2], 3))
self.ar_flux = None
self.ar_weights = None
self.ar_count = None
self.update_array_slices()
self.dims = dims
self.index_type = ''
self.update_index_type()
self.filename = filename
self.ranges = ranges
self.bin_sizes = np.abs(ranges[1] - ranges[0]) / dims
def add_array_with_mask(self, ar_flux, ar_x, ar_y, ar_z, mask, ar_weights):
"""
add flux to x, y, z bins with weights and a filter mask.
note: the x, y, z values should be rescaled prior to calling this method.
:type ar_flux: np.multiarray.ndarray
:type ar_x: np.multiarray.ndarray
:type ar_y: np.multiarray.ndarray
:type ar_z: np.multiarray.ndarray
:type mask: np.multiarray.ndarray
:type ar_weights: np.multiarray.ndarray
"""
ar_x_int = ((ar_x - self.ranges[0, 0]) / self.bin_sizes[0]).astype(self.index_type)
ar_y_int = ((ar_y - self.ranges[0, 1]) / self.bin_sizes[1]).astype(self.index_type)
ar_z_int = ((ar_z - self.ranges[0, 2]) / self.bin_sizes[2]).astype(self.index_type)
# restrict the mask to pixels inside the bin range.
m = np.all((0 <= ar_x_int, ar_x_int < self.dims[0],
0 <= ar_y_int, ar_y_int < self.dims[1],
0 <= ar_z_int, ar_z_int < self.dims[2],
mask), axis=0)
ar_flux_masked = ar_flux[m]
ar_weights_masked = ar_weights[m]
ar_indices_x = ar_x_int[m]
ar_indices_y = ar_y_int[m]
ar_indices_z = ar_z_int[m]
# make sure we don't invert x, y and z
# z is the innermost coordinate, x is the outermost.
# represent bins in 1D. this is faster than a 2D numpy histogram
ar_indices_xyz = ar_indices_z + self.dims[2] * (ar_indices_y + (self.dims[1] * ar_indices_x))
# bin data according to x,y values
flux_hist_1d = np.bincount(ar_indices_xyz, weights=ar_flux_masked,
minlength=int(np.prod(self.dims)))
weights_hist_1d = np.bincount(ar_indices_xyz, weights=ar_weights_masked,
minlength=int(np.prod(self.dims)))
count_hist_1d = np.bincount(ar_indices_xyz, weights=None,
minlength=int(np.prod(self.dims)))
# return from 1D to a 2d array
flux_hist = flux_hist_1d.reshape(self.dims)
count_hist = count_hist_1d.reshape(self.dims)
weights_hist = weights_hist_1d.reshape(self.dims)
# accumulate new data
self.ar_flux += flux_hist
self.ar_weights += weights_hist
self.ar_count += count_hist
def merge(self, bins_3d_2):
assert self.ar_data.shape == bins_3d_2.ar_data.shape
assert np.all(self.ranges == bins_3d_2.ranges)
assert np.all(self.dims == bins_3d_2.dims)
self.ar_data += bins_3d_2.ar_data
return self
def merge_array(self, ar_data):
assert self.ar_data.shape == ar_data.shape
self.ar_data += ar_data
return self
def save(self, filename):
self.filename = filename
self.flush()
def from_4d_array(self, stacked_array):
self.ar_data = stacked_array
self.update_array_slices()
self.dims = self.ar_count.shape
self.update_index_type()
def load(self, filename):
# TODO: to static
stacked_array = np.load(filename)
self.from_4d_array(stacked_array)
def update_index_type(self):
# choose integer type according to number of bins
self.index_type = 'int32' if np.prod(self.dims) > 32767 else 'int16'
def update_array_slices(self):
self.ar_flux = self.ar_data[:, :, :, 0]
self.ar_count = self.ar_data[:, :, :, 1]
self.ar_weights = self.ar_data[:, :, :, 2]
def __radd__(self, other):
return self.merge(other)
def __add__(self, other):
return self.merge(other)
@classmethod
def init_as(cls, other):
"""
:type other: Bins3D
"""
new_obj = cls(other.dims, other.ranges, filename=other.filename)
return new_obj
@classmethod
def from_other(cls, other):
new_obj = cls.init_as(other)
        new_obj.merge(other)
        return new_obj
def set_filename(self, filename):
self.filename = filename
def to_4d_array(self):
return self.ar_data
def flush(self):
np.save(self.filename, self.to_4d_array())
def get_ranges(self):
return self.ranges
def get_dims(self):
return self.dims
def get_pair_count(self):
return self.ar_count.sum()
def get_data_as_array(self):
return self.to_4d_array()
def get_array_shape(self):
return self.ar_data.shape
def get_metadata(self):
return [self.dims, self.index_type,
self.filename,
self.ranges, self.bin_sizes]
@classmethod
def load_from(cls, ar, metadata):
new_bins = cls(dims=(1, 1, 1), ranges=((1, 1, 1), (1, 1, 1)))
(new_bins.dims, new_bins.index_type, new_bins.filename,
new_bins.ranges, new_bins.bin_sizes) = metadata
new_bins.ar_data = ar
new_bins.update_array_slices()
return new_bins
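if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original module):
    # accumulate three unit-flux pixels into a small 4x4x4 grid and check
    # that all three landed inside the bin ranges.
    example_dims = np.array([4, 4, 4])
    example_ranges = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])  # [mins, maxs]
    example_bins = Bins3D(example_dims, example_ranges)
    coords = np.array([0.1, 0.5, 0.9])
    example_bins.add_array_with_mask(ar_flux=np.ones(3),
                                     ar_x=coords, ar_y=coords, ar_z=coords,
                                     mask=np.ones(3, dtype=bool),
                                     ar_weights=np.ones(3))
    assert example_bins.get_pair_count() == 3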
| mit |
enkaskal/hello | gtest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
Zhongqilong/mykbengineer | kbe/res/scripts/common/Lib/shelve.py | 83 | 8428 | """Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
from pickle import Pickler, Unpickler
from io import BytesIO
import collections
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.MutableMapping):
"""Base class for shelf implementations.
This is initialized with a dictionary-like object.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
self.dict = dict
if protocol is None:
protocol = 3
self._protocol = protocol
self.writeback = writeback
self.cache = {}
self.keyencoding = keyencoding
def __iter__(self):
for k in self.dict.keys():
yield k.decode(self.keyencoding)
def __len__(self):
return len(self.dict)
def __contains__(self, key):
return key.encode(self.keyencoding) in self.dict
def get(self, key, default=None):
if key.encode(self.keyencoding) in self.dict:
return self[key]
return default
def __getitem__(self, key):
try:
value = self.cache[key]
except KeyError:
f = BytesIO(self.dict[key.encode(self.keyencoding)])
value = Unpickler(f).load()
if self.writeback:
self.cache[key] = value
return value
def __setitem__(self, key, value):
if self.writeback:
self.cache[key] = value
f = BytesIO()
p = Pickler(f, self._protocol)
p.dump(value)
self.dict[key.encode(self.keyencoding)] = f.getvalue()
def __delitem__(self, key):
del self.dict[key.encode(self.keyencoding)]
try:
del self.cache[key]
except KeyError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.sync()
try:
self.dict.close()
except AttributeError:
pass
# Catch errors that may happen when close is called from __del__
# because CPython is in interpreter shutdown.
try:
self.dict = _ClosedDict()
except (NameError, TypeError):
self.dict = None
def __del__(self):
if not hasattr(self, 'writeback'):
# __init__ didn't succeed, so don't bother closing
# see http://bugs.python.org/issue1339007 for details
return
self.close()
def sync(self):
if self.writeback and self.cache:
self.writeback = False
for key, entry in self.cache.items():
self[key] = entry
self.writeback = True
self.cache = {}
if hasattr(self.dict, 'sync'):
self.dict.sync()
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
This adds methods first(), next(), previous(), last() and
set_location() that have no counterpart in [g]dbm databases.
The actual database must be opened using one of the "bsddb"
modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
bsddb.rnopen) and passed to the constructor.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
Shelf.__init__(self, dict, protocol, writeback, keyencoding)
def set_location(self, key):
(key, value) = self.dict.set_location(key)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def next(self):
(key, value) = next(self.dict)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def previous(self):
(key, value) = self.dict.previous()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def first(self):
(key, value) = self.dict.first()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def last(self):
(key, value) = self.dict.last()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "dbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False):
import dbm
Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
    version of the pickle protocol (0, 1, 2, or 3; the default is 3).
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback)
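if __name__ == '__main__':
    # Small demonstration added for illustration (not part of the module):
    # without writeback=True the in-place append below would be silently lost,
    # because d['scores'] returns a copy of the stored entry.
    import os.path
    import tempfile
    _base = os.path.join(tempfile.mkdtemp(), 'demo_shelf')
    with open(_base, writeback=True) as d:   # 'open' here is shelve.open
        d['scores'] = []
        d['scores'].append(10)               # mutates the cached copy
    with open(_base) as d:                   # cache was flushed on close
        assert d['scores'] == [10]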
| lgpl-3.0 |
thefactory/marathon-logger | stores.py | 1 | 1282 | import collections
import logging
import logging.handlers
import urlparse
class InMemoryStore(object):
def __init__(self, url):
qa = urlparse.parse_qs(url.query)
max_length = int(qa.get('max_length', ['100'])[0])
print 'Configuring in-memory store with {settings}'.format(settings={"max_length": max_length})
self.events = collections.deque(maxlen=max_length)
def save(self, event):
print event
self.events.append(event)
def list(self):
return list(self.events)
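# Illustrative helper, not part of the original module: build an
# InMemoryStore from a URL the same way a store factory would, and show
# that the deque drops the oldest event once max_length is exceeded.
def _in_memory_store_example():
    url = urlparse.urlparse('memory://?max_length=2')
    store = InMemoryStore(url)
    for event in ('deploy', 'scale', 'restart'):
        store.save(event)
    return store.list()  # ['scale', 'restart'] -- the oldest event is evicted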
class SyslogUdpStore(object):
def __init__(self, url):
server = url.hostname
port = url.port or logging.handlers.SYSLOG_UDP_PORT
address = (server, port)
print 'Configuring syslog-udp store with {settings}'.format(settings={"server": server, "port": port})
self.log = logging.getLogger('marathon-logger')
facility = logging.handlers.SysLogHandler.LOG_USER
h = logging.handlers.SysLogHandler(address, facility)
f = logging.Formatter('marathon-logger: %(message)s')
h.setFormatter(f)
self.log.addHandler(h)
self.log.setLevel(logging.getLevelName('INFO'))
def save(self, event):
self.log.info(event)
def list(self):
        return []
| mit |
Averroes/raft | thirdparty/pdfminer/pdfminer/arcfour.py | 11 | 1141 | #!/usr/bin/env python2
""" Python implementation of Arcfour encryption algorithm.
This code is in the public domain.
"""
## Arcfour
##
class Arcfour(object):
"""
>>> Arcfour('Key').process('Plaintext').encode('hex')
'bbf316e8d940af0ad3'
>>> Arcfour('Wiki').process('pedia').encode('hex')
'1021bf0420'
>>> Arcfour('Secret').process('Attack at dawn').encode('hex')
'45a01f645fc35b383552544b9bf5'
"""
def __init__(self, key):
s = list(range(256))
j = 0
klen = len(key)
for i in range(256):
j = (j + s[i] + ord(key[i % klen])) % 256
(s[i], s[j]) = (s[j], s[i])
self.s = s
(self.i, self.j) = (0, 0)
return
def process(self, data):
(i, j) = (self.i, self.j)
s = self.s
r = ''
for c in data:
i = (i+1) % 256
j = (j+s[i]) % 256
(s[i], s[j]) = (s[j], s[i])
k = s[(s[i]+s[j]) % 256]
r += chr(ord(c) ^ k)
(self.i, self.j) = (i, j)
return r
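# Note added for illustration: Arcfour is a symmetric stream cipher, so
# decrypting is just re-encrypting with a fresh instance of the same key:
#   Arcfour('Key').process(Arcfour('Key').process('Plaintext')) == 'Plaintext'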
# test
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
heran7/edx-platform | common/lib/extract_tar.py | 15 | 2148 | """
Safe version of tarfile.extractall which does not extract any files that would
be, or symlink to a file that is, outside of the directory extracted in.
Adapted from:
http://stackoverflow.com/questions/10060069/safely-extract-zip-or-tar-using-python
"""
from os.path import abspath, realpath, dirname, join as joinpath
from django.core.exceptions import SuspiciousOperation
import logging
log = logging.getLogger(__name__) #pylint: disable=C0103
def resolved(rpath):
"""
Returns the canonical absolute path of `rpath`.
"""
return realpath(abspath(rpath))
def _is_bad_path(path, base):
"""
Is (the canonical absolute path of) `path` outside `base`?
"""
return not resolved(joinpath(base, path)).startswith(base)
def _is_bad_link(info, base):
"""
    Does the file sym- or hard-link to files outside `base`?
"""
# Links are interpreted relative to the directory containing the link
tip = resolved(joinpath(base, dirname(info.name)))
return _is_bad_path(info.linkname, base=tip)
def safemembers(members):
"""
Check that all elements of a tar file are safe.
"""
base = resolved(".")
for finfo in members:
if _is_bad_path(finfo.name, base):
log.debug("File %r is blocked (illegal path)", finfo.name)
raise SuspiciousOperation("Illegal path")
        elif finfo.issym() and _is_bad_link(finfo, base):
            log.debug("File %r is blocked: Symlink to %r", finfo.name,
                      finfo.linkname)
            raise SuspiciousOperation("Symlink")
        elif finfo.islnk() and _is_bad_link(finfo, base):
            log.debug("File %r is blocked: Hard link to %r", finfo.name,
                      finfo.linkname)
            raise SuspiciousOperation("Hard link")
elif finfo.isdev():
log.debug("File %r is blocked: FIFO, device or character file",
finfo.name)
raise SuspiciousOperation("Dev file")
return members
def safetar_extractall(tarf, *args, **kwargs):
"""
Safe version of `tarf.extractall()`.
"""
return tarf.extractall(members=safemembers(tarf), *args, **kwargs)
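
# Usage sketch added for illustration (the archive name is hypothetical).
# Note that safemembers() resolves member paths against the current working
# directory, so change into the destination directory before extracting:
#
#   import tarfile
#   with tarfile.open('course_export.tar.gz') as tarf:
#       safetar_extractall(tarf)  # raises SuspiciousOperation on bad members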
| agpl-3.0 |
switchboardOp/ansible | lib/ansible/modules/network/lenovo/cnos_template.py | 59 | 7077 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send CLI templates to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_template
author: "Dave Kasberg (@dkasberg)"
short_description: Manage switch configuration using templates on devices running Lenovo CNOS
description:
- This module allows you to work with the running configuration of a switch. It provides a way
to execute a set of CNOS commands on a switch by evaluating the current running configuration
and executing the commands only if the specific settings have not been already configured.
The configuration source can be a set of commands or a template written in the Jinja2 templating language.
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_template.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
commandfile:
description:
- This specifies the path to the CNOS command file which needs to be applied. This usually
comes from the commands folder. Generally this file is the output of the variables applied
on a template file. So this command is preceded by a template module.
Note The command file must contain the Ansible keyword {{ inventory_hostname }} in its
filename to ensure that the command file is unique for each switch and condition.
If this is omitted, the command file will be overwritten during iteration. For example,
commandfile=./commands/clos_leaf_bgp_{{ inventory_hostname }}_commands.txt
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_template. These are written in the main.yml file of the tasks directory.
---
- name: Replace Config CLI command template with values
template:
src: demo_template.j2
dest: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
vlanid1: 13
slot_chassis_number1: "1/2"
portchannel_interface_number1: 100
portchannel_mode1: "active"
- name: Applying CLI commands on Switches
cnos_template:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
commandfile: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
outputfile: "./results/demo_template_command_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Template Applied."
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
commandfile=dict(required=True),
outputfile=dict(required=True),
host=dict(required=True),
deviceType=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
commandfile = module.params['commandfile']
outputfile = module.params['outputfile']
deviceType = module.params['deviceType']
hostIP = module.params['host']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
# Send commands one by one
    with open(commandfile, "r") as f:
        for line in f:
            # Omit the comment lines in template file
            if not line.startswith("#"):
                command = line
                if not line.endswith("\n"):
                    command = command + "\n"
                response = cnos.waitForDeviceResponse(command, "#", 2, remote_conn)
                errorMsg = cnos.checkOutputForError(response)
                output = output + response
                if errorMsg is not None:
                    break  # stop sending further commands once an error is seen
# Write to memory
output = output + cnos.waitForDeviceResponse("save\n", "#", 3, remote_conn)
# Write output to file
    with open(outputfile, "a") as outputhandle:
        outputhandle.write(output)
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Template Applied")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 |
knowsis/django-allauth | allauth/socialaccount/south_migrations/0008_client_id.py | 78 | 6323 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for app in orm.SocialApp.objects.all():
app.client_id = app.key
app.key = ''
app.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 12, 22, 12, 51, 18, 10544)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 12, 22, 12, 51, 18, 10426)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialaccount': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'socialaccount.socialapp': {
'Meta': {'object_name': 'SocialApp'},
'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'})
},
'socialaccount.socialtoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['socialaccount']
| mit |
dafei2015/hugula | Client/tools/site-packages/PIL/OleFileIO.py | 13 | 16301 | #
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id: OleFileIO.py 2339 2005-03-25 08:02:17Z fredrik $
#
# stuff to deal with OLE2 Structured Storage files. this module is
# used by PIL to read Image Composer and FlashPix files, but can also
# be used to read other files of this type.
#
# History:
# 1997-01-20 fl Created
# 1997-01-22 fl Fixed 64-bit portability quirk
# 2003-09-09 fl Fixed typo in OleFileIO.loadfat (noted by Daniel Haertle)
# 2004-02-29 fl Changed long hex constants to signed integers
#
# Notes:
# FIXME: sort out sign problem (eliminate long hex constants)
# FIXME: change filename to use "a/b/c" instead of ["a", "b", "c"]
# FIXME: provide a glob mechanism function (using fnmatchcase)
#
# Literature:
#
# "FlashPix Format Specification, Appendix A", Kodak and Microsoft,
# September 1996.
#
# Quotes:
#
# "If this document and functionality of the Software conflict,
# the actual functionality of the Software represents the correct
# functionality" -- Microsoft, in the OLE format specification
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
import string, StringIO
def i16(c, o = 0):
return ord(c[o])+(ord(c[o+1])<<8)
def i32(c, o = 0):
return ord(c[o])+(ord(c[o+1])<<8)+(ord(c[o+2])<<16)+(ord(c[o+3])<<24)
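# Illustration (not in the original file): these helpers decode little-endian
# integers from raw sector bytes, e.g.
#   i16("\x34\x12") == 0x1234
#   i32("\x78\x56\x34\x12") == 0x12345678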
MAGIC = '\320\317\021\340\241\261\032\341'
#
# --------------------------------------------------------------------
# property types
VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6;
VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11;
VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17;
VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23;
VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28;
VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64;
VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68;
VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72;
VT_VECTOR=0x1000;
# map property id to name (for debugging purposes)
VT = {}
for k, v in vars().items():
if k[:3] == "VT_":
VT[v] = k
#
# --------------------------------------------------------------------
# Some common document types (root.clsid fields)
WORD_CLSID = "00020900-0000-0000-C000-000000000046"
#
# --------------------------------------------------------------------
class _OleStream(StringIO.StringIO):
"""OLE2 Stream
Returns a read-only file object which can be used to read
the contents of a OLE stream. To open a stream, use the
openstream method in the OleFile class.
This function can be used with either ordinary streams,
or ministreams, depending on the offset, sectorsize, and
fat table arguments.
"""
# FIXME: should store the list of sects obtained by following
# the fat chain, and load new sectors on demand instead of
# loading it all in one go.
def __init__(self, fp, sect, size, offset, sectorsize, fat):
data = []
while sect != -2: # 0xFFFFFFFEL:
fp.seek(offset + sectorsize * sect)
data.append(fp.read(sectorsize))
sect = fat[sect]
data = string.join(data, "")
# print len(data), size
StringIO.StringIO.__init__(self, data[:size])
#
# --------------------------------------------------------------------
# FIXME: should add a counter in here to avoid looping forever
# if the tree is broken.
class _OleDirectoryEntry:
"""OLE2 Directory Entry
Encapsulates a stream directory entry. Note that the
constructor builds a tree of all subentries, so we only
have to call it with the root object.
"""
def __init__(self, sidlist, sid):
# store directory parameters. the caller provides
# a complete list of directory entries, as read from
# the directory stream.
name, type, sect, size, sids, clsid = sidlist[sid]
self.sid = sid
self.name = name
self.type = type # 1=storage 2=stream
self.sect = sect
self.size = size
self.clsid = clsid
# process child nodes, if any
self.kids = []
sid = sidlist[sid][4][2]
if sid != -1:
# the directory entries are organized as a red-black tree.
# the following piece of code does an ordered traversal of
# such a tree (at least that's what I hope ;-)
stack = [self.sid]
# start at leftmost position
left, right, child = sidlist[sid][4]
while left != -1: # 0xFFFFFFFFL:
stack.append(sid)
sid = left
left, right, child = sidlist[sid][4]
while sid != self.sid:
self.kids.append(_OleDirectoryEntry(sidlist, sid))
# try to move right
left, right, child = sidlist[sid][4]
if right != -1: # 0xFFFFFFFFL:
# and then back to the left
sid = right
while 1:
left, right, child = sidlist[sid][4]
if left == -1: # 0xFFFFFFFFL:
break
stack.append(sid)
sid = left
else:
# couldn't move right; move up instead
while 1:
ptr = stack[-1]
del stack[-1]
left, right, child = sidlist[ptr][4]
if right != sid:
break
sid = right
left, right, child = sidlist[sid][4]
if right != ptr:
sid = ptr
# in the OLE file, entries are sorted on (length, name).
# for convenience, we sort them on name instead.
self.kids.sort()
def __cmp__(self, other):
"Compare entries by name"
return cmp(self.name, other.name)
def dump(self, tab = 0):
"Dump this entry, and all its subentries (for debug purposes only)"
TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)",
"(property)", "(root)"]
print " "*tab + repr(self.name), TYPES[self.type],
if self.type in (2, 5):
print self.size, "bytes",
print
if self.type in (1, 5) and self.clsid:
print " "*tab + "{%s}" % self.clsid
for kid in self.kids:
kid.dump(tab + 2)
#
# --------------------------------------------------------------------
##
# This class encapsulates the interface to an OLE 2 structured
# storage file. Use the {@link listdir} and {@link openstream}
# methods to access the contents of this file.
class OleFileIO:
"""OLE container object
This class encapsulates the interface to an OLE 2 structured
storage file. Use the listdir and openstream methods to access
the contents of this file.
Object names are given as a list of strings, one for each subentry
level. The root entry should be omitted. For example, the following
code extracts all image streams from a Microsoft Image Composer file:
ole = OleFileIO("fan.mic")
for entry in ole.listdir():
if entry[1:2] == "Image":
fin = ole.openstream(entry)
fout = open(entry[0:1], "wb")
while 1:
s = fin.read(8192)
if not s:
break
fout.write(s)
You can use the viewer application provided with the Python Imaging
Library to view the resulting files (which happens to be standard
TIFF files).
"""
def __init__(self, filename = None):
if filename:
self.open(filename)
##
# Open an OLE2 file.
def open(self, filename):
"""Open an OLE2 file"""
if type(filename) == type(""):
self.fp = open(filename, "rb")
else:
self.fp = filename
header = self.fp.read(512)
if len(header) != 512 or header[:8] != MAGIC:
raise IOError, "not an OLE2 structured storage file"
# file clsid (probably never used, so we don't store it)
clsid = self._clsid(header[8:24])
# FIXME: could check version and byte order fields
self.sectorsize = 1 << i16(header, 30)
self.minisectorsize = 1 << i16(header, 32)
self.minisectorcutoff = i32(header, 56)
# Load file allocation tables
self.loadfat(header)
# Load direcory. This sets both the sidlist (ordered by id)
# and the root (ordered by hierarchy) members.
self.loaddirectory(i32(header, 48))
self.ministream = None
self.minifatsect = i32(header, 60)
def loadfat(self, header):
# Load the FAT table. The header contains a sector numbers
# for the first 109 FAT sectors. Additional sectors are
# described by DIF blocks (FIXME: not yet implemented)
sect = header[76:512]
fat = []
for i in range(0, len(sect), 4):
ix = i32(sect, i)
if ix == -2 or ix == -1: # ix == 0xFFFFFFFEL or ix == 0xFFFFFFFFL:
break
s = self.getsect(ix)
fat = fat + map(lambda i, s=s: i32(s, i), range(0, len(s), 4))
self.fat = fat
def loadminifat(self):
# Load the MINIFAT table. This is stored in a standard sub-
# stream, pointed to by a header field.
s = self._open(self.minifatsect).read()
self.minifat = map(lambda i, s=s: i32(s, i), range(0, len(s), 4))
def getsect(self, sect):
# Read given sector
self.fp.seek(512 + self.sectorsize * sect)
return self.fp.read(self.sectorsize)
def _unicode(self, s):
# Map unicode string to Latin 1
# FIXME: some day, Python will provide an official way to handle
# Unicode strings, but until then, this will have to do...
return filter(ord, s)
def loaddirectory(self, sect):
# Load the directory. The directory is stored in a standard
# substream, independent of its size.
# read directory stream
fp = self._open(sect)
# create list of sid entries
self.sidlist = []
while 1:
entry = fp.read(128)
if not entry:
break
type = ord(entry[66])
name = self._unicode(entry[0:0+i16(entry, 64)])
ptrs = i32(entry, 68), i32(entry, 72), i32(entry, 76)
sect, size = i32(entry, 116), i32(entry, 120)
clsid = self._clsid(entry[80:96])
self.sidlist.append((name, type, sect, size, ptrs, clsid))
# create hierarchical list of directory entries
self.root = _OleDirectoryEntry(self.sidlist, 0)
def dumpdirectory(self):
# Dump directory (for debugging only)
self.root.dump()
def _clsid(self, clsid):
if clsid == "\0" * len(clsid):
return ""
return (("%08X-%04X-%04X-%02X%02X-" + "%02X" * 6) %
((i32(clsid, 0), i16(clsid, 4), i16(clsid, 6)) +
tuple(map(ord, clsid[8:16]))))
def _list(self, files, prefix, node):
# listdir helper
prefix = prefix + [node.name]
for entry in node.kids:
if entry.kids:
self._list(files, prefix, entry)
else:
files.append(prefix[1:] + [entry.name])
def _find(self, filename):
# openstream helper
node = self.root
for name in filename:
for kid in node.kids:
if kid.name == name:
break
else:
raise IOError, "file not found"
node = kid
return node.sid
def _open(self, start, size = 0x7FFFFFFF):
# openstream helper.
if size < self.minisectorcutoff:
# ministream object
if not self.ministream:
self.loadminifat()
self.ministream = self._open(self.sidlist[0][2])
return _OleStream(self.ministream, start, size, 0,
self.minisectorsize, self.minifat)
# standard stream
return _OleStream(self.fp, start, size, 512,
self.sectorsize, self.fat)
##
# Returns a list of streams stored in this file.
def listdir(self):
"""Return a list of streams stored in this file"""
files = []
self._list(files, [], self.root)
return files
##
# Opens a stream as a read-only file object.
def openstream(self, filename):
"""Open a stream as a read-only file object"""
slot = self._find(filename)
name, type, sect, size, sids, clsid = self.sidlist[slot]
if type != 2:
raise IOError, "this file is not a stream"
return self._open(sect, size)
##
# Gets a list of properties described in substream.
def getproperties(self, filename):
"""Return properties described in substream"""
fp = self.openstream(filename)
data = {}
# header
s = fp.read(28)
clsid = self._clsid(s[8:24])
# format id
s = fp.read(20)
fmtid = self._clsid(s[:16])
fp.seek(i32(s, 16))
# get section
s = "****" + fp.read(i32(fp.read(4))-4)
for i in range(i32(s, 4)):
id = i32(s, 8+i*8)
offset = i32(s, 12+i*8)
type = i32(s, offset)
# test for common types first (should perhaps use
# a dictionary instead?)
if type == VT_I2:
value = i16(s, offset+4)
if value >= 32768:
value = value - 65536
elif type == VT_UI2:
value = i16(s, offset+4)
elif type in (VT_I4, VT_ERROR):
value = i32(s, offset+4)
elif type == VT_UI4:
value = i32(s, offset+4) # FIXME
elif type in (VT_BSTR, VT_LPSTR):
count = i32(s, offset+4)
value = s[offset+8:offset+8+count-1]
elif type == VT_BLOB:
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
elif type == VT_LPWSTR:
count = i32(s, offset+4)
value = self._unicode(s[offset+8:offset+8+count*2])
elif type == VT_FILETIME:
value = long(i32(s, offset+4)) + (long(i32(s, offset+8))<<32)
# FIXME: this is a 64-bit int: "number of 100ns periods
# since Jan 1,1601". Should map this to Python time
value = value / 10000000L # seconds
elif type == VT_UI1:
value = ord(s[offset+4])
elif type == VT_CLSID:
value = self._clsid(s[offset+4:offset+20])
elif type == VT_CF:
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
else:
value = None # everything else yields "None"
# FIXME: add support for VT_VECTOR
#print "%08x" % id, repr(value),
#print "(%s)" % VT[i32(s, offset) & 0xFFF]
data[id] = value
return data
#
# --------------------------------------------------------------------
# This script can be used to dump the directory of any OLE2 structured
# storage file.
if __name__ == "__main__":
import sys
for file in sys.argv[1:]:
try:
ole = OleFileIO(file)
print "-" * 68
print file
print "-" * 68
ole.dumpdirectory()
for file in ole.listdir():
if file[-1][0] == "\005":
print file
props = ole.getproperties(file)
props = props.items()
props.sort()
for k, v in props:
print " ", k, v
except IOError, v:
print "***", "cannot read", file, "-", v
| mit |
joram/sickbeard-orange | lib/unidecode/x0d3.py | 253 | 4705 | data = (
'tim', # 0x00
'tib', # 0x01
'tibs', # 0x02
'tis', # 0x03
'tiss', # 0x04
'ting', # 0x05
'tij', # 0x06
'tic', # 0x07
'tik', # 0x08
'tit', # 0x09
'tip', # 0x0a
'tih', # 0x0b
'pa', # 0x0c
'pag', # 0x0d
'pagg', # 0x0e
'pags', # 0x0f
'pan', # 0x10
'panj', # 0x11
'panh', # 0x12
'pad', # 0x13
'pal', # 0x14
'palg', # 0x15
'palm', # 0x16
'palb', # 0x17
'pals', # 0x18
'palt', # 0x19
'palp', # 0x1a
'palh', # 0x1b
'pam', # 0x1c
'pab', # 0x1d
'pabs', # 0x1e
'pas', # 0x1f
'pass', # 0x20
'pang', # 0x21
'paj', # 0x22
'pac', # 0x23
'pak', # 0x24
'pat', # 0x25
'pap', # 0x26
'pah', # 0x27
'pae', # 0x28
'paeg', # 0x29
'paegg', # 0x2a
'paegs', # 0x2b
'paen', # 0x2c
'paenj', # 0x2d
'paenh', # 0x2e
'paed', # 0x2f
'pael', # 0x30
'paelg', # 0x31
'paelm', # 0x32
'paelb', # 0x33
'paels', # 0x34
'paelt', # 0x35
'paelp', # 0x36
'paelh', # 0x37
'paem', # 0x38
'paeb', # 0x39
'paebs', # 0x3a
'paes', # 0x3b
'paess', # 0x3c
'paeng', # 0x3d
'paej', # 0x3e
'paec', # 0x3f
'paek', # 0x40
'paet', # 0x41
'paep', # 0x42
'paeh', # 0x43
'pya', # 0x44
'pyag', # 0x45
'pyagg', # 0x46
'pyags', # 0x47
'pyan', # 0x48
'pyanj', # 0x49
'pyanh', # 0x4a
'pyad', # 0x4b
'pyal', # 0x4c
'pyalg', # 0x4d
'pyalm', # 0x4e
'pyalb', # 0x4f
'pyals', # 0x50
'pyalt', # 0x51
'pyalp', # 0x52
'pyalh', # 0x53
'pyam', # 0x54
'pyab', # 0x55
'pyabs', # 0x56
'pyas', # 0x57
'pyass', # 0x58
'pyang', # 0x59
'pyaj', # 0x5a
'pyac', # 0x5b
'pyak', # 0x5c
'pyat', # 0x5d
'pyap', # 0x5e
'pyah', # 0x5f
'pyae', # 0x60
'pyaeg', # 0x61
'pyaegg', # 0x62
'pyaegs', # 0x63
'pyaen', # 0x64
'pyaenj', # 0x65
'pyaenh', # 0x66
'pyaed', # 0x67
'pyael', # 0x68
'pyaelg', # 0x69
'pyaelm', # 0x6a
'pyaelb', # 0x6b
'pyaels', # 0x6c
'pyaelt', # 0x6d
'pyaelp', # 0x6e
'pyaelh', # 0x6f
'pyaem', # 0x70
'pyaeb', # 0x71
'pyaebs', # 0x72
'pyaes', # 0x73
'pyaess', # 0x74
'pyaeng', # 0x75
'pyaej', # 0x76
'pyaec', # 0x77
'pyaek', # 0x78
'pyaet', # 0x79
'pyaep', # 0x7a
'pyaeh', # 0x7b
'peo', # 0x7c
'peog', # 0x7d
'peogg', # 0x7e
'peogs', # 0x7f
'peon', # 0x80
'peonj', # 0x81
'peonh', # 0x82
'peod', # 0x83
'peol', # 0x84
'peolg', # 0x85
'peolm', # 0x86
'peolb', # 0x87
'peols', # 0x88
'peolt', # 0x89
'peolp', # 0x8a
'peolh', # 0x8b
'peom', # 0x8c
'peob', # 0x8d
'peobs', # 0x8e
'peos', # 0x8f
'peoss', # 0x90
'peong', # 0x91
'peoj', # 0x92
'peoc', # 0x93
'peok', # 0x94
'peot', # 0x95
'peop', # 0x96
'peoh', # 0x97
'pe', # 0x98
'peg', # 0x99
'pegg', # 0x9a
'pegs', # 0x9b
'pen', # 0x9c
'penj', # 0x9d
'penh', # 0x9e
'ped', # 0x9f
'pel', # 0xa0
'pelg', # 0xa1
'pelm', # 0xa2
'pelb', # 0xa3
'pels', # 0xa4
'pelt', # 0xa5
'pelp', # 0xa6
'pelh', # 0xa7
'pem', # 0xa8
'peb', # 0xa9
'pebs', # 0xaa
'pes', # 0xab
'pess', # 0xac
'peng', # 0xad
'pej', # 0xae
'pec', # 0xaf
'pek', # 0xb0
'pet', # 0xb1
'pep', # 0xb2
'peh', # 0xb3
'pyeo', # 0xb4
'pyeog', # 0xb5
'pyeogg', # 0xb6
'pyeogs', # 0xb7
'pyeon', # 0xb8
'pyeonj', # 0xb9
'pyeonh', # 0xba
'pyeod', # 0xbb
'pyeol', # 0xbc
'pyeolg', # 0xbd
'pyeolm', # 0xbe
'pyeolb', # 0xbf
'pyeols', # 0xc0
'pyeolt', # 0xc1
'pyeolp', # 0xc2
'pyeolh', # 0xc3
'pyeom', # 0xc4
'pyeob', # 0xc5
'pyeobs', # 0xc6
'pyeos', # 0xc7
'pyeoss', # 0xc8
'pyeong', # 0xc9
'pyeoj', # 0xca
'pyeoc', # 0xcb
'pyeok', # 0xcc
'pyeot', # 0xcd
'pyeop', # 0xce
'pyeoh', # 0xcf
'pye', # 0xd0
'pyeg', # 0xd1
'pyegg', # 0xd2
'pyegs', # 0xd3
'pyen', # 0xd4
'pyenj', # 0xd5
'pyenh', # 0xd6
'pyed', # 0xd7
'pyel', # 0xd8
'pyelg', # 0xd9
'pyelm', # 0xda
'pyelb', # 0xdb
'pyels', # 0xdc
'pyelt', # 0xdd
'pyelp', # 0xde
'pyelh', # 0xdf
'pyem', # 0xe0
'pyeb', # 0xe1
'pyebs', # 0xe2
'pyes', # 0xe3
'pyess', # 0xe4
'pyeng', # 0xe5
'pyej', # 0xe6
'pyec', # 0xe7
'pyek', # 0xe8
'pyet', # 0xe9
'pyep', # 0xea
'pyeh', # 0xeb
'po', # 0xec
'pog', # 0xed
'pogg', # 0xee
'pogs', # 0xef
'pon', # 0xf0
'ponj', # 0xf1
'ponh', # 0xf2
'pod', # 0xf3
'pol', # 0xf4
'polg', # 0xf5
'polm', # 0xf6
'polb', # 0xf7
'pols', # 0xf8
'polt', # 0xf9
'polp', # 0xfa
'polh', # 0xfb
'pom', # 0xfc
'pob', # 0xfd
'pobs', # 0xfe
'pos', # 0xff
)
| gpl-3.0 |
dezelin/vbox-haiku | src/VBox/Additions/common/crOpenGL/defs64.py | 2 | 8822 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil
apiutil.CopyrightDef()
print "LIBRARY VBoxOGL"
print "DESCRIPTION \"\""
print "EXPORTS"
# XXX can't these values be automatically computed by analyzing parameters?
exports_special = [
'Accum',
'AlphaFunc',
'AreTexturesResident',
'ArrayElement',
'Begin',
'BindTexture',
'Bitmap',
'BlendFunc',
'CallList',
'CallLists',
'Clear',
'ClearAccum',
'ClearColor',
'ClearDepth',
'ClearIndex',
'ClearStencil',
'ClipPlane',
'Color3b',
'Color3bv',
'Color3d',
'Color3dv',
'Color3f',
'Color3fv',
'Color3i',
'Color3iv',
'Color3s',
'Color3sv',
'Color3ub',
'Color3ubv',
'Color3ui',
'Color3uiv',
'Color3us',
'Color3usv',
'Color4b',
'Color4bv',
'Color4d',
'Color4dv',
'Color4f',
'Color4fv',
'Color4i',
'Color4iv',
'Color4s',
'Color4sv',
'Color4ub',
'Color4ubv',
'Color4ui',
'Color4uiv',
'Color4us',
'Color4usv',
'ColorMask',
'ColorMaterial',
'ColorPointer',
'CopyPixels',
'CopyTexImage1D',
'CopyTexImage2D',
'CopyTexSubImage1D',
'CopyTexSubImage2D',
'CullFace',
'DebugEntry',
'DeleteLists',
'DeleteTextures',
'DepthFunc',
'DepthMask',
'DepthRange',
'Disable',
'DisableClientState',
'DrawArrays',
'DrawBuffer',
'DrawElements',
'DrawPixels',
'EdgeFlag',
'EdgeFlagPointer',
'EdgeFlagv',
'Enable',
'EnableClientState',
'End',
'EndList',
'EvalCoord1d',
'EvalCoord1dv',
'EvalCoord1f',
'EvalCoord1fv',
'EvalCoord2d',
'EvalCoord2dv',
'EvalCoord2f',
'EvalCoord2fv',
'EvalMesh1',
'EvalMesh2',
'EvalPoint1',
'EvalPoint2',
'FeedbackBuffer',
'Finish',
'Flush',
'Fogf',
'Fogfv',
'Fogi',
'Fogiv',
'FrontFace',
'Frustum',
'GenLists',
'GenTextures',
'GetBooleanv',
'GetClipPlane',
'GetDoublev',
'GetError',
'GetFloatv',
'GetIntegerv',
'GetLightfv',
'GetLightiv',
'GetMapdv',
'GetMapfv',
'GetMapiv',
'GetMaterialfv',
'GetMaterialiv',
'GetPixelMapfv',
'GetPixelMapuiv',
'GetPixelMapusv',
'GetPointerv',
'GetPolygonStipple',
'GetString',
'GetTexEnvfv',
'GetTexEnviv',
'GetTexGendv',
'GetTexGenfv',
'GetTexGeniv',
'GetTexImage',
'GetTexLevelParameterfv',
'GetTexLevelParameteriv',
'GetTexParameterfv',
'GetTexParameteriv',
'Hint',
'IndexMask',
'IndexPointer',
'Indexd',
'Indexdv',
'Indexf',
'Indexfv',
'Indexi',
'Indexiv',
'Indexs',
'Indexsv',
'Indexub',
'Indexubv',
'InitNames',
'InterleavedArrays',
'IsEnabled',
'IsList',
'IsTexture',
'LightModelf',
'LightModelfv',
'LightModeli',
'LightModeliv',
'Lightf',
'Lightfv',
'Lighti',
'Lightiv',
'LineStipple',
'LineWidth',
'ListBase',
'LoadIdentity',
'LoadMatrixd',
'LoadMatrixf',
'LoadName',
'LogicOp',
'Map1d',
'Map1f',
'Map2d',
'Map2f',
'MapGrid1d',
'MapGrid1f',
'MapGrid2d',
'MapGrid2f',
'Materialf',
'Materialfv',
'Materiali',
'Materialiv',
'MatrixMode',
'MultMatrixd',
'MultMatrixf',
'NewList',
'Normal3b',
'Normal3bv',
'Normal3d',
'Normal3dv',
'Normal3f',
'Normal3fv',
'Normal3i',
'Normal3iv',
'Normal3s',
'Normal3sv',
'NormalPointer',
'Ortho',
'PassThrough',
'PixelMapfv',
'PixelMapuiv',
'PixelMapusv',
'PixelStoref',
'PixelStorei',
'PixelTransferf',
'PixelTransferi',
'PixelZoom',
'PointSize',
'PolygonMode',
'PolygonOffset',
'PolygonStipple',
'PopAttrib',
'PopClientAttrib',
'PopMatrix',
'PopName',
'PrioritizeTextures',
'PushAttrib',
'PushClientAttrib',
'PushMatrix',
'PushName',
'RasterPos2d',
'RasterPos2dv',
'RasterPos2f',
'RasterPos2fv',
'RasterPos2i',
'RasterPos2iv',
'RasterPos2s',
'RasterPos2sv',
'RasterPos3d',
'RasterPos3dv',
'RasterPos3f',
'RasterPos3fv',
'RasterPos3i',
'RasterPos3iv',
'RasterPos3s',
'RasterPos3sv',
'RasterPos4d',
'RasterPos4dv',
'RasterPos4f',
'RasterPos4fv',
'RasterPos4i',
'RasterPos4iv',
'RasterPos4s',
'RasterPos4sv',
'ReadBuffer',
'ReadPixels',
'Rectd',
'Rectdv',
'Rectf',
'Rectfv',
'Recti',
'Rectiv',
'Rects',
'Rectsv',
'RenderMode',
'Rotated',
'Rotatef',
'Scaled',
'Scalef',
'Scissor',
'SelectBuffer',
'ShadeModel',
'StencilFunc',
'StencilMask',
'StencilOp',
'TexCoord1d',
'TexCoord1dv',
'TexCoord1f',
'TexCoord1fv',
'TexCoord1i',
'TexCoord1iv',
'TexCoord1s',
'TexCoord1sv',
'TexCoord2d',
'TexCoord2dv',
'TexCoord2f',
'TexCoord2fv',
'TexCoord2i',
'TexCoord2iv',
'TexCoord2s',
'TexCoord2sv',
'TexCoord3d',
'TexCoord3dv',
'TexCoord3f',
'TexCoord3fv',
'TexCoord3i',
'TexCoord3iv',
'TexCoord3s',
'TexCoord3sv',
'TexCoord4d',
'TexCoord4dv',
'TexCoord4f',
'TexCoord4fv',
'TexCoord4i',
'TexCoord4iv',
'TexCoord4s',
'TexCoord4sv',
'TexCoordPointer',
'TexEnvf',
'TexEnvfv',
'TexEnvi',
'TexEnviv',
'TexGend',
'TexGendv',
'TexGenf',
'TexGenfv',
'TexGeni',
'TexGeniv',
'TexImage1D',
'TexImage2D',
'TexImage3D',
'TexParameterf',
'TexParameterfv',
'TexParameteri',
'TexParameteriv',
'TexSubImage1D',
'TexSubImage2D',
'Translated',
'Translatef',
'Vertex2d',
'Vertex2dv',
'Vertex2f',
'Vertex2fv',
'Vertex2i',
'Vertex2iv',
'Vertex2s',
'Vertex2sv',
'Vertex3d',
'Vertex3dv',
'Vertex3f',
'Vertex3fv',
'Vertex3i',
'Vertex3iv',
'Vertex3s',
'Vertex3sv',
'Vertex4d',
'Vertex4dv',
'Vertex4f',
'Vertex4fv',
'Vertex4i',
'Vertex4iv',
'Vertex4s',
'Vertex4sv',
'VertexPointer',
'Viewport',
'wglChoosePixelFormat',
'wglCopyContext',
'wglCreateContext',
'wglCreateLayerContext',
'wglDeleteContext',
'wglDescribeLayerPlane',
'wglDescribePixelFormat',
'wglGetCurrentContext',
'wglGetCurrentDC',
'wglGetDefaultProcAddress',
'wglGetLayerPaletteEntries',
'wglGetPixelFormat',
'wglGetProcAddress',
'wglMakeCurrent',
'wglRealizeLayerPalette',
'wglSetLayerPaletteEntries',
'wglSetPixelFormat',
'wglShareLists',
'wglSwapBuffers',
'wglSwapLayerBuffers',
'wglSwapMultipleBuffers',
'wglUseFontBitmapsA',
'wglUseFontBitmapsW',
'wglUseFontOutlinesA',
'wglUseFontOutlinesW',
'wglChoosePixelFormatEXT',
'wglGetPixelFormatAttribivEXT',
'wglGetPixelFormatAttribfvEXT',
'wglGetExtensionsStringEXT',
'CopyContext',
'CreateContext',
'CreateLayerContext',
'DeleteContext',
'DescribeLayerPlane',
'DescribePixelFormat',
'GetLayerPaletteEntries',
'GetProcAddress',
'RealizeLayerPalette',
'ReleaseContext',
'SetContext',
'SetLayerPaletteEntries',
'SetPixelFormat',
'ShareLists',
'SwapBuffers',
'SwapLayerBuffers',
'ValidateVersion',
]
noexport_special = [
"BoundsInfoCR",
"CreateContext",
"DestroyContext",
"MakeCurrent",
"WindowCreate",
"WindowDestroy",
"WindowSize",
"WindowPosition",
"WindowVisibleRegion",
"WindowShow",
"SwapBuffers"
]
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in keys:
if func_name in noexport_special:
continue
if func_name in exports_special:
print "gl%s = cr_gl%s" % (func_name,func_name)
for func_name in ( "wglChoosePixelFormat",
"wglCopyContext",
"wglCreateContext",
"wglCreateLayerContext",
"wglDeleteContext",
"wglDescribeLayerPlane",
"wglDescribePixelFormat",
"wglGetCurrentContext",
"wglGetCurrentDC",
"wglGetLayerPaletteEntries",
"wglGetPixelFormat",
"wglGetProcAddress",
"wglMakeCurrent",
"wglRealizeLayerPalette",
"wglSetLayerPaletteEntries",
"wglSetPixelFormat",
"wglShareLists",
"wglSwapBuffers",
"wglSwapLayerBuffers",
"wglSwapMultipleBuffers",
"wglUseFontBitmapsA",
"wglUseFontBitmapsW",
"wglUseFontOutlinesA",
"wglUseFontOutlinesW",
"wglChoosePixelFormatEXT",
"wglGetPixelFormatAttribivEXT",
"wglGetPixelFormatAttribfvEXT",
"wglGetExtensionsStringEXT"):
print "%s = %s_prox" % (func_name,func_name)
print """DrvCopyContext
DrvCreateContext
DrvCreateLayerContext
DrvDeleteContext
DrvDescribeLayerPlane
DrvDescribePixelFormat
DrvGetLayerPaletteEntries
DrvGetProcAddress = wglGetProcAddress_prox
DrvRealizeLayerPalette
DrvSetLayerPaletteEntries
DrvShareLists
DrvSwapBuffers
DrvSwapLayerBuffers
DrvReleaseContext = DrvReleaseContext
DrvSetContext = DrvSetContext
DrvValidateVersion = DrvValidateVersion
DrvSetPixelFormat = DrvSetPixelFormat"""
print """crCreateContext
crMakeCurrent
crSwapBuffers
crGetProcAddress"""
#print "DllMain"
| gpl-2.0 |
jooddang/Boxpedia | web/dropbox-python-sdk-1.6/build/lib/dropbox/client.py | 6 | 53823 | from __future__ import absolute_import
import base64
import re
import os
import sys
import urllib
from StringIO import StringIO
try:
import json
except ImportError:
import simplejson as json
from .rest import ErrorResponse, RESTClient
from .session import BaseSession, DropboxSession, DropboxOAuth2Session
def format_path(path):
"""Normalize path for use with the Dropbox API.
This function turns multiple adjacent slashes into single
slashes, then ensures that there's a leading slash but
not a trailing slash.
"""
if not path:
return path
path = re.sub(r'/+', '/', path)
if path == '/':
return (u"" if isinstance(path, unicode) else "")
else:
return '/' + path.strip('/')
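# Illustration (not part of the original source): format_path collapses
# repeated slashes and yields a leading but no trailing slash, e.g.
#   format_path('a//b/') == '/a/b'
#   format_path('/') == ''   # the root is represented by the empty string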
class DropboxClient(object):
"""
The class that lets you make Dropbox API calls. You'll need to obtain an
OAuth 2 access token first. You can get an access token using either
:class:`DropboxOAuth2Flow` or :class:`DropboxOAuth2FlowNoRedirect`.
Args:
- ``oauth2_access_token``: An OAuth 2 access token (string).
- ``rest_client``: A :class:`dropbox.rest.RESTClient`-like object to use for making
requests. [optional]
All of the API call methods can raise a :class:`dropbox.rest.ErrorResponse` exception if
the server returns a non-200 or invalid HTTP response. Note that a 401
return status at any point indicates that the access token you're using
is no longer valid and the user must be put through the OAuth 2
authorization flow again.
"""
def __init__(self, oauth2_access_token, locale=None, rest_client=None):
if rest_client is None: rest_client = RESTClient
if isinstance(oauth2_access_token, basestring):
self.session = DropboxOAuth2Session(oauth2_access_token, locale)
elif isinstance(oauth2_access_token, DropboxSession):
# Backwards compatibility with OAuth 1
if locale is not None:
raise ValueError("The 'locale' parameter to DropboxClient is only useful "
"when also passing in an OAuth 2 access token")
self.session = oauth2_access_token
else:
raise ValueError("'oauth2_access_token' must either be a string or a DropboxSession")
self.rest_client = rest_client
def request(self, target, params=None, method='POST', content_server=False):
"""
An internal method that builds the url, headers, and params for a Dropbox API request.
It is exposed if you need to make API calls not implemented in this library or if you
need to debug requests.
Args:
- ``target``: The target URL with leading slash (e.g. '/files')
- ``params``: A dictionary of parameters to add to the request
- ``method``: An HTTP method (e.g. 'GET' or 'POST')
- ``content_server``: A boolean indicating whether the request is to the
API content server, for example to fetch the contents of a file
rather than its metadata.
Returns:
- A tuple of ``(url, params, headers)`` that should be used to make the request.
OAuth will be added as needed within these fields.
"""
assert method in ['GET','POST', 'PUT'], "Only 'GET', 'POST', and 'PUT' are allowed."
if params is None:
params = {}
host = self.session.API_CONTENT_HOST if content_server else self.session.API_HOST
base = self.session.build_url(host, target)
headers, params = self.session.build_access_headers(method, base, params)
if method in ('GET', 'PUT'):
url = self.session.build_url(host, target, params)
else:
url = self.session.build_url(host, target)
return url, params, headers
def account_info(self):
"""Retrieve information about the user's account.
Returns:
- A dictionary containing account information.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#account-info
"""
url, params, headers = self.request("/account/info", method='GET')
return self.rest_client.GET(url, headers)
def get_chunked_uploader(self, file_obj, length):
"""Creates a ChunkedUploader to upload the given file-like object.
Args:
- ``file_obj``: The file-like object which is the source of the data
being uploaded.
- ``length``: The number of bytes to upload.
The expected use of this function is as follows:
.. code-block:: python
bigFile = open("data.txt", 'rb')
uploader = myclient.get_chunked_uploader(bigFile, size)
print "uploading: ", size
while uploader.offset < size:
try:
upload = uploader.upload_chunked()
except rest.ErrorResponse, e:
# perform error handling and retry logic
uploader.finish('/bigFile.txt')
The SDK leaves the error handling and retry logic to the developer
to implement, as the exact requirements will depend on the application
involved.
"""
return DropboxClient.ChunkedUploader(self, file_obj, length)
class ChunkedUploader(object):
"""Contains the logic around a chunked upload, which uploads a
large file to Dropbox via the /chunked_upload endpoint
"""
def __init__(self, client, file_obj, length):
self.client = client
self.offset = 0
self.upload_id = None
self.last_block = None
self.file_obj = file_obj
self.target_length = length
def upload_chunked(self, chunk_size = 4 * 1024 * 1024):
"""Uploads data from this ChunkedUploader's file_obj in chunks, until
an error occurs. Throws an exception when an error occurs, and can
be called again to resume the upload.
Args:
- ``chunk_size``: The number of bytes to put in each chunk. [default 4 MB]
"""
while self.offset < self.target_length:
next_chunk_size = min(chunk_size, self.target_length - self.offset)
if self.last_block == None:
self.last_block = self.file_obj.read(next_chunk_size)
try:
(self.offset, self.upload_id) = self.client.upload_chunk(StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)
self.last_block = None
except ErrorResponse, e:
reply = e.body
if "offset" in reply and reply['offset'] != 0:
if reply['offset'] > self.offset:
self.last_block = None
self.offset = reply['offset']
def finish(self, path, overwrite=False, parent_rev=None):
"""Commits the bytes uploaded by this ChunkedUploader to a file
in the users dropbox.
Args:
- ``path``: The full path of the file in the Dropbox.
- ``overwrite``: Whether to overwrite an existing file at the given path. [default False]
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
- ``parent_rev``: The rev field from the 'parent' of this upload. [optional]
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most-recent parent_rev,
and it will never be overwritten if you send a less-recent one.
"""
path = "/commit_chunked_upload/%s%s" % (self.client.session.root, format_path(path))
params = dict(
overwrite = bool(overwrite),
upload_id = self.upload_id
)
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.client.request(path, params, content_server=True)
return self.client.rest_client.POST(url, params, headers)
def upload_chunk(self, file_obj, length, offset=0, upload_id=None):
"""Uploads a single chunk of data from the given file like object. The majority of users
should use the ChunkedUploader object, which provides a simpler interface to the
chunked_upload API endpoint.
Args:
        - ``file_obj``: The source of the data to upload.
        - ``length``: The number of bytes to upload in one chunk.
        - ``offset``: The byte offset within the overall upload at which this
          chunk begins. [default 0]
        - ``upload_id``: The upload identifier returned by the previous call to
          upload_chunk(), or ``None`` to start a new chunked upload. [optional]
Returns:
- The reply from the server, as a dictionary
"""
params = dict()
if upload_id:
params['upload_id'] = upload_id
params['offset'] = offset
url, ignored_params, headers = self.request("/chunked_upload", params, method='PUT', content_server=True)
try:
reply = self.rest_client.PUT(url, file_obj, headers)
return reply['offset'], reply['upload_id']
except ErrorResponse, e:
raise e
def put_file(self, full_path, file_obj, overwrite=False, parent_rev=None):
"""Upload a file.
A typical use case would be as follows:
.. code-block:: python
f = open('working-draft.txt', 'rb')
response = client.put_file('/magnum-opus.txt', f)
print "uploaded:", response
which would return the metadata of the uploaded file, similar to:
.. code-block:: python
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
Args:
- ``full_path``: The full path to upload the file to, *including the file name*.
If the destination directory does not yet exist, it will be created.
- ``file_obj``: A file-like object to upload. If you would like, you can pass a string as file_obj.
- ``overwrite``: Whether to overwrite an existing file at the given path. [default False]
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
- ``parent_rev``: The rev field from the 'parent' of this upload. [optional]
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most-recent parent_rev,
and it will never be overwritten if you send a less-recent one.
Returns:
- A dictionary containing the metadata of the newly uploaded file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#files-put
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 503: User over quota
        Note: In Python versions before 2.6, httplib doesn't handle file-like objects.
In that case, this code will read the entire file into memory (!).
"""
path = "/files_put/%s%s" % (self.session.root, format_path(full_path))
params = {
'overwrite': bool(overwrite),
}
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.request(path, params, method='PUT', content_server=True)
return self.rest_client.PUT(url, file_obj, headers)
def get_file(self, from_path, rev=None):
"""Download a file.
Unlike most other calls, get_file returns a raw HTTPResponse with the connection open.
You should call .read() and perform any processing you need, then close the HTTPResponse.
A typical usage looks like this:
.. code-block:: python
out = open('magnum-opus.txt', 'w')
f = client.get_file('/magnum-opus.txt').read()
out.write(f)
which would download the file ``magnum-opus.txt`` and write the contents into
the file ``magnum-opus.txt`` on the local filesystem.
Args:
- ``from_path``: The path to the file to be downloaded.
- ``rev``: A previous rev value of the file to be downloaded. [optional]
Returns:
- An httplib.HTTPResponse that is the result of the request.
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
path = "/files/%s%s" % (self.session.root, format_path(from_path))
params = {}
if rev is not None:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET', content_server=True)
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def get_file_and_metadata(self, from_path, rev=None):
"""Download a file alongwith its metadata.
Acts as a thin wrapper around get_file() (see :meth:`get_file()` comments for
more details)
A typical usage looks like this:
.. code-block:: python
out = open('magnum-opus.txt', 'w')
f, metadata = client.get_file_and_metadata('/magnum-opus.txt')
out.write(f)
Args:
- ``from_path``: The path to the file to be downloaded.
- ``rev``: A previous rev value of the file to be downloaded. [optional]
Returns:
- An httplib.HTTPResponse that is the result of the request.
- A dictionary containing the metadata of the file (see
https://www.dropbox.com/developers/reference/api#metadata for details).
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
file_res = self.get_file(from_path, rev)
metadata = DropboxClient.__parse_metadata_as_dict(file_res)
return file_res, metadata
@staticmethod
def __parse_metadata_as_dict(dropbox_raw_response):
"""Parses file metadata from a raw dropbox HTTP response, raising a
:class:`dropbox.rest.ErrorResponse` if parsing fails.
"""
metadata = None
for header, header_val in dropbox_raw_response.getheaders():
if header.lower() == 'x-dropbox-metadata':
try:
metadata = json.loads(header_val)
except ValueError:
raise ErrorResponse(dropbox_raw_response)
if not metadata: raise ErrorResponse(dropbox_raw_response)
return metadata
def delta(self, cursor=None):
"""A way of letting you keep up with changes to files and folders in a
user's Dropbox. You can periodically call delta() to get a list of "delta
entries", which are instructions on how to update your local state to
match the server's state.
Args:
- ``cursor``: On the first call, omit this argument (or pass in ``None``). On
subsequent calls, pass in the ``cursor`` string returned by the previous
call.
Returns: A dict with three fields.
- ``entries``: A list of "delta entries" (described below)
        - ``reset``: If ``True``, you should reset your local state to an empty
          folder before processing the list of delta entries. This is ``True``
          only in rare situations.
- ``cursor``: A string that is used to keep track of your current state.
On the next call to delta(), pass in this value to return entries
that were recorded since the cursor was returned.
- ``has_more``: If ``True``, then there are more entries available; you can
call delta() again immediately to retrieve those entries. If ``False``,
then wait at least 5 minutes (preferably longer) before checking again.
Delta Entries: Each entry is a 2-item list of one of following forms:
- [*path*, *metadata*]: Indicates that there is a file/folder at the given
          path. You should add the entry to your local state. (The *metadata*
value is the same as what would be returned by the ``metadata()`` call.)
- If the new entry includes parent folders that don't yet exist in your
local state, create those parent folders in your local state. You
will eventually get entries for those parent folders.
- If the new entry is a file, replace whatever your local state has at
*path* with the new entry.
- If the new entry is a folder, check what your local state has at
*path*. If it's a file, replace it with the new entry. If it's a
folder, apply the new *metadata* to the folder, but do not modify
the folder's children.
- [*path*, ``nil``]: Indicates that there is no file/folder at the *path* on
Dropbox. To update your local state to match, delete whatever is at *path*,
including any children (you will sometimes also get "delete" delta entries
for the children, but this is not guaranteed). If your local state doesn't
have anything at *path*, ignore this entry.
Remember: Dropbox treats file names in a case-insensitive but case-preserving
way. To facilitate this, the *path* strings above are lower-cased versions of
the actual path. The *metadata* dicts have the original, case-preserved path.
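        A typical polling loop looks roughly like this (a sketch only; it
        assumes ``client`` is a DropboxClient, and persisting the cursor and
        applying the entries are left to the application):

        .. code-block:: python

            cursor = None
            while True:
                result = client.delta(cursor)
                if result['reset']:
                    pass  # wipe local state before applying entries
                for lc_path, metadata in result['entries']:
                    if metadata is not None:
                        pass  # create/update local state at lc_path
                    else:
                        pass  # delete local state at lc_path
                cursor = result['cursor']  # persist this for the next call
                if not result['has_more']:
                    break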
"""
path = "/delta"
params = {}
if cursor is not None:
params['cursor'] = cursor
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def create_copy_ref(self, from_path):
"""Creates and returns a copy ref for a specific file. The copy ref can be
used to instantly copy that file to the Dropbox of another account.
Args:
        - ``from_path``: The path to the file for a copy ref to be created on.
Returns:
- A dictionary that looks like the following example:
``{"expires":"Fri, 31 Jan 2042 21:01:05 +0000", "copy_ref":"z1X6ATl6aWtzOGq0c3g5Ng"}``
"""
path = "/copy_ref/%s%s" % (self.session.root, format_path(from_path))
url, params, headers = self.request(path, {}, method='GET')
return self.rest_client.GET(url, headers)
def add_copy_ref(self, copy_ref, to_path):
"""Adds the file referenced by the copy ref to the specified path
Args:
- ``copy_ref``: A copy ref string that was returned from a create_copy_ref call.
The copy_ref can be created from any other Dropbox account, or from the same account.
        - ``to_path``: The path to where the file will be created.
Returns:
- A dictionary containing the metadata of the new copy of the file.
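        A sketch of copying a file between two accounts, assuming ``client_a``
        and ``client_b`` are DropboxClients for the source and destination
        accounts (the paths are made up for illustration):

        .. code-block:: python

            ref = client_a.create_copy_ref('/photos/cat.png')['copy_ref']
            metadata = client_b.add_copy_ref(ref, '/from-a/cat.png')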
"""
path = "/fileops/copy"
params = {'from_copy_ref': copy_ref,
'to_path': format_path(to_path),
'root': self.session.root}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def file_copy(self, from_path, to_path):
"""Copy a file or folder to a new location.
Args:
- ``from_path``: The path to the file or folder to be copied.
- ``to_path``: The destination path of the file or folder to be copied.
This parameter should include the destination filename (e.g.
from_path: '/test.txt', to_path: '/dir/test.txt'). If there's
already a file at the to_path it will raise an ErrorResponse.
Returns:
- A dictionary containing the metadata of the new copy of the file or folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-copy
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details)
        - 403: An invalid copy operation was attempted (e.g. there is already a file at the given destination, or copying a shared folder into a shared folder).
- 404: No file was found at given from_path.
- 503: User over storage quota.
"""
params = {'root': self.session.root,
'from_path': format_path(from_path),
'to_path': format_path(to_path),
}
url, params, headers = self.request("/fileops/copy", params)
return self.rest_client.POST(url, params, headers)
def file_create_folder(self, path):
"""Create a folder.
Args:
- ``path``: The path of the new folder.
Returns:
- A dictionary containing the metadata of the newly created folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-create-folder
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 403: A folder at that path already exists.
"""
params = {'root': self.session.root, 'path': format_path(path)}
url, params, headers = self.request("/fileops/create_folder", params)
return self.rest_client.POST(url, params, headers)
def file_delete(self, path):
"""Delete a file or folder.
Args:
- ``path``: The path of the file or folder.
Returns:
- A dictionary containing the metadata of the just deleted file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-delete
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path.
"""
params = {'root': self.session.root, 'path': format_path(path)}
url, params, headers = self.request("/fileops/delete", params)
return self.rest_client.POST(url, params, headers)
def file_move(self, from_path, to_path):
"""Move a file or folder to a new location.
Args:
- ``from_path``: The path to the file or folder to be moved.
- ``to_path``: The destination path of the file or folder to be moved.
This parameter should include the destination filename (e.g. if
``from_path`` is ``'/test.txt'``, ``to_path`` might be
``'/dir/test.txt'``). If there's already a file at the
``to_path``, this file or folder will be renamed to be unique.
Returns:
- A dictionary containing the metadata of the new copy of the file or folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-move
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at given from_path.
- 503: User over storage quota.
"""
params = {'root': self.session.root, 'from_path': format_path(from_path), 'to_path': format_path(to_path)}
url, params, headers = self.request("/fileops/move", params)
return self.rest_client.POST(url, params, headers)
def metadata(self, path, list=True, file_limit=25000, hash=None, rev=None, include_deleted=False):
"""Retrieve metadata for a file or folder.
A typical use would be:
.. code-block:: python
folder_metadata = client.metadata('/')
print "metadata:", folder_metadata
which would return the metadata of the root directory. This
will look something like:
.. code-block:: python
{
'bytes': 0,
'contents': [
{
'bytes': 0,
'icon': 'folder',
'is_dir': True,
'modified': 'Thu, 25 Aug 2011 00:03:15 +0000',
'path': '/Sample Folder',
'rev': '803beb471',
'revision': 8,
'root': 'dropbox',
'size': '0 bytes',
'thumb_exists': False
},
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
],
'hash': 'efdac89c4da886a9cece1927e6c22977',
'icon': 'folder',
'is_dir': True,
'path': '/',
'root': 'app_folder',
'size': '0 bytes',
'thumb_exists': False
}
In this example, the root directory contains two things: ``Sample Folder``,
which is a folder, and ``/magnum-opus.txt``, which is a text file 77 bytes long
Args:
- ``path``: The path to the file or folder.
- ``list``: Whether to list all contained files (only applies when
path refers to a folder).
- ``file_limit``: The maximum number of file entries to return within
a folder. If the number of files in the directory exceeds this
limit, an exception is raised. The server will return at max
25,000 files within a folder.
- ``hash``: Every directory listing has a hash parameter attached that
can then be passed back into this function later to save on
bandwidth. Rather than returning an unchanged folder's contents,
the server will instead return a 304.
- ``rev``: The revision of the file to retrieve the metadata for. [optional]
This parameter only applies for files. If omitted, you'll receive
the most recent revision metadata.
Returns:
- A dictionary containing the metadata of the file or folder
(and contained files if appropriate).
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#metadata
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 304: Current directory hash matches hash parameters, so contents are unchanged.
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at given path.
- 406: Too many file entries to return.
"""
path = "/metadata/%s%s" % (self.session.root, format_path(path))
params = {'file_limit': file_limit,
'list': 'true',
'include_deleted': include_deleted,
}
if not list:
params['list'] = 'false'
if hash is not None:
params['hash'] = hash
if rev:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
def thumbnail(self, from_path, size='large', format='JPEG'):
"""Download a thumbnail for an image.
Unlike most other calls, thumbnail returns a raw HTTPResponse with the connection open.
You should call .read() and perform any processing you need, then close the HTTPResponse.
Args:
- ``from_path``: The path to the file to be thumbnailed.
- ``size``: A string describing the desired thumbnail size.
At this time, 'small', 'medium', and 'large' are
officially supported sizes (32x32, 64x64, and 128x128
respectively), though others may be available. Check
https://www.dropbox.com/developers/reference/api#thumbnails for
more details.
Returns:
- An httplib.HTTPResponse that is the result of the request.
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given from_path, or files of that type cannot be thumbnailed.
- 415: Image is invalid and cannot be thumbnailed.
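        Example (a sketch; assumes ``client`` is a DropboxClient and that
        '/photo.jpg' exists and is an image):

        .. code-block:: python

            out = open('thumb.jpg', 'wb')
            out.write(client.thumbnail('/photo.jpg', size='small').read())
            out.close()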
"""
assert format in ['JPEG', 'PNG'], "expected a thumbnail format of 'JPEG' or 'PNG', got %s" % format
path = "/thumbnails/%s%s" % (self.session.root, format_path(from_path))
url, params, headers = self.request(path, {'size': size, 'format': format}, method='GET', content_server=True)
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def thumbnail_and_metadata(self, from_path, size='large', format='JPEG'):
"""Download a thumbnail for an image alongwith its metadata.
Acts as a thin wrapper around thumbnail() (see :meth:`thumbnail()` comments for
more details)
Args:
- ``from_path``: The path to the file to be thumbnailed.
- ``size``: A string describing the desired thumbnail size. See :meth:`thumbnail()`
for details.
Returns:
- An httplib.HTTPResponse that is the result of the request.
- A dictionary containing the metadata of the file whose thumbnail
was downloaded (see https://www.dropbox.com/developers/reference/api#metadata
for details).
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given from_path, or files of that type cannot be thumbnailed.
- 415: Image is invalid and cannot be thumbnailed.
- 200: Request was okay but response was malformed in some way.
"""
thumbnail_res = self.thumbnail(from_path, size, format)
metadata = DropboxClient.__parse_metadata_as_dict(thumbnail_res)
return thumbnail_res, metadata
def search(self, path, query, file_limit=1000, include_deleted=False):
"""Search directory for filenames matching query.
Args:
- ``path``: The directory to search within.
- ``query``: The query to search on (minimum 3 characters).
- ``file_limit``: The maximum number of file entries to return within a folder.
The server will return at max 1,000 files.
- ``include_deleted``: Whether to include deleted files in search results.
Returns:
- A list of the metadata of all matching files (up to
file_limit entries). For a detailed description of what
this call returns, visit:
https://www.dropbox.com/developers/reference/api#search
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
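        Example (a sketch; assumes ``client`` is a DropboxClient):

        .. code-block:: python

            for entry in client.search('/', 'opus'):
                print entry['path']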
"""
path = "/search/%s%s" % (self.session.root, format_path(path))
params = {
'query': query,
'file_limit': file_limit,
'include_deleted': include_deleted,
}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def revisions(self, path, rev_limit=1000):
"""Retrieve revisions of a file.
Args:
- ``path``: The file to fetch revisions for. Note that revisions
are not available for folders.
        - ``rev_limit``: The maximum number of revisions to return for the
          file. The server will return at max 1,000 revisions.
        Returns:
        - A list of the metadata of the file's revisions (up to rev_limit entries).
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#revisions
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No revisions were found at the given path.
"""
path = "/revisions/%s%s" % (self.session.root, format_path(path))
params = {
'rev_limit': rev_limit,
}
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
def restore(self, path, rev):
"""Restore a file to a previous revision.
Args:
- ``path``: The file to restore. Note that folders can't be restored.
- ``rev``: A previous rev value of the file to be restored to.
Returns:
- A dictionary containing the metadata of the newly restored file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#restore
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given revision.
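        Rolling a file back by one revision might look like this (a sketch;
        assumes ``client`` is a DropboxClient and that revisions() lists the
        newest revision first):

        .. code-block:: python

            revs = client.revisions('/magnum-opus.txt')
            if len(revs) > 1:
                client.restore('/magnum-opus.txt', revs[1]['rev'])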
"""
path = "/restore/%s%s" % (self.session.root, format_path(path))
params = {
'rev': rev,
}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def media(self, path):
"""Get a temporary unauthenticated URL for a media file.
All of Dropbox's API methods require OAuth, which may cause problems in
situations where an application expects to be able to hit a URL multiple times
(for example, a media player seeking around a video file). This method
creates a time-limited URL that can be accessed without any authentication,
and returns that to you, along with an expiration time.
Args:
- ``path``: The file to return a URL for. Folders are not supported.
Returns:
- A dictionary that looks like the following example:
``{'url': 'https://dl.dropbox.com/0/view/wvxv1fw6on24qw7/file.mov', 'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}``
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#media
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given path.
"""
path = "/media/%s%s" % (self.session.root, format_path(path))
url, params, headers = self.request(path, method='GET')
return self.rest_client.GET(url, headers)
def share(self, path, short_url=True):
"""Create a shareable link to a file or folder.
Shareable links created on Dropbox are time-limited, but don't require any
authentication, so they can be given out freely. The time limit should allow
at least a day of shareability, though users have the ability to disable
a link from their account if they like.
Args:
- ``path``: The file or folder to share.
Returns:
- A dictionary that looks like the following example:
``{'url': 'http://www.dropbox.com/s/m/a2mbDa2', 'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}``
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#shares
Raises:
- A :class:`dropbox.rest.ErrorResponse` with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given path.
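        Example (a sketch; assumes ``client`` is a DropboxClient):

        .. code-block:: python

            link = client.share('/magnum-opus.txt')
            print link['url'], "expires", link['expires']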
"""
path = "/shares/%s%s" % (self.session.root, format_path(path))
params = {
'short_url': short_url,
}
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
class DropboxOAuth2FlowBase(object):
def __init__(self, consumer_key, consumer_secret, locale=None, rest_client=RESTClient):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.locale = locale
self.rest_client = rest_client
def _get_authorize_url(self, redirect_uri, state):
params = dict(response_type='code',
client_id=self.consumer_key)
if redirect_uri is not None:
params['redirect_uri'] = redirect_uri
if state is not None:
params['state'] = state
return self.build_url(BaseSession.WEB_HOST, '/oauth2/authorize', params)
def _finish(self, code, redirect_uri):
url = self.build_url(BaseSession.API_HOST, '/oauth2/token')
params = {'grant_type': 'authorization_code',
'code': code,
'client_id': self.consumer_key,
'client_secret': self.consumer_secret,
}
if self.locale is not None:
params['locale'] = self.locale
if redirect_uri is not None:
params['redirect_uri'] = redirect_uri
response = self.rest_client.POST(url, params=params)
access_token = response["access_token"]
user_id = response["uid"]
return access_token, user_id
def build_path(self, target, params=None):
"""Build the path component for an API URL.
This method urlencodes the parameters, adds them
to the end of the target url, and puts a marker for the API
version in front.
Args:
- ``target``: A target url (e.g. '/files') to build upon.
- ``params``: A dictionary of parameters (name to value). [optional]
Returns:
- The path and parameters components of an API URL.
"""
if sys.version_info < (3,) and type(target) == unicode:
target = target.encode("utf8")
target_path = urllib.quote(target)
params = params or {}
params = params.copy()
if self.locale:
params['locale'] = self.locale
if params:
return "/%d%s?%s" % (BaseSession.API_VERSION, target_path, urllib.urlencode(params))
else:
return "/%d%s" % (BaseSession.API_VERSION, target_path)
def build_url(self, host, target, params=None):
"""Build an API URL.
This method adds scheme and hostname to the path
returned from build_path.
Args:
- ``target``: A target url (e.g. '/files') to build upon.
- ``params``: A dictionary of parameters (name to value). [optional]
Returns:
- The full API URL.
"""
return "https://%s%s" % (host, self.build_path(target, params))
class DropboxOAuth2FlowNoRedirect(DropboxOAuth2FlowBase):
"""
OAuth 2 authorization helper for apps that can't provide a redirect URI
(such as the command-line example apps).
Args:
- ``consumer_key``: Your API app's "app key"
- ``consumer_secret``: Your API app's "app secret"
.. code-block:: python
from dropbox.client import DropboxOAuth2FlowNoRedirect, DropboxClient
from dropbox import rest as dbrest
oauth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, APP_SECRET)
authorize_url = oauth_flow.start()
print "1. Go to: " + authorize_url
print "2. Click \\"Allow\\" (you might have to log in first)."
print "3. Copy the authorization code."
auth_code = raw_input("Enter the authorization code here: ").strip()
try:
            access_token, user_id = oauth_flow.finish(auth_code)
except dbrest.ErrorResponse, e:
print('Error: %s' % (e,))
return
c = DropboxClient(access_token)
"""
def __init__(self, consumer_key, consumer_secret, locale=None, rest_client=None):
if rest_client is None: rest_client = RESTClient
super(DropboxOAuth2FlowNoRedirect, self).__init__(consumer_key, consumer_secret, locale, rest_client)
def start(self):
"""
Returns the URL for a page on Dropbox's website. This page will let the user "approve"
your app, which gives your app permission to access the user's Dropbox account.
Tell the user to visit this URL and approve your app.
"""
return self._get_authorize_url(None, None)
def finish(self, code):
"""
If the user approves your app, they will be presented with an "authorization code". Have
the user copy/paste that authorization code into your app and then call this method to
get an access token.
Args:
- ``code``: The authorization code shown to the user when they approved your app.
Returns a pair of ``(access_token, user_id)``. ``access_token`` is a string that
can be passed to DropboxClient. ``user_id`` is the Dropbox user ID (string) of the user that
just approved your app.
"""
return self._finish(code, None)
class DropboxOAuth2Flow(DropboxOAuth2FlowBase):
"""
OAuth 2 authorization helper. Use this for web apps.
OAuth 2 has a two-step authorization process. The first step is having the user authorize
your app. The second involves getting an OAuth 2 access token from Dropbox.
Args:
- ``consumer_key``: Your API app's "app key".
- ``consumer_secret``: Your API app's "app secret".
- ``redirect_uri``: The URI that the Dropbox server will redirect the user to after the user
finishes authorizing your app. This URI must be HTTPS-based and pre-registered with
the Dropbox servers, though localhost URIs are allowed without pre-registration and can
be either HTTP or HTTPS.
- ``session``: A dict-like object that represents the current user's web session (will be
used to save the CSRF token).
- ``csrf_token_key``: The key to use when storing the CSRF token in the session (for
example: "dropbox-auth-csrf-token").
.. code-block:: python
from dropbox.client import DropboxOAuth2Flow, DropboxClient
def get_dropbox_auth_flow(web_app_session):
redirect_uri = "https://my-web-server.org/dropbox-auth-finish")
return DropboxOAuth2Flow(APP_KEY, APP_SECRET, redirect_uri,
web_app_session, "dropbox-auth-csrf-token")
# URL handler for /dropbox-auth-start
def dropbox_auth_start(web_app_session, request):
authorize_url = get_dropbox_auth_flow(web_app_session).start()
redirect_to(authorize_url)
# URL handler for /dropbox-auth-finish
def dropbox_auth_finish(web_app_session, request):
try:
access_token, user_id, url_state = \\
get_dropbox_auth_flow(web_app_session).finish(request.query_params)
except DropboxOAuth2Flow.BadRequestException, e:
http_status(400)
except DropboxOAuth2Flow.BadStateException, e:
# Start the auth flow again.
redirect_to("/dropbox-auth-start")
except DropboxOAuth2Flow.CsrfException, e:
http_status(403)
except DropboxOAuth2Flow.NotApprovedException, e:
flash('Not approved? Why not, bro?')
return redirect_to("/home")
except DropboxOAuth2Flow.ProviderException, e:
logger.log("Auth error: %s" % (e,))
http_status(403)
"""
def __init__(self, consumer_key, consumer_secret, redirect_uri, session, csrf_token_session_key,
locale=None, rest_client=None):
if rest_client is None: rest_client = RESTClient
super(DropboxOAuth2Flow, self).__init__(consumer_key, consumer_secret, locale, rest_client)
self.redirect_uri = redirect_uri
self.session = session
self.csrf_token_session_key = csrf_token_session_key
def start(self, url_state=None):
"""
Starts the OAuth 2 authorization process.
This function builds an "authorization URL". You should redirect your user's browser to
this URL, which will give them an opportunity to grant your app access to their Dropbox
account. When the user completes this process, they will be automatically redirected to
the ``redirect_uri`` you passed in to the constructor.
This function will also save a CSRF token to ``session[csrf_token_session_key]`` (as
provided to the constructor). This CSRF token will be checked on :meth:`finish()` to
prevent request forgery.
Args:
- ``url_state``: Any data that you would like to keep in the URL through the
authorization process. This exact value will be returned to you by :meth:`finish()`.
        Returns:
        - The URL of the authorization page on Dropbox's website. Redirect
          the user's browser to this URL to start the authorization process.
"""
csrf_token = base64.urlsafe_b64encode(os.urandom(16))
state = csrf_token
if url_state is not None:
state += "|" + url_state
self.session[self.csrf_token_session_key] = csrf_token
return self._get_authorize_url(self.redirect_uri, state)
def finish(self, query_params):
"""
Call this after the user has visited the authorize URL (see :meth:`start()`), approved your
app and was redirected to your redirect URI.
        Args:
        - ``query_params``: The query parameters on the GET request to your redirect URI.
Returns a tuple of ``(access_token, user_id, url_state)``. ``access_token`` can be used to
construct a :class:`DropboxClient`. ``user_id`` is the Dropbox user ID (string) of the user
that just approved your app. ``url_state`` is the value you originally passed in to
:meth:`start()`.
Can throw
:class:`BadRequestException`,
:class:`BadStateException`,
:class:`CsrfException`,
:class:`NotApprovedException`,
:class:`ProviderException`.
"""
csrf_token_from_session = self.session[self.csrf_token_session_key]
# Check well-formedness of request.
state = query_params.get('state')
if state is None:
raise self.BadRequestException("Missing query parameter 'state'.")
error = query_params.get('error')
error_description = query_params.get('error_description')
code = query_params.get('code')
if error is not None and code is not None:
raise self.BadRequestException("Query parameters 'code' and 'error' are both set; "
" only one must be set.")
if error is None and code is None:
raise self.BadRequestException("Neither query parameter 'code' or 'error' is set.")
# Check CSRF token
if csrf_token_from_session is None:
            raise self.BadStateException("Missing CSRF token in session.")
if len(csrf_token_from_session) <= 20:
raise AssertionError("CSRF token unexpectedly short: %r" % (csrf_token_from_session,))
split_pos = state.find('|')
if split_pos < 0:
given_csrf_token = state
url_state = None
else:
given_csrf_token = state[0:split_pos]
url_state = state[split_pos+1:]
if not _safe_equals(csrf_token_from_session, given_csrf_token):
raise self.CsrfException("expected %r, got %r" % (csrf_token_from_session, given_csrf_token))
del self.session[self.csrf_token_session_key]
# Check for error identifier
if error is not None:
if error == 'access_denied':
# The user clicked "Deny"
if error_description is None:
raise self.NotApprovedException("No additional description from Dropbox")
else:
raise self.NotApprovedException("Additional description from Dropbox: " + error_description)
else:
# All other errors
full_message = error
if error_description is not None:
full_message += ": " + error_description
                raise self.ProviderException(full_message)
# If everything went ok, make the network call to get an access token.
access_token, user_id = self._finish(code, self.redirect_uri)
return access_token, user_id, url_state
class BadRequestException(Exception):
"""
Thrown if the redirect URL was missing parameters or if the given parameters were not valid.
The recommended action is to show an HTTP 400 error page.
"""
pass
class BadStateException(Exception):
"""
Thrown if all the parameters are correct, but there's no CSRF token in the session. This
probably means that the session expired.
The recommended action is to redirect the user's browser to try the approval process again.
"""
pass
class CsrfException(Exception):
"""
Thrown if the given 'state' parameter doesn't contain the CSRF token from the user's session.
This is blocked to prevent CSRF attacks.
The recommended action is to respond with an HTTP 403 error page.
"""
pass
class NotApprovedException(Exception):
"""
The user chose not to approve your app.
"""
pass
class ProviderException(Exception):
"""
Dropbox redirected to your redirect URI with some unexpected error identifier and error
message.
The recommended action is to log the error, tell the user something went wrong, and let
them try again.
"""
pass
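# Constant-time comparison for equal-length strings: every character pair is
# XOR-ed regardless of where the first mismatch occurs, so response timing
# does not reveal how much of the CSRF token an attacker guessed correctly.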
def _safe_equals(a, b):
if len(a) != len(b): return False
res = 0
for ca, cb in zip(a, b):
res |= ord(ca) ^ ord(cb)
return res == 0
| mit |
hale36/SRTV | lib/html5lib/inputstream.py | 618 | 30855 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if isinstance(source, http_client.HTTPResponse):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
if encoding is not None:
raise TypeError("Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
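# Illustrative usage (a sketch, not part of the public API): byte input goes
# through encoding detection, unicode input is used as-is.
#
#   stream = HTMLInputStream(b"<!doctype html><p>hi")
#   c = stream.char()               # one character at a time; EOF at the end
#   run = stream.charsUntil(">")    # characters up to (not including) '>'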
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Craziness
if len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), "certain")
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 512
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
# Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
# Call superclass
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except:
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
# First look for a BOM
# This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
# If there is no BOM need to look for meta elements with encoding
# information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
        # Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
try:
from charade.universaldetector import UniversalDetector
except ImportError:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence = "tentative"
encoding = self.defaultEncoding
# Substitute for equivalent encodings:
encodingSub = {"iso-8859-1": "windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
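# Usage sketch for EncodingBytes (illustrative, not part of the original
# module). The value is lower-cased on construction and consumed one byte
# at a time through the cursor:
#
#     data = EncodingBytes(b"<META Charset=UTF-8>")
#     next(data)              # b"<" -- the cursor starts at -1
#     data.jumpTo(b"charset") # True; cursor lands on the last matched byte
#     data.currentByte        # b"t"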
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
# if we have <meta not followed by a space, just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
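# Note (illustrative, not part of the original module): getAttribute
# implements the "get an attribute" algorithm that the Step comments above
# refer to. Given remaining input b' charset="utf-8">' it returns
# (b"charset", b"utf-8"), and a following call returns None once the b">"
# is reached.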
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
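# Illustrative example (not part of the original module): codecName strips
# ASCII punctuation and lower-cases the label before the lookup, so
# b"UTF-8", "utf_8" and "Utf 8" all resolve through the same key in the
# module-level `encodings` table (defined earlier in this file, not shown
# here).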
| gpl-3.0 |
Ban3/Limnoria | src/drivers/Twisted.py | 4 | 6196 | ###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from .. import conf, drivers
from twisted.names import client
from twisted.internet import reactor, error
from twisted.protocols.basic import LineReceiver
from twisted.internet.protocol import ReconnectingClientFactory
# This hack prevents the standard Twisted resolver from starting any
# threads, which allows for a clean shut-down in Twisted>=2.0
reactor.installResolver(client.createResolver())
try:
from OpenSSL import SSL
from twisted.internet import ssl
except ImportError:
drivers.log.debug('PyOpenSSL is not available, '
'cannot connect to SSL servers.')
SSL = None
class TwistedRunnerDriver(drivers.IrcDriver):
def name(self):
return self.__class__.__name__
def run(self):
try:
reactor.iterate(conf.supybot.drivers.poll())
except:
drivers.log.exception('Uncaught exception outside reactor:')
class SupyIrcProtocol(LineReceiver):
delimiter = '\n'
MAX_LENGTH = 1024
def __init__(self):
self.mostRecentCall = reactor.callLater(0.1, self.checkIrcForMsgs)
def lineReceived(self, line):
msg = drivers.parseMsg(line)
if msg is not None:
self.irc.feedMsg(msg)
def checkIrcForMsgs(self):
if self.connected:
msg = self.irc.takeMsg()
while msg:
self.transport.write(str(msg))
msg = self.irc.takeMsg()
self.mostRecentCall = reactor.callLater(0.1, self.checkIrcForMsgs)
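# Note: checkIrcForMsgs re-schedules itself through reactor.callLater, so
# the outgoing queue of the Irc object is drained roughly every 0.1 seconds
# for as long as the protocol is connected; connectionLost cancels the
# pending call.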
def connectionLost(self, r):
self.mostRecentCall.cancel()
if r.check(error.ConnectionDone):
drivers.log.disconnect(self.factory.currentServer)
else:
drivers.log.disconnect(self.factory.currentServer, errorMsg(r))
if self.irc.zombie:
self.factory.stopTrying()
while self.irc.takeMsg():
continue
else:
self.irc.reset()
def connectionMade(self):
self.factory.resetDelay()
self.irc.driver = self
def die(self):
drivers.log.die(self.irc)
self.factory.stopTrying()
self.transport.loseConnection()
def reconnect(self, wait=None):
# We ignore wait here, because we handle our own waiting.
drivers.log.reconnect(self.irc.network)
self.transport.loseConnection()
def errorMsg(reason):
return reason.getErrorMessage()
class SupyReconnectingFactory(ReconnectingClientFactory, drivers.ServersMixin):
maxDelay = property(lambda self: conf.supybot.drivers.maxReconnectWait())
protocol = SupyIrcProtocol
def __init__(self, irc):
drivers.log.warning('Twisted driver is deprecated. You should '
'consider switching to Socket (set '
'supybot.drivers.module to Socket).')
self.irc = irc
drivers.ServersMixin.__init__(self, irc)
(server, port) = self._getNextServer()
vhost = conf.supybot.protocols.irc.vhost()
if self.networkGroup.get('ssl').value:
self.connectSSL(server, port, vhost)
else:
self.connectTCP(server, port, vhost)
def connectTCP(self, server, port, vhost):
"""Connect to the server with a standard TCP connection."""
reactor.connectTCP(server, port, self, bindAddress=(vhost, 0))
def connectSSL(self, server, port, vhost):
"""Connect to the server using an SSL socket."""
drivers.log.info('Attempting an SSL connection.')
if SSL:
reactor.connectSSL(server, port, self,
ssl.ClientContextFactory(), bindAddress=(vhost, 0))
else:
drivers.log.error('PyOpenSSL is not available. Not connecting.')
def clientConnectionFailed(self, connector, r):
drivers.log.connectError(self.currentServer, errorMsg(r))
(connector.host, connector.port) = self._getNextServer()
ReconnectingClientFactory.clientConnectionFailed(self, connector, r)
def clientConnectionLost(self, connector, r):
(connector.host, connector.port) = self._getNextServer()
ReconnectingClientFactory.clientConnectionLost(self, connector, r)
def startedConnecting(self, connector):
drivers.log.connect(self.currentServer)
def buildProtocol(self, addr):
protocol = ReconnectingClientFactory.buildProtocol(self, addr)
protocol.irc = self.irc
return protocol
Driver = SupyReconnectingFactory
poller = TwistedRunnerDriver()
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
aalba6675/pyopenssl | examples/certgen.py | 3 | 2736 | # -*- coding: latin-1 -*-
#
# Copyright (C) AB Strakt
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Certificate generation module.
"""
from OpenSSL import crypto
TYPE_RSA = crypto.TYPE_RSA
TYPE_DSA = crypto.TYPE_DSA
def createKeyPair(type, bits):
"""
Create a public/private key pair.
Arguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA
bits - Number of bits to use in the key
Returns: The public/private key pair in a PKey object
"""
pkey = crypto.PKey()
pkey.generate_key(type, bits)
return pkey
def createCertRequest(pkey, digest="sha256", **name):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
subj = req.get_subject()
for key, value in name.items():
setattr(subj, key, value)
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req
def createCertificate(req, issuerCertKey, serial, validityPeriod, digest="sha256"):
"""
Generate a certificate given a certificate request.
Arguments: req - Certificate request to use
issuerCertKey - Tuple (issuerCert, issuerKey) with the certificate
and the private key of the issuer
serial - Serial number for the certificate
validityPeriod - Tuple (notBefore, notAfter) of timestamps
(relative to now, in seconds) when the certificate
starts and stops being valid
digest - Digest method to use for signing, default is sha256
Returns: The signed certificate in an X509 object
"""
issuerCert, issuerKey = issuerCertKey
notBefore, notAfter = validityPeriod
cert = crypto.X509()
cert.set_serial_number(serial)
cert.gmtime_adj_notBefore(notBefore)
cert.gmtime_adj_notAfter(notAfter)
cert.set_issuer(issuerCert.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(issuerKey, digest)
return cert
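# Illustrative usage (a sketch, not part of the original module): build a
# self-signed CA certificate, valid for five years, from the helpers above.
# An X509Req works as the "issuer certificate" here because only its
# subject is read back by createCertificate().
if __name__ == '__main__':
    cakey = createKeyPair(TYPE_RSA, 2048)
    careq = createCertRequest(cakey, CN='Certificate Authority')
    cacert = createCertificate(careq, (careq, cakey), 0, (0, 60 * 60 * 24 * 365 * 5))
    print(crypto.dump_certificate(crypto.FILETYPE_PEM, cacert).decode('ascii'))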
| apache-2.0 |
mindw/python-qwt | qwt/plot_canvas.py | 2 | 23653 | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 Uwe Rathmann, for the original C++ code
# Copyright (c) 2015 Pierre Raybaut, for the Python translation/optimization
# (see LICENSE file for more details)
from qwt.null_paintdevice import QwtNullPaintDevice
from qwt.painter import QwtPainter
from qwt.qt import PYQT5
from qwt.qt.QtGui import (QFrame, QPaintEngine, QPen, QBrush, QRegion, QImage,
QPainterPath, QPixmap, QGradient, QPainter, qAlpha,
QPolygonF, QStyleOption, QStyle, QStyleOptionFrame)
from qwt.qt.QtCore import Qt, QSizeF, QT_VERSION, QEvent, QPointF, QRectF
class Border(object):
def __init__(self):
self.pathlist = []
self.rectList = []
self.clipRegion = QRegion()
class Background(object):
def __init__(self):
self.path = QPainterPath()
self.brush = QBrush()
self.origin = QPointF()
class QwtStyleSheetRecorder(QwtNullPaintDevice):
def __init__(self, size):
super(QwtStyleSheetRecorder, self).__init__()
self.__size = size
self.__pen = QPen()
self.__brush = QBrush()
self.__origin = QPointF()
self.clipRects = []
self.border = Border()
self.background = Background()
def updateState(self, state):
if state.state() & QPaintEngine.DirtyPen:
self.__pen = state.pen()
if state.state() & QPaintEngine.DirtyBrush:
self.__brush = state.brush()
if state.state() & QPaintEngine.DirtyBrushOrigin:
self.__origin = state.brushOrigin()
def drawRects(self, rects, count):
for i in range(count):
self.border.rectList.append(rects[i])
def drawPath(self, path):
rect = QRectF(QPointF(0., 0.), self.__size)
if path.controlPointRect().contains(rect.center()):
self.setCornerRects(path)
self.alignCornerRects(rect)
self.background.path = path
self.background.brush = self.__brush
self.background.origin = self.__origin
else:
self.border.pathlist += [path]
def setCornerRects(self, path):
pos = QPointF(0., 0.)
for i in range(path.elementCount()):
el = path.elementAt(i)
if el.type in (QPainterPath.MoveToElement,
QPainterPath.LineToElement):
pos.setX(el.x)
pos.setY(el.y)
elif el.type == QPainterPath.CurveToElement:
r = QRectF(pos, QPointF(el.x, el.y))
self.clipRects += [r.normalized()]
pos.setX(el.x)
pos.setY(el.y)
elif el.type == QPainterPath.CurveToDataElement:
if self.clipRects:
r = self.clipRects[-1]
r.setCoords(min([r.left(), el.x]),
min([r.top(), el.y]),
max([r.right(), el.x]),
max([r.bottom(), el.y]))
self.clipRects[-1] = r.normalized()
def sizeMetrics(self):
return self.__size
def alignCornerRects(self, rect):
for r in self.clipRects:
if r.center().x() < rect.center().x():
r.setLeft(rect.left())
else:
r.setRight(rect.right())
if r.center().y() < rect.center().y():
r.setTop(rect.top())
else:
r.setBottom(rect.bottom())
def _rects_conv_PyQt5(rects):
# PyQt5 compatibility: the conversion from QRect to QRectF should not
# be necessary but it seems to be anyway... PyQt5 bug?
if PYQT5:
return [QRectF(rect) for rect in rects]
else:
return rects
def qwtDrawBackground(painter, canvas):
painter.save()
borderClip = canvas.borderPath(canvas.rect())
if not borderClip.isEmpty():
painter.setClipPath(borderClip, Qt.IntersectClip)
brush = canvas.palette().brush(canvas.backgroundRole())
if brush.style() == Qt.TexturePattern:
pm = QPixmap(canvas.size())
QwtPainter.fillPixmap(canvas, pm)
painter.drawPixmap(0, 0, pm)
elif brush.gradient():
rects = []
if brush.gradient().coordinateMode() == QGradient.ObjectBoundingMode:
rects += [canvas.rect()]
else:
rects += painter.clipRegion().rects()
useRaster = False
if painter.paintEngine().type() == QPaintEngine.X11:
useRaster = True
if useRaster:
format_ = QImage.Format_RGB32
stops = brush.gradient().stops()
for stop in stops:
if stop.second.alpha() != 255:
format_ = QImage.Format_ARGB32
break
image = QImage(canvas.size(), format_)
p = QPainter(image)
p.setPen(Qt.NoPen)
p.setBrush(brush)
p.drawRects(_rects_conv_PyQt5(rects))
p.end()
painter.drawImage(0, 0, image)
else:
painter.setPen(Qt.NoPen)
painter.setBrush(brush)
painter.drawRects(_rects_conv_PyQt5(rects))
else:
painter.setPen(Qt.NoPen)
painter.setBrush(brush)
painter.drawRects(_rects_conv_PyQt5(painter.clipRegion().rects()))
painter.restore()
def qwtRevertPath(path):
if path.elementCount() == 4:
el0 = path.elementAt(0)
el3 = path.elementAt(3)
path.setElementPositionAt(0, el3.x, el3.y)
path.setElementPositionAt(3, el0.x, el0.y)
def qwtCombinePathList(rect, pathList):
if not pathList:
return QPainterPath()
ordered = [QPainterPath() for _ in range(8)]
for subPath in pathList:
index = -1
br = subPath.controlPointRect()
if br.center().x() < rect.center().x():
if br.center().y() < rect.center().y():
if abs(br.top()-rect.top()) < abs(br.left()-rect.left()):
index = 1
else:
index = 0
else:
if abs(br.bottom()-rect.bottom()) < abs(br.left()-rect.left()):
index = 6
else:
index = 7
if subPath.currentPosition().y() > br.center().y():
qwtRevertPath(subPath)
else:
if br.center().y() < rect.center().y():
if abs(br.top()-rect.top()) < abs(br.right()-rect.right()):
index = 2
else:
index = 3
else:
if abs(br.bottom()-rect.bottom()) < abs(br.right()-rect.right()):
index = 5
else:
index = 4
if subPath.currentPosition().y() < br.center().y():
qwtRevertPath(subPath)
ordered[index] = subPath
for i in range(4):
if ordered[2*i].isEmpty() != ordered[2*i+1].isEmpty():
return QPainterPath()
corners = QPolygonF(rect)
path = QPainterPath()
for i in range(4):
if ordered[2*i].isEmpty():
path.lineTo(corners[i])
else:
path.connectPath(ordered[2*i])
path.connectPath(ordered[2*i+1])
path.closeSubpath()
return path
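# Note (illustrative, not part of the original module): `ordered` holds the
# eight border segments clockwise, two per corner -- slots 0-1 top-left,
# 2-3 top-right, 4-5 bottom-right, 6-7 bottom-left -- matching corners[0..3]
# in the loop above; an empty pair falls back to the sharp rectangle corner
# via path.lineTo(corners[i]).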
def qwtDrawStyledBackground(w, painter):
opt = QStyleOption()
opt.initFrom(w)
w.style().drawPrimitive(QStyle.PE_Widget, opt, painter, w)
def qwtBackgroundWidget(w):
if w.parentWidget() is None:
return w
if w.autoFillBackground():
brush = w.palette().brush(w.backgroundRole())
if brush.color().alpha() > 0:
return w
if w.testAttribute(Qt.WA_StyledBackground):
image = QImage(1, 1, QImage.Format_ARGB32)
image.fill(Qt.transparent)
painter = QPainter(image)
painter.translate(-w.rect().center())
qwtDrawStyledBackground(w, painter)
painter.end()
if qAlpha(image.pixel(0, 0)) != 0:
return w
return qwtBackgroundWidget(w.parentWidget())
def qwtFillBackground(*args):
if len(args) == 2:
painter, canvas = args
rects = []
if canvas.testAttribute(Qt.WA_StyledBackground):
recorder = QwtStyleSheetRecorder(canvas.size())
p = QPainter(recorder)
qwtDrawStyledBackground(canvas, p)
p.end()
if recorder.background.brush.isOpaque():
rects = recorder.clipRects
else:
rects += [canvas.rect()]
else:
r = canvas.rect()
radius = canvas.borderRadius()
if radius > 0.:
sz = QSizeF(radius, radius)
rects += [QRectF(r.topLeft(), sz),
QRectF(r.topRight()-QPointF(radius, 0), sz),
QRectF(r.bottomRight()-QPointF(radius, radius), sz),
QRectF(r.bottomLeft()-QPointF(0, radius), sz)]
qwtFillBackground(painter, canvas, rects)
elif len(args) == 3:
painter, widget, fillRects = args
if not fillRects:
return
if painter.hasClipping():
clipRegion = painter.transform().map(painter.clipRegion())
else:
clipRegion = widget.contentsRect()
bgWidget = qwtBackgroundWidget(widget.parentWidget())
for fillRect in fillRects:
rect = fillRect.toAlignedRect()
if clipRegion.intersects(rect):
pm = QPixmap(rect.size())
QwtPainter.fillPixmap(bgWidget, pm, widget.mapTo(bgWidget, rect.topLeft()))
painter.drawPixmap(rect, pm)
else:
raise TypeError("%s() takes 2 or 3 argument(s) (%s given)"\
% ("qwtFillBackground", len(args)))
class StyleSheetBackground(object):
def __init__(self):
self.brush = QBrush()
self.origin = QPointF()
class StyleSheet(object):
def __init__(self):
self.hasBorder = False
self.borderPath = QPainterPath()
self.cornerRects = []
self.background = StyleSheetBackground()
class QwtPlotCanvas_PrivateData(object):
def __init__(self):
self.focusIndicator = QwtPlotCanvas.NoFocusIndicator
self.borderRadius = 0
self.paintAttributes = 0
self.backingStore = None
self.styleSheet = StyleSheet()
self.styleSheet.hasBorder = False
class QwtPlotCanvas(QFrame):
# enum PaintAttribute
BackingStore = 1
Opaque = 2
HackStyledBackground = 4
ImmediatePaint = 8
# enum FocusIndicator
NoFocusIndicator, CanvasFocusIndicator, ItemFocusIndicator = list(range(3))
def __init__(self, plot=None):
super(QwtPlotCanvas, self).__init__(plot)
self.__plot = plot
self.setFrameStyle(QFrame.Panel|QFrame.Sunken)
self.setLineWidth(2)
self.__data = QwtPlotCanvas_PrivateData()
self.setCursor(Qt.CrossCursor)
self.setAutoFillBackground(True)
self.setPaintAttribute(QwtPlotCanvas.BackingStore, False)
self.setPaintAttribute(QwtPlotCanvas.Opaque, True)
self.setPaintAttribute(QwtPlotCanvas.HackStyledBackground, True)
def plot(self):
return self.__plot
def setPaintAttribute(self, attribute, on=True):
if bool(self.__data.paintAttributes & attribute) == on:
return
if on:
self.__data.paintAttributes |= attribute
else:
self.__data.paintAttributes &= ~attribute
if attribute == self.BackingStore:
if on:
if self.__data.backingStore is None:
self.__data.backingStore = QPixmap()
if self.isVisible():
if QT_VERSION >= 0x050000:
self.__data.backingStore = self.grab(self.rect())
else:
# Qt4 has no QWidget.grab(); use the static
# QPixmap.grabWidget() helper instead.
self.__data.backingStore = QPixmap.grabWidget(self, self.rect())
else:
self.__data.backingStore = None
elif attribute == self.Opaque:
if on:
self.setAttribute(Qt.WA_OpaquePaintEvent, True)
elif attribute in (self.HackStyledBackground, self.ImmediatePaint):
pass
def testPaintAttribute(self, attribute):
return self.__data.paintAttributes & attribute
def backingStore(self):
return self.__data.backingStore
def invalidateBackingStore(self):
if self.__data.backingStore:
self.__data.backingStore = QPixmap()
def setFocusIndicator(self, focusIndicator):
self.__data.focusIndicator = focusIndicator
def focusIndicator(self):
return self.__data.focusIndicator
def setBorderRadius(self, radius):
self.__data.borderRadius = max([0., radius])
def borderRadius(self):
return self.__data.borderRadius
def event(self, event):
if event.type() == QEvent.PolishRequest:
if self.testPaintAttribute(self.Opaque):
self.setAttribute(Qt.WA_OpaquePaintEvent, True)
if event.type() in (QEvent.PolishRequest, QEvent.StyleChange):
self.updateStyleSheetInfo()
return QFrame.event(self, event)
def paintEvent(self, event):
painter = QPainter(self)
painter.setClipRegion(event.region())
if self.testPaintAttribute(self.BackingStore) and\
self.__data.backingStore is not None:
bs = self.__data.backingStore
if bs.size() != self.size():
bs = QwtPainter.backingStore(self, self.size())
if self.testAttribute(Qt.WA_StyledBackground):
p = QPainter(bs)
qwtFillBackground(p, self)
self.drawCanvas(p, True)
else:
p = QPainter()
if self.__data.borderRadius <= 0.:
# print('**DEBUG: QwtPlotCanvas.paintEvent')
QwtPainter.fillPixmap(self, bs)
p.begin(bs)
self.drawCanvas(p, False)
else:
p.begin(bs)
qwtFillBackground(p, self)
self.drawCanvas(p, True)
if self.frameWidth() > 0:
self.drawBorder(p)
p.end()
self.__data.backingStore = bs
painter.drawPixmap(0, 0, bs)
else:
if self.testAttribute(Qt.WA_StyledBackground):
if self.testAttribute(Qt.WA_OpaquePaintEvent):
qwtFillBackground(painter, self)
self.drawCanvas(painter, True)
else:
self.drawCanvas(painter, False)
else:
if self.testAttribute(Qt.WA_OpaquePaintEvent):
if self.autoFillBackground():
qwtFillBackground(painter, self)
qwtDrawBackground(painter, self)
else:
if self.borderRadius() > 0.:
clipPath = QPainterPath()
clipPath.addRect(self.rect())
clipPath = clipPath.subtracted(self.borderPath(self.rect()))
painter.save()
painter.setClipPath(clipPath, Qt.IntersectClip)
qwtFillBackground(painter, self)
qwtDrawBackground(painter, self)
painter.restore()
self.drawCanvas(painter, False)
if self.frameWidth() > 0:
self.drawBorder(painter)
if self.hasFocus() and self.focusIndicator() == self.CanvasFocusIndicator:
self.drawFocusIndicator(painter)
def drawCanvas(self, painter, withBackground):
hackStyledBackground = False
if withBackground and self.testAttribute(Qt.WA_StyledBackground) and\
self.testPaintAttribute(self.HackStyledBackground):
if self.__data.styleSheet.hasBorder and\
not self.__data.styleSheet.borderPath.isEmpty():
hackStyledBackground = True
if withBackground:
painter.save()
if self.testAttribute(Qt.WA_StyledBackground):
if hackStyledBackground:
painter.setPen(Qt.NoPen)
painter.setBrush(self.__data.styleSheet.background.brush)
painter.setBrushOrigin(self.__data.styleSheet.background.origin)
painter.setClipPath(self.__data.styleSheet.borderPath)
painter.drawRect(self.contentsRect())
else:
qwtDrawStyledBackground(self, painter)
elif self.autoFillBackground():
painter.setPen(Qt.NoPen)
painter.setBrush(self.palette().brush(self.backgroundRole()))
if self.__data.borderRadius > 0. and self.rect() == self.frameRect():
if self.frameWidth() > 0:
painter.setClipPath(self.borderPath(self.rect()))
painter.drawRect(self.rect())
else:
painter.setRenderHint(QPainter.Antialiasing, True)
painter.drawPath(self.borderPath(self.rect()))
else:
painter.drawRect(self.rect())
painter.restore()
painter.save()
if not self.__data.styleSheet.borderPath.isEmpty():
painter.setClipPath(self.__data.styleSheet.borderPath,
Qt.IntersectClip)
else:
if self.__data.borderRadius > 0.:
painter.setClipPath(self.borderPath(self.frameRect()),
Qt.IntersectClip)
else:
# print('**DEBUG: QwtPlotCanvas.drawCanvas')
painter.setClipRect(self.contentsRect(), Qt.IntersectClip)
self.plot().drawCanvas(painter)
painter.restore()
if withBackground and hackStyledBackground:
opt = QStyleOptionFrame()
opt.initFrom(self)
self.style().drawPrimitive(QStyle.PE_Frame, opt, painter, self)
def drawBorder(self, painter):
if self.__data.borderRadius > 0:
if self.frameWidth() > 0:
QwtPainter.drawRoundedFrame(painter, QRectF(self.frameRect()),
self.__data.borderRadius, self.__data.borderRadius,
self.palette(), self.frameWidth(), self.frameStyle())
else:
if QT_VERSION >= 0x040500:
if PYQT5:
from qwt.qt.QtGui import QStyleOptionFrame
else:
from qwt.qt.QtGui import QStyleOptionFrameV3 as\
QStyleOptionFrame
opt = QStyleOptionFrame()
opt.initFrom(self)
frameShape = self.frameStyle() & QFrame.Shape_Mask
frameShadow = self.frameStyle() & QFrame.Shadow_Mask
opt.frameShape = QFrame.Shape(int(opt.frameShape)|frameShape)
if frameShape in (QFrame.Box, QFrame.HLine, QFrame.VLine,
QFrame.StyledPanel, QFrame.Panel):
opt.lineWidth = self.lineWidth()
opt.midLineWidth = self.midLineWidth()
else:
opt.lineWidth = self.frameWidth()
if frameShadow == self.Sunken:
opt.state |= QStyle.State_Sunken
elif frameShadow == self.Raised:
opt.state |= QStyle.State_Raised
self.style().drawControl(QStyle.CE_ShapedFrame, opt, painter, self)
else:
self.drawFrame(painter)
def resizeEvent(self, event):
QFrame.resizeEvent(self, event)
self.updateStyleSheetInfo()
def drawFocusIndicator(self, painter):
margin = 1
focusRect = self.contentsRect()
focusRect.setRect(focusRect.x()+margin, focusRect.y()+margin,
focusRect.width()-2*margin, focusRect.height()-2*margin)
QwtPainter.drawFocusRect(painter, self, focusRect)
def replot(self):
self.invalidateBackingStore()
if self.testPaintAttribute(self.ImmediatePaint):
self.repaint(self.contentsRect())
else:
self.update(self.contentsRect())
def invalidatePaintCache(self):
import warnings
warnings.warn("`invalidatePaintCache` has been removed: "\
"please use `replot` instead", RuntimeWarning)
self.replot()
def updateStyleSheetInfo(self):
if not self.testAttribute(Qt.WA_StyledBackground):
return
recorder = QwtStyleSheetRecorder(self.size())
painter = QPainter(recorder)
opt = QStyleOption()
opt.initFrom(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
painter.end()
self.__data.styleSheet.hasBorder = bool(recorder.border.rectList)
self.__data.styleSheet.cornerRects = recorder.clipRects
if recorder.background.path.isEmpty():
if recorder.border.rectList:
self.__data.styleSheet.borderPath =\
qwtCombinePathList(self.rect(), recorder.border.pathlist)
else:
self.__data.styleSheet.borderPath = recorder.background.path
self.__data.styleSheet.background.brush = recorder.background.brush
self.__data.styleSheet.background.origin = recorder.background.origin
def borderPath(self, rect):
if self.testAttribute(Qt.WA_StyledBackground):
recorder = QwtStyleSheetRecorder(rect.size())
painter = QPainter(recorder)
opt = QStyleOption()
opt.initFrom(self)
opt.rect = rect
self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
painter.end()
if not recorder.background.path.isEmpty():
return recorder.background.path
if recorder.border.rectList:
return qwtCombinePathList(rect, recorder.border.pathlist)
elif self.__data.borderRadius > 0.:
fw2 = self.frameWidth()*.5
r = QRectF(rect).adjusted(fw2, fw2, -fw2, -fw2)
path = QPainterPath()
path.addRoundedRect(r, self.__data.borderRadius,
self.__data.borderRadius)
return path
return QPainterPath()
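# Minimal usage sketch (illustrative, not part of the original module; it
# assumes an existing QwtPlot instance named `plot`, and that QwtPlot
# exposes a setCanvas() method as in Qwt's C++ API):
#
#     canvas = QwtPlotCanvas(plot)
#     canvas.setBorderRadius(10)
#     canvas.setPaintAttribute(QwtPlotCanvas.BackingStore, True)
#     plot.setCanvas(canvas)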
| lgpl-2.1 |
irwinlove/django | tests/template_tests/syntax_tests/test_firstof.py | 199 | 3728 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from ..utils import setup
class FirstOfTagTests(SimpleTestCase):
libraries = {'future': 'django.templatetags.future'}
@setup({'firstof01': '{% firstof a b c %}'})
def test_firstof01(self):
output = self.engine.render_to_string('firstof01', {'a': 0, 'c': 0, 'b': 0})
self.assertEqual(output, '')
@setup({'firstof02': '{% firstof a b c %}'})
def test_firstof02(self):
output = self.engine.render_to_string('firstof02', {'a': 1, 'c': 0, 'b': 0})
self.assertEqual(output, '1')
@setup({'firstof03': '{% firstof a b c %}'})
def test_firstof03(self):
output = self.engine.render_to_string('firstof03', {'a': 0, 'c': 0, 'b': 2})
self.assertEqual(output, '2')
@setup({'firstof04': '{% firstof a b c %}'})
def test_firstof04(self):
output = self.engine.render_to_string('firstof04', {'a': 0, 'c': 3, 'b': 0})
self.assertEqual(output, '3')
@setup({'firstof05': '{% firstof a b c %}'})
def test_firstof05(self):
output = self.engine.render_to_string('firstof05', {'a': 1, 'c': 3, 'b': 2})
self.assertEqual(output, '1')
@setup({'firstof06': '{% firstof a b c %}'})
def test_firstof06(self):
output = self.engine.render_to_string('firstof06', {'c': 3, 'b': 0})
self.assertEqual(output, '3')
@setup({'firstof07': '{% firstof a b "c" %}'})
def test_firstof07(self):
output = self.engine.render_to_string('firstof07', {'a': 0})
self.assertEqual(output, 'c')
@setup({'firstof08': '{% firstof a b "c and d" %}'})
def test_firstof08(self):
output = self.engine.render_to_string('firstof08', {'a': 0, 'b': 0})
self.assertEqual(output, 'c and d')
@setup({'firstof09': '{% firstof %}'})
def test_firstof09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('firstof09')
@setup({'firstof10': '{% firstof a %}'})
def test_firstof10(self):
output = self.engine.render_to_string('firstof10', {'a': '<'})
self.assertEqual(output, '<')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'firstof11': '{% load firstof from future %}{% firstof a b %}'})
def test_firstof11(self):
output = self.engine.render_to_string('firstof11', {'a': '<', 'b': '>'})
self.assertEqual(output, '<')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'firstof12': '{% load firstof from future %}{% firstof a b %}'})
def test_firstof12(self):
output = self.engine.render_to_string('firstof12', {'a': '', 'b': '>'})
self.assertEqual(output, '>')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'firstof13': '{% load firstof from future %}'
'{% autoescape off %}{% firstof a %}{% endautoescape %}'})
def test_firstof13(self):
output = self.engine.render_to_string('firstof13', {'a': '<'})
self.assertEqual(output, '<')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'firstof14': '{% load firstof from future %}{% firstof a|safe b %}'})
def test_firstof14(self):
output = self.engine.render_to_string('firstof14', {'a': '<'})
self.assertEqual(output, '<')
@setup({'firstof15': '{% firstof a b c as myvar %}'})
def test_firstof15(self):
ctx = {'a': 0, 'b': 2, 'c': 3}
output = self.engine.render_to_string('firstof15', ctx)
self.assertEqual(ctx['myvar'], '2')
self.assertEqual(output, '')
| bsd-3-clause |
aperigault/ansible | lib/ansible/modules/cloud/azure/azure_rm_image.py | 26 | 14444 | #!/usr/bin/python
#
# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_image
version_added: "2.5"
short_description: Manage Azure image
description:
- Create, delete an image from virtual machine, blob uri, managed disk or snapshot.
options:
resource_group:
description:
- Name of resource group.
required: true
name:
description:
- Name of the image.
required: true
source:
description:
- OS disk source from the same region.
- It can be a virtual machine, OS disk blob URI, managed OS disk, or OS snapshot.
- Each type of source except for blob URI can be given as resource id, name or a dict containing C(resource_group), C(name) and C(type).
- If source type is blob URI, the source should be the full URI of the blob in string type.
- If you specify I(type) in a dict, acceptable values are C(disks), C(virtual_machines) and C(snapshots).
type: raw
required: true
data_disk_sources:
description:
- List of data disk sources, including unmanaged blob URI, managed disk id or name, or snapshot id or name.
type: list
location:
description:
- Location of the image. Derived from I(resource_group) if not specified.
os_type:
description: The OS type of image.
choices:
- Windows
- Linux
state:
description:
- Assert the state of the image. Use C(present) to create or update a image and C(absent) to delete an image.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Create an image from a virtual machine
azure_rm_image:
resource_group: myResourceGroup
name: myImage
source: myVirtualMachine
- name: Create an image from os disk
azure_rm_image:
resource_group: myResourceGroup
name: myImage
source: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/disks/disk001
data_disk_sources:
- datadisk001
- datadisk002
os_type: Linux
- name: Create an image from os disk via dict
azure_rm_image:
resource_group: myResourceGroup
name: myImage
source:
type: disks
resource_group: myResourceGroup
name: disk001
data_disk_sources:
- datadisk001
- datadisk002
os_type: Linux
- name: Delete an image
azure_rm_image:
state: absent
resource_group: myResourceGroup
name: myImage
source: testvm001
'''
RETURN = '''
id:
description:
- Image resource path.
type: str
returned: success
example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/images/myImage"
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMImage(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
source=dict(type='raw'),
data_disk_sources=dict(type='list', default=[]),
os_type=dict(type='str', choices=['Windows', 'Linux'])
)
self.results = dict(
changed=False,
id=None
)
required_if = [
('state', 'present', ['source'])
]
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.source = None
self.data_disk_sources = None
self.os_type = None
super(AzureRMImage, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
results = None
changed = False
image = None
if not self.location:
# Set default location
resource_group = self.get_resource_group(self.resource_group)
self.location = resource_group.location
self.log('Fetching image {0}'.format(self.name))
image = self.get_image()
if image:
self.check_provisioning_state(image, self.state)
results = image.id
# update is not supported except for tags
update_tags, tags = self.update_tags(image.tags)
if update_tags:
changed = True
self.tags = tags
if self.state == 'absent':
changed = True
# the image does not exist and create a new one
elif self.state == 'present':
changed = True
self.results['changed'] = changed
self.results['id'] = results
if changed:
if self.state == 'present':
image_instance = None
# create from virtual machine
vm = self.get_source_vm()
if vm:
if self.data_disk_sources:
self.fail('data_disk_sources is not allowed when capturing image from vm')
image_instance = self.compute_models.Image(location=self.location,
source_virtual_machine=self.compute_models.SubResource(id=vm.id),
tags=self.tags)
else:
if not self.os_type:
self.fail('os_type is required to create the image')
os_disk = self.create_os_disk()
data_disks = self.create_data_disks()
storage_profile = self.compute_models.ImageStorageProfile(os_disk=os_disk, data_disks=data_disks)
image_instance = self.compute_models.Image(location=self.location, storage_profile=storage_profile, tags=self.tags)
# finally make the change if not check mode
if not self.check_mode and image_instance:
new_image = self.create_image(image_instance)
self.results['id'] = new_image.id
elif self.state == 'absent':
if not self.check_mode:
# delete image
self.delete_image()
# the delete does not actually return anything. if no exception, then we'll assume it worked.
self.results['id'] = None
return self.results
def resolve_storage_source(self, source):
blob_uri = None
disk = None
snapshot = None
# blob URI can only be given by str
if isinstance(source, str) and source.lower().endswith('.vhd'):
blob_uri = source
return (blob_uri, disk, snapshot)
tokenize = dict()
if isinstance(source, dict):
tokenize = source
elif isinstance(source, str):
tokenize = parse_resource_id(source)
else:
self.fail("source parameter should be in type string or dictionary")
if tokenize.get('type') == 'disks':
disk = format_resource_id(tokenize['name'],
tokenize.get('subscription_id') or self.subscription_id,
'Microsoft.Compute',
'disks',
tokenize.get('resource_group') or self.resource_group)
return (blob_uri, disk, snapshot)
if tokenize.get('type') == 'snapshots':
snapshot = format_resource_id(tokenize['name'],
tokenize.get('subscription_id') or self.subscription_id,
'Microsoft.Compute',
'snapshots',
tokenize.get('resource_group') or self.resource_group)
return (blob_uri, disk, snapshot)
# not a disk or snapshots
if 'type' in tokenize:
return (blob_uri, disk, snapshot)
# source can be name of snapshot or disk
snapshot_instance = self.get_snapshot(tokenize.get('resource_group') or self.resource_group,
tokenize['name'])
if snapshot_instance:
snapshot = snapshot_instance.id
return (blob_uri, disk, snapshot)
disk_instance = self.get_disk(tokenize.get('resource_group') or self.resource_group,
tokenize['name'])
if disk_instance:
disk = disk_instance.id
return (blob_uri, disk, snapshot)
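# Illustrative note (not part of the original module): resolve_storage_source
# fills in at most one element of the (blob_uri, disk, snapshot) triple. A
# plain string ending in ".vhd" comes back as blob_uri, while e.g.
# dict(type='disks', name='disk001') is expanded into a full managed-disk
# resource id via format_resource_id.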
def create_os_disk(self):
blob_uri, disk, snapshot = self.resolve_storage_source(self.source)
snapshot_resource = self.compute_models.SubResource(id=snapshot) if snapshot else None
managed_disk = self.compute_models.SubResource(id=disk) if disk else None
return self.compute_models.ImageOSDisk(os_type=self.os_type,
os_state=self.compute_models.OperatingSystemStateTypes.generalized,
snapshot=snapshot_resource,
managed_disk=managed_disk,
blob_uri=blob_uri)
def create_data_disk(self, lun, source):
blob_uri, disk, snapshot = self.resolve_storage_source(source)
if blob_uri or disk or snapshot:
snapshot_resource = self.compute_models.SubResource(id=snapshot) if snapshot else None
managed_disk = self.compute_models.SubResource(id=disk) if disk else None
return self.compute_models.ImageDataDisk(lun=lun,
blob_uri=blob_uri,
snapshot=snapshot_resource,
managed_disk=managed_disk)
def create_data_disks(self):
return list(filter(None, [self.create_data_disk(lun, source) for lun, source in enumerate(self.data_disk_sources)]))
def get_source_vm(self):
# self.source can be a vm (id/name/dict), or not a vm. Return the vm iff it is an existing vm.
resource = dict()
if isinstance(self.source, dict):
if self.source.get('type') != 'virtual_machines':
return None
resource = dict(type='virtualMachines',
name=self.source['name'],
resource_group=self.source.get('resource_group') or self.resource_group)
elif isinstance(self.source, str):
vm_resource_id = format_resource_id(self.source,
self.subscription_id,
'Microsoft.Compute',
'virtualMachines',
self.resource_group)
resource = parse_resource_id(vm_resource_id)
else:
self.fail("Unsupported type of source parameter, please give string or dictionary")
return self.get_vm(resource['resource_group'], resource['name']) if resource['type'] == 'virtualMachines' else None
def get_snapshot(self, resource_group, snapshot_name):
return self._get_resource(self.compute_client.snapshots.get, resource_group, snapshot_name)
def get_disk(self, resource_group, disk_name):
return self._get_resource(self.compute_client.disks.get, resource_group, disk_name)
def get_vm(self, resource_group, vm_name):
return self._get_resource(self.compute_client.virtual_machines.get, resource_group, vm_name, 'instanceview')
def get_image(self):
return self._get_resource(self.compute_client.images.get, self.resource_group, self.name)
def _get_resource(self, get_method, resource_group, name, expand=None):
try:
if expand:
return get_method(resource_group, name, expand=expand)
else:
return get_method(resource_group, name)
except CloudError as cloud_err:
# Return None iff the resource is not found
if cloud_err.status_code == 404:
self.log('{0}'.format(str(cloud_err)))
return None
self.fail('Error: failed to get resource {0} - {1}'.format(name, str(cloud_err)))
except Exception as exc:
self.fail('Error: failed to get resource {0} - {1}'.format(name, str(exc)))
def create_image(self, image):
try:
poller = self.compute_client.images.create_or_update(self.resource_group, self.name, image)
new_image = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating image {0} - {1}".format(self.name, str(exc)))
self.check_provisioning_state(new_image)
return new_image
def delete_image(self):
self.log('Deleting image {0}'.format(self.name))
try:
poller = self.compute_client.images.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting image {0} - {1}".format(self.name, str(exc)))
return result
def main():
AzureRMImage()
if __name__ == '__main__':
main()
| gpl-3.0 |
ppwwyyxx/tensorflow | tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py | 5 | 127245 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for boosted_trees training kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.kernels.boosted_trees import boosted_trees_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
_INEQUALITY_DEFAULT_LEFT = 'INEQUALITY_DEFAULT_LEFT'.encode('utf-8')
_INEQUALITY_DEFAULT_RIGHT = 'INEQUALITY_DEFAULT_RIGHT'.encode('utf-8')
_EQUALITY_DEFAULT_RIGHT = 'EQUALITY_DEFAULT_RIGHT'.encode('utf-8')
class UpdateTreeEnsembleOpTest(test_util.TensorFlowTestCase):
"""Tests for growing tree ensemble from split candidates."""
@test_util.run_deprecated_v1
def testGrowWithEmptyEnsemble(self):
"""Test growing an empty ensemble."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_ids = [0, 2, 6]
# Prepare feature inputs.
# Note that features 1 & 3 have nearly the same gain but different splits.
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([0.63], dtype=np.float32)
feature2_thresholds = np.array([23], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24]], dtype=np.float32)
# Feature split with the highest gain.
feature3_nodes = np.array([0], dtype=np.int32)
feature3_gains = np.array([7.65], dtype=np.float32)
feature3_thresholds = np.array([7], dtype=np.int32)
feature3_left_node_contribs = np.array([[-4.89]], dtype=np.float32)
feature3_right_node_contribs = np.array([[5.3]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# Tree will be finalized now, since we will reach depth 1.
max_depth=1,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
# Note that since the tree is finalized, we added a new dummy tree.
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 6
threshold: 7
left_id: 1
right_id: 2
}
metadata {
gain: 7.65
}
}
nodes {
leaf {
scalar: -0.489
}
}
nodes {
leaf {
scalar: 0.53
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
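# Note on the expected proto above: node contributions are scaled by the
# learning rate, e.g. the winning split's left leaf -4.89 * 0.1 = -0.489
# and its right leaf 5.3 * 0.1 = 0.53.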
@test_util.run_deprecated_v1
def testGrowWithEmptyEnsembleV2(self):
"""Test growing an empty ensemble."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_ids = [0, 6]
# Prepare feature inputs.
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143]], dtype=np.float32)
feature1_inequality_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Feature split with the highest gain.
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([7.65], dtype=np.float32)
feature2_dimensions = np.array([1], dtype=np.int32)
feature2_thresholds = np.array([7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-4.89]], dtype=np.float32)
feature2_right_node_contribs = np.array([[5.3]], dtype=np.float32)
feature2_inequality_split_types = np.array([_INEQUALITY_DEFAULT_RIGHT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# Tree will be finalized now, since we will reach depth 1.
max_depth=1,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions, feature2_dimensions],
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
],
split_types=[
feature1_inequality_split_types, feature2_inequality_split_types
])
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
# Note that since the tree is finalized, we added a new dummy tree.
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 6
threshold: 7
dimension_id: 1
left_id: 1
right_id: 2
default_direction: DEFAULT_RIGHT
}
metadata {
gain: 7.65
}
}
nodes {
leaf {
scalar: -0.489
}
}
nodes {
leaf {
scalar: 0.53
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowWithEmptyEnsembleV2EqualitySplit(self):
"""Test growing an empty ensemble."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_ids = [0, 6]
# Prepare feature inputs.
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143]], dtype=np.float32)
feature1_inequality_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Feature split with the highest gain.
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([7.65], dtype=np.float32)
feature2_dimensions = np.array([1], dtype=np.int32)
feature2_thresholds = np.array([7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-4.89]], dtype=np.float32)
feature2_right_node_contribs = np.array([[5.3]], dtype=np.float32)
feature2_inequality_split_types = np.array([_EQUALITY_DEFAULT_RIGHT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# Tree will be finalized now, since we will reach depth 1.
max_depth=1,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions, feature2_dimensions],
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
],
split_types=[
feature1_inequality_split_types, feature2_inequality_split_types
],
)
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
# Note that since the tree is finalized, we added a new dummy tree.
expected_result = """
trees {
nodes {
categorical_split {
feature_id: 6
value: 7
dimension_id: 1
left_id: 1
right_id: 2
}
metadata {
gain: 7.65
}
}
nodes {
leaf {
scalar: -0.489
}
}
nodes {
leaf {
scalar: 0.53
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowWithEmptyEnsembleV2MultiClass(self):
"""Test growing an empty ensemble for multi-class case."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
logits_dimension = 2
feature_ids = [0, 6]
# Prepare feature inputs.
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375, 5.11]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143, 2.98]], dtype=np.float32)
feature1_inequality_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Feature split with the highest gain.
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([7.65], dtype=np.float32)
feature2_dimensions = np.array([1], dtype=np.int32)
feature2_thresholds = np.array([7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-4.89, 6.31]], dtype=np.float32)
feature2_right_node_contribs = np.array([[5.3, -1.21]], dtype=np.float32)
feature2_inequality_split_types = np.array([_INEQUALITY_DEFAULT_RIGHT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# Tree will be finalized now, since we will reach depth 1.
max_depth=1,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions, feature2_dimensions],
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
],
split_types=[
feature1_inequality_split_types, feature2_inequality_split_types
],
logits_dimension=logits_dimension)
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
# Note that since the tree is finalized, we added a new dummy tree.
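      # With logits_dimension=2 each leaf stores a per-class vector:
      # 0.1 * [-4.89, 6.31] = [-0.489, 0.631] on the left and
      # 0.1 * [5.3, -1.21] = [0.53, -0.121] on the right.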
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 6
threshold: 7
dimension_id: 1
left_id: 1
right_id: 2
default_direction: DEFAULT_RIGHT
}
metadata {
gain: 7.65
original_leaf {
vector {
value: 0.0
value: 0.0
}
}
}
}
nodes {
leaf {
vector {
value: -0.489
}
vector {
value: 0.631
}
}
}
nodes {
leaf {
vector {
value: 0.53
}
vector {
value: -0.121
}
}
}
}
trees {
nodes {
leaf {
vector {
value: 0.0
value: 0.0
}
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testBiasCenteringOnEmptyEnsemble(self):
"""Test growing with bias centering on an empty ensemble."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
gradients = np.array([[5.]], dtype=np.float32)
hessians = np.array([[24.]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.center_bias(
tree_ensemble_handle,
mean_gradients=gradients,
mean_hessians=hessians,
l1=0.0,
l2=1.0
)
session.run(grow_op)
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
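      # The expected leaf is consistent with a single Newton step under l2
      # regularization: -5.0 / (24.0 + 1.0) = -0.2.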
expected_result = """
trees {
nodes {
leaf {
scalar: -0.2
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 0
is_finalized: false
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeNotFinalized(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([0.63, 2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([1.7], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# tree is going to be finalized now, since we reach depth 2.
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
# Expect the split for node 1 to be chosen from feature 1 and
# the split for node 2 to be chosen from feature 2.
# The grown tree should be finalized as max tree depth is 2 and we have
# grown 2 layers.
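      # Each new leaf is parent leaf + learning_rate * contribution, e.g.
      # node 3: 0.714 + 0.1 * -6.0 = 0.114 and node 6:
      # -0.4375 + 0.1 * 2.3 = -0.2075.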
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
bucketized_split {
threshold: 21
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 0.714
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
scalar: -0.4375
}
}
}
nodes {
leaf {
scalar: 0.114
}
}
nodes {
leaf {
scalar: 0.879
}
}
nodes {
leaf {
scalar: -0.5875
}
}
nodes {
leaf {
scalar: -0.2075
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeV2NotFinalized(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([1.4], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([0.63, 2.7], dtype=np.float32)
feature2_dimensions = np.array([1, 3], dtype=np.int32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature2_split_types = np.array(
[_INEQUALITY_DEFAULT_RIGHT, _INEQUALITY_DEFAULT_RIGHT])
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([1.7], dtype=np.float32)
feature3_dimensions = np.array([0], dtype=np.int32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
feature3_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# tree is going to be finalized now, since we reach depth 2.
max_depth=2,
feature_ids=feature_ids,
dimension_ids=[
feature1_dimensions, feature2_dimensions, feature3_dimensions
],
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
],
split_types=[
feature1_split_types, feature2_split_types, feature3_split_types
])
session.run(grow_op)
# Expect the split for node 1 to be chosen from feature 1 and
# the split for node 2 to be chosen from feature 2.
# The grown tree should be finalized as max tree depth is 2 and we have
# grown 2 layers.
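      # Leaf arithmetic matches the V1 test above; the V2 op additionally
      # records dimension_id and, for DEFAULT_RIGHT split types,
      # default_direction.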
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 21
dimension_id: 0
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 0.714
}
}
}
nodes {
bucketized_split {
feature_id: 1
dimension_id: 3
threshold: 7
left_id: 5
right_id: 6
default_direction: DEFAULT_RIGHT
}
metadata {
gain: 2.7
original_leaf {
scalar: -0.4375
}
}
}
nodes {
leaf {
scalar: 0.114
}
}
nodes {
leaf {
scalar: 0.879
}
}
nodes {
leaf {
scalar: -0.5875
}
}
nodes {
leaf {
scalar: -0.2075
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeV2NotFinalizedEqualitySplit(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([1.4], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([0.63, 2.7], dtype=np.float32)
feature2_dimensions = np.array([1, 3], dtype=np.int32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature2_split_types = np.array(
[_EQUALITY_DEFAULT_RIGHT, _EQUALITY_DEFAULT_RIGHT])
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([1.7], dtype=np.float32)
feature3_dimensions = np.array([0], dtype=np.int32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
feature3_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# tree is going to be finalized now, since we reach depth 2.
max_depth=2,
feature_ids=feature_ids,
dimension_ids=[
feature1_dimensions, feature2_dimensions, feature3_dimensions
],
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
],
split_types=[
feature1_split_types, feature2_split_types, feature3_split_types
],
)
session.run(grow_op)
# Expect the split for node 1 to be chosen from feature 1 and
# the split for node 2 to be chosen from feature 2.
# The grown tree should be finalized as max tree depth is 2 and we have
# grown 2 layers.
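      # Same arithmetic as the inequality variant; node 2's equality split is
      # serialized as a categorical_split with value 7 instead of a threshold.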
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 21
dimension_id: 0
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 0.714
}
}
}
nodes {
categorical_split {
feature_id: 1
dimension_id: 3
value: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
scalar: -0.4375
}
}
}
nodes {
leaf {
scalar: 0.114
}
}
nodes {
leaf {
scalar: 0.879
}
}
nodes {
leaf {
scalar: -0.5875
}
}
nodes {
leaf {
scalar: -0.2075
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeV2NotFinalizedMultiClass(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
vector {
value: 0.714
}
vector {
value: 0.1
}
}
}
nodes {
leaf {
vector {
value: -0.4375
}
vector {
value: 1.2
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
logits_dimension = 2
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([1.4], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0, .95]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65, 0.1]], dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([0.63, 2.7], dtype=np.float32)
feature2_dimensions = np.array([1, 3], dtype=np.int32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6, 2.1], [-1.5, 2.1]],
dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24, -1.1], [2.3, 0.5]],
dtype=np.float32)
feature2_split_types = np.array(
[_INEQUALITY_DEFAULT_RIGHT, _INEQUALITY_DEFAULT_RIGHT])
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([1.7], dtype=np.float32)
feature3_dimensions = np.array([0], dtype=np.int32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75, 3.2]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93, -1.05]], dtype=np.float32)
feature3_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
# tree is going to be finalized now, since we reach depth 2.
max_depth=2,
feature_ids=feature_ids,
dimension_ids=[
feature1_dimensions, feature2_dimensions, feature3_dimensions
],
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
],
split_types=[
feature1_split_types, feature2_split_types, feature3_split_types
],
logits_dimension=logits_dimension)
session.run(grow_op)
# Expect the split for node 1 to be chosen from feature 1 and
# the split for node 2 to be chosen from feature 2.
# The grown tree should be finalized as max tree depth is 2 and we have
# grown 2 layers.
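      # Each class dimension is updated independently, e.g. node 3:
      # [0.714, 0.1] + 0.1 * [-6.0, 0.95] = [0.114, 0.195].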
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 21
dimension_id: 0
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
vector {
value: 0.714
}
vector {
value: 0.1
}
}
}
}
nodes {
bucketized_split {
feature_id: 1
dimension_id: 3
threshold: 7
left_id: 5
right_id: 6
default_direction: DEFAULT_RIGHT
}
metadata {
gain: 2.7
original_leaf {
vector {
value: -0.4375
}
vector {
value: 1.2
}
}
}
}
nodes {
leaf {
vector {
value: 0.114
}
vector {
value: 0.195
}
}
}
nodes {
leaf {
vector {
value: 0.879
}
vector {
value: 0.11
}
}
}
nodes {
leaf {
vector {
value: -0.5875
}
vector {
value: 1.41
}
}
}
nodes {
leaf {
vector {
value: -0.2075
}
vector {
value: 1.25
}
}
}
}
trees {
nodes {
leaf {
vector {
value: 0.0
value: 0.0
}
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeFinalized(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
feature_ids = [75]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
learning_rate=0.1,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# Expect a new tree added, with a split on feature 75
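      # With NO_PRUNING the negative gain (-1.4) is kept. The new tree's leaves
      # are 0.1 * -6.0 = -0.6 and 0.1 * 1.65 = 0.165, and the finalized first
      # tree keeps its weight of 0.15.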
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 75
threshold: 21
left_id: 1
right_id: 2
}
metadata {
gain: -1.4
}
}
nodes {
leaf {
scalar: -0.6
}
}
nodes {
leaf {
scalar: 0.165
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeV2Finalized(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
feature_ids = [75]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_dimensions = np.array([1], dtype=np.int32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_RIGHT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
learning_rate=0.1,
max_depth=2,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions],
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs],
split_types=[feature1_split_types])
session.run(grow_op)
# Expect a new tree added, with a split on feature 75
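      # As above, plus the V2 fields on the new split: dimension_id 1 and
      # default_direction DEFAULT_RIGHT.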
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 75
dimension_id: 1
threshold: 21
left_id: 1
right_id: 2
default_direction: DEFAULT_RIGHT
}
metadata {
gain: -1.4
}
}
nodes {
leaf {
scalar: -0.6
}
}
nodes {
leaf {
scalar: 0.165
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeV2FinalizedEqualitySplit(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
leaf {
scalar: 0.0
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
feature_ids = [75]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_dimensions = np.array([1], dtype=np.int32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature1_split_types = np.array([_EQUALITY_DEFAULT_RIGHT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
learning_rate=0.1,
max_depth=2,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions],
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs],
split_types=[feature1_split_types])
session.run(grow_op)
# Expect a new tree added, with a split on feature 75
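      # The _EQUALITY_DEFAULT_RIGHT split type serializes as a
      # categorical_split (value: 21); the leaf arithmetic matches the
      # inequality variant.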
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
trees {
nodes {
categorical_split {
feature_id: 75
dimension_id: 1
value: 21
left_id: 1
right_id: 2
}
metadata {
gain: -1.4
}
}
nodes {
leaf {
scalar: -0.6
}
}
nodes {
leaf {
scalar: 0.165
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testGrowExistingEnsembleTreeV2FinalizedMultiClass(self):
"""Test growing an existing ensemble with the last tree finalized."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
vector {
value: 0.714
}
vector {
value: 0.1
}
}
}
nodes {
leaf {
vector {
value: -0.4375
}
vector {
value: 1.2
}
}
}
}
trees {
nodes {
leaf {
vector {
value: 0.0
}
vector {
value: 0.0
}
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
logits_dimension = 2
# Prepare feature inputs.
feature_ids = [75]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_dimensions = np.array([1], dtype=np.int32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0, 1.1]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65, 0.8]], dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_RIGHT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
learning_rate=0.1,
max_depth=2,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions],
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs],
split_types=[feature1_split_types],
logits_dimension=logits_dimension)
session.run(grow_op)
# Expect a new tree added, with a split on feature 75
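      # New tree leaves: 0.1 * [-6.0, 1.1] = [-0.6, 0.11] and
      # 0.1 * [1.65, 0.8] = [0.165, 0.08]; the split metadata keeps the
      # original zero-valued leaf vectors.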
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
dimension_id: 0
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
vector {
value: 0.714
}
vector {
value: 0.1
}
}
}
nodes {
leaf {
vector {
value: -0.4375
}
vector {
value: 1.2
}
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 75
dimension_id: 1
threshold: 21
left_id: 1
right_id: 2
default_direction: DEFAULT_RIGHT
}
metadata {
gain: -1.4
original_leaf {
vector {
value: 0.0
}
vector {
value: 0.0
}
}
}
}
nodes {
leaf {
vector {
                value: -0.6
}
vector {
value: 0.11
}
}
}
nodes {
leaf {
vector {
value: 0.165
}
vector {
value: 0.08
}
}
}
}
tree_weights: 0.15
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testPrePruning(self):
"""Test growing an existing ensemble with pre-pruning."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 0.1
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# For node 1, the best split is on feature 2 (gain -0.63), but the gain
# is negative so node 1 will not be split.
# For node 2, the best split is on feature 3, gain is positive.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([-0.63, 2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([2.8], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.PRE_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
      # Expect node 1 to be left unsplit (all of its candidate gains are
      # negative) and the split for node 2 to be chosen from feature 3.
      # The grown tree should not be finalized as max tree depth is 3 and
      # we have only grown 2 layers.
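      # Node 2's new leaves: -4.375 + 0.1 * -0.75 = -4.45 and
      # -4.375 + 0.1 * 1.93 = -4.182.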
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 3
left_id: 3
right_id: 4
}
metadata {
gain: 2.8
original_leaf {
scalar: -4.375
}
}
}
nodes {
leaf {
scalar: -4.45
}
}
nodes {
leaf {
scalar: -4.182
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: false
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 3
last_layer_node_end: 5
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testPrePruningMultiClass(self):
"""Test growing an existing ensemble with pre-pruning."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
vector {
value: 7.14
}
vector {
value: 1.0
}
}
}
nodes {
leaf {
vector {
value: -4.375
}
vector {
value: 1.2
}
}
}
}
tree_weights: 0.1
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
logits_dimension = 2
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
feature_ids = [0, 1, 0]
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0, .95]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65, 0.1]], dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([-0.63, 2.7], dtype=np.float32)
feature2_dimensions = np.array([1, 3], dtype=np.int32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6, 2.1], [-1.5, 2.1]],
dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24, -1.1], [2.3, 0.5]],
dtype=np.float32)
feature2_split_types = np.array(
[_INEQUALITY_DEFAULT_RIGHT, _INEQUALITY_DEFAULT_RIGHT])
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([2.8], dtype=np.float32)
feature3_dimensions = np.array([0], dtype=np.int32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75, 3.2]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93, -1.05]], dtype=np.float32)
feature3_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.PRE_PRUNING,
          # Tree will not be finalized yet, since max_depth is 3 and we only
          # grow 2 layers.
max_depth=3,
feature_ids=feature_ids,
dimension_ids=[
feature1_dimensions, feature2_dimensions, feature3_dimensions
],
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
],
split_types=[
feature1_split_types, feature2_split_types, feature3_split_types
],
logits_dimension=logits_dimension)
session.run(grow_op)
      # Expect node 1 to be left unsplit (all of its candidate gains are
      # negative) and the split for node 2 to be chosen from feature 3.
      # The grown tree should not be finalized as max tree depth is 3 and
      # we have only grown 2 layers.
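      # Per-class leaf updates, e.g. [-4.375, 1.2] + 0.1 * [-0.75, 3.2] =
      # [-4.45, 1.52].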
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
vector {
value: 7.14
}
vector {
value: 1.0
}
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 3
left_id: 3
right_id: 4
}
metadata {
gain: 2.8
original_leaf {
vector {
value: -4.375
}
vector {
value: 1.2
}
}
}
}
nodes {
leaf {
vector {
value: -4.45
}
vector {
value: 1.52
}
}
}
nodes {
leaf {
vector {
value: -4.182
}
vector {
value: 1.095
}
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: false
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 3
last_layer_node_end: 5
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testMetadataWhenCantSplitDueToEmptySplits(self):
"""Test that the metadata is updated even though we can't split."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
# feature 1 only has a candidate for node 1, feature 2 has candidates
# for both nodes and feature 3 only has a candidate for node 2.
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING,
max_depth=2,
# No splits are available.
feature_ids=[],
node_ids=[],
gains=[],
thresholds=[],
left_node_contribs=[],
right_node_contribs=[])
session.run(grow_op)
      # Expect no new splits created, but attempted (global) stats updated.
      # Metadata for this tree should not be updated (we didn't succeed in
      # building a layer). Node ranges don't change.
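      # Only num_layers_attempted advances (1 -> 2) in the expected proto.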
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 0.714
}
}
nodes {
leaf {
scalar: -0.4375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testMetadataWhenCantSplitDuePrePruning(self):
"""Test metadata is updated correctly when no split due to prepruning."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare feature inputs.
feature_ids = [0, 1, 0]
# All the gains are negative.
feature1_nodes = np.array([1], dtype=np.int32)
feature1_gains = np.array([-1.4], dtype=np.float32)
feature1_thresholds = np.array([21], dtype=np.int32)
feature1_left_node_contribs = np.array([[-6.0]], dtype=np.float32)
feature1_right_node_contribs = np.array([[1.65]], dtype=np.float32)
feature2_nodes = np.array([1, 2], dtype=np.int32)
feature2_gains = np.array([-0.63, -2.7], dtype=np.float32)
feature2_thresholds = np.array([23, 7], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6], [-1.5]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24], [2.3]], dtype=np.float32)
feature3_nodes = np.array([2], dtype=np.int32)
feature3_gains = np.array([-2.8], dtype=np.float32)
feature3_thresholds = np.array([3], dtype=np.int32)
feature3_left_node_contribs = np.array([[-0.75]], dtype=np.float32)
feature3_right_node_contribs = np.array([[1.93]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=0.1,
pruning_mode=boosted_trees_ops.PruningMode.PRE_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes, feature3_nodes],
gains=[feature1_gains, feature2_gains, feature3_gains],
thresholds=[
feature1_thresholds, feature2_thresholds, feature3_thresholds
],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs,
feature3_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs,
feature3_right_node_contribs
])
session.run(grow_op)
      # Expect that no new split was created because all the gains were
      # negative. Global metadata should be updated; tree metadata should not.
new_stamp, serialized = session.run(tree_ensemble.serialize())
tree_ensemble = boosted_trees_pb2.TreeEnsemble()
tree_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 4
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 7.14
}
}
nodes {
leaf {
scalar: -4.375
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, tree_ensemble)
@test_util.run_deprecated_v1
def testPostPruningOfSomeNodes(self):
"""Test growing an ensemble with post-pruning."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs.
      # Second feature has a larger (but still negative) gain.
feature_ids = [0, 1]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.3], dtype=np.float32)
feature1_thresholds = np.array([7], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.013]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([-0.2], dtype=np.float32)
feature2_thresholds = np.array([33], dtype=np.int32)
feature2_left_node_contribs = np.array([[0.01]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
      # Expect the split from the second feature to be chosen despite the
      # negative gain. No pruning has happened just yet.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
leaf {
scalar: 0.0143
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
# Prepare the second layer.
# Note that node 1 gain is negative and node 2 gain is positive.
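      # Under POST_PRUNING both splits are applied for now; negative-gain
      # branches are only removed once the tree is finalized.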
feature_ids = [3]
feature1_nodes = np.array([1, 2], dtype=np.int32)
feature1_gains = np.array([-0.2, 0.5], dtype=np.float32)
feature1_thresholds = np.array([7, 5], dtype=np.int32)
feature1_left_node_contribs = np.array(
[[0.07], [0.041]], dtype=np.float32)
feature1_right_node_contribs = np.array(
[[0.083], [0.064]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
      # After adding this layer, the tree will not be finalized.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
              feature_id: 1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 7
left_id: 3
right_id: 4
}
metadata {
gain: -0.2
original_leaf {
scalar: 0.01
}
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 5
left_id: 5
right_id: 6
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.08
}
}
nodes {
leaf {
scalar: 0.093
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 2
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 3
last_layer_node_end: 7
}
"""
self.assertEqual(new_stamp, 2)
self.assertProtoEquals(expected_result, res_ensemble)
      # Now split leaf 3, again with a negative gain. After this layer the
      # tree will be finalized and post-pruning happens: the nodes 3, 4, 7
      # and 8 will be pruned out.
# Prepare the third layer.
feature_ids = [92]
feature1_nodes = np.array([3], dtype=np.int32)
feature1_gains = np.array([-0.45], dtype=np.float32)
feature1_thresholds = np.array([11], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.15]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.5]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
      # After adding this layer, the tree will be finalized.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
      # Note that nodes 3, 4, 7 and 8 got deleted, so the metadata maps old
      # node ids to new ones; the pruned nodes map to the surviving node 1,
      # with the respective change in logits.
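      # The logit_change entries undo the pruned contributions: old nodes 3/4
      # map to node 1 with -0.07 and -0.083, and old nodes 7/8 map with
      # -(0.07 + 0.15) = -0.22 and -(0.07 + 0.5) = -0.57.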
expected_result = """
trees {
nodes {
bucketized_split {
              feature_id: 1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 3)
self.assertProtoEquals(expected_result, res_ensemble)
@test_util.run_deprecated_v1
def testPostPruningOfSomeNodesMultiClass(self):
"""Test growing an ensemble with post-pruning."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs.
logits_dimension = 2
      # Second feature has a larger (but still negative) gain.
feature_ids = [0, 1]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.3], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([7], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.013, 0.14]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.0143, -0.2]],
dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([-0.2], dtype=np.float32)
feature2_dimensions = np.array([3], dtype=np.int32)
feature2_thresholds = np.array([33], dtype=np.int32)
feature2_left_node_contribs = np.array([[0.01, -0.3]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.0143, 0.121]],
dtype=np.float32)
feature2_split_types = np.array([_INEQUALITY_DEFAULT_RIGHT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions, feature2_dimensions],
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
],
split_types=[feature1_split_types, feature2_split_types],
logits_dimension=logits_dimension)
session.run(grow_op)
      # Expect the split from the second feature to be chosen despite the
      # negative gain. No pruning has happened just yet.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 33
left_id: 1
right_id: 2
dimension_id: 3
default_direction: DEFAULT_RIGHT
}
metadata {
gain: -0.2
original_leaf {
vector {
value: 0.0
value: 0.0
}
}
}
}
nodes {
leaf {
vector {
value: 0.01
}
vector {
value: -0.3
}
}
}
nodes {
leaf {
vector {
value: 0.0143
}
vector {
value: 0.121
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
# Prepare the second layer.
# Note that node 1 gain is negative and node 2 gain is positive.
feature_ids = [3]
feature1_nodes = np.array([1, 2], dtype=np.int32)
feature1_gains = np.array([-0.2, 0.5], dtype=np.float32)
feature1_dimensions = np.array([0, 2], dtype=np.int32)
feature1_thresholds = np.array([7, 5], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.07, 0.5], [0.041, 0.279]],
dtype=np.float32)
feature1_right_node_contribs = np.array([[0.083, 0.31], [0.064, -0.931]],
dtype=np.float32)
feature1_split_types = np.array(
[_INEQUALITY_DEFAULT_LEFT, _INEQUALITY_DEFAULT_LEFT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions],
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs],
split_types=[feature1_split_types],
logits_dimension=logits_dimension)
session.run(grow_op)
# After adding this layer, the tree will not be finalized
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id:1
threshold: 33
left_id: 1
right_id: 2
dimension_id: 3
default_direction: DEFAULT_RIGHT
}
metadata {
gain: -0.2
original_leaf {
vector {
value: 0.0
value: 0.0
}
}
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 7
left_id: 3
right_id: 4
dimension_id: 0
}
metadata {
gain: -0.2
original_leaf {
vector {
value: 0.01
}
vector {
value: -0.3
}
}
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 5
left_id: 5
right_id: 6
dimension_id: 2
}
metadata {
gain: 0.5
original_leaf {
vector {
value: 0.0143
}
vector {
value: 0.121
}
}
}
}
nodes {
leaf {
vector {
value: 0.08
}
vector {
value: 0.2
}
}
}
nodes {
leaf {
vector {
value: 0.093
}
vector {
value: 0.01
}
}
}
nodes {
leaf {
vector {
value: 0.0553
}
vector {
value: 0.4
}
}
}
nodes {
leaf {
vector {
value: 0.0783
}
vector {
value: -0.81
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 2
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 3
last_layer_node_end: 7
}
"""
self.assertEqual(new_stamp, 2)
self.assertProtoEquals(expected_result, res_ensemble)
      # Now split node 3, again with negative gain. After this layer, the
      # tree will be finalized, and post-pruning happens. The leaves at nodes
      # 3, 4, 7 and 8 will be pruned out.
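      # For example, node 3 carried left contribution [0.07, 0.5] from the
      # second layer, so after pruning its post_pruned_nodes_meta entry maps it
      # to node 1 with logit_change [-0.07, -0.5], i.e. the contribution is
      # subtracted back out when the node collapses into its parent.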
# Prepare the third layer.
feature_ids = [92]
feature1_nodes = np.array([3], dtype=np.int32)
feature1_gains = np.array([-0.45], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([11], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.15, -0.32]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.5, 0.81]], dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=3,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions],
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs],
split_types=[feature1_split_types],
logits_dimension=logits_dimension)
session.run(grow_op)
# After adding this layer, the tree will be finalized
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
      # Note that nodes 3, 4, 7 and 8 got deleted, so the metadata stores their
      # ids mapped to their parent node 1, with the respective change in logits.
expected_result = """
trees {
nodes {
bucketized_split {
feature_id:1
threshold: 33
left_id: 1
right_id: 2
dimension_id: 3
default_direction: DEFAULT_RIGHT
}
metadata {
gain: -0.2
original_leaf {
vector {
value: 0.0
value: 0.0
}
}
}
}
nodes {
leaf {
vector {
value: 0.01
}
vector {
value: -0.3
}
}
}
nodes {
bucketized_split {
feature_id: 3
threshold: 5
left_id: 3
right_id: 4
dimension_id: 2
}
metadata {
gain: 0.5
original_leaf {
vector {
value: 0.0143
}
vector {
value: 0.121
}
}
}
}
nodes {
leaf {
vector {
value: 0.0553
}
vector {
value: 0.4
}
}
}
nodes {
leaf {
vector {
value: 0.0783
}
vector {
value: -0.81
}
}
}
}
trees {
nodes {
leaf {
vector {
value: 0
value: 0
}
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
logit_change: -0.5
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
logit_change: -0.31
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
logit_change: -0.18
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
logit_change: -1.31
}
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 3)
self.assertProtoEquals(expected_result, res_ensemble)
@test_util.run_deprecated_v1
def testPostPruningOfAllNodes(self):
"""Test growing an ensemble with post-pruning, with all nodes are pruned."""
with self.cached_session() as session:
      # Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs. All have negative gains.
feature_ids = [0, 1]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.3], dtype=np.float32)
feature1_thresholds = np.array([7], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.013]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([-0.62], dtype=np.float32)
feature2_thresholds = np.array([33], dtype=np.int32)
feature2_left_node_contribs = np.array([[0.01]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.0143]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
# Expect the split from feature 2 to be chosen despite the negative gain.
# The grown tree should not be finalized as max tree depth is 2 so no
# pruning occurs.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.62
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
leaf {
scalar: 0.0143
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
# Prepare inputs.
# All have negative gain.
feature_ids = [3]
feature1_nodes = np.array([1, 2], dtype=np.int32)
feature1_gains = np.array([-0.2, -0.5], dtype=np.float32)
feature1_thresholds = np.array([77, 79], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.023], [0.3]], dtype=np.float32)
feature1_right_node_contribs = np.array(
[[0.012343], [24]], dtype=np.float32)
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=2,
feature_ids=feature_ids,
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs])
session.run(grow_op)
# Expect the split from feature 1 to be chosen despite the negative gain.
# The grown tree should be finalized. Since all nodes have negative gain,
# the whole tree is pruned.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
# Expect the ensemble to be empty as post-pruning will prune
# the entire finalized tree.
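      # Note the cumulative logit_change values below: e.g. old node 5 maps to
      # the root with logit_change -0.3143 = -(0.0143 + 0.3), the sum of the
      # contributions along its path.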
self.assertEqual(new_stamp, 2)
self.assertProtoEquals(
"""
trees {
nodes {
leaf {
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata{
num_layers_grown: 2
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.01
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.0143
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.033
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.022343
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.3143
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -24.014299
}
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
""", res_ensemble)
@test_util.run_deprecated_v1
def testPostPruningOfAllNodesMultiClass(self):
"""Test growing an ensemble with post-pruning, with all nodes are pruned."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
logits_dimension = 2
# Prepare inputs. All have negative gains.
feature_ids = [0, 1]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([-1.3], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([7], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.013, 0.14]], dtype=np.float32)
feature1_right_node_contribs = np.array([[0.0143, -0.2]],
dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([-0.62], dtype=np.float32)
feature2_dimensions = np.array([3], dtype=np.int32)
feature2_thresholds = np.array([33], dtype=np.int32)
feature2_left_node_contribs = np.array([[0.01, -0.3]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.0143, 0.121]],
dtype=np.float32)
feature2_split_types = np.array([_INEQUALITY_DEFAULT_RIGHT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=2,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions, feature2_dimensions],
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
],
split_types=[feature1_split_types, feature2_split_types],
logits_dimension=logits_dimension)
session.run(grow_op)
# Expect the split from feature 2 to be chosen despite the negative gain.
# The grown tree should not be finalized as max tree depth is 2 so no
# pruning occurs.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 33
left_id: 1
right_id: 2
dimension_id: 3
default_direction: DEFAULT_RIGHT
}
metadata {
gain: -0.62
original_leaf {
vector {
value: 0.0
value: 0.0
}
}
}
}
nodes {
leaf {
vector {
value: 0.01
}
vector {
value: -0.3
}
}
}
nodes {
leaf {
vector {
value: 0.0143
}
vector {
value: 0.121
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 1
last_layer_node_end: 3
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
# Prepare inputs.
# All have negative gain.
feature_ids = [3]
feature1_nodes = np.array([1, 2], dtype=np.int32)
feature1_gains = np.array([-0.2, -0.5], dtype=np.float32)
feature1_dimensions = np.array([0, 4], dtype=np.int32)
feature1_thresholds = np.array([77, 79], dtype=np.int32)
feature1_left_node_contribs = np.array([[0.023, -0.99], [0.3, 5.979]],
dtype=np.float32)
feature1_right_node_contribs = np.array([[0.012343, 0.63], [24, 0.289]],
dtype=np.float32)
feature1_split_types = np.array(
[_INEQUALITY_DEFAULT_LEFT, _INEQUALITY_DEFAULT_LEFT])
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=2,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions],
node_ids=[feature1_nodes],
gains=[feature1_gains],
thresholds=[feature1_thresholds],
left_node_contribs=[feature1_left_node_contribs],
right_node_contribs=[feature1_right_node_contribs],
split_types=[feature1_split_types],
logits_dimension=logits_dimension)
session.run(grow_op)
# Expect the split from feature 1 to be chosen despite the negative gain.
# The grown tree should be finalized. Since all nodes have negative gain,
# the whole tree is pruned.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
# Expect the ensemble to be empty as post-pruning will prune
# the entire finalized tree.
self.assertEqual(new_stamp, 2)
self.assertProtoEquals(
"""
trees {
nodes {
leaf {
vector {
value: 0
value: 0
}
}
}
}
trees {
nodes {
leaf {
vector {
value: 0
value: 0
}
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata{
num_layers_grown: 2
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.01
logit_change: 0.3
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.0143
logit_change: -0.121
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.033
logit_change: 1.29
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.022343
logit_change: -0.33
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -0.3143
logit_change: -6.1
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -24.014299
logit_change: -0.41
}
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
last_layer_node_start: 0
last_layer_node_end: 1
}
""", res_ensemble)
@test_util.run_deprecated_v1
def testPostPruningChangesNothing(self):
"""Test growing an ensemble with post-pruning with all gains >0."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs.
      # Both gains are positive; the first feature has the larger gain.
feature_ids = [3, 4]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143]], dtype=np.float32)
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([0.63], dtype=np.float32)
feature2_thresholds = np.array([23], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24]], dtype=np.float32)
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=1,
feature_ids=feature_ids,
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
])
session.run(grow_op)
# Expect the split from the first feature to be chosen.
# Pruning got triggered but changed nothing.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 3
threshold: 52
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: -4.375
}
}
nodes {
leaf {
scalar: 7.143
}
}
}
trees {
nodes {
leaf {
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
@test_util.run_deprecated_v1
def testPostPruningChangesNothingMultiClass(self):
"""Test growing an ensemble with post-pruning with all gains >0."""
with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Prepare inputs.
logits_dimension = 2
      # Both gains are positive; the first feature has the larger gain.
feature_ids = [3, 4]
feature1_nodes = np.array([0], dtype=np.int32)
feature1_gains = np.array([7.62], dtype=np.float32)
feature1_dimensions = np.array([0], dtype=np.int32)
feature1_thresholds = np.array([52], dtype=np.int32)
feature1_left_node_contribs = np.array([[-4.375, 2.18]], dtype=np.float32)
feature1_right_node_contribs = np.array([[7.143, -0.40]],
dtype=np.float32)
feature1_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
feature2_nodes = np.array([0], dtype=np.int32)
feature2_gains = np.array([0.63], dtype=np.float32)
feature2_dimensions = np.array([0], dtype=np.int32)
feature2_thresholds = np.array([23], dtype=np.int32)
feature2_left_node_contribs = np.array([[-0.6, 1.11]], dtype=np.float32)
feature2_right_node_contribs = np.array([[0.24, -2.01]], dtype=np.float32)
feature2_split_types = np.array([_INEQUALITY_DEFAULT_LEFT])
# Grow tree ensemble.
grow_op = boosted_trees_ops.update_ensemble_v2(
tree_ensemble_handle,
learning_rate=1.0,
pruning_mode=boosted_trees_ops.PruningMode.POST_PRUNING,
max_depth=1,
feature_ids=feature_ids,
dimension_ids=[feature1_dimensions, feature2_dimensions],
node_ids=[feature1_nodes, feature2_nodes],
gains=[feature1_gains, feature2_gains],
thresholds=[feature1_thresholds, feature2_thresholds],
left_node_contribs=[
feature1_left_node_contribs, feature2_left_node_contribs
],
right_node_contribs=[
feature1_right_node_contribs, feature2_right_node_contribs
],
split_types=[feature1_split_types, feature2_split_types],
logits_dimension=logits_dimension)
session.run(grow_op)
# Expect the split from the first feature to be chosen.
# Pruning got triggered but changed nothing.
new_stamp, serialized = session.run(tree_ensemble.serialize())
res_ensemble = boosted_trees_pb2.TreeEnsemble()
res_ensemble.ParseFromString(serialized)
expected_result = """
trees {
nodes {
bucketized_split {
feature_id: 3
threshold: 52
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
vector {
value: 0.0
value: 0.0
}
}
}
}
nodes {
leaf {
vector {
value: -4.375
}
vector {
value: 2.18
}
}
}
nodes {
leaf {
vector {
value: 7.143
}
vector {
value: -0.40
}
}
}
}
trees {
nodes {
leaf {
vector {
value: 0
value: 0
}
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
last_layer_node_start: 0
last_layer_node_end: 1
}
"""
self.assertEqual(new_stamp, 1)
self.assertProtoEquals(expected_result, res_ensemble)
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
TobiasNils/python-seabreeze | seabreeze/pyseabreeze/interfaces/nonlinearity.py | 1 | 1329 |
from .common import SeaBreezeError, get_pyseabreeze_decorator
from .eeprom import EEPromFeature
from .communication import USBCommOBP
import struct
convert_exceptions = get_pyseabreeze_decorator('interfaces.nonlinearity')
class NonlinearityCoefficientsEEPromFeature(EEPromFeature):
def has_nonlinearity_coeffs_feature(self):
return [True]
@convert_exceptions("Error when reading nonlinearity coeffs.")
def get_nonlinearity_coeffs(self):
        # The spectrometers store the nonlinearity coefficients in slots 6..13
        # (slot 14 holds the polynomial order).
coeffs = []
order = int(float(self.read_eeprom_slot(14)))
for i in range(6, 6 + order + 1):
coeffs.append(float(self.read_eeprom_slot(i)))
return coeffs
class NonlinearityCoefficientsOBPFeature(USBCommOBP):
def has_nonlinearity_coeffs_feature(self):
return [True]
@convert_exceptions("Error when reading nonlinearity coeffs.")
def get_nonlinearity_coeffs(self):
# get number of nonlinearity coefficients
data = self.query(0x00181100, "")
N = struct.unpack("<B", data)[0]
# now query the coefficients
coeffs = []
for i in range(N):
data = self.query(0x00181101, struct.pack("<B", i))
coeffs.append(struct.unpack("<f", data)[0])
return coeffs
| mit |
sunze/py_flask | venv/lib/python3.4/site-packages/kombu/compression.py | 32 | 1986 | """
kombu.compression
=================
Compression utilities.
"""
from __future__ import absolute_import
from kombu.utils.encoding import ensure_bytes
import zlib
_aliases = {}
_encoders = {}
_decoders = {}
__all__ = ['register', 'encoders', 'get_encoder',
'get_decoder', 'compress', 'decompress']
def register(encoder, decoder, content_type, aliases=[]):
"""Register new compression method.
:param encoder: Function used to compress text.
:param decoder: Function used to decompress previously compressed text.
:param content_type: The mime type this compression method identifies as.
:param aliases: A list of names to associate with this compression method.
"""
_encoders[content_type] = encoder
_decoders[content_type] = decoder
_aliases.update((alias, content_type) for alias in aliases)
def encoders():
"""Return a list of available compression methods."""
return list(_encoders)
def get_encoder(t):
"""Get encoder by alias name."""
t = _aliases.get(t, t)
return _encoders[t], t
def get_decoder(t):
"""Get decoder by alias name."""
return _decoders[_aliases.get(t, t)]
def compress(body, content_type):
"""Compress text.
:param body: The text to compress.
:param content_type: mime-type of compression method to use.
"""
encoder, content_type = get_encoder(content_type)
return encoder(ensure_bytes(body)), content_type
def decompress(body, content_type):
"""Decompress compressed text.
:param body: Previously compressed text to uncompress.
:param content_type: mime-type of compression method used.
"""
return get_decoder(content_type)(body)
register(zlib.compress,
zlib.decompress,
'application/x-gzip', aliases=['gzip', 'zlib'])
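# A minimal usage sketch (illustrative only; exercises the zlib codec
# registered above):
#
#     payload, content_type = compress('hello world', 'zlib')
#     assert decompress(payload, content_type) == b'hello world'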
try:
import bz2
except ImportError:
pass # Jython?
else:
register(bz2.compress,
bz2.decompress,
'application/x-bz2', aliases=['bzip2', 'bzip'])
| mit |
damix911/flask-bees | bees/bees.py | 1 | 8469 | from flask import jsonify, request, Response
from pkg_resources import resource_string
from jinja2 import Template
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
_sessions = {}
def _bind(func):
def f():
try:
data = request.get_data()
if data:
i = request.get_json()
else:
i = {}
if not i:
i = {}
i["method"] = request.method
o = func(i)
if not o:
o = {}
o["error"] = False
return jsonify(o)
except Exception as e:
import traceback
print(traceback.format_exc())
msg = str(e)
code = ''.join(e for e in msg.lower() if e.isalnum() or e.isspace())
code = code.replace(" ", "_")
return jsonify({"error": True, "errorMessage": msg, "errorCode": code})
return f
class Bees(object):
def __init__(self, app, base):
self.app = app
self.base = base
self.names = []
@app.route(self.base + "api.js")
def js():
up = urlparse(request.url)
application = "%s://%s" % (up.scheme, up.netloc)
return Response(self.js_client(application), mimetype="text/plain")
@app.route(self.base + "api.py")
def py():
up = urlparse(request.url)
application = "%s://%s" % (up.scheme, up.netloc)
return Response(self.py_client(application), mimetype="text/plain")
@app.route(self.base + "api_corona.lua")
def corona_lua():
up = urlparse(request.url)
application = "%s://%s" % (up.scheme, up.netloc)
return Response(self.corona_lua_client(application), mimetype="text/plain")
@app.route(self.base + "sdk")
def sdk():
data = resource_string(__name__, "resources/api-list.html")
src = bytes.decode(data, "utf-8")
t = Template(src)
s = t.render(names = self.names)
return Response(s)
@self.publish("meta_clients")
def meta_clients(i):
o = {}
o["platforms"] = {}
o["platforms"]["javascript"] = self.base + "api.js"
o["platforms"]["python"] = self.base + "api.py"
o["platforms"]["corona_lua"] = self.base + "api_corona.lua"
return o
def publish(self, ep):
def deco(func):
@self.app.route(self.base + ep + ":docs", methods = ["GET"], endpoint = ep + ":docs")
def f():
if func.__doc__:
return "<h1>%s</h1><div class=\"docs\">%s</div>" % (ep, func.__doc__)
else:
return "<h1>%s</h1><div class=\"docs\">%s</div>" % (ep, "No documentation attached.")
@self.app.route(self.base + ep + ":examples.javascript", methods = ["GET"], endpoint = ep + ":examples.javascript")
def f():
return "var x = 10;"
@self.app.route(self.base + ep + ":examples.python", methods = ["GET"], endpoint = ep + ":examples.python")
def f():
return "x = 10"
@self.app.route(self.base + ep + ":examples.corona_lua", methods = ["GET"], endpoint = ep + ":examples.corona_lua")
def f():
return "local x = 10"
self.names.append(ep)
tmp = _bind(func)
r = self.app.route(self.base + ep, methods = ["GET", "POST", "PUT", "DELETE"], endpoint = ep)
return r(tmp)
return deco
def js_client(self, app_url):
src = ""
src += "var __application = \"%s\";\n" % app_url
src += "var __base = \"%s\";\n" % self.base
src += """
function __ajax(method, url, data, onHttpSuccess, onHttpFail) {
var xhr = new XMLHttpRequest();
xhr.open(method, url);
xhr.onreadystatechange = function () {
if (xhr.readyState == 4) {
if (xhr.status == 200) {
onHttpSuccess(JSON.parse(xhr.responseText));
}
else {
onHttpFail(xhr.status);
}
}
};
xhr.setRequestHeader("Content-Type", "application/json");
xhr.send(JSON.stringify(data));
}
function __bees(method, relUrl, data, onSuccess, onFail) {
__ajax(method, __application + __base + relUrl, data, function (d) {
if (!d.error)
onSuccess(d);
else
onFail(d.errorCode, d.errorMessage);
},
function (status) {
onFail("http_" + status, "An HTTP error has occurred.");
});
}
function __Promise() {
this.success = function (s) {
this._onSuccess = s;
return this;
};
this.fail = function (f) {
this._onFail = f;
return this;
};
}
"""
src += "var api = {};\n"
src += "api.session = '';\n"
src += "api.onsessionchanged = null;\n"
for name in self.names:
jsName = name
pyName = name
src += """
api.%s = function (i) {
var promise = new __Promise();
if (!i) {
i = {};
}
if (api.session != '') {
i.session = api.session;
}
__bees("POST", "%s", i, function (data) {
if ('session' in data) {
api.session = data.session;
if (api.onsessionchanged)
api.onsessionchanged(api.session);
}
if (promise._onSuccess)
promise._onSuccess(data, i);
},
function (errorCode, errorMessage) {
if (promise._onFail)
promise._onFail(errorCode, errorMessage, i);
});
return promise;
};
""" % (jsName, pyName)
return src
def py_client(self, app_url):
src = ""
src += """
import requests
import json
"""
src += "\n"
src += "application = \"%s\"\n" % app_url
src += "base = \"%s\"\n" % self.base
src += "session = ''\n"
src += "onsessionchanged = None\n"
for name in self.names:
pyName = name
epName = name
src += """
def %s(i = None):
global application
global base
global session
global onsessionchanged
if not i:
i = {}
if session != "":
i["session"] = session
o = requests.post(application + base + \"%s\", data=json.dumps(i), headers={"Content-Type": "application/json"}).json()
if "session" in o:
session = o["session"]
if onsessionchanged:
onsessionchanged(session)
return o
""" % (pyName, epName)
return src
def corona_lua_client(self, app_url):
src = ""
src += "local api = {}\n"
src += "api.application = \"%s\"\n" % app_url
src += "api.base = \"%s\"\n" % self.base
src += "api.session = ''\n"
src += "api.onsessionchanged = None\n"
for name in self.names:
luaName = name
epName = name
src += """
function api.%s(i)
if not i then
i = {}
end
if api.session != "" then
i["session"] = api.session
end
--[[ !!!TODO!!!
o = requests.post(application + base + \"%s\", data=json.dumps(i), headers={"Content-Type": "application/json"}).json()
if "session" in o:
session = o["session"]
if onsessionchanged:
onsessionchanged(session)
return o
]]--
end
""" % (luaName, epName)
src += "return api\n"
return src
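# A minimal wiring sketch (hypothetical app and endpoint names, not part of
# this module):
#
#     from flask import Flask
#     app = Flask(__name__)
#     api = Bees(app, "/api/")
#
#     @api.publish("echo")
#     def echo(i):
#         return {"echo": i.get("message", "")}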
def new_session(permissions):
import random, string
s = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
_sessions[s] = {}
_sessions[s]["permissions"] = permissions[:]
return s
def del_session(i):
if i and "session" in i:
if i["session"] != "":
session = i["session"]
del _sessions[session]
def get_session(i):
session = None
if i and "session" in i:
s = i["session"]
if s in _sessions:
session = _sessions[s]
return session
def get_active_sessions():
return _sessions
def has_one_of(i, permissions):
if len(permissions) == 0:
raise Exception("At least one permission should be specified.")
session = get_session(i)
if not session:
return False
for needed in permissions:
for actual in session["permissions"]:
if needed == actual:
return True
def needs_one_of(i, permissions):
if not has_one_of(i, permissions):
raise Exception("Unauthorized.")
| mit |
TinLe/Diamond | src/collectors/udp/udp.py | 30 | 2659 | # coding=utf-8
"""
The UDPCollector class collects metrics on UDP stats (surprise!)
#### Dependencies
* /proc/net/snmp
"""
import diamond.collector
import os
class UDPCollector(diamond.collector.Collector):
PROC = [
'/proc/net/snmp'
]
def process_config(self):
super(UDPCollector, self).process_config()
if self.config['allowed_names'] is None:
self.config['allowed_names'] = []
def get_default_config_help(self):
config_help = super(UDPCollector, self).get_default_config_help()
config_help.update({
'allowed_names': 'list of entries to collect, empty to collect all',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UDPCollector, self).get_default_config()
config.update({
'path': 'udp',
'allowed_names': 'InDatagrams, NoPorts, '
+ 'InErrors, OutDatagrams, RcvbufErrors, SndbufErrors'
})
return config
def collect(self):
metrics = {}
for filepath in self.PROC:
if not os.access(filepath, os.R_OK):
self.log.error('Permission to access %s denied', filepath)
continue
header = ''
data = ''
            # Seek the file for the lines that start with Udp
file = open(filepath)
if not file:
self.log.error('Failed to open %s', filepath)
continue
while True:
line = file.readline()
# Reached EOF?
if len(line) == 0:
break
# Line has metrics?
if line.startswith("Udp"):
header = line
data = file.readline()
break
file.close()
# No data from the file?
if header == '' or data == '':
self.log.error('%s has no lines with Udp', filepath)
continue
header = header.split()
data = data.split()
for i in xrange(1, len(header)):
metrics[header[i]] = data[i]
for metric_name in metrics.keys():
if (len(self.config['allowed_names']) > 0
and metric_name not in self.config['allowed_names']):
continue
value = metrics[metric_name]
value = self.derivative(metric_name, long(value))
# Publish the metric
self.publish(metric_name, value, 0)
| mit |
saurabh6790/pow-app | setup/doctype/email_settings/email_settings.py | 29 | 2140 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint
class DocType:
def __init__(self,doc,doclist):
self.doc,self.doclist = doc,doclist
def validate(self):
"""Checks connectivity to email servers before saving"""
self.validate_outgoing()
self.validate_incoming()
def validate_outgoing(self):
"""Checks incoming email settings"""
self.doc.encode()
if self.doc.outgoing_mail_server:
from webnotes.utils import cint
from webnotes.utils.email_lib.smtp import SMTPServer
smtpserver = SMTPServer(login = self.doc.mail_login,
password = self.doc.mail_password,
server = self.doc.outgoing_mail_server,
port = cint(self.doc.mail_port),
use_ssl = self.doc.use_ssl
)
# exceptions are handled in session connect
sess = smtpserver.sess
def validate_incoming(self):
"""
Checks support ticket email settings
"""
if self.doc.sync_support_mails and self.doc.support_host:
from webnotes.utils.email_lib.receive import POP3Mailbox
from webnotes.model.doc import Document
import _socket, poplib
inc_email = Document('Incoming Email Settings')
inc_email.encode()
inc_email.host = self.doc.support_host
inc_email.use_ssl = self.doc.support_use_ssl
try:
err_msg = 'User Name or Support Password missing. Please enter and try again.'
if not (self.doc.support_username and self.doc.support_password):
raise AttributeError, err_msg
inc_email.username = self.doc.support_username
inc_email.password = self.doc.support_password
except AttributeError, e:
webnotes.msgprint(err_msg)
raise
pop_mb = POP3Mailbox(inc_email)
try:
pop_mb.connect()
except _socket.error, e:
# Invalid mail server -- due to refusing connection
webnotes.msgprint('Invalid POP3 Mail Server. Please rectify and try again.')
raise
except poplib.error_proto, e:
webnotes.msgprint('Invalid User Name or Support Password. Please rectify and try again.')
raise
| agpl-3.0 |
etuna-SBF-kog/Stadsparken | env/lib/python2.7/site-packages/django/core/cache/backends/dummy.py | 209 | 1229 | "Dummy cache backend"
from django.core.cache.backends.base import BaseCache
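# Selected via settings, e.g. (illustrative):
#
#     CACHES = {'default': {
#         'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}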
class DummyCache(BaseCache):
def __init__(self, host, *args, **kwargs):
BaseCache.__init__(self, *args, **kwargs)
def add(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return True
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return default
def set(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
def get_many(self, keys, version=None):
return {}
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return False
def set_many(self, data, timeout=0, version=None):
pass
def delete_many(self, keys, version=None):
pass
def clear(self):
pass
# For backwards compatibility
class CacheClass(DummyCache):
pass
| gpl-3.0 |
kustomzone/Rusthon | regtests/c++/cyclic.py | 5 | 1252 | '''
detect cyclic parent/child, and insert weakref
'''
class Parent:
def __init__(self, y:int, children:[]Child ):
self.children = children
self.y = y
def create_child(self, x:int, parent:Parent) ->Child:
child = Child(x, parent)
self.children.push_back( child )
return child
def say(self, msg:string):
print(msg)
class Child:
def __init__(self, x:int, parent:Parent ):
self.x = x
self.parent = parent
def foo(self) ->int:
'''
		It is also valid to use `par = self.parent`,
		but it is clearer to use `weak.unwrap(self.parent)` as done below.
'''
par = weak.unwrap(self.parent)
if par is not None:
return self.x * par.y
else:
print('parent is gone..')
def bar(self):
'''
		Below, `self.parent` is used directly in expressions
		rather than first being assigned to a variable.
		Each use of `self.parent` promotes the weakref to a
		shared pointer that then falls out of scope,
		which is slower than the approach above.
'''
self.parent.say('hello parent')
print(self.parent.y)
def main():
#children = []Child(None,None)
children = []Child()
p = Parent( 1000, children )
print 'parent:', p
c1 = p.create_child(1, p)
c2 = p.create_child(2, p)
c3 = p.create_child(3, p)
print 'children:'
print c1
print c2
print c3 | bsd-3-clause |
OptiPop/external_chromium_org | build/android/pylib/base/base_test_runner.py | 36 | 7537 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for running tests on a single device."""
import logging
import time
from pylib import ports
from pylib.chrome_test_server_spawner import SpawningServer
from pylib.device import device_utils
from pylib.forwarder import Forwarder
from pylib.valgrind_tools import CreateTool
# TODO(frankf): Move this to pylib/utils
import lighttpd_server
# A file on device to store ports of net test server. The format of the file is
# test-spawner-server-port:test-server-port
NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
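# For example, a file containing "8001:8002" means the spawner listens on port
# 8001 and the test server itself on port 8002 (see
# _PushTestServerPortInfoToDevice below).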
class BaseTestRunner(object):
"""Base class for running tests on a single device."""
def __init__(self, device_serial, tool, push_deps=True,
cleanup_test_files=False):
"""
Args:
      device_serial: Tests will run on the device with this serial number.
tool: Name of the Valgrind tool.
push_deps: If True, push all dependencies to the device.
cleanup_test_files: Whether or not to cleanup test files on device.
"""
self.device_serial = device_serial
self.device = device_utils.DeviceUtils(device_serial)
self.tool = CreateTool(tool, self.device)
self._http_server = None
self._forwarder_device_port = 8000
self.forwarder_base_url = ('http://localhost:%d' %
self._forwarder_device_port)
self._spawning_server = None
# We will allocate port for test server spawner when calling method
# LaunchChromeTestServerSpawner and allocate port for test server when
# starting it in TestServerThread.
self.test_server_spawner_port = 0
self.test_server_port = 0
self._push_deps = push_deps
self._cleanup_test_files = cleanup_test_files
def _PushTestServerPortInfoToDevice(self):
"""Pushes the latest port information to device."""
self.device.WriteFile(
self.device.GetExternalStoragePath() + '/' +
NET_TEST_SERVER_PORT_INFO_FILE,
'%d:%d' % (self.test_server_spawner_port, self.test_server_port))
def RunTest(self, test):
"""Runs a test. Needs to be overridden.
Args:
test: A test to run.
Returns:
Tuple containing:
(base_test_result.TestRunResults, tests to rerun or None)
"""
raise NotImplementedError
def InstallTestPackage(self):
"""Installs the test package once before all tests are run."""
pass
def PushDataDeps(self):
"""Push all data deps to device once before all tests are run."""
pass
def SetUp(self):
"""Run once before all tests are run."""
self.InstallTestPackage()
push_size_before = self.device.old_interface.GetPushSizeInfo()
if self._push_deps:
logging.warning('Pushing data files to device.')
self.PushDataDeps()
push_size_after = self.device.old_interface.GetPushSizeInfo()
logging.warning(
'Total data: %0.3fMB' %
((push_size_after[0] - push_size_before[0]) / float(2 ** 20)))
logging.warning(
'Total data transferred: %0.3fMB' %
((push_size_after[1] - push_size_before[1]) / float(2 ** 20)))
else:
logging.warning('Skipping pushing data to device.')
def TearDown(self):
"""Run once after all tests are run."""
self.ShutdownHelperToolsForTestSuite()
if self._cleanup_test_files:
self.device.old_interface.RemovePushedFiles()
def LaunchTestHttpServer(self, document_root, port=None,
extra_config_contents=None):
"""Launches an HTTP server to serve HTTP tests.
Args:
document_root: Document root of the HTTP server.
port: port on which we want to the http server bind.
extra_config_contents: Extra config contents for the HTTP server.
"""
self._http_server = lighttpd_server.LighttpdServer(
document_root, port=port, extra_config_contents=extra_config_contents)
if self._http_server.StartupHttpServer():
logging.info('http server started: http://localhost:%s',
self._http_server.port)
else:
logging.critical('Failed to start http server')
self._ForwardPortsForHttpServer()
return (self._forwarder_device_port, self._http_server.port)
def _ForwardPorts(self, port_pairs):
"""Forwards a port."""
Forwarder.Map(port_pairs, self.device, self.tool)
def _UnmapPorts(self, port_pairs):
"""Unmap previously forwarded ports."""
for (device_port, _) in port_pairs:
Forwarder.UnmapDevicePort(device_port, self.device)
# Deprecated: Use ForwardPorts instead.
def StartForwarder(self, port_pairs):
"""Starts TCP traffic forwarding for the given |port_pairs|.
Args:
      port_pairs: A list of (device_port, local_port) tuples to forward.
"""
self._ForwardPorts(port_pairs)
def _ForwardPortsForHttpServer(self):
"""Starts a forwarder for the HTTP server.
The forwarder forwards HTTP requests and responses between host and device.
"""
self._ForwardPorts([(self._forwarder_device_port, self._http_server.port)])
def _RestartHttpServerForwarderIfNecessary(self):
"""Restarts the forwarder if it's not open."""
# Checks to see if the http server port is being used. If not forwards the
# request.
# TODO(dtrainor): This is not always reliable because sometimes the port
# will be left open even after the forwarder has been killed.
if not ports.IsDevicePortUsed(self.device, self._forwarder_device_port):
self._ForwardPortsForHttpServer()
def ShutdownHelperToolsForTestSuite(self):
"""Shuts down the server and the forwarder."""
if self._http_server:
self._UnmapPorts([(self._forwarder_device_port, self._http_server.port)])
self._http_server.ShutdownHttpServer()
if self._spawning_server:
self._spawning_server.Stop()
def CleanupSpawningServerState(self):
"""Tells the spawning server to clean up any state.
If the spawning server is reused for multiple tests, this should be called
after each test to prevent tests affecting each other.
"""
if self._spawning_server:
self._spawning_server.CleanupState()
def LaunchChromeTestServerSpawner(self):
"""Launches test server spawner."""
server_ready = False
error_msgs = []
# TODO(pliard): deflake this function. The for loop should be removed as
# well as IsHttpServerConnectable(). spawning_server.Start() should also
# block until the server is ready.
# Try 3 times to launch test spawner server.
for _ in xrange(0, 3):
self.test_server_spawner_port = ports.AllocateTestServerPort()
self._ForwardPorts(
[(self.test_server_spawner_port, self.test_server_spawner_port)])
self._spawning_server = SpawningServer(self.test_server_spawner_port,
self.device,
self.tool)
self._spawning_server.Start()
server_ready, error_msg = ports.IsHttpServerConnectable(
'127.0.0.1', self.test_server_spawner_port, path='/ping',
expected_read='ready')
if server_ready:
break
else:
error_msgs.append(error_msg)
self._spawning_server.Stop()
# Wait for 2 seconds then restart.
time.sleep(2)
if not server_ready:
logging.error(';'.join(error_msgs))
raise Exception('Can not start the test spawner server.')
self._PushTestServerPortInfoToDevice()
| bsd-3-clause |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/resources/types/ad_group_criterion.py | 1 | 19851 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.common.types import criteria
from google.ads.googleads.v7.common.types import custom_parameter
from google.ads.googleads.v7.enums.types import ad_group_criterion_approval_status
from google.ads.googleads.v7.enums.types import ad_group_criterion_status
from google.ads.googleads.v7.enums.types import bidding_source
from google.ads.googleads.v7.enums.types import criterion_system_serving_status
from google.ads.googleads.v7.enums.types import criterion_type
from google.ads.googleads.v7.enums.types import quality_score_bucket
__protobuf__ = proto.module(
package='google.ads.googleads.v7.resources',
marshal='google.ads.googleads.v7',
manifest={
'AdGroupCriterion',
},
)
class AdGroupCriterion(proto.Message):
r"""An ad group criterion.
Attributes:
resource_name (str):
Immutable. The resource name of the ad group criterion. Ad
group criterion resource names have the form:
``customers/{customer_id}/adGroupCriteria/{ad_group_id}~{criterion_id}``
criterion_id (int):
Output only. The ID of the criterion.
This field is ignored for mutates.
display_name (str):
Output only. The display name of the
criterion.
This field is ignored for mutates.
status (google.ads.googleads.v7.enums.types.AdGroupCriterionStatusEnum.AdGroupCriterionStatus):
The status of the criterion.
This is the status of the ad group criterion
entity, set by the client. Note: UI reports may
incorporate additional information that affects
whether a criterion is eligible to run. In some
cases a criterion that's REMOVED in the API can
still show as enabled in the UI. For example,
campaigns by default show to users of all age
ranges unless excluded. The UI will show each
age range as "enabled", since they're eligible
to see the ads; but AdGroupCriterion.status will
show "removed", since no positive criterion was
added.
quality_info (google.ads.googleads.v7.resources.types.AdGroupCriterion.QualityInfo):
Output only. Information regarding the
quality of the criterion.
ad_group (str):
Immutable. The ad group to which the
criterion belongs.
type_ (google.ads.googleads.v7.enums.types.CriterionTypeEnum.CriterionType):
Output only. The type of the criterion.
negative (bool):
Immutable. Whether to target (``false``) or exclude
(``true``) the criterion.
This field is immutable. To switch a criterion from positive
to negative, remove then re-add it.
system_serving_status (google.ads.googleads.v7.enums.types.CriterionSystemServingStatusEnum.CriterionSystemServingStatus):
Output only. Serving status of the criterion.
approval_status (google.ads.googleads.v7.enums.types.AdGroupCriterionApprovalStatusEnum.AdGroupCriterionApprovalStatus):
Output only. Approval status of the
criterion.
disapproval_reasons (Sequence[str]):
Output only. List of disapproval reasons of
the criterion.
The different reasons for disapproving a
criterion can be found here:
https://support.google.com/adspolicy/answer/6008942
This field is read-only.
labels (Sequence[str]):
Output only. The resource names of labels
attached to this ad group criterion.
bid_modifier (float):
The modifier for the bid when the criterion
matches. The modifier must be in the range: 0.1
- 10.0. Most targetable criteria types support
modifiers.
cpc_bid_micros (int):
The CPC (cost-per-click) bid.
cpm_bid_micros (int):
The CPM (cost-per-thousand viewable
impressions) bid.
cpv_bid_micros (int):
The CPV (cost-per-view) bid.
percent_cpc_bid_micros (int):
The CPC bid amount, expressed as a fraction of the
advertised price for some good or service. The valid range
for the fraction is [0,1) and the value stored here is
1,000,000 \* [fraction].
        effective_cpc_bid_micros (int):
            Output only. The effective CPC
            (cost-per-click) bid.
        effective_cpm_bid_micros (int):
            Output only. The effective CPM
            (cost-per-thousand viewable impressions) bid.
        effective_cpv_bid_micros (int):
            Output only. The effective CPV
            (cost-per-view) bid.
effective_percent_cpc_bid_micros (int):
Output only. The effective Percent CPC bid
amount.
effective_cpc_bid_source (google.ads.googleads.v7.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective CPC bid.
effective_cpm_bid_source (google.ads.googleads.v7.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective CPM bid.
effective_cpv_bid_source (google.ads.googleads.v7.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective CPV bid.
effective_percent_cpc_bid_source (google.ads.googleads.v7.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective Percent
CPC bid.
position_estimates (google.ads.googleads.v7.resources.types.AdGroupCriterion.PositionEstimates):
Output only. Estimates for criterion bids at
various positions.
final_urls (Sequence[str]):
The list of possible final URLs after all
cross-domain redirects for the ad.
final_mobile_urls (Sequence[str]):
The list of possible final mobile URLs after
all cross-domain redirects.
final_url_suffix (str):
URL template for appending params to final
URL.
tracking_url_template (str):
The URL template for constructing a tracking
URL.
url_custom_parameters (Sequence[google.ads.googleads.v7.common.types.CustomParameter]):
The list of mappings used to substitute custom parameter
tags in a ``tracking_url_template``, ``final_urls``, or
``mobile_final_urls``.
keyword (google.ads.googleads.v7.common.types.KeywordInfo):
Immutable. Keyword.
placement (google.ads.googleads.v7.common.types.PlacementInfo):
Immutable. Placement.
mobile_app_category (google.ads.googleads.v7.common.types.MobileAppCategoryInfo):
Immutable. Mobile app category.
mobile_application (google.ads.googleads.v7.common.types.MobileApplicationInfo):
Immutable. Mobile application.
listing_group (google.ads.googleads.v7.common.types.ListingGroupInfo):
Immutable. Listing group.
age_range (google.ads.googleads.v7.common.types.AgeRangeInfo):
Immutable. Age range.
gender (google.ads.googleads.v7.common.types.GenderInfo):
Immutable. Gender.
income_range (google.ads.googleads.v7.common.types.IncomeRangeInfo):
Immutable. Income range.
parental_status (google.ads.googleads.v7.common.types.ParentalStatusInfo):
Immutable. Parental status.
user_list (google.ads.googleads.v7.common.types.UserListInfo):
Immutable. User List.
youtube_video (google.ads.googleads.v7.common.types.YouTubeVideoInfo):
Immutable. YouTube Video.
youtube_channel (google.ads.googleads.v7.common.types.YouTubeChannelInfo):
Immutable. YouTube Channel.
topic (google.ads.googleads.v7.common.types.TopicInfo):
Immutable. Topic.
user_interest (google.ads.googleads.v7.common.types.UserInterestInfo):
Immutable. User Interest.
webpage (google.ads.googleads.v7.common.types.WebpageInfo):
Immutable. Webpage
app_payment_model (google.ads.googleads.v7.common.types.AppPaymentModelInfo):
Immutable. App Payment Model.
custom_affinity (google.ads.googleads.v7.common.types.CustomAffinityInfo):
Immutable. Custom Affinity.
custom_intent (google.ads.googleads.v7.common.types.CustomIntentInfo):
Immutable. Custom Intent.
custom_audience (google.ads.googleads.v7.common.types.CustomAudienceInfo):
Immutable. Custom Audience.
combined_audience (google.ads.googleads.v7.common.types.CombinedAudienceInfo):
Immutable. Combined Audience.
"""
class QualityInfo(proto.Message):
r"""A container for ad group criterion quality information.
Attributes:
quality_score (int):
Output only. The quality score.
This field may not be populated if Google does
not have enough information to determine a
value.
creative_quality_score (google.ads.googleads.v7.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
Output only. The performance of the ad
compared to other advertisers.
post_click_quality_score (google.ads.googleads.v7.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
Output only. The quality score of the landing
page.
search_predicted_ctr (google.ads.googleads.v7.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
Output only. The click-through rate compared
to that of other advertisers.
"""
quality_score = proto.Field(
proto.INT32,
number=5,
optional=True,
)
creative_quality_score = proto.Field(
proto.ENUM,
number=2,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
post_click_quality_score = proto.Field(
proto.ENUM,
number=3,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
search_predicted_ctr = proto.Field(
proto.ENUM,
number=4,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
class PositionEstimates(proto.Message):
r"""Estimates for criterion bids at various positions.
Attributes:
first_page_cpc_micros (int):
Output only. The estimate of the CPC bid
required for ad to be shown on first page of
search results.
first_position_cpc_micros (int):
Output only. The estimate of the CPC bid
required for ad to be displayed in first
position, at the top of the first page of search
results.
top_of_page_cpc_micros (int):
Output only. The estimate of the CPC bid
required for ad to be displayed at the top of
the first page of search results.
estimated_add_clicks_at_first_position_cpc (int):
Output only. Estimate of how many clicks per week you might
get by changing your keyword bid to the value in
first_position_cpc_micros.
estimated_add_cost_at_first_position_cpc (int):
Output only. Estimate of how your cost per week might change
when changing your keyword bid to the value in
first_position_cpc_micros.
"""
first_page_cpc_micros = proto.Field(
proto.INT64,
number=6,
optional=True,
)
first_position_cpc_micros = proto.Field(
proto.INT64,
number=7,
optional=True,
)
top_of_page_cpc_micros = proto.Field(
proto.INT64,
number=8,
optional=True,
)
estimated_add_clicks_at_first_position_cpc = proto.Field(
proto.INT64,
number=9,
optional=True,
)
estimated_add_cost_at_first_position_cpc = proto.Field(
proto.INT64,
number=10,
optional=True,
)
resource_name = proto.Field(
proto.STRING,
number=1,
)
criterion_id = proto.Field(
proto.INT64,
number=56,
optional=True,
)
display_name = proto.Field(
proto.STRING,
number=77,
)
status = proto.Field(
proto.ENUM,
number=3,
enum=ad_group_criterion_status.AdGroupCriterionStatusEnum.AdGroupCriterionStatus,
)
quality_info = proto.Field(
proto.MESSAGE,
number=4,
message=QualityInfo,
)
ad_group = proto.Field(
proto.STRING,
number=57,
optional=True,
)
type_ = proto.Field(
proto.ENUM,
number=25,
enum=criterion_type.CriterionTypeEnum.CriterionType,
)
negative = proto.Field(
proto.BOOL,
number=58,
optional=True,
)
system_serving_status = proto.Field(
proto.ENUM,
number=52,
enum=criterion_system_serving_status.CriterionSystemServingStatusEnum.CriterionSystemServingStatus,
)
approval_status = proto.Field(
proto.ENUM,
number=53,
enum=ad_group_criterion_approval_status.AdGroupCriterionApprovalStatusEnum.AdGroupCriterionApprovalStatus,
)
disapproval_reasons = proto.RepeatedField(
proto.STRING,
number=59,
)
labels = proto.RepeatedField(
proto.STRING,
number=60,
)
bid_modifier = proto.Field(
proto.DOUBLE,
number=61,
optional=True,
)
cpc_bid_micros = proto.Field(
proto.INT64,
number=62,
optional=True,
)
cpm_bid_micros = proto.Field(
proto.INT64,
number=63,
optional=True,
)
cpv_bid_micros = proto.Field(
proto.INT64,
number=64,
optional=True,
)
percent_cpc_bid_micros = proto.Field(
proto.INT64,
number=65,
optional=True,
)
effective_cpc_bid_micros = proto.Field(
proto.INT64,
number=66,
optional=True,
)
effective_cpm_bid_micros = proto.Field(
proto.INT64,
number=67,
optional=True,
)
effective_cpv_bid_micros = proto.Field(
proto.INT64,
number=68,
optional=True,
)
effective_percent_cpc_bid_micros = proto.Field(
proto.INT64,
number=69,
optional=True,
)
effective_cpc_bid_source = proto.Field(
proto.ENUM,
number=21,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
effective_cpm_bid_source = proto.Field(
proto.ENUM,
number=22,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
effective_cpv_bid_source = proto.Field(
proto.ENUM,
number=23,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
effective_percent_cpc_bid_source = proto.Field(
proto.ENUM,
number=35,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
position_estimates = proto.Field(
proto.MESSAGE,
number=10,
message=PositionEstimates,
)
final_urls = proto.RepeatedField(
proto.STRING,
number=70,
)
final_mobile_urls = proto.RepeatedField(
proto.STRING,
number=71,
)
final_url_suffix = proto.Field(
proto.STRING,
number=72,
optional=True,
)
tracking_url_template = proto.Field(
proto.STRING,
number=73,
optional=True,
)
url_custom_parameters = proto.RepeatedField(
proto.MESSAGE,
number=14,
message=custom_parameter.CustomParameter,
)
keyword = proto.Field(
proto.MESSAGE,
number=27,
oneof='criterion',
message=criteria.KeywordInfo,
)
placement = proto.Field(
proto.MESSAGE,
number=28,
oneof='criterion',
message=criteria.PlacementInfo,
)
mobile_app_category = proto.Field(
proto.MESSAGE,
number=29,
oneof='criterion',
message=criteria.MobileAppCategoryInfo,
)
mobile_application = proto.Field(
proto.MESSAGE,
number=30,
oneof='criterion',
message=criteria.MobileApplicationInfo,
)
listing_group = proto.Field(
proto.MESSAGE,
number=32,
oneof='criterion',
message=criteria.ListingGroupInfo,
)
age_range = proto.Field(
proto.MESSAGE,
number=36,
oneof='criterion',
message=criteria.AgeRangeInfo,
)
gender = proto.Field(
proto.MESSAGE,
number=37,
oneof='criterion',
message=criteria.GenderInfo,
)
income_range = proto.Field(
proto.MESSAGE,
number=38,
oneof='criterion',
message=criteria.IncomeRangeInfo,
)
parental_status = proto.Field(
proto.MESSAGE,
number=39,
oneof='criterion',
message=criteria.ParentalStatusInfo,
)
user_list = proto.Field(
proto.MESSAGE,
number=42,
oneof='criterion',
message=criteria.UserListInfo,
)
youtube_video = proto.Field(
proto.MESSAGE,
number=40,
oneof='criterion',
message=criteria.YouTubeVideoInfo,
)
youtube_channel = proto.Field(
proto.MESSAGE,
number=41,
oneof='criterion',
message=criteria.YouTubeChannelInfo,
)
topic = proto.Field(
proto.MESSAGE,
number=43,
oneof='criterion',
message=criteria.TopicInfo,
)
user_interest = proto.Field(
proto.MESSAGE,
number=45,
oneof='criterion',
message=criteria.UserInterestInfo,
)
webpage = proto.Field(
proto.MESSAGE,
number=46,
oneof='criterion',
message=criteria.WebpageInfo,
)
app_payment_model = proto.Field(
proto.MESSAGE,
number=47,
oneof='criterion',
message=criteria.AppPaymentModelInfo,
)
custom_affinity = proto.Field(
proto.MESSAGE,
number=48,
oneof='criterion',
message=criteria.CustomAffinityInfo,
)
custom_intent = proto.Field(
proto.MESSAGE,
number=49,
oneof='criterion',
message=criteria.CustomIntentInfo,
)
custom_audience = proto.Field(
proto.MESSAGE,
number=74,
oneof='criterion',
message=criteria.CustomAudienceInfo,
)
combined_audience = proto.Field(
proto.MESSAGE,
number=75,
oneof='criterion',
message=criteria.CombinedAudienceInfo,
)
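# Illustrative client-side sketch (hypothetical code, not part of this
# generated module). The fields above declared with oneof='criterion' are
# mutually exclusive: assigning to one member clears whichever member was
# previously set. Assuming the enclosing message is AdGroupCriterion:
#
#     criterion = AdGroupCriterion()
#     criterion.keyword.text = "running shoes"   # selects the keyword member
#     criterion.placement.url = "example.com"    # clears keyword, selects placement
#
# The attribute values here are made up for illustration only.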
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
flh/odoo | addons/website/models/ir_qweb.py | 26 | 18823 | # -*- coding: utf-8 -*-
"""
Website-context rendering needs to add some metadata to rendered fields,
as well as render a few fields differently.
Also, adds methods to convert values back to openerp models.
"""
import cStringIO
import datetime
import itertools
import logging
import os
import urllib2
import urlparse
import re
import pytz
import werkzeug.urls
import werkzeug.utils
from dateutil import parser
from lxml import etree, html
from PIL import Image as I
import openerp.modules
import openerp
from openerp.osv import orm, fields
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html_escape as escape
from openerp.addons.web.http import request
from openerp.addons.base.ir import ir_qweb
REMOTE_CONNECTION_TIMEOUT = 2.5
logger = logging.getLogger(__name__)
class QWeb(orm.AbstractModel):
""" QWeb object for rendering stuff in the website context
"""
_name = 'website.qweb'
_inherit = 'ir.qweb'
URL_ATTRS = {
'form': 'action',
'a': 'href',
}
def add_template(self, qcontext, name, node):
# preprocessing for multilang static urls
if request.website:
for tag, attr in self.URL_ATTRS.iteritems():
for e in node.iterdescendants(tag=tag):
url = e.get(attr)
if url:
e.set(attr, qcontext.get('url_for')(url))
super(QWeb, self).add_template(qcontext, name, node)
def render_att_att(self, element, attribute_name, attribute_value, qwebcontext):
att, val = super(QWeb, self).render_att_att(element, attribute_name, attribute_value, qwebcontext)
if request.website and att == self.URL_ATTRS.get(element.tag) and isinstance(val, basestring):
val = qwebcontext.get('url_for')(val)
return att, val
def get_converter_for(self, field_type):
return self.pool.get(
'website.qweb.field.' + field_type,
self.pool['website.qweb.field'])
class Field(orm.AbstractModel):
_name = 'website.qweb.field'
_inherit = 'ir.qweb.field'
def attributes(self, cr, uid, field_name, record, options,
source_element, g_att, t_att, qweb_context, context=None):
if options is None: options = {}
column = record._model._all_columns[field_name].column
attrs = [('data-oe-translate', 1 if column.translate else 0)]
placeholder = options.get('placeholder') \
or source_element.get('placeholder') \
or getattr(column, 'placeholder', None)
if placeholder:
attrs.append(('placeholder', placeholder))
return itertools.chain(
super(Field, self).attributes(cr, uid, field_name, record, options,
source_element, g_att, t_att,
qweb_context, context=context),
attrs
)
def value_from_string(self, value):
return value
def from_html(self, cr, uid, model, column, element, context=None):
return self.value_from_string(element.text_content().strip())
def qweb_object(self):
return self.pool['website.qweb']
class Integer(orm.AbstractModel):
_name = 'website.qweb.field.integer'
_inherit = ['website.qweb.field']
value_from_string = int
class Float(orm.AbstractModel):
_name = 'website.qweb.field.float'
_inherit = ['website.qweb.field', 'ir.qweb.field.float']
def from_html(self, cr, uid, model, column, element, context=None):
lang = self.user_lang(cr, uid, context=context)
value = element.text_content().strip()
return float(value.replace(lang.thousands_sep, '')
.replace(lang.decimal_point, '.'))
def parse_fuzzy(in_format, value):
day_first = in_format.find('%d') < in_format.find('%m')
if '%y' in in_format:
year_first = in_format.find('%y') < in_format.find('%d')
else:
year_first = in_format.find('%Y') < in_format.find('%d')
return parser.parse(value, dayfirst=day_first, yearfirst=year_first)
class Date(orm.AbstractModel):
_name = 'website.qweb.field.date'
_inherit = ['website.qweb.field', 'ir.qweb.field.date']
def attributes(self, cr, uid, field_name, record, options,
source_element, g_att, t_att, qweb_context,
context=None):
attrs = super(Date, self).attributes(
cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)
return itertools.chain(attrs, [('data-oe-original', record[field_name])])
def from_html(self, cr, uid, model, column, element, context=None):
value = element.text_content().strip()
if not value: return False
datetime.datetime.strptime(value, DEFAULT_SERVER_DATE_FORMAT)
return value
class DateTime(orm.AbstractModel):
_name = 'website.qweb.field.datetime'
_inherit = ['website.qweb.field', 'ir.qweb.field.datetime']
def attributes(self, cr, uid, field_name, record, options,
source_element, g_att, t_att, qweb_context,
context=None):
value = record[field_name]
if isinstance(value, basestring):
value = datetime.datetime.strptime(
value, DEFAULT_SERVER_DATETIME_FORMAT)
if value:
# convert from UTC (server timezone) to user timezone
value = fields.datetime.context_timestamp(
cr, uid, timestamp=value, context=context)
value = value.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
attrs = super(DateTime, self).attributes(
cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)
return itertools.chain(attrs, [
('data-oe-original', value)
])
def from_html(self, cr, uid, model, column, element, context=None):
if context is None: context = {}
value = element.text_content().strip()
if not value: return False
# parse from string to datetime
dt = datetime.datetime.strptime(value, DEFAULT_SERVER_DATETIME_FORMAT)
# convert back from user's timezone to UTC
tz_name = context.get('tz') \
or self.pool['res.users'].read(cr, openerp.SUPERUSER_ID, uid, ['tz'], context=context)['tz']
if tz_name:
try:
user_tz = pytz.timezone(tz_name)
utc = pytz.utc
dt = user_tz.localize(dt).astimezone(utc)
except Exception:
logger.warn(
"Failed to convert the value for a field of the model"
" %s back from the user's timezone (%s) to UTC",
model, tz_name,
exc_info=True)
# format back to string
return dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
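# Worked round-trip example (hypothetical values, added for illustration): a
# user whose tz is 'Europe/Brussels' (UTC+2 in summer) saving the displayed
# value '2014-07-01 12:00:00' gets '2014-07-01 10:00:00' written back, since
# the server convention is to store datetimes in UTC.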
class Text(orm.AbstractModel):
_name = 'website.qweb.field.text'
_inherit = ['website.qweb.field', 'ir.qweb.field.text']
def from_html(self, cr, uid, model, column, element, context=None):
return html_to_text(element)
class Selection(orm.AbstractModel):
_name = 'website.qweb.field.selection'
_inherit = ['website.qweb.field', 'ir.qweb.field.selection']
def from_html(self, cr, uid, model, column, element, context=None):
value = element.text_content().strip()
selection = column.reify(cr, uid, model, column, context=context)
for k, v in selection:
if isinstance(v, str):
v = ustr(v)
if value == v:
return k
raise ValueError(u"No value found for label %s in selection %s" % (
value, selection))
class ManyToOne(orm.AbstractModel):
_name = 'website.qweb.field.many2one'
_inherit = ['website.qweb.field', 'ir.qweb.field.many2one']
def from_html(self, cr, uid, model, column, element, context=None):
# FIXME: layering violations all the things
Model = self.pool[element.get('data-oe-model')]
M2O = self.pool[column._obj]
field = element.get('data-oe-field')
id = int(element.get('data-oe-id'))
# FIXME: weird things are going to happen for char-type _rec_name
value = html_to_text(element)
# if anything blows up, just ignore it and bail
try:
# get parent record
[obj] = Model.read(cr, uid, [id], [field])
# get m2o record id
(m2o_id, _) = obj[field]
# assume _rec_name and write directly to it
M2O.write(cr, uid, [m2o_id], {
M2O._rec_name: value
}, context=context)
except:
logger.exception("Could not save %r to m2o field %s of model %s",
value, field, Model._name)
# not necessary, but might as well be explicit about it
return None
class HTML(orm.AbstractModel):
_name = 'website.qweb.field.html'
_inherit = ['website.qweb.field', 'ir.qweb.field.html']
def from_html(self, cr, uid, model, column, element, context=None):
content = []
if element.text: content.append(element.text)
content.extend(html.tostring(child)
for child in element.iterchildren(tag=etree.Element))
return '\n'.join(content)
class Image(orm.AbstractModel):
"""
Widget options:
``class``
set as attribute on the generated <img> tag
"""
_name = 'website.qweb.field.image'
_inherit = ['website.qweb.field', 'ir.qweb.field.image']
def to_html(self, cr, uid, field_name, record, options,
source_element, t_att, g_att, qweb_context, context=None):
assert source_element.tag != 'img',\
"Oddly enough, the root tag of an image field can not be img. " \
"That is because the image goes into the tag, or it gets the " \
"hose again."
return super(Image, self).to_html(
cr, uid, field_name, record, options,
source_element, t_att, g_att, qweb_context, context=context)
def record_to_html(self, cr, uid, field_name, record, column, options=None, context=None):
if options is None: options = {}
classes = ['img', 'img-responsive'] + options.get('class', '').split()
url_params = {
'model': record._model._name,
'field': field_name,
'id': record.id,
}
for options_key in ['max_width', 'max_height']:
if options.get(options_key):
url_params[options_key] = options[options_key]
return ir_qweb.HTMLSafe('<img class="%s" src="/website/image?%s"/>' % (
' '.join(itertools.imap(escape, classes)),
werkzeug.urls.url_encode(url_params)
))
local_url_re = re.compile(r'^/(?P<module>[^]]+)/static/(?P<rest>.+)$')
def from_html(self, cr, uid, model, column, element, context=None):
url = element.find('img').get('src')
url_object = urlparse.urlsplit(url)
query = dict(urlparse.parse_qsl(url_object.query))
if url_object.path == '/website/image':
item = self.pool[query['model']].browse(
cr, uid, int(query['id']), context=context)
return item[query['field']]
if self.local_url_re.match(url_object.path):
return self.load_local_url(url)
return self.load_remote_url(url)
def load_local_url(self, url):
match = self.local_url_re.match(urlparse.urlsplit(url).path)
rest = match.group('rest')
for sep in os.sep, os.altsep:
if sep and sep != '/':
                rest = rest.replace(sep, '/')
path = openerp.modules.get_module_resource(
match.group('module'), 'static', *(rest.split('/')))
if not path:
return None
try:
with open(path, 'rb') as f:
# force complete image load to ensure it's valid image data
image = I.open(f)
image.load()
f.seek(0)
return f.read().encode('base64')
except Exception:
logger.exception("Failed to load local image %r", url)
return None
def load_remote_url(self, url):
try:
# should probably remove remote URLs entirely:
# * in fields, downloading them without blowing up the server is a
# challenge
# * in views, may trigger mixed content warnings if HTTPS CMS
# linking to HTTP images
# implement drag & drop image upload to mitigate?
req = urllib2.urlopen(url, timeout=REMOTE_CONNECTION_TIMEOUT)
# PIL needs a seekable file-like image, urllib result is not seekable
image = I.open(cStringIO.StringIO(req.read()))
# force a complete load of the image data to validate it
image.load()
except Exception:
logger.exception("Failed to load remote image %r", url)
return None
# don't use original data in case weird stuff was smuggled in, with
# luck PIL will remove some of it?
out = cStringIO.StringIO()
image.save(out, image.format)
return out.getvalue().encode('base64')
class Monetary(orm.AbstractModel):
_name = 'website.qweb.field.monetary'
_inherit = ['website.qweb.field', 'ir.qweb.field.monetary']
def from_html(self, cr, uid, model, column, element, context=None):
lang = self.user_lang(cr, uid, context=context)
value = element.find('span').text.strip()
return float(value.replace(lang.thousands_sep, '')
.replace(lang.decimal_point, '.'))
class Duration(orm.AbstractModel):
_name = 'website.qweb.field.duration'
_inherit = [
'ir.qweb.field.duration',
'website.qweb.field.float',
]
def attributes(self, cr, uid, field_name, record, options,
source_element, g_att, t_att, qweb_context,
context=None):
attrs = super(Duration, self).attributes(
cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)
return itertools.chain(attrs, [('data-oe-original', record[field_name])])
def from_html(self, cr, uid, model, column, element, context=None):
value = element.text_content().strip()
# non-localized value
return float(value)
class RelativeDatetime(orm.AbstractModel):
_name = 'website.qweb.field.relative'
_inherit = [
'ir.qweb.field.relative',
'website.qweb.field.datetime',
]
# get formatting from ir.qweb.field.relative but edition/save from datetime
class Contact(orm.AbstractModel):
_name = 'website.qweb.field.contact'
_inherit = ['ir.qweb.field.contact', 'website.qweb.field.many2one']
class QwebView(orm.AbstractModel):
_name = 'website.qweb.field.qweb'
_inherit = ['ir.qweb.field.qweb']
def html_to_text(element):
""" Converts HTML content with HTML-specified line breaks (br, p, div, ...)
in roughly equivalent textual content.
Used to replace and fixup the roundtripping of text and m2o: when using
libxml 2.8.0 (but not 2.9.1) and parsing HTML with lxml.html.fromstring
whitespace text nodes (text nodes composed *solely* of whitespace) are
stripped out with no recourse, and fundamentally relying on newlines
being in the text (e.g. inserted during user edition) is probably poor form
anyway.
-> this utility function collapses whitespace sequences and replaces
nodes by roughly corresponding linebreaks
* p are pre-and post-fixed by 2 newlines
* br are replaced by a single newline
* block-level elements not already mentioned are pre- and post-fixed by
a single newline
    Ought to be somewhat similar (but much less high-tech) to aaronsw's
    html2text. The latter produces full-blown markdown; our text -> html
    converter only replaces newlines by <br> elements at this point, so we're
    reverting that, plus a few more newline-ish elements in case the user
    tried to add newlines/paragraphs into the text field.
:param element: lxml.html content
:returns: corresponding pure-text output
"""
# output is a list of str | int. Integers are padding requests (in minimum
# number of newlines). When multiple padding requests, fold them into the
# biggest one
output = []
_wrap(element, output)
# remove any leading or tailing whitespace, replace sequences of
# (whitespace)\n(whitespace) by a single newline, where (whitespace) is a
# non-newline whitespace in this case
return re.sub(
r'[ \t\r\f]*\n[ \t\r\f]*',
'\n',
''.join(_realize_padding(output)).strip())
_PADDED_BLOCK = set('p h1 h2 h3 h4 h5 h6'.split())
# https://developer.mozilla.org/en-US/docs/HTML/Block-level_elements minus p
_MISC_BLOCK = set((
'address article aside audio blockquote canvas dd dl div figcaption figure'
' footer form header hgroup hr ol output pre section tfoot ul video'
).split())
def _collapse_whitespace(text):
""" Collapses sequences of whitespace characters in ``text`` to a single
space
"""
    return re.sub(r'\s+', ' ', text)
def _realize_padding(it):
""" Fold and convert padding requests: integers in the output sequence are
requests for at least n newlines of padding. Runs thereof can be collapsed
into the largest requests and converted to newlines.
"""
padding = None
for item in it:
if isinstance(item, int):
padding = max(padding, item)
continue
if padding:
yield '\n' * padding
padding = None
yield item
# leftover padding irrelevant as the output will be stripped
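# Hedged sketch of the folding behaviour (not part of the original module):
#
#     >>> list(_realize_padding(['a', 1, 2, 'b']))
#     ['a', '\n\n', 'b']
#
# the adjacent requests 1 and 2 collapse into the larger one; a trailing
# integer is simply dropped, as the final output is stripped anyway.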
def _wrap(element, output, wrapper=u''):
""" Recursively extracts text from ``element`` (via _element_to_text), and
wraps it all in ``wrapper``. Extracted text is added to ``output``
:type wrapper: basestring | int
"""
output.append(wrapper)
if element.text:
output.append(_collapse_whitespace(element.text))
for child in element:
_element_to_text(child, output)
output.append(wrapper)
def _element_to_text(e, output):
if e.tag == 'br':
output.append(u'\n')
elif e.tag in _PADDED_BLOCK:
_wrap(e, output, 2)
elif e.tag in _MISC_BLOCK:
_wrap(e, output, 1)
else:
# inline
_wrap(e, output)
if e.tail:
output.append(_collapse_whitespace(e.tail))
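# End-to-end sketch (hedged, not part of the original module):
#
#     >>> html_to_text(html.fromstring('<div>a<br/>b<p>c</p></div>'))
#     'a\nb\n\nc'
#
# the root element carries an empty wrapper, the br contributes a literal
# newline, and the p two newlines on each side (trimmed by the final strip).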
| agpl-3.0 |
Srisai85/scipy | scipy/linalg/_matfuncs_inv_ssq.py | 36 | 28127 | """
Matrix functions that use Pade approximation with inverse scaling and squaring.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
from scipy.linalg.decomp_schur import schur, rsf2csf
from scipy.linalg.matfuncs import funm
from scipy.linalg import svdvals, solve_triangular
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import onenormest
import scipy.special
class LogmRankWarning(UserWarning):
pass
class LogmExactlySingularWarning(LogmRankWarning):
pass
class LogmNearlySingularWarning(LogmRankWarning):
pass
class LogmError(np.linalg.LinAlgError):
pass
class FractionalMatrixPowerError(np.linalg.LinAlgError):
pass
#TODO renovate or move this class when scipy operators are more mature
class _MatrixM1PowerOperator(LinearOperator):
"""
A representation of the linear operator (A - I)^p.
"""
def __init__(self, A, p):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0 or p != int(p):
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x) - x
return x
def _rmatvec(self, x):
for i in range(self._p):
x = x.dot(self._A) - x
return x
def _matmat(self, X):
for i in range(self._p):
X = self._A.dot(X) - X
return X
def _adjoint(self):
return _MatrixM1PowerOperator(self._A.T, self._p)
#TODO renovate or move this function when scipy operators are more mature
def _onenormest_m1_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of (A - I)^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return onenormest(_MatrixM1PowerOperator(A, p),
t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w)
def _unwindk(z):
"""
Compute the scalar unwinding number.
    Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z))) / (2 pi i).
Note that this definition differs in sign from the original definition
in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
Parameters
----------
z : complex
A complex number.
Returns
-------
unwinding_number : integer
The scalar unwinding number of z.
References
----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
.. [2] Robert M. Corless and David J. Jeffrey,
"The unwinding number." Newsletter ACM SIGSAM Bulletin
Volume 30, Issue 2, June 1996, Pages 28-35.
.. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
David J. Jeffrey and Stephen M. Watt,
"Reasoning about the elementary functions of complex analysis"
Annals of Mathematics and Artificial Intelligence,
36: 303-318, 2002.
"""
return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
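# Hedged sanity checks (not part of the original module): the unwinding
# number is 0 whenever Im(z) lies in (-pi, pi], and grows by one for each
# further 2*pi of imaginary part:
#
#     >>> _unwindk(0j)
#     0
#     >>> _unwindk(3j * np.pi)
#     1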
def _briggs_helper_function(a, k):
"""
Computes r = a^(1 / (2^k)) - 1.
This is algorithm (2) of [1]_.
The purpose is to avoid a danger of subtractive cancellation.
For more computational efficiency it should probably be cythonized.
Parameters
----------
a : complex
        A complex number, preferably not belonging to the closed
        negative real axis.
k : integer
A nonnegative integer.
Returns
-------
r : complex
The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
Notes
-----
The algorithm as written in the publication does not handle k=0 or k=1
correctly, so these are special-cased in this implementation.
This function is intended to not allow `a` to belong to the closed
    negative real axis, but this constraint is relaxed.
References
----------
.. [1] Awad H. Al-Mohy (2012)
"A more accurate Briggs method for the logarithm",
Numerical Algorithms, 59 : 393--402.
"""
if k < 0 or int(k) != k:
raise ValueError('expected a nonnegative integer k')
if k == 0:
return a - 1
elif k == 1:
return np.sqrt(a) - 1
else:
k_hat = k
if np.angle(a) >= np.pi / 2:
a = np.sqrt(a)
k_hat = k - 1
z0 = a - 1
a = np.sqrt(a)
r = 1 + a
for j in range(1, k_hat):
a = np.sqrt(a)
r = r * (1 + a)
r = z0 / r
return r
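# Hedged numerical sketch (not part of the original module): for a close to 1
# the naive a**(2.0**-k) - 1 loses significant digits to cancellation, while
# the product form above does not. To first order a**(1.0/2**k) - 1 equals
# (a - 1) / 2**k, so with a = 1 + 1e-8 and k = 4 the result is ~6.25e-10:
#
#     >>> abs(_briggs_helper_function(1 + 1e-8, 4) - 6.25e-10) < 1e-15
#     True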
def _fractional_power_superdiag_entry(l1, l2, t12, p):
"""
Compute a superdiagonal entry of a fractional matrix power.
This is Eq. (5.6) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
p : float
A fractional power.
Returns
-------
f12 : complex
A superdiagonal entry of the fractional matrix power.
Notes
-----
Some amount of care has been taken to return a real number
if all of the inputs are real.
References
----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if l1 == l2:
f12 = t12 * p * l1**(p-1)
elif abs(l1) < abs(l2) / 2 or abs(l2) < abs(l1) / 2:
f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
else:
# This is Eq. (5.5) in [1].
z = (l2 - l1) / (l2 + l1)
log_l1 = np.log(l1)
log_l2 = np.log(l2)
arctanh_z = np.arctanh(z)
tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
tmp_u = _unwindk(log_l2 - log_l1)
if tmp_u:
tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
else:
tmp_b = p * arctanh_z
tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
f12 = tmp_a * tmp_c
return f12
def _logm_superdiag_entry(l1, l2, t12):
"""
Compute a superdiagonal entry of a matrix logarithm.
This is Eq. (11.28) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
Returns
-------
f12 : complex
A superdiagonal entry of the matrix logarithm.
Notes
-----
Some amount of care has been taken to return a real number
if all of the inputs are real.
References
----------
.. [1] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
"""
if l1 == l2:
f12 = t12 / l1
elif abs(l1) < abs(l2) / 2 or abs(l2) < abs(l1) / 2:
f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
else:
z = (l2 - l1) / (l2 + l1)
ua = _unwindk(np.log(l2) - np.log(l1))
ub = _unwindk(np.log(1+z) - np.log(1-z))
u = ua + ub
if u:
f12 = t12 * (2*np.arctanh(z) + 2*np.pi*1j*(ua + ub)) / (l2 - l1)
else:
f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
return f12
def _inverse_squaring_helper(T0, theta):
"""
A helper function for inverse scaling and squaring for Pade approximation.
Parameters
----------
T0 : (N, N) array_like upper triangular
Matrix involved in inverse scaling and squaring.
theta : indexable
The values theta[1] .. theta[7] must be available.
They represent bounds related to Pade approximation, and they depend
on the matrix function which is being computed.
For example, different values of theta are required for
matrix logarithm than for fractional matrix power.
Returns
-------
R : (N, N) array_like upper triangular
Composition of zero or more matrix square roots of T0, minus I.
s : non-negative integer
Number of square roots taken.
m : positive integer
The degree of the Pade approximation.
Notes
-----
This subroutine appears as a chunk of lines within
a couple of published algorithms; for example it appears
as lines 4--35 in algorithm (3.1) of [1]_, and
as lines 3--34 in algorithm (4.1) of [2]_.
The instances of 'goto line 38' in algorithm (3.1) of [1]_
    probably mean 'goto line 36' and have been interpreted accordingly.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
.. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
"""
if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = T0.shape
T = T0
# Find s0, the smallest s such that the spectral radius
# of a certain diagonal matrix is at most theta[7].
# Note that because theta[7] < 1,
# this search will not terminate if any diagonal entry of T is zero.
s0 = 0
tmp_diag = np.diag(T)
if np.count_nonzero(tmp_diag) != n:
raise Exception('internal inconsistency')
while np.max(np.absolute(tmp_diag - 1)) > theta[7]:
tmp_diag = np.sqrt(tmp_diag)
s0 += 1
# Take matrix square roots of T.
for i in range(s0):
T = _sqrtm_triu(T)
# Flow control in this section is a little odd.
# This is because I am translating algorithm descriptions
# which have GOTOs in the publication.
s = s0
k = 0
d2 = _onenormest_m1_power(T, 2) ** (1/2)
d3 = _onenormest_m1_power(T, 3) ** (1/3)
a2 = max(d2, d3)
m = None
for i in (1, 2):
if a2 <= theta[i]:
m = i
break
while m is None:
if s > s0:
d3 = _onenormest_m1_power(T, 3) ** (1/3)
d4 = _onenormest_m1_power(T, 4) ** (1/4)
a3 = max(d3, d4)
if a3 <= theta[7]:
j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
if j1 <= 6:
m = j1
break
elif a3 / 2 <= theta[5] and k < 2:
k += 1
T = _sqrtm_triu(T)
s += 1
continue
d5 = _onenormest_m1_power(T, 5) ** (1/5)
a4 = max(d4, d5)
eta = min(a3, a4)
for i in (6, 7):
if eta <= theta[i]:
m = i
break
if m is not None:
break
T = _sqrtm_triu(T)
s += 1
# The subtraction of the identity is redundant here,
# because the diagonal will be replaced for improved numerical accuracy,
# but this formulation should help clarify the meaning of R.
R = T - np.identity(n)
# Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
# using formulas that have less subtractive cancellation.
# Skip this step if the principal branch
# does not exist at T0; this happens when a diagonal entry of T0
# is negative with imaginary part 0.
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
if has_principal_branch:
for j in range(n):
a = T0[j, j]
r = _briggs_helper_function(a, s)
R[j, j] = r
p = np.exp2(-s)
for j in range(n-1):
l1 = T0[j, j]
l2 = T0[j+1, j+1]
t12 = T0[j, j+1]
f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
R[j, j+1] = f12
# Return the T-I matrix, the number of square roots, and the Pade degree.
if not np.array_equal(R, np.triu(R)):
raise Exception('internal inconsistency')
return R, s, m
def _fractional_power_pade_constant(i, t):
# A helper function for matrix fractional power.
if i < 1:
raise ValueError('expected a positive integer i')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
if i == 1:
return -t
elif i % 2 == 0:
j = i // 2
return (-j + t) / (2 * (2*j - 1))
elif i % 2 == 1:
j = (i - 1) // 2
return (-j - t) / (2 * (2*j + 1))
else:
raise Exception('internal error')
def _fractional_power_pade(R, t, m):
"""
Evaluate the Pade approximation of a fractional matrix power.
Evaluate the degree-m Pade approximation of R
to the fractional matrix power t using the continued fraction
in bottom-up fashion using algorithm (4.1) in [1]_.
Parameters
----------
R : (N, N) array_like
Upper triangular matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
m : positive integer
Degree of Pade approximation.
Returns
-------
U : (N, N) array_like
The degree-m Pade approximation of R to the fractional power t.
This matrix will be upper triangular.
References
----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if m < 1 or int(m) != m:
raise ValueError('expected a positive integer m')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
R = np.asarray(R)
if len(R.shape) != 2 or R.shape[0] != R.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = R.shape
ident = np.identity(n)
Y = R * _fractional_power_pade_constant(2*m, t)
for j in range(2*m - 1, 0, -1):
rhs = R * _fractional_power_pade_constant(j, t)
Y = solve_triangular(ident + Y, rhs)
U = ident + Y
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
def _remainder_matrix_power_triu(T, t):
"""
Compute a fractional power of an upper triangular matrix.
The fractional power is restricted to fractions -1 < t < 1.
This uses algorithm (3.1) of [1]_.
The Pade approximation itself uses algorithm (4.1) of [2]_.
Parameters
----------
T : (N, N) array_like
Upper triangular matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
Returns
-------
X : (N, N) array_like
The fractional power of the matrix.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
m_to_theta = {
1: 1.51e-5,
2: 2.24e-3,
3: 1.88e-2,
4: 6.04e-2,
5: 1.24e-1,
6: 2.00e-1,
7: 2.79e-1,
}
n, n = T.shape
T0 = T
T0_diag = np.diag(T0)
if np.array_equal(T0, np.diag(T0_diag)):
U = np.diag(T0_diag ** t)
else:
R, s, m = _inverse_squaring_helper(T0, m_to_theta)
# Evaluate the Pade approximation.
# Note that this function expects the negative of the matrix
# returned by the inverse squaring helper.
U = _fractional_power_pade(-R, t, m)
# Undo the inverse scaling and squaring.
# Be less clever about this
# if the principal branch does not exist at T0;
# this happens when a diagonal entry of T0
# is negative with imaginary part 0.
eivals = np.diag(T0)
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
for i in range(s, -1, -1):
if i < s:
U = U.dot(U)
else:
if has_principal_branch:
p = t * np.exp2(-i)
U[np.diag_indices(n)] = T0_diag ** p
for j in range(n-1):
l1 = T0[j, j]
l2 = T0[j+1, j+1]
t12 = T0[j, j+1]
f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
U[j, j+1] = f12
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
def _remainder_matrix_power(A, t):
"""
Compute the fractional power of a matrix, for fractions -1 < t < 1.
This uses algorithm (3.1) of [1]_.
The Pade approximation itself uses algorithm (4.1) of [2]_.
Parameters
----------
A : (N, N) array_like
Matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
Returns
-------
X : (N, N) array_like
The fractional power of the matrix.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
# This code block is copied from numpy.matrix_power().
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('input must be a square array')
# Get the number of rows and columns.
n, n = A.shape
# Triangularize the matrix if necessary,
# attempting to preserve dtype if possible.
if np.array_equal(A, np.triu(A)):
Z = None
T = A
else:
if np.isrealobj(A):
T, Z = schur(A)
if not np.array_equal(T, np.triu(T)):
T, Z = rsf2csf(T, Z)
else:
T, Z = schur(A, output='complex')
# Zeros on the diagonal of the triangular matrix are forbidden,
# because the inverse scaling and squaring cannot deal with it.
T_diag = np.diag(T)
if np.count_nonzero(T_diag) != n:
raise FractionalMatrixPowerError(
'cannot use inverse scaling and squaring to find '
'the fractional matrix power of a singular matrix')
# If the triangular matrix is real and has a negative
# entry on the diagonal, then force the matrix to be complex.
if np.isrealobj(T) and np.min(T_diag) < 0:
T = T.astype(complex)
# Get the fractional power of the triangular matrix,
# and de-triangularize it if necessary.
U = _remainder_matrix_power_triu(T, t)
if Z is not None:
ZH = np.conjugate(Z).T
return Z.dot(U).dot(ZH)
else:
return U
def _fractional_matrix_power(A, p):
"""
Compute the fractional power of a matrix.
See the fractional_matrix_power docstring in matfuncs.py for more info.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
if p == int(p):
return np.linalg.matrix_power(A, int(p))
# Compute singular values.
s = svdvals(A)
# Inverse scaling and squaring cannot deal with a singular matrix,
# because the process of repeatedly taking square roots
# would not converge to the identity matrix.
if s[-1]:
# Compute the condition number relative to matrix inversion,
# and use this to decide between floor(p) and ceil(p).
k2 = s[0] / s[-1]
p1 = p - np.floor(p)
p2 = p - np.ceil(p)
if p1 * k2 ** (1 - p1) <= -p2 * k2:
a = int(np.floor(p))
b = p1
else:
a = int(np.ceil(p))
b = p2
try:
R = _remainder_matrix_power(A, b)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
except np.linalg.LinAlgError as e:
pass
# If p is negative then we are going to give up.
# If p is non-negative then we can fall back to generic funm.
if p < 0:
X = np.empty_like(A)
X.fill(np.nan)
return X
else:
p1 = p - np.floor(p)
a = int(np.floor(p))
b = p1
R, info = funm(A, lambda x: pow(x, b), disp=False)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
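# Hedged usage sketch (not part of this module): the public entry point is
# scipy.linalg.fractional_matrix_power, which dispatches to the code above:
#
#     >>> from scipy.linalg import fractional_matrix_power
#     >>> fractional_matrix_power(np.array([[4., 0.], [0., 9.]]), 0.5)
#     array([[ 2.,  0.],
#            [ 0.,  3.]])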
def _logm_triu(T):
"""
Compute matrix logarithm of an upper triangular matrix.
The matrix logarithm is the inverse of
expm: expm(logm(`T`)) == `T`
Parameters
----------
T : (N, N) array_like
Upper triangular matrix whose logarithm to evaluate
Returns
-------
logm : (N, N) ndarray
Matrix logarithm of `T`
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
.. [2] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
    .. [3] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
T = np.asarray(T)
if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = T.shape
# Construct T0 with the appropriate type,
# depending on the dtype and the spectrum of T.
T_diag = np.diag(T)
keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
if keep_it_real:
T0 = T
else:
T0 = T.astype(complex)
# Define bounds given in Table (2.1).
theta = (None,
1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)
R, s, m = _inverse_squaring_helper(T0, theta)
# Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
# This requires the nodes and weights
# corresponding to degree-m Gauss-Legendre quadrature.
# These quadrature arrays need to be transformed from the [-1, 1] interval
# to the [0, 1] interval.
nodes, weights = scipy.special.p_roots(m)
nodes = nodes.real
if nodes.shape != (m,) or weights.shape != (m,):
raise Exception('internal error')
nodes = 0.5 + 0.5 * nodes
weights = 0.5 * weights
ident = np.identity(n)
U = np.zeros_like(R)
for alpha, beta in zip(weights, nodes):
U += solve_triangular(ident + beta*R, alpha*R)
U *= np.exp2(s)
# Skip this step if the principal branch
# does not exist at T0; this happens when a diagonal entry of T0
# is negative with imaginary part 0.
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
if has_principal_branch:
# Recompute diagonal entries of U.
U[np.diag_indices(n)] = np.log(np.diag(T0))
# Recompute superdiagonal entries of U.
        # The indexing in this code should be renovated
        # when a newer np.diagonal() becomes available.
for i in range(n-1):
l1 = T0[i, i]
l2 = T0[i+1, i+1]
t12 = T0[i, i+1]
U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)
# Return the logm of the upper triangular matrix.
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
# The input matrix should be upper triangular.
# The eps is ad hoc and is not meant to be machine precision.
tri_eps = 1e-20
abs_diag = np.absolute(np.diag(T))
if np.any(abs_diag == 0):
exact_singularity_msg = 'The logm input matrix is exactly singular.'
warnings.warn(exact_singularity_msg, LogmExactlySingularWarning)
if not inplace:
T = T.copy()
n = T.shape[0]
for i in range(n):
if not T[i, i]:
T[i, i] = tri_eps
elif np.any(abs_diag < tri_eps):
near_singularity_msg = 'The logm input matrix may be nearly singular.'
warnings.warn(near_singularity_msg, LogmNearlySingularWarning)
return T
def _logm(A):
"""
Compute the matrix logarithm.
See the logm docstring in matfuncs.py for more info.
Notes
-----
In this function we look at triangular matrices that are similar
to the input matrix. If any diagonal entry of such a triangular matrix
is exactly zero then the original matrix is singular.
The matrix logarithm does not exist for such matrices,
but in such cases we will pretend that the diagonal entries that are zero
are actually slightly positive by an ad-hoc amount, in the interest
of returning something more useful than NaN. This will cause a warning.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
n = A.shape[0]
# If the input matrix dtype is integer then copy to a float dtype matrix.
if issubclass(A.dtype.type, np.integer):
A = np.asarray(A, dtype=float)
keep_it_real = np.isrealobj(A)
try:
if np.array_equal(A, np.triu(A)):
A = _logm_force_nonsingular_triangular_matrix(A)
if np.min(np.diag(A)) < 0:
A = A.astype(complex)
return _logm_triu(A)
else:
if keep_it_real:
T, Z = schur(A)
if not np.array_equal(T, np.triu(T)):
T, Z = rsf2csf(T,Z)
else:
T, Z = schur(A, output='complex')
T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
U = _logm_triu(T)
ZH = np.conjugate(Z).T
return Z.dot(U).dot(ZH)
except (SqrtmError, LogmError) as e:
X = np.empty_like(A)
X.fill(np.nan)
return X
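# Hedged round-trip sketch (not part of this module): the public wrapper is
# scipy.linalg.logm, and for a matrix with no eigenvalues on the closed
# negative real axis expm inverts it:
#
#     >>> from scipy.linalg import expm, logm
#     >>> A = np.array([[1., 1.], [0., 2.]])
#     >>> np.allclose(expm(logm(A)), A)
#     True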
| bsd-3-clause |
wirwolf/minexcoin | qa/rpc-tests/proxy_test.py | 39 | 8489 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
start_nodes,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
print("Warning: testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| mit |
skoslowski/gnuradio | gr-filter/examples/fir_filter_fff.py | 3 | 3371 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print("Num. Taps: ", len(taps))
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
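# Quick standalone sketch (hedged; the numbers mirror the argparse defaults
# below and are otherwise arbitrary):
#
#     taps = filter.firdes.low_pass_2(1, 8000, 1000, 100, 80)
#     fir = filter.fir_filter_fff(2, taps)   # decimate by 2
#
# A decimating FIR emits one output sample per D inputs, so vsnk_out in the
# flowgraph above ends up holding roughly N / D items.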
def main():
parser = ArgumentParser(conflict_handler="resolve")
parser.add_argument("-N", "--nsamples", type=int, default=10000,
help="Number of samples to process [default=%(default)r]")
parser.add_argument("-s", "--samplerate", type=eng_float, default=8000,
help="System sample rate [default=%(default)r]")
parser.add_argument("-B", "--bandwidth", type=eng_float, default=1000,
help="Filter bandwidth [default=%(default)r]")
parser.add_argument("-T", "--transition", type=eng_float, default=100,
help="Transition band [default=%(default)r]")
parser.add_argument("-A", "--attenuation", type=eng_float, default=80,
help="Stopband attenuation [default=%(default)r]")
parser.add_argument("-D", "--decimation", type=int, default=1,
help="Decmation factor [default=%(default)r]")
args = parser.parse_args()
put = example_fir_filter_fff(args.nsamples,
args.samplerate,
args.bandwidth,
args.transition,
args.attenuation,
args.decimation)
put.run()
data_src = numpy.array(put.vsnk_src.data())
data_snk = numpy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pyplot.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
    s1.psd(data_src, NFFT=nfft, noverlap=nfft // 4,
           Fs=args.samplerate)
    s1.psd(data_snk, NFFT=nfft, noverlap=nfft // 4,
           Fs=args.samplerate)
f2 = pyplot.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
joelddiaz/openshift-tools | scripts/monitoring/cron-send-cluster-capacity.py | 12 | 18329 | #!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Send Openshift cluster capacity to MetricSender
'''
#
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name
# pylint flags import errors, as the bot doesn't know our openshift-tools libs
#pylint: disable=import-error
import argparse
from openshift_tools.web.openshift_rest_api import OpenshiftRestApi
from openshift_tools.monitoring.metric_sender import MetricSender
import sqlite3
import yaml
from openshift_tools.conversions import to_bytes, to_milicores
class OpenshiftClusterCapacity(object):
''' Checks for cluster capacity '''
def __init__(self):
self.args = None
self.metric_sender = None
self.ora = None
self.sql_conn = None
self.zbx_key_prefix = "openshift.master.cluster.compute_nodes."
def run(self):
''' Main function to run the check '''
self.parse_args()
self.metric_sender = MetricSender(verbose=self.args.verbose,
debug=self.args.debug)
master_cfg = []
with open(self.args.master_config, 'r') as yml:
master_cfg = yaml.load(yml)
self.ora = OpenshiftRestApi(host=master_cfg['oauthConfig']['masterURL'],
verify_ssl=True)
self.cluster_capacity()
if not self.args.dry_run:
self.metric_sender.send_metrics()
def parse_args(self):
''' parse the args from the cli '''
parser = argparse.ArgumentParser(description='Cluster capacity sender')
parser.add_argument('--master-config',
default='/etc/origin/master/master-config.yaml',
help='Location of OpenShift master-config.yml file')
parser.add_argument('-v', '--verbose', action='store_true',
default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true',
default=None, help='Debug?')
parser.add_argument('--dry-run', action='store_true', default=False,
                            help='Do not send results to Zabbix')
self.args = parser.parse_args()
def load_nodes(self):
''' load nodes into SQL '''
self.sql_conn.execute('''CREATE TABLE nodes
(name text, type text, api text,
max_cpu integer, max_memory integer,
max_pods integer)''')
response = self.ora.get('/api/v1/nodes')
for new_node in response['items']:
# Skip nodes not in 'Ready' state
node_ready = False
for condition in new_node['status']['conditions']:
if condition['type'] == 'Ready' and \
condition['status'] == 'True':
node_ready = True
if not node_ready:
continue
# Skip unschedulable nodes
if new_node['spec'].get('unschedulable'):
continue
node = {}
node['name'] = new_node['metadata']['name']
node['type'] = new_node['metadata']['labels'].get('type', 'unknown')
node['api'] = new_node['metadata']['selfLink']
if 'allocatable' in new_node['status']:
cpu = new_node['status']['allocatable']['cpu']
mem = new_node['status']['allocatable']['memory']
node['max_pods'] = int(new_node['status']['allocatable']['pods'])
else:
cpu = new_node['status']['capacity']['cpu']
mem = new_node['status']['capacity']['memory']
node['max_pods'] = int(new_node['status']['capacity']['pods'])
node['max_cpu'] = to_milicores(cpu)
node['max_memory'] = to_bytes(mem)
if self.args.debug:
print "Adding node: {}".format(str(node))
self.sql_conn.execute('INSERT INTO nodes VALUES (?,?,?,?,?,?)',
(node['name'], node['type'], node['api'],
node['max_cpu'], node['max_memory'],
node['max_pods']))
@staticmethod
def load_container_limits(pod, containers):
''' process/store container limits data '''
for container in containers:
if 'limits' in container['resources']:
pod['cpu_limits'] = int(pod.get('cpu_limits', 0)) \
+ int(to_milicores(container['resources']['limits'].get('cpu', '0')))
pod['memory_limits'] = int(pod.get('memory_limits', 0)) \
+ int(to_bytes(container['resources']['limits'].get('memory', '0')))
if 'requests' in container['resources']:
pod['cpu_requests'] = int(pod.get('cpu_requests', 0)) \
+ int(to_milicores(container['resources']['requests'].get('cpu', '0')))
pod['memory_requests'] = int(pod.get('memory_requests', 0)) \
+ int(to_bytes(container['resources']['requests'].get('memory', '0')))
def load_pods(self):
''' put pod details into db '''
self.sql_conn.execute('''CREATE TABLE pods
(name text, namespace text, api text,
cpu_limits integer, cpu_requests integer,
memory_limits integer,
memory_requests integer, node text)''')
response = self.ora.get('/api/v1/pods')
for new_pod in response['items']:
if new_pod['status']['phase'] != 'Running':
continue
pod = {}
pod['name'] = new_pod['metadata']['name']
pod['namespace'] = new_pod['metadata']['namespace']
pod['api'] = new_pod['metadata']['selfLink']
pod['node'] = new_pod['spec']['nodeName']
self.load_container_limits(pod, new_pod['spec']['containers'])
self.sql_conn.execute('INSERT INTO pods VALUES (?,?,?,?,?,?,?,?)',
(pod['name'], pod['namespace'], pod['api'],
pod.get('cpu_limits'),
pod.get('cpu_requests'),
pod.get('memory_limits'),
pod.get('memory_requests'),
pod['node']))
def get_largest_pod(self):
''' return single largest memory request number for all running pods '''
max_pod = 0
for row in self.sql_conn.execute('''SELECT MAX(memory_requests)
FROM pods, nodes
WHERE pods.node=nodes.name
AND nodes.type="compute"'''):
max_pod = row[0]
return max_pod
def how_many_schedulable(self, node_size):
''' return how many pods with memory request 'node_size' can be scheduled '''
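        # Worked example (hypothetical numbers): two compute nodes with 16 GiB
        # max_memory and 10 GiB scheduled each have 6 GiB available apiece;
        # with a node_size of 4 GiB, integer division yields one schedulable
        # pod per node, so this method returns 2.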
nodes = {}
# get max mem for each compute node
for row in self.sql_conn.execute('''SELECT nodes.name, nodes.max_memory
FROM nodes
WHERE nodes.type="compute"'''):
nodes[row[0]] = {'max_memory': row[1],
                             # default memory_scheduled to 0, because a node
                             # may have no pods running and the next SQL query
                             # below would then leave this field unpopulated
'memory_scheduled': 0}
# get memory requests for all pods on all compute nodes
for row in self.sql_conn.execute('''SELECT nodes.name,
SUM(pods.memory_requests)
FROM pods, nodes
WHERE pods.node=nodes.name
AND nodes.type="compute"
GROUP BY nodes.name'''):
nodes[row[0]]['memory_scheduled'] = row[1]
schedulable = 0
for node in nodes.keys():
# TODO: Some containers from `oc get pods --all-namespaces -o json`
# don't have resources scheduled, causing memory_scheduled == 0
available = nodes[node]['max_memory'] - \
nodes[node]['memory_scheduled']
num = available / node_size
# ignore negative number (overcommitted nodes)
if num > 0:
schedulable += num
return schedulable
def get_compute_nodes_max_schedulable_cpu(self):
        ''' calculate total schedulable CPU (in millicores) for all compute nodes '''
max_cpu = 0
for row in self.sql_conn.execute('''SELECT SUM(nodes.max_cpu)
FROM nodes
WHERE nodes.type="compute" '''):
max_cpu = row[0]
return max_cpu
def get_compute_nodes_max_schedulable_mem(self):
''' calculate total schedulable memory for all compute nodes '''
max_mem = 0
for row in self.sql_conn.execute('''SELECT SUM(nodes.max_memory)
FROM nodes
WHERE nodes.type="compute" '''):
max_mem = row[0]
return max_mem
def get_compute_nodes_scheduled_cpu(self):
''' calculate cpu scheduled to pods
(total requested and percentage of cluster-wide total) '''
max_cpu = self.get_compute_nodes_max_schedulable_cpu()
cpu_requests_for_all_pods = 0
for row in self.sql_conn.execute('''SELECT SUM(pods.cpu_requests)
FROM pods, nodes
WHERE pods.node = nodes.name
AND nodes.type = "compute" '''):
cpu_requests_for_all_pods = row[0]
cpu_scheduled_as_pct = 100.0 * cpu_requests_for_all_pods / max_cpu
cpu_unscheduled = max_cpu - cpu_requests_for_all_pods
cpu_unscheduled_as_pct = 100.0 * cpu_unscheduled / max_cpu
return (cpu_requests_for_all_pods, cpu_scheduled_as_pct,
cpu_unscheduled, cpu_unscheduled_as_pct)
def get_compute_nodes_scheduled_mem(self):
''' calculate mem allocated to pods
(total requested and percentage of cluster-wide total) '''
max_mem = self.get_compute_nodes_max_schedulable_mem()
mem_requests_for_all_pods = 0
for row in self.sql_conn.execute('''SELECT SUM(pods.memory_requests)
FROM pods, nodes
WHERE pods.node = nodes.name
AND nodes.type = "compute" '''):
mem_requests_for_all_pods = row[0]
mem_scheduled_as_pct = 100.0 * mem_requests_for_all_pods / max_mem
mem_unscheduled = max_mem - mem_requests_for_all_pods
mem_unscheduled_as_pct = 100.0 * mem_unscheduled / max_mem
return (mem_requests_for_all_pods, mem_scheduled_as_pct, mem_unscheduled, mem_unscheduled_as_pct)
def get_oversub_cpu(self):
        ''' return percentage oversubscribed based on CPU limits on running pods '''
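        # Worked example (hypothetical numbers): pod CPU limits totalling
        # 12000 millicores against 8000 schedulable millicores give
        # (12000.0 / 8000) * 100 - 100 = 50% oversubscription; a negative
        # result means the limits still fit inside capacity.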
max_cpu = self.get_compute_nodes_max_schedulable_cpu()
pod_cpu_limits = 0
# get cpu limits for all running pods
for row in self.sql_conn.execute('''SELECT SUM(pods.cpu_limits)
FROM pods, nodes
WHERE pods.node = nodes.name
AND nodes.type = "compute" '''):
pod_cpu_limits = row[0]
return ((float(pod_cpu_limits)/max_cpu) * 100.0) - 100
def get_oversub_mem(self):
''' return percentage oversubscribed based on memory limits on running pods '''
max_mem = self.get_compute_nodes_max_schedulable_mem()
pod_mem_limits = 0
# get mem limits for all running pods
for row in self.sql_conn.execute('''SELECT SUM(pods.memory_limits)
FROM pods, nodes
WHERE pods.node = nodes.name
AND nodes.type = "compute" '''):
pod_mem_limits = row[0]
return ((float(pod_mem_limits)/max_mem) * 100.0) - 100
def do_cpu_stats(self):
''' gather and report CPU statistics '''
# CPU items
zbx_key_max_schedulable_cpu = self.zbx_key_prefix + "max_schedulable.cpu"
zbx_key_scheduled_cpu = self.zbx_key_prefix + "scheduled.cpu"
zbx_key_scheduled_cpu_pct = self.zbx_key_prefix + "scheduled.cpu_pct"
zbx_key_unscheduled_cpu = self.zbx_key_prefix + "unscheduled.cpu"
zbx_key_unscheduled_cpu_pct = self.zbx_key_prefix + "unscheduled.cpu_pct"
zbx_key_oversub_cpu_pct = self.zbx_key_prefix + "oversubscribed.cpu_pct"
print "CPU Stats:"
max_schedulable_cpu = self.get_compute_nodes_max_schedulable_cpu()
self.metric_sender.add_metric({zbx_key_max_schedulable_cpu: max_schedulable_cpu})
scheduled_cpu, scheduled_cpu_pct, unscheduled_cpu, unscheduled_cpu_pct = self.get_compute_nodes_scheduled_cpu()
oversub_cpu_pct = self.get_oversub_cpu()
print " Scheduled CPU for compute nodes:\t\t\t" + \
"{:>15} milicores".format(scheduled_cpu)
print " Unscheduled CPU for compute nodes:\t\t\t" + \
"{:>15} milicores".format(unscheduled_cpu)
print " Maximum (total) schedulable CPU for compute " + \
"nodes:\t{:>15} milicores".format(max_schedulable_cpu)
print " Percent scheduled CPU for compute nodes:\t\t\t" + \
"{:.2f}%".format(scheduled_cpu_pct)
print " Percent unscheduled CPU for compute nodes:\t\t\t" + \
"{:.2f}%".format(unscheduled_cpu_pct)
print " Percent oversubscribed CPU for compute nodes: \t\t" + \
"{:.2f}%".format(oversub_cpu_pct)
self.metric_sender.add_metric({zbx_key_scheduled_cpu: scheduled_cpu})
self.metric_sender.add_metric({zbx_key_scheduled_cpu_pct: int(scheduled_cpu_pct)})
self.metric_sender.add_metric({zbx_key_unscheduled_cpu: unscheduled_cpu})
self.metric_sender.add_metric({zbx_key_unscheduled_cpu_pct: int(unscheduled_cpu_pct)})
self.metric_sender.add_metric({zbx_key_oversub_cpu_pct: int(oversub_cpu_pct)})
def do_mem_stats(self):
''' gather and report memory statistics '''
# Memory items
zbx_key_max_schedulable_mem = self.zbx_key_prefix + "max_schedulable.mem"
zbx_key_scheduled_mem = self.zbx_key_prefix + "scheduled.mem"
zbx_key_scheduled_mem_pct = self.zbx_key_prefix + "scheduled.mem_pct"
zbx_key_unscheduled_mem = self.zbx_key_prefix + "unscheduled.mem"
zbx_key_unscheduled_mem_pct = self.zbx_key_prefix + "unscheduled.mem_pct"
zbx_key_oversub_mem_pct = self.zbx_key_prefix + "oversubscribed.mem_pct"
print "\nMemory Stats:"
max_schedulable_mem = self.get_compute_nodes_max_schedulable_mem()
self.metric_sender.add_metric({zbx_key_max_schedulable_mem: max_schedulable_mem})
scheduled_mem, scheduled_mem_pct, unscheduled_mem, unscheduled_mem_pct = self.get_compute_nodes_scheduled_mem()
oversub_mem_pct = self.get_oversub_mem()
print " Scheduled mem for compute nodes:\t\t\t" + \
"{:>20} bytes".format(scheduled_mem)
print " Unscheduled mem for compute nodes:\t\t\t" + \
"{:>20} bytes".format(unscheduled_mem)
print " Maximum (total) schedulable memory for compute nodes:\t" + \
"{:>20} bytes".format(max_schedulable_mem)
print " Percent scheduled mem for compute nodes:\t\t\t" + \
"{:.2f}%".format(scheduled_mem_pct)
print " Percent unscheduled mem for compute nodes:\t\t\t" + \
"{:.2f}%".format(unscheduled_mem_pct)
print " Percent oversubscribed mem for compute nodes: \t\t" + \
"{:.2f}%".format(oversub_mem_pct)
self.metric_sender.add_metric({zbx_key_scheduled_mem: scheduled_mem})
self.metric_sender.add_metric({zbx_key_scheduled_mem_pct: int(scheduled_mem_pct)})
self.metric_sender.add_metric({zbx_key_unscheduled_mem: unscheduled_mem})
self.metric_sender.add_metric({zbx_key_unscheduled_mem_pct: int(unscheduled_mem_pct)})
self.metric_sender.add_metric({zbx_key_oversub_mem_pct: int(oversub_mem_pct)})
def cluster_capacity(self):
''' check capacity of compute nodes on cluster'''
# Other zabbix items
zbx_key_max_pods = "openshift.master.cluster.max_mem_pods_schedulable"
self.sql_conn = sqlite3.connect(':memory:')
self.load_nodes()
self.load_pods()
self.do_cpu_stats()
self.do_mem_stats()
print "\nOther stats:"
largest = self.get_largest_pod()
if self.args.debug:
print " Largest memory pod: {}".format(largest)
schedulable = self.how_many_schedulable(largest)
print " Number of max-size nodes schedulable:\t\t\t\t{}".format(schedulable)
self.metric_sender.add_metric({zbx_key_max_pods: schedulable})
if __name__ == '__main__':
OCC = OpenshiftClusterCapacity()
OCC.run()
| apache-2.0 |
PsychoTV/PsychoTeam.repository | plugin.video.SportsDevil/lib/utils/pyDes.py | 54 | 32294 | #############################################################################
# Documentation #
#############################################################################
# Author: Todd Whiteman
# Date: 16th March, 2009
# Version: 2.0.0
# License: Public Domain - free to do as you wish
# Homepage: http://twhiteman.netfirms.com/des.html
#
# This is a pure python implementation of the DES encryption algorithm.
# It's pure python to avoid portability issues, since most DES
# implementations are programmed in C (for performance reasons).
#
# Triple DES class is also implemented, utilising the DES base. Triple DES
# is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key.
#
# See the README.txt that should come with this python module for the
# implementation methods used.
#
# Thanks to:
# * David Broadwell for ideas, comments and suggestions.
# * Mario Wolff for pointing out and debugging some triple des CBC errors.
# * Santiago Palladino for providing the PKCS5 padding technique.
# * Shaya for correcting the PAD_PKCS5 triple des CBC errors.
#
"""A pure python implementation of the DES and TRIPLE DES encryption algorithms.
Class initialization
--------------------
pyDes.des(key, [mode], [IV], [pad], [padmode])
pyDes.triple_des(key, [mode], [IV], [pad], [padmode])
key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes
for Triple DES
mode -> Optional argument for encryption type, can be either
pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Length must be 8 bytes.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use during
       all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5)
           to use during all encrypt/decrypt operations done with this instance.
I recommend using PAD_PKCS5 padding, as you then never need to worry about
padding issues: the padding can be removed unambiguously upon decrypting
data that was encrypted using the PAD_PKCS5 padmode.
Common methods
--------------
encrypt(data, [pad], [padmode])
decrypt(data, [pad], [padmode])
data -> Bytes to be encrypted/decrypted
pad -> Optional argument. Only when using padmode of PAD_NORMAL. For
encryption, adds this characters to the end of the data block when
data is not a multiple of 8 bytes. For decryption, will remove the
trailing characters that match this pad character from the last 8
bytes of the unencrypted data block.
padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL
or PAD_PKCS5). Defaults to PAD_NORMAL.
Example
-------
from pyDes import *
data = "Please encrypt my data"
k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
# For Python3, you'll need to use bytes, i.e.:
# data = b"Please encrypt my data"
# k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
d = k.encrypt(data)
print "Encrypted: %r" % d
print "Decrypted: %r" % k.decrypt(d)
assert k.decrypt(d, padmode=PAD_PKCS5) == data
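A triple_des sketch along the same lines (hypothetical 16 byte key, which
selects DES-EDE2 mode; a 24 byte key would select DES-EDE3 instead):
k3 = triple_des("16bytekey1234567", CBC, "\0\0\0\0\0\0\0\0", padmode=PAD_PKCS5)
d3 = k3.encrypt(data)
assert k3.decrypt(d3) == data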
See the module source (pyDes.py) for more examples of use.
You can also run the pyDes.py file without any arguments to see a simple test.
Note: This code was not written for high-end systems needing a fast
implementation, but rather as a handy, portable solution for light usage.
"""
import sys
# _pythonMajorVersion is used to handle Python2 and Python3 differences.
_pythonMajorVersion = sys.version_info[0]
# Modes of crypting / cyphering
ECB = 0
CBC = 1
# Modes of padding
PAD_NORMAL = 1
PAD_PKCS5 = 2
# PAD_PKCS5 is a padding method that allows all padding characters to be
# removed unambiguously after decryption, when the data was originally
# encrypted with this padding mode.
# For a good description of the PKCS5 padding technique, see:
# http://www.faqs.org/rfcs/rfc1423.html
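# Illustration (hypothetical plaintext): with PAD_PKCS5 a 5 byte input such
# as "HELLO" is padded with three bytes of value 3, giving "HELLO\x03\x03\x03";
# on decryption the final byte states how many padding bytes to strip, which
# is what makes the removal unambiguous. Input already a multiple of 8 bytes
# gains a full block of 8 pad bytes (see _padData below).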
# The base class shared by des and triple des.
class _baseDes(object):
def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
if IV:
IV = self._guardAgainstUnicode(IV)
if pad:
pad = self._guardAgainstUnicode(pad)
self.block_size = 8
# Sanity checking of arguments.
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if IV and len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
# Set the passed in variables
self._mode = mode
self._iv = IV
self._padding = pad
self._padmode = padmode
def getKey(self):
"""getKey() -> bytes"""
return self.__key
def setKey(self, key):
"""Will set the crypting key for this object."""
key = self._guardAgainstUnicode(key)
self.__key = key
def getMode(self):
"""getMode() -> pyDes.ECB or pyDes.CBC"""
return self._mode
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
self._mode = mode
def getPadding(self):
"""getPadding() -> bytes of length 1. Padding character."""
return self._padding
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
if pad is not None:
pad = self._guardAgainstUnicode(pad)
self._padding = pad
def getPadMode(self):
"""getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
return self._padmode
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
self._padmode = mode
def getIV(self):
"""getIV() -> bytes"""
return self._iv
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
if not IV or len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
IV = self._guardAgainstUnicode(IV)
self._iv = IV
def _padData(self, data, pad, padmode):
# Pad data depending on the mode
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode == PAD_NORMAL:
if len(data) % self.block_size == 0:
# No padding required.
return data
if not pad:
# Get the default padding.
pad = self.getPadding()
if not pad:
raise ValueError("Data must be a multiple of " + str(self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.")
data += (self.block_size - (len(data) % self.block_size)) * pad
elif padmode == PAD_PKCS5:
pad_len = 8 - (len(data) % self.block_size)
if _pythonMajorVersion < 3:
data += pad_len * chr(pad_len)
else:
data += bytes([pad_len] * pad_len)
return data
def _unpadData(self, data, pad, padmode):
# Unpad data depending on the mode.
if not data:
return data
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if padmode == PAD_NORMAL:
if not pad:
# Get the default padding.
pad = self.getPadding()
if pad:
data = data[:-self.block_size] + \
data[-self.block_size:].rstrip(pad)
elif padmode == PAD_PKCS5:
if _pythonMajorVersion < 3:
pad_len = ord(data[-1])
else:
pad_len = data[-1]
data = data[:-pad_len]
return data
def _guardAgainstUnicode(self, data):
# Only accept byte strings or ascii unicode values, otherwise
# there is no way to correctly decode the data into bytes.
if _pythonMajorVersion < 3:
if isinstance(data, unicode):
raise ValueError("pyDes can only work with bytes, not Unicode strings.")
else:
if isinstance(data, str):
# Only accept ascii unicode values.
try:
return data.encode('ascii')
except UnicodeEncodeError:
pass
raise ValueError("pyDes can only work with encoded strings, not Unicode.")
return data
#############################################################################
# DES #
#############################################################################
class des(_baseDes):
"""DES encryption/decrytpion class
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key,[mode], [IV])
key -> Bytes containing the encryption key, must be exactly 8 bytes
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
               during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
               PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
# Permutation and translation tables for DES
__pc1 = [56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
]
    # number of left rotations of pc1
__left_rotations = [
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
]
# permuted choice key (table 2)
__pc2 = [
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
]
# initial permutation IP
__ip = [57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
]
# Expansion table for turning 32 bit blocks into 48 bits
__expansion_table = [
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
]
# The (in)famous S-boxes
__sbox = [
# S1
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
# S2
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
# S3
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
# S4
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
# S5
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
# S6
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
# S7
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
# S8
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
# 32-bit permutation function P used on the output of the S-boxes
__p = [
15, 6, 19, 20, 28, 11,
27, 16, 0, 14, 22, 25,
4, 17, 30, 9, 1, 7,
23,13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10,
3, 24
]
# final permutation IP^-1
__fp = [
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
]
# Type of crypting being done
ENCRYPT = 0x00
DECRYPT = 0x01
# Initialisation
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
# Sanity checking of arguments.
if len(key) != 8:
raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.")
_baseDes.__init__(self, mode, IV, pad, padmode)
self.key_size = 8
self.L = []
self.R = []
self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16)
self.final = []
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Must be 8 bytes."""
_baseDes.setKey(self, key)
self.__create_sub_keys()
def __String_to_BitList(self, data):
"""Turn the string data, into a list of bits (1, 0)'s"""
if _pythonMajorVersion < 3:
# Turn the strings into integers. Python 3 uses a bytes
# class, which already has this behaviour.
data = [ord(c) for c in data]
l = len(data) * 8
result = [0] * l
pos = 0
for ch in data:
i = 7
while i >= 0:
if ch & (1 << i) != 0:
result[pos] = 1
else:
result[pos] = 0
pos += 1
i -= 1
return result
def __BitList_to_String(self, data):
"""Turn the list of bits -> data, into a string"""
result = []
pos = 0
c = 0
while pos < len(data):
c += data[pos] << (7 - (pos % 8))
if (pos % 8) == 7:
result.append(c)
c = 0
pos += 1
if _pythonMajorVersion < 3:
return ''.join([ chr(c) for c in result ])
else:
return bytes(result)
def __permutate(self, table, block):
"""Permutate this block with the specified table"""
return list(map(lambda x: block[x], table))
# Transform the secret key, so that it is ready for data processing
# Create the 16 subkeys, K[1] - K[16]
def __create_sub_keys(self):
"""Create the 16 subkeys K[1] to K[16] from the given key"""
key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey()))
i = 0
# Split into Left and Right sections
self.L = key[:28]
self.R = key[28:]
while i < 16:
j = 0
# Perform circular left shifts
while j < des.__left_rotations[i]:
self.L.append(self.L[0])
del self.L[0]
self.R.append(self.R[0])
del self.R[0]
j += 1
# Create one of the 16 subkeys through pc2 permutation
self.Kn[i] = self.__permutate(des.__pc2, self.L + self.R)
i += 1
# Main part of the encryption algorithm, the number cruncher :)
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
block = self.__permutate(des.__ip, block)
self.L = block[:32]
self.R = block[32:]
# Encryption starts from Kn[1] through to Kn[16]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
# Decryption starts from Kn[16] down to Kn[1]
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
# Make a copy of R[i-1], this will later become L[i]
tempR = self.R[:]
# Permutate R[i - 1] to start creating R[i]
self.R = self.__permutate(des.__expansion_table, self.R)
# Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
# Optimization: Replaced below commented code with above
#j = 0
#B = []
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.Kn[iteration][j]
# j += 1
# if j % 6 == 0:
# B.append(self.R[j-6:j])
# Permutate B[1] to B[8] using the S-Boxes
j = 0
Bn = [0] * 32
pos = 0
while j < 8:
# Work out the offsets
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
# Find the permutation value
v = des.__sbox[j][(m << 4) + n]
# Turn value into bits, add it to result: Bn
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
            # Permutate the concatenation of B[1] to B[8] (Bn)
self.R = self.__permutate(des.__p, Bn)
# Xor with L[i - 1]
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
# Optimization: This now replaces the below commented code
#j = 0
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.L[j]
# j += 1
# L[i] becomes R[i - 1]
self.L = tempR
i += 1
iteration += iteration_adjustment
# Final permutation of R[16]L[16]
self.final = self.__permutate(des.__fp, self.R + self.L)
return self.final
# Data to be encrypted/decrypted
def crypt(self, data, crypt_type):
"""Crypt the data in blocks, running it through des_crypt()"""
# Error check the data
if not data:
return ''
if len(data) % self.block_size != 0:
if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.")
if not self.getPadding():
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character")
else:
data += (self.block_size - (len(data) % self.block_size)) * self.getPadding()
# print "Len of data: %f" % (len(data) / self.block_size)
if self.getMode() == CBC:
if self.getIV():
iv = self.__String_to_BitList(self.getIV())
else:
raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering")
        # Split the data into blocks, crypting each one separately
i = 0
dict = {}
result = []
#cached = 0
#lines = 0
while i < len(data):
# Test code for caching encryption results
#lines += 1
#if dict.has_key(data[i:i+8]):
#print "Cached result for: %s" % data[i:i+8]
# cached += 1
# result.append(dict[data[i:i+8]])
# i += 8
# continue
block = self.__String_to_BitList(data[i:i+8])
# Xor with IV if using CBC mode
if self.getMode() == CBC:
if crypt_type == des.ENCRYPT:
block = list(map(lambda x, y: x ^ y, block, iv))
#j = 0
#while j < len(block):
# block[j] = block[j] ^ iv[j]
# j += 1
processed_block = self.__des_crypt(block, crypt_type)
if crypt_type == des.DECRYPT:
processed_block = list(map(lambda x, y: x ^ y, processed_block, iv))
#j = 0
#while j < len(processed_block):
# processed_block[j] = processed_block[j] ^ iv[j]
# j += 1
iv = block
else:
iv = processed_block
else:
processed_block = self.__des_crypt(block, crypt_type)
# Add the resulting crypted block to our list
#d = self.__BitList_to_String(processed_block)
#result.append(d)
result.append(self.__BitList_to_String(processed_block))
#dict[data[i:i+8]] = d
i += 8
# print "Lines: %d, cached: %d" % (lines, cached)
# Return the full crypted string
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure that the padded data is a multiple of 8 bytes.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self._padData(data, pad, padmode)
return self.crypt(data, des.ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
        data : Bytes to be decrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after decrypting.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self.crypt(data, des.DECRYPT)
return self._unpadData(data, pad, padmode)
#############################################################################
# Triple DES #
#############################################################################
class triple_des(_baseDes):
"""Triple DES encryption/decrytpion class
This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or
the DES-EDE2 (when a 16 byte key is supplied) encryption methods.
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
    pyDes.triple_des(key, [mode], [IV])
key -> Bytes containing the encryption key, must be either 16 or
24 bytes long
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
               during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
               PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
_baseDes.__init__(self, mode, IV, pad, padmode)
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Either 16 or 24 bytes long."""
self.key_size = 24 # Use DES-EDE3 mode
if len(key) != self.key_size:
if len(key) == 16: # Use DES-EDE2 mode
self.key_size = 16
else:
raise ValueError("Invalid triple DES key size. Key must be either 16 or 24 bytes long")
if self.getMode() == CBC:
if not self.getIV():
# Use the first 8 bytes of the key
self._iv = key[:self.block_size]
if len(self.getIV()) != self.block_size:
raise ValueError("Invalid IV, must be 8 bytes in length")
self.__key1 = des(key[:8], self._mode, self._iv,
self._padding, self._padmode)
self.__key2 = des(key[8:16], self._mode, self._iv,
self._padding, self._padmode)
if self.key_size == 16:
self.__key3 = self.__key1
else:
self.__key3 = des(key[16:], self._mode, self._iv,
self._padding, self._padmode)
_baseDes.setKey(self, key)
# Override setter methods to work on all 3 keys.
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
_baseDes.setMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setMode(mode)
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
_baseDes.setPadding(self, pad)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadding(pad)
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
_baseDes.setPadMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadMode(mode)
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
_baseDes.setIV(self, IV)
for key in (self.__key1, self.__key2, self.__key3):
key.setIV(IV)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure that the padded data is a multiple of 8 bytes.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
# Pad the data accordingly.
data = self._padData(data, pad, padmode)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
block = self.__key1.crypt(data[i:i+8], ENCRYPT)
block = self.__key2.crypt(block, DECRYPT)
block = self.__key3.crypt(block, ENCRYPT)
self.__key1.setIV(block)
self.__key2.setIV(block)
self.__key3.setIV(block)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
else:
data = self.__key1.crypt(data, ENCRYPT)
data = self.__key2.crypt(data, DECRYPT)
return self.__key3.crypt(data, ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
        data : bytes to be decrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after
decrypting, no pad character is required for PAD_PKCS5.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
iv = data[i:i+8]
block = self.__key3.crypt(iv, DECRYPT)
block = self.__key2.crypt(block, ENCRYPT)
block = self.__key1.crypt(block, DECRYPT)
self.__key1.setIV(iv)
self.__key2.setIV(iv)
self.__key3.setIV(iv)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
data = ''.join(result)
else:
data = bytes.fromhex('').join(result)
else:
data = self.__key3.crypt(data, DECRYPT)
data = self.__key2.crypt(data, ENCRYPT)
data = self.__key1.crypt(data, DECRYPT)
return self._unpadData(data, pad, padmode)
| gpl-2.0 |
vmahuli/tempest | tempest/api/identity/admin/test_services.py | 2 | 4564 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import moves
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class ServicesTestJSON(base.BaseIdentityV2AdminTest):
_interface = 'json'
def _del_service(self, service_id):
# Deleting the service created in this method
resp, _ = self.client.delete_service(service_id)
self.assertEqual(204, resp.status)
# Checking whether service is deleted successfully
self.assertRaises(exceptions.NotFound, self.client.get_service,
service_id)
@test.attr(type='smoke')
def test_create_get_delete_service(self):
# GET Service
# Creating a Service
name = data_utils.rand_name('service-')
type = data_utils.rand_name('type--')
description = data_utils.rand_name('description-')
resp, service_data = self.client.create_service(
name, type, description=description)
self.assertFalse(service_data['id'] is None)
self.addCleanup(self._del_service, service_data['id'])
self.assertEqual(200, resp.status)
# Verifying response body of create service
self.assertIn('id', service_data)
self.assertIn('name', service_data)
self.assertEqual(name, service_data['name'])
self.assertIn('type', service_data)
self.assertEqual(type, service_data['type'])
self.assertIn('description', service_data)
self.assertEqual(description, service_data['description'])
# Get service
resp, fetched_service = self.client.get_service(service_data['id'])
self.assertEqual(200, resp.status)
# verifying the existence of service created
self.assertIn('id', fetched_service)
self.assertEqual(fetched_service['id'], service_data['id'])
self.assertIn('name', fetched_service)
self.assertEqual(fetched_service['name'], service_data['name'])
self.assertIn('type', fetched_service)
self.assertEqual(fetched_service['type'], service_data['type'])
self.assertIn('description', fetched_service)
self.assertEqual(fetched_service['description'],
service_data['description'])
@test.attr(type='gate')
def test_create_service_without_description(self):
# Create a service only with name and type
name = data_utils.rand_name('service-')
type = data_utils.rand_name('type--')
resp, service = self.client.create_service(name, type)
self.assertIn('id', service)
        self.assertEqual(200, resp.status)
self.addCleanup(self._del_service, service['id'])
self.assertIn('name', service)
self.assertEqual(name, service['name'])
self.assertIn('type', service)
self.assertEqual(type, service['type'])
@test.attr(type='smoke')
def test_list_services(self):
# Create, List, Verify and Delete Services
services = []
for _ in moves.xrange(3):
name = data_utils.rand_name('service-')
type = data_utils.rand_name('type--')
description = data_utils.rand_name('description-')
resp, service = self.client.create_service(
name, type, description=description)
services.append(service)
service_ids = map(lambda x: x['id'], services)
def delete_services():
for service_id in service_ids:
self.client.delete_service(service_id)
self.addCleanup(delete_services)
# List and Verify Services
resp, body = self.client.list_services()
self.assertEqual(200, resp.status)
found = [service for service in body if service['id'] in service_ids]
self.assertEqual(len(found), len(services), 'Services not found')
class ServicesTestXML(ServicesTestJSON):
_interface = 'xml'
| apache-2.0 |
stormpath/stormpath-django | django_stormpath/urls.py | 2 | 2179 | from django.conf.urls import url
from django.conf import settings
from django_stormpath import views
urlpatterns = [
url(r'^login/$', views.stormpath_id_site_login, name='stormpath_id_site_login'),
url(r'^logout/$', views.stormpath_id_site_logout, name='stormpath_id_site_logout'),
url(r'^register/$', views.stormpath_id_site_register, name='stormpath_id_site_register'),
url(r'^forgot-password/$', views.stormpath_id_site_forgot_password, name='stormpath_id_site_forgot_password'),
url(r'^handle-callback/(?P<provider>stormpath)', views.stormpath_callback, name='stormpath_id_site_callback'),
]
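# Each provider's routes below are registered only when the matching feature
# flag is enabled in Django settings, e.g. (hypothetical settings.py values):
# STORMPATH_ENABLE_GOOGLE = True
# STORMPATH_ENABLE_FACEBOOK = True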
if getattr(settings, 'STORMPATH_ENABLE_GOOGLE', False):
urlpatterns += [
url(r'handle-callback/(?P<provider>google)', views.stormpath_callback,
name='stormpath_google_login_callback'),
url(r'^social-login/(?P<provider>google)/', views.stormpath_social_login,
name='stormpath_google_social_login'),
]
if getattr(settings, 'STORMPATH_ENABLE_FACEBOOK', False):
urlpatterns += [
url(r'handle-callback/(?P<provider>facebook)', views.stormpath_callback,
name='stormpath_facebook_login_callback'),
url(r'^social-login/(?P<provider>facebook)/', views.stormpath_social_login,
name='stormpath_facebook_social_login'),
]
if getattr(settings, 'STORMPATH_ENABLE_GITHUB', False):
urlpatterns += [
url(r'handle-callback/(?P<provider>github)', views.stormpath_callback,
name='stormpath_github_login_callback'),
url(r'^social-login/(?P<provider>github)/', views.stormpath_social_login,
name='stormpath_github_social_login'),
]
if getattr(settings, 'STORMPATH_ENABLE_LINKEDIN', False):
urlpatterns += [
url(r'handle-callback/(?P<provider>linkedin)', views.stormpath_callback,
name='stormpath_linkedin_login_callback'),
url(r'^social-login/(?P<provider>linkedin)/', views.stormpath_social_login,
name='stormpath_linkedin_social_login'),
]
if django.VERSION[:2] < (1, 8):
from django.conf.urls import patterns
urlpatterns = patterns('django_stormpath.views', *urlpatterns)
| apache-2.0 |
0xCCD/mitro | browser-ext/third_party/firefox-addon-sdk/python-lib/mozrunner/__init__.py | 36 | 27264 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import copy
import tempfile
import signal
import commands
import zipfile
import optparse
import killableprocess
import subprocess
import platform
import shutil
from StringIO import StringIO
from xml.dom import minidom
from distutils import dir_util
from time import sleep
# conditional (version-dependent) imports
try:
import simplejson
except ImportError:
import json as simplejson
import logging
logger = logging.getLogger(__name__)
# Use dir_util for copy/rm operations because shutil is all kinds of broken
copytree = dir_util.copy_tree
rmtree = dir_util.remove_tree
def findInPath(fileName, path=os.environ['PATH']):
dirs = path.split(os.pathsep)
for dir in dirs:
if os.path.isfile(os.path.join(dir, fileName)):
return os.path.join(dir, fileName)
if os.name == 'nt' or sys.platform == 'cygwin':
if os.path.isfile(os.path.join(dir, fileName + ".exe")):
return os.path.join(dir, fileName + ".exe")
return None
stdout = sys.stdout
stderr = sys.stderr
stdin = sys.stdin
def run_command(cmd, env=None, **kwargs):
"""Run the given command in killable process."""
killable_kwargs = {'stdout':stdout ,'stderr':stderr, 'stdin':stdin}
killable_kwargs.update(kwargs)
if sys.platform != "win32":
return killableprocess.Popen(cmd, preexec_fn=lambda : os.setpgid(0, 0),
env=env, **killable_kwargs)
else:
return killableprocess.Popen(cmd, env=env, **killable_kwargs)
def getoutput(l):
tmp = tempfile.mktemp()
x = open(tmp, 'w')
subprocess.call(l, stdout=x, stderr=x)
x.close(); x = open(tmp, 'r')
r = x.read() ; x.close()
os.remove(tmp)
return r
def get_pids(name, minimum_pid=0):
    """Get all the pids matching name, excluding any pids at or below minimum_pid."""
if os.name == 'nt' or sys.platform == 'cygwin':
import wpk
pids = wpk.get_pids(name)
else:
data = getoutput(['ps', 'ax']).splitlines()
        pids = [int(line.split()[0]) for line in data if line.find(name) != -1]
    matching_pids = [m for m in pids if m > minimum_pid]
return matching_pids
def makedirs(name):
head, tail = os.path.split(name)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
try:
makedirs(head)
except OSError, e:
pass
if tail == os.curdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
os.mkdir(name)
except:
pass
# addon_details() copied from mozprofile
def addon_details(install_rdf_fh):
"""
returns a dictionary of details about the addon
    - install_rdf_fh : an open file handle for the addon's install.rdf
Returns:
{'id': u'rainbow@colors.org', # id of the addon
'version': u'1.4', # version of the addon
'name': u'Rainbow', # name of the addon
     'unpack': False}            # whether to unpack the addon
"""
details = {
'id': None,
'unpack': False,
'name': None,
'version': None
}
def get_namespace_id(doc, url):
attributes = doc.documentElement.attributes
namespace = ""
for i in range(attributes.length):
if attributes.item(i).value == url:
if ":" in attributes.item(i).name:
# If the namespace is not the default one remove 'xlmns:'
namespace = attributes.item(i).name.split(':')[1] + ":"
break
return namespace
def get_text(element):
"""Retrieve the text value of a given node"""
rc = []
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc).strip()
doc = minidom.parse(install_rdf_fh)
# Get the namespaces abbreviations
em = get_namespace_id(doc, "http://www.mozilla.org/2004/em-rdf#")
rdf = get_namespace_id(doc, "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
description = doc.getElementsByTagName(rdf + "Description").item(0)
for node in description.childNodes:
# Remove the namespace prefix from the tag for comparison
entry = node.nodeName.replace(em, "")
if entry in details.keys():
details.update({ entry: get_text(node) })
# turn unpack into a true/false value
if isinstance(details['unpack'], basestring):
details['unpack'] = details['unpack'].lower() == 'true'
return details
class Profile(object):
"""Handles all operations regarding profile. Created new profiles, installs extensions,
sets preferences and handles cleanup."""
def __init__(self, binary=None, profile=None, addons=None,
preferences=None):
self.binary = binary
self.create_new = not(bool(profile))
if profile:
self.profile = profile
else:
self.profile = self.create_new_profile(self.binary)
self.addons_installed = []
self.addons = addons or []
### set preferences from class preferences
preferences = preferences or {}
if hasattr(self.__class__, 'preferences'):
self.preferences = self.__class__.preferences.copy()
else:
self.preferences = {}
self.preferences.update(preferences)
for addon in self.addons:
self.install_addon(addon)
self.set_preferences(self.preferences)
def create_new_profile(self, binary):
"""Create a new clean profile in tmp which is a simple empty folder"""
profile = tempfile.mkdtemp(suffix='.mozrunner')
return profile
def unpack_addon(self, xpi_zipfile, addon_path):
for name in xpi_zipfile.namelist():
if name.endswith('/'):
makedirs(os.path.join(addon_path, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(addon_path, name))):
makedirs(os.path.dirname(os.path.join(addon_path, name)))
data = xpi_zipfile.read(name)
f = open(os.path.join(addon_path, name), 'wb')
f.write(data) ; f.close()
zi = xpi_zipfile.getinfo(name)
os.chmod(os.path.join(addon_path,name), (zi.external_attr>>16))
def install_addon(self, path):
"""Installs the given addon or directory of addons in the profile."""
extensions_path = os.path.join(self.profile, 'extensions')
if not os.path.exists(extensions_path):
os.makedirs(extensions_path)
addons = [path]
if not path.endswith('.xpi') and not os.path.exists(os.path.join(path, 'install.rdf')):
addons = [os.path.join(path, x) for x in os.listdir(path)]
for addon in addons:
if addon.endswith('.xpi'):
xpi_zipfile = zipfile.ZipFile(addon, "r")
details = addon_details(StringIO(xpi_zipfile.read('install.rdf')))
addon_path = os.path.join(extensions_path, details["id"])
if details.get("unpack", True):
self.unpack_addon(xpi_zipfile, addon_path)
self.addons_installed.append(addon_path)
else:
shutil.copy(addon, addon_path + '.xpi')
else:
# it's already unpacked, but we need to extract the id so we
# can copy it
details = addon_details(open(os.path.join(addon, "install.rdf"), "rb"))
addon_path = os.path.join(extensions_path, details["id"])
shutil.copytree(addon, addon_path, symlinks=True)
def set_preferences(self, preferences):
"""Adds preferences dict to profile preferences"""
prefs_file = os.path.join(self.profile, 'user.js')
# Ensure that the file exists first otherwise create an empty file
if os.path.isfile(prefs_file):
f = open(prefs_file, 'a+')
else:
f = open(prefs_file, 'w')
f.write('\n#MozRunner Prefs Start\n')
pref_lines = ['user_pref(%s, %s);' %
(simplejson.dumps(k), simplejson.dumps(v) ) for k, v in
preferences.items()]
for line in pref_lines:
f.write(line+'\n')
f.write('#MozRunner Prefs End\n')
f.flush() ; f.close()
def pop_preferences(self):
"""
pop the last set of preferences added
returns True if popped
"""
# our magic markers
delimeters = ('#MozRunner Prefs Start', '#MozRunner Prefs End')
lines = file(os.path.join(self.profile, 'user.js')).read().splitlines()
def last_index(_list, value):
"""
returns the last index of an item;
this should actually be part of python code but it isn't
"""
for index in reversed(range(len(_list))):
if _list[index] == value:
return index
s = last_index(lines, delimeters[0])
e = last_index(lines, delimeters[1])
# ensure both markers are found
if s is None:
assert e is None, '%s found without %s' % (delimeters[1], delimeters[0])
return False # no preferences found
elif e is None:
            assert False, '%s found without %s' % (delimeters[0], delimeters[1])
# ensure the markers are in the proper order
        assert e > s, '%s found at %s, while %s found at %s' % (delimeters[1], e, delimeters[0], s)
# write the prefs
cleaned_prefs = '\n'.join(lines[:s] + lines[e+1:])
f = file(os.path.join(self.profile, 'user.js'), 'w')
f.write(cleaned_prefs)
f.close()
return True
def clean_preferences(self):
"""Removed preferences added by mozrunner."""
while True:
if not self.pop_preferences():
break
def clean_addons(self):
"""Cleans up addons in the profile."""
for addon in self.addons_installed:
if os.path.isdir(addon):
rmtree(addon)
def cleanup(self):
"""Cleanup operations on the profile."""
def oncleanup_error(function, path, excinfo):
#TODO: How should we handle this?
print "Error Cleaning up: " + str(excinfo[1])
if self.create_new:
shutil.rmtree(self.profile, False, oncleanup_error)
else:
self.clean_preferences()
self.clean_addons()
class FirefoxProfile(Profile):
"""Specialized Profile subclass for Firefox"""
preferences = {# Don't automatically update the application
'app.update.enabled' : False,
# Don't restore the last open set of tabs if the browser has crashed
'browser.sessionstore.resume_from_crash': False,
# Don't check for the default web browser
'browser.shell.checkDefaultBrowser' : False,
# Don't warn on exit when multiple tabs are open
'browser.tabs.warnOnClose' : False,
# Don't warn when exiting the browser
'browser.warnOnQuit': False,
# Only install add-ons from the profile and the app folder
'extensions.enabledScopes' : 5,
# Don't automatically update add-ons
'extensions.update.enabled' : False,
# Don't open a dialog to show available add-on updates
'extensions.update.notifyUser' : False,
}
# The possible names of application bundles on Mac OS X, in order of
# preference from most to least preferred.
# Note: Nightly is obsolete, as it has been renamed to FirefoxNightly,
# but it will still be present if users update an older nightly build
# via the app update service.
bundle_names = ['Firefox', 'FirefoxNightly', 'Nightly']
# The possible names of binaries, in order of preference from most to least
# preferred.
@property
def names(self):
if sys.platform == 'darwin':
return ['firefox', 'nightly', 'shiretoko']
if (sys.platform == 'linux2') or (sys.platform in ('sunos5', 'solaris')):
return ['firefox', 'mozilla-firefox', 'iceweasel']
if os.name == 'nt' or sys.platform == 'cygwin':
return ['firefox']
class ThunderbirdProfile(Profile):
preferences = {'extensions.update.enabled' : False,
'extensions.update.notifyUser' : False,
'browser.shell.checkDefaultBrowser' : False,
'browser.tabs.warnOnClose' : False,
'browser.warnOnQuit': False,
'browser.sessionstore.resume_from_crash': False,
}
# The possible names of application bundles on Mac OS X, in order of
# preference from most to least preferred.
bundle_names = ["Thunderbird", "Shredder"]
# The possible names of binaries, in order of preference from most to least
# preferred.
names = ["thunderbird", "shredder"]
class Runner(object):
"""Handles all running operations. Finds bins, runs and kills the process."""
def __init__(self, binary=None, profile=None, cmdargs=[], env=None,
kp_kwargs={}):
if binary is None:
self.binary = self.find_binary()
elif sys.platform == 'darwin' and binary.find('Contents/MacOS/') == -1:
self.binary = os.path.join(binary, 'Contents/MacOS/%s-bin' % self.names[0])
else:
self.binary = binary
if not os.path.exists(self.binary):
raise Exception("Binary path does not exist "+self.binary)
if sys.platform == 'linux2' and self.binary.endswith('-bin'):
dirname = os.path.dirname(self.binary)
if os.environ.get('LD_LIBRARY_PATH', None):
os.environ['LD_LIBRARY_PATH'] = '%s:%s' % (os.environ['LD_LIBRARY_PATH'], dirname)
else:
os.environ['LD_LIBRARY_PATH'] = dirname
# Disable the crash reporter by default
os.environ['MOZ_CRASHREPORTER_NO_REPORT'] = '1'
self.profile = profile
self.cmdargs = cmdargs
if env is None:
self.env = copy.copy(os.environ)
self.env.update({'MOZ_NO_REMOTE':"1",})
else:
self.env = env
self.kp_kwargs = kp_kwargs or {}
def find_binary(self):
"""Finds the binary for self.names if one was not provided."""
binary = None
if sys.platform in ('linux2', 'sunos5', 'solaris') \
or sys.platform.startswith('freebsd'):
for name in reversed(self.names):
binary = findInPath(name)
elif os.name == 'nt' or sys.platform == 'cygwin':
# find the default executable from the windows registry
try:
import _winreg
except ImportError:
pass
else:
sam_flags = [0]
# KEY_WOW64_32KEY etc only appeared in 2.6+, but that's OK as
# only 2.6+ has functioning 64bit builds.
if hasattr(_winreg, "KEY_WOW64_32KEY"):
if "64 bit" in sys.version:
# a 64bit Python should also look in the 32bit registry
sam_flags.append(_winreg.KEY_WOW64_32KEY)
else:
# possibly a 32bit Python on 64bit Windows, so look in
# the 64bit registry incase there is a 64bit app.
sam_flags.append(_winreg.KEY_WOW64_64KEY)
for sam_flag in sam_flags:
try:
# assumes self.app_name is defined, as it should be for
# implementors
keyname = r"Software\Mozilla\Mozilla %s" % self.app_name
sam = _winreg.KEY_READ | sam_flag
app_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keyname, 0, sam)
version, _type = _winreg.QueryValueEx(app_key, "CurrentVersion")
version_key = _winreg.OpenKey(app_key, version + r"\Main")
path, _ = _winreg.QueryValueEx(version_key, "PathToExe")
return path
except _winreg.error:
pass
# search for the binary in the path
for name in reversed(self.names):
binary = findInPath(name)
if sys.platform == 'cygwin':
program_files = os.environ['PROGRAMFILES']
else:
program_files = os.environ['ProgramFiles']
if binary is None:
for bin in [(program_files, 'Mozilla Firefox', 'firefox.exe'),
(os.environ.get("ProgramFiles(x86)"),'Mozilla Firefox', 'firefox.exe'),
(program_files, 'Nightly', 'firefox.exe'),
(os.environ.get("ProgramFiles(x86)"),'Nightly', 'firefox.exe'),
(program_files, 'Aurora', 'firefox.exe'),
(os.environ.get("ProgramFiles(x86)"),'Aurora', 'firefox.exe')
]:
path = os.path.join(*bin)
if os.path.isfile(path):
binary = path
break
elif sys.platform == 'darwin':
for bundle_name in self.bundle_names:
# Look for the application bundle in the user's home directory
# or the system-wide /Applications directory. If we don't find
# it in one of those locations, we move on to the next possible
# bundle name.
appdir = os.path.join("~/Applications/%s.app" % bundle_name)
if not os.path.isdir(appdir):
appdir = "/Applications/%s.app" % bundle_name
if not os.path.isdir(appdir):
continue
# Look for a binary with any of the possible binary names
# inside the application bundle.
for binname in self.names:
binpath = os.path.join(appdir,
"Contents/MacOS/%s-bin" % binname)
if (os.path.isfile(binpath)):
binary = binpath
break
if binary:
break
if binary is None:
raise Exception('Mozrunner could not locate your binary, you will need to set it.')
return binary
@property
def command(self):
"""Returns the command list to run."""
cmd = [self.binary, '-profile', self.profile.profile]
# On i386 OS X machines, i386+x86_64 universal binaries need to be told
# to run as i386 binaries. If we're not running a i386+x86_64 universal
# binary, then this command modification is harmless.
if sys.platform == 'darwin':
if hasattr(platform, 'architecture') and platform.architecture()[0] == '32bit':
cmd = ['arch', '-i386'] + cmd
return cmd
def get_repositoryInfo(self):
"""Read repository information from application.ini and platform.ini."""
import ConfigParser
config = ConfigParser.RawConfigParser()
dirname = os.path.dirname(self.binary)
repository = { }
for entry in [['application', 'App'], ['platform', 'Build']]:
(file, section) = entry
config.read(os.path.join(dirname, '%s.ini' % file))
for entry in [['SourceRepository', 'repository'], ['SourceStamp', 'changeset']]:
(key, id) = entry
try:
repository['%s_%s' % (file, id)] = config.get(section, key);
except:
repository['%s_%s' % (file, id)] = None
return repository
def start(self):
"""Run self.command in the proper environment."""
if self.profile is None:
self.profile = self.profile_class()
self.process_handler = run_command(self.command+self.cmdargs, self.env, **self.kp_kwargs)
def wait(self, timeout=None):
"""Wait for the browser to exit."""
self.process_handler.wait(timeout=timeout)
if sys.platform != 'win32':
for name in self.names:
for pid in get_pids(name, self.process_handler.pid):
self.process_handler.pid = pid
self.process_handler.wait(timeout=timeout)
def kill(self, kill_signal=signal.SIGTERM):
"""Kill the browser"""
if sys.platform != 'win32':
self.process_handler.kill()
for name in self.names:
for pid in get_pids(name, self.process_handler.pid):
self.process_handler.pid = pid
self.process_handler.kill()
else:
try:
self.process_handler.kill(group=True)
# On windows, it sometimes behooves one to wait for dust to settle
# after killing processes. Let's try that.
                # TODO: Bug 640047 is investigating the correct way to handle this case
self.process_handler.wait(timeout=10)
except Exception, e:
logger.error('Cannot kill process, '+type(e).__name__+' '+e.message)
def stop(self):
self.kill()
class FirefoxRunner(Runner):
"""Specialized Runner subclass for running Firefox."""
app_name = 'Firefox'
profile_class = FirefoxProfile
# The possible names of application bundles on Mac OS X, in order of
# preference from most to least preferred.
# Note: Nightly is obsolete, as it has been renamed to FirefoxNightly,
# but it will still be present if users update an older nightly build
# only via the app update service.
bundle_names = ['Firefox', 'FirefoxNightly', 'Nightly']
@property
def names(self):
if sys.platform == 'darwin':
return ['firefox', 'nightly', 'shiretoko']
if sys.platform in ('linux2', 'sunos5', 'solaris') \
or sys.platform.startswith('freebsd'):
return ['firefox', 'mozilla-firefox', 'iceweasel']
if os.name == 'nt' or sys.platform == 'cygwin':
return ['firefox']
class ThunderbirdRunner(Runner):
"""Specialized Runner subclass for running Thunderbird"""
app_name = 'Thunderbird'
profile_class = ThunderbirdProfile
# The possible names of application bundles on Mac OS X, in order of
# preference from most to least preferred.
bundle_names = ["Thunderbird", "Shredder"]
# The possible names of binaries, in order of preference from most to least
# preferred.
names = ["thunderbird", "shredder"]
class CLI(object):
"""Command line interface."""
runner_class = FirefoxRunner
profile_class = FirefoxProfile
module = "mozrunner"
parser_options = {("-b", "--binary",): dict(dest="binary", help="Binary path.",
metavar=None, default=None),
('-p', "--profile",): dict(dest="profile", help="Profile path.",
metavar=None, default=None),
('-a', "--addons",): dict(dest="addons",
help="Addons paths to install.",
metavar=None, default=None),
("--info",): dict(dest="info", default=False,
action="store_true",
help="Print module information")
}
def __init__(self):
""" Setup command line parser and parse arguments """
self.metadata = self.get_metadata_from_egg()
self.parser = optparse.OptionParser(version="%prog " + self.metadata["Version"])
for names, opts in self.parser_options.items():
self.parser.add_option(*names, **opts)
(self.options, self.args) = self.parser.parse_args()
if self.options.info:
self.print_metadata()
sys.exit(0)
# XXX should use action='append' instead of rolling our own
try:
self.addons = self.options.addons.split(',')
        except AttributeError:
self.addons = []
def get_metadata_from_egg(self):
import pkg_resources
ret = {}
dist = pkg_resources.get_distribution(self.module)
if dist.has_metadata("PKG-INFO"):
for line in dist.get_metadata_lines("PKG-INFO"):
key, value = line.split(':', 1)
ret[key] = value
if dist.has_metadata("requires.txt"):
ret["Dependencies"] = "\n" + dist.get_metadata("requires.txt")
return ret
def print_metadata(self, data=("Name", "Version", "Summary", "Home-page",
"Author", "Author-email", "License", "Platform", "Dependencies")):
for key in data:
if key in self.metadata:
print key + ": " + self.metadata[key]
def create_runner(self):
""" Get the runner object """
runner = self.get_runner(binary=self.options.binary)
profile = self.get_profile(binary=runner.binary,
profile=self.options.profile,
addons=self.addons)
runner.profile = profile
return runner
def get_runner(self, binary=None, profile=None):
"""Returns the runner instance for the given command line binary argument
the profile instance returned from self.get_profile()."""
return self.runner_class(binary, profile)
def get_profile(self, binary=None, profile=None, addons=None, preferences=None):
"""Returns the profile instance for the given command line arguments."""
addons = addons or []
preferences = preferences or {}
return self.profile_class(binary, profile, addons, preferences)
def run(self):
runner = self.create_runner()
self.start(runner)
runner.profile.cleanup()
def start(self, runner):
"""Starts the runner and waits for Firefox to exitor Keyboard Interrupt.
Shoule be overwritten to provide custom running of the runner instance."""
runner.start()
print 'Started:', ' '.join(runner.command)
try:
runner.wait()
except KeyboardInterrupt:
runner.stop()
def cli():
CLI().run()
| gpl-3.0 |
rrussell39/selenium | py/selenium/webdriver/support/event_firing_webdriver.py | 71 | 13011 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from .abstract_event_listener import AbstractEventListener
def _wrap_elements(result, ef_driver):
if isinstance(result, WebElement):
return EventFiringWebElement(result, ef_driver)
elif isinstance(result, list):
return [_wrap_elements(item, ef_driver) for item in result]
else:
return result
class EventFiringWebDriver(object):
"""
A wrapper around an arbitrary WebDriver instance which supports firing events
"""
def __init__(self, driver, event_listener):
"""
Creates a new instance of the EventFiringWebDriver
:Args:
- driver : A WebDriver instance
- event_listener : Instance of a class that subclasses AbstractEventListener and implements it fully or partially
Example:
.. code-block:: python
from selenium.webdriver import Firefox
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
class MyListener(AbstractEventListener):
def before_navigate_to(self, url, driver):
print("Before navigate to %s" % url)
def after_navigate_to(self, url, driver):
print("After navigate to %s" % url)
driver = Firefox()
ef_driver = EventFiringWebDriver(driver, MyListener())
ef_driver.get("http://www.google.co.in/")
"""
if not isinstance(driver, WebDriver):
raise WebDriverException("A WebDriver instance must be supplied")
if not isinstance(event_listener, AbstractEventListener):
raise WebDriverException("Event listener must be a subclass of AbstractEventListener")
self._driver = driver
self._driver._wrap_value = self._wrap_value
self._listener = event_listener
@property
def wrapped_driver(self):
"""Returns the WebDriver instance wrapped by this EventsFiringWebDriver"""
return self._driver
def get(self, url):
self._dispatch("navigate_to", (url, self._driver), "get", (url, ))
def back(self):
self._dispatch("navigate_back", (self._driver,), "back", ())
def forward(self):
self._dispatch("navigate_forward", (self._driver,), "forward", ())
def execute_script(self, script, *args):
unwrapped_args = (script,) + self._unwrap_element_args(args)
return self._dispatch("execute_script", (script, self._driver), "execute_script", unwrapped_args)
def execute_async_script(self, script, *args):
unwrapped_args = (script,) + self._unwrap_element_args(args)
return self._dispatch("execute_script", (script, self._driver), "execute_async_script", unwrapped_args)
def close(self):
self._dispatch("close", (self._driver,), "close", ())
def quit(self):
self._dispatch("quit", (self._driver,), "quit", ())
def find_element(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
def find_elements(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
def find_element_by_id(self, id_):
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
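        # Naming convention (inferred from the wrapper methods above): l_call
        # selects the listener hook pair before_<l_call>/after_<l_call>, while
        # d_call names the underlying WebDriver method invoked with d_args.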
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._driver, d_call)(*d_args)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self)
def _unwrap_element_args(self, args):
if isinstance(args, EventFiringWebElement):
return args.wrapped_element
elif isinstance(args, tuple):
return tuple([self._unwrap_element_args(item) for item in args])
elif isinstance(args, list):
return [self._unwrap_element_args(item) for item in args]
else:
return args
def _wrap_value(self, value):
if isinstance(value, EventFiringWebElement):
return WebDriver._wrap_value(self._driver, value.wrapped_element)
return WebDriver._wrap_value(self._driver, value)
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._driver, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._driver, item, value)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args):
try:
result = attrib(*args)
return _wrap_elements(result, self)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
if hasattr(self._driver, name):
try:
attrib = getattr(self._driver, name)
if not callable(attrib):
return attrib
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
return _wrap
raise AttributeError(name)
class EventFiringWebElement(object):
""""
A wrapper around WebElement instance which supports firing events
"""
def __init__(self, webelement, ef_driver):
"""
Creates a new instance of the EventFiringWebElement
"""
self._webelement = webelement
self._ef_driver = ef_driver
self._driver = ef_driver.wrapped_driver
self._listener = ef_driver._listener
@property
def wrapped_element(self):
"""Returns the WebElement wrapped by this EventFiringWebElement instance"""
return self._webelement
def click(self):
self._dispatch("click", (self._webelement, self._driver), "click", ())
def clear(self):
self._dispatch("change_value_of", (self._webelement, self._driver), "clear", ())
def send_keys(self, *value):
self._dispatch("change_value_of", (self._webelement, self._driver), "send_keys", value)
def find_element(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
def find_elements(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
def find_element_by_id(self, id_):
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._webelement, d_call)(*d_args)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self._ef_driver)
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._webelement, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._webelement, item, value)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args):
try:
result = attrib(*args)
return _wrap_elements(result, self._ef_driver)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
if hasattr(self._webelement, name):
try:
attrib = getattr(self._webelement, name)
if not callable(attrib):
return attrib
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
return _wrap
raise AttributeError(name)
| apache-2.0 |
simonwydooghe/ansible | test/units/modules/network/check_point/test_cp_mgmt_service_tcp_facts.py | 19 | 2854 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_service_tcp_facts
OBJECT = {
"from": 1,
"to": 1,
"total": 6,
"objects": [
"53de74b7-8f19-4cbe-99fc-a81ef0759bad"
]
}
SHOW_PLURAL_PAYLOAD = {
'limit': 1,
'details_level': 'uid'
}
SHOW_SINGLE_PAYLOAD = {
'name': 'object_which_is_not_exist'
}
api_call_object = 'service-tcp'
api_call_object_plural_version = 'services-tcp'
failure_msg = '''{u'message': u'Requested object [object_which_is_not_exist] not found', u'code': u'generic_err_object_not_found'}'''
class TestCheckpointServiceTcpFacts(object):
module = cp_mgmt_service_tcp_facts
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_show_single_object_which_is_not_exist(self, mocker, connection_mock):
connection_mock.send_request.return_value = (404, failure_msg)
try:
result = self._run_module(SHOW_SINGLE_PAYLOAD)
except Exception as e:
result = e.args[0]
assert result['failed']
assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']
def test_show_few_objects(self, mocker, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(SHOW_PLURAL_PAYLOAD)
assert not result['changed']
assert OBJECT == result['ansible_facts'][api_call_object_plural_version]
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
| gpl-3.0 |
elthariel/dff | api/taskmanager/scheduler.py | 1 | 2094 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Solal J. <sja@digital-forensic.org>
#
import threading
import sys
import traceback
from Queue import *
from types import *
from api.loader import *
from api.exceptions.libexceptions import *
from api.env import *
event_type = ["refresh_tree", "add_str", "add_qwidget"]
class WorkQueue():
class __WorkQueue():
def launch(self):
while True:
work = self.waitQueue.get()
self.workerQueue.put(work)
def enqueue(self, proc):
self.waitQueue.put(proc)
def set_callback(self, type, func):
if type in self.event_func:
self.event_func[type].append(func)
def worker(self):
while True:
proc = self.workerQueue.get()
proc.launch()
self.workerQueue.task_done()
def __init__(self, max = 5):
self.waitQueue = Queue()
self.workerQueue = Queue(max)
self.max = max
self.env = env.env()
self.event_func = {}
for type in event_type:
self.event_func[type] = []
for i in range(max):
thread = threading.Thread(target = self.worker)
thread.setDaemon(True)
thread.start()
__instance = None
def __init__(self):
if WorkQueue.__instance is None:
WorkQueue.__instance = WorkQueue.__WorkQueue()
def __setattr__(self, attr, value):
setattr(self.__instance, attr, value)
def __getattr__(self, attr):
return getattr(self.__instance, attr)
sched = WorkQueue()
def voidcall(node):
pass
sched.set_callback("refresh_tree", voidcall)
sched.set_callback("add_widget", voidcall)
sched.set_callback("add_str", voidcall)
thread = threading.Thread(target = sched.launch)
thread.setDaemon(True)
thread.start()
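# Usage sketch (illustrative; the task type is an assumption -- the worker
# threads above only require an object exposing a launch() method):
#
#   from api.taskmanager.scheduler import sched
#
#   class EchoTask(object):
#       def launch(self):
#           print "running"
#
#   sched.enqueue(EchoTask())  # queued, then run by one of the worker threads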
| gpl-2.0 |
acdh-oeaw/dig_ed_cat | editions/gexf.py | 1 | 2221 | import lxml.etree as ET
gexf_doc = """
<gexf xmlns="http://www.gexf.net/1.2draft" version="1.2">
<meta lastmodifieddate="{}">
<creator>https://dig-ed-cat.acdh.oeaw.ac.at</creator>
<description>The dig-ed-cat-net</description>
</meta>
<graph defaultedgetype="directed">
<attributes class="node">
<attribute id="0" title="type" type="string"/>
</attributes>
<nodes>
</nodes>
<edges>
</edges>
</graph>
</gexf>
"""
ns_gexf = {'gexf': "http://www.gexf.net/1.2draft"}
ns_xml = {'xml': "http://www.w3.org/XML/1998/namespace"}
def create_node(node_id, label, node_type=None):
""" returns a gexf:node element """
node = ET.Element("{}node".format("{"+ns_gexf['gexf']+"}"))
node.attrib['id'] = str(node_id)
node.attrib['label'] = label
attvalues = ET.Element("{}attvalues".format("{"+ns_gexf['gexf']+"}"))
attvalue = ET.Element("{}attvalue".format("{"+ns_gexf['gexf']+"}"))
if node_type:
attvalue.attrib['for'] = "0"
attvalue.attrib['value'] = str(node_type)
attvalues.append(attvalue)
node.append(attvalues)
return node
def create_edge(edge_id, source, target):
""" returns a gexf:edge element """
edge = ET.Element("{}edge".format("{"+ns_gexf['gexf']+"}"))
edge.attrib['id'] = str(edge_id)
edge.attrib['source'] = str(source)
edge.attrib['target'] = str(target)
return edge
def netdict_to_gexf(net, gexf_doc=gexf_doc):
""" takes a python dict with network info and returns\
a list of gexf:node, gexf:edge and a gexf:doc"""
gexf_tree = ET.fromstring(gexf_doc)
nodes_root = gexf_tree.xpath('//gexf:nodes', namespaces=ns_gexf)[0]
edges_root = gexf_tree.xpath('//gexf:edges', namespaces=ns_gexf)[0]
for idx, item in enumerate(net['edges']):
edge = create_edge(idx, item['from'], item['to'])
edges_root.append(edge)
for idx, item in enumerate(net['nodes']):
node = create_node(item['id'], item['title'], item['type'])
nodes_root.append(node)
xml_stream = ET.tostring(gexf_tree, pretty_print=True, encoding="UTF-8")
return [nodes_root, edges_root, gexf_tree, xml_stream]
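# Minimal usage sketch (the input shape is inferred from the functions above,
# and the values are made up): nodes need 'id', 'title' and 'type' keys,
# edges need 'from' and 'to' keys.
#
#   net = {
#       'nodes': [{'id': 0, 'title': 'Edition A', 'type': 'edition'},
#                 {'id': 1, 'title': 'Vienna', 'type': 'place'}],
#       'edges': [{'from': 0, 'to': 1}],
#   }
#   nodes_root, edges_root, tree, xml_stream = netdict_to_gexf(net)
#   print(xml_stream)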
| mit |
LLNL/spack | lib/spack/spack/schema/mirrors.py | 5 | 1230 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Schema for mirrors.yaml configuration file.
.. literalinclude:: _spack_root/lib/spack/spack/schema/mirrors.py
:lines: 13-
"""
#: Properties for inclusion in other schemas
properties = {
'mirrors': {
'type': 'object',
'default': {},
'additionalProperties': False,
'patternProperties': {
r'\w[\w-]*': {
'anyOf': [
{'type': 'string'},
{
'type': 'object',
'required': ['fetch', 'push'],
'properties': {
'fetch': {'type': 'string'},
'push': {'type': 'string'}
}
}
]
},
},
},
}
#: Full schema with metadata
schema = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack mirror configuration file schema',
'type': 'object',
'additionalProperties': False,
'properties': properties,
}
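# Example of a mirrors.yaml accepted by this schema (illustrative values):
#
#   mirrors:
#     local-mirror: file:///opt/spack-mirror
#     shared-mirror:
#       fetch: https://mirror.example.com/spack
#       push: s3://example-bucket/spack-mirror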
| lgpl-2.1 |
PolicyStat/selenium-old | py/selenium/webdriver/firefox/webdriver.py | 1 | 4132 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httplib
from selenium_old.webdriver.common.exceptions import ErrorInResponseException
from selenium_old.webdriver.remote.command import Command
from selenium_old.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium_old.webdriver.remote.webelement import WebElement
from selenium_old.webdriver.firefox.firefoxlauncher import FirefoxLauncher
from selenium_old.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium_old.webdriver.firefox.extensionconnection import ExtensionConnection
import urllib2
import socket
class WebDriver(RemoteWebDriver):
"""The main interface to use for testing,
which represents an idealised web browser."""
def __init__(self, profile=None, timeout=30):
"""Creates a webdriver instance.
Args:
profile: a FirefoxProfile object (it can also be a profile name,
but the support for that may be removed in future, it is
recommended to pass in a FirefoxProfile object)
timeout: the amount of time to wait for extension socket
"""
port = self._free_port()
self.browser = FirefoxLauncher()
if type(profile) == str:
# This is to be Backward compatible because we used to take a
# profile name
profile = FirefoxProfile(name=profile, port=port)
if not profile:
profile = FirefoxProfile(port=port)
self.browser.launch_browser(profile)
RemoteWebDriver.__init__(self,
command_executor=ExtensionConnection(timeout),
browser_name='firefox', platform='ANY', version='',
javascript_enabled=True)
def _free_port(self):
port = 0
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind((socket.gethostname(), 0))
port = free_socket.getsockname()[1]
free_socket.close()
return port
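        # Note: binding to port 0 lets the OS pick a free ephemeral port; the
        # socket is closed before the port is used, so another process could
        # in principle claim it first (an accepted race in this approach).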
def _execute(self, command, params=None):
try:
return RemoteWebDriver.execute(self, command, params)
except ErrorInResponseException, e:
# Legacy behavior: calling close() multiple times should not raise
# an error
if command != Command.CLOSE and command != Command.QUIT:
raise e
except urllib2.URLError, e:
# Legacy behavior: calling quit() multiple times should not raise
# an error
if command != Command.QUIT:
raise e
def create_web_element(self, element_id):
"""Override from RemoteWebDriver to use firefox.WebElement."""
return WebElement(self, element_id)
def quit(self):
"""Quits the driver and close every associated window."""
try:
RemoteWebDriver.quit(self)
except httplib.BadStatusLine:
# Happens if Firefox shutsdown before we've read the response from
# the socket.
pass
self.browser.kill()
def save_screenshot(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
"""
png = self._execute(Command.SCREENSHOT)['value']
try:
            f = open(filename, 'wb')
f.write(base64.decodestring(png))
f.close()
except IOError:
return False
finally:
del png
return True
| apache-2.0 |
sungjuly/pinball | tests/pinball/repository/repository_test.py | 6 | 5251 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation tests for configuration repository."""
import unittest
from pinball.repository.config import JobConfig
from pinball.repository.config import WorkflowScheduleConfig
from pinball.repository.repository import Repository
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
_SCHEDULE_TEMPLATE = """{
"emails": [
"some_email@pinterest.com",
"some_other_email@pinterest.com"
],
"overrun_policy": "DELAY",
"recurrence": "%s",
"start_date": "2012-01-01",
"time": "00.00.01.000",
"workflow": "some_workflow"
}"""
_JOB_TEMPLATE = """{
"abort_timeout_sec": 20,
"emails": [
"some_email@pinterest.com",
"some_other_email@pinterest.com"
],
"is_condition": false,
"job": "some_job",
"max_attempts": %d,
"parents": [
"some_parent_job",
"some_other_parent_job"
],
"priority": 123,
"retry_delay_sec": 10,
"template": "some_template",
"template_params": {
"some_param": "some_value"
},
"warn_timeout_sec": 10,
"workflow": "some_workflow"
}"""
class FakeRepository(Repository):
def __init__(self):
self.configs = {}
def _get_config(self, path):
if path == '/workflow/some_workflow/schedule':
return _SCHEDULE_TEMPLATE % "1d"
elif path == '/workflow/some_workflow/job/some_job':
return _JOB_TEMPLATE % 10
assert False, 'unrecognized path %s' % path
def _put_config(self, path, content):
self.configs[path] = content
def _delete_config(self, path):
del self.configs[path]
def _list_directory(self, directory, allow_not_found):
if directory == '/workflow/':
return ['some_workflow/', 'some_other_workflow/']
elif directory == '/workflow/some_other_workflow/':
return ['job/']
elif directory == '/workflow/some_workflow/':
return ['job/']
elif directory == '/workflow/some_workflow/job/':
return ['some_job']
elif directory == '/workflow/some_other_workflow/job/':
return ['some_other_job', 'yet_another_job']
assert False, 'unrecognized directory %s' % directory
class RepositoryTestCase(unittest.TestCase):
def setUp(self):
self._repository = FakeRepository()
def test_get_schedule(self):
schedule_config = self._repository.get_schedule('some_workflow')
self.assertEqual('some_workflow', schedule_config.workflow)
self.assertEqual('1d', schedule_config.recurrence)
def test_put_schedule(self):
schedule_config = WorkflowScheduleConfig.from_json(
_SCHEDULE_TEMPLATE % '1w')
self._repository.put_schedule(schedule_config)
self.assertEqual(1, len(self._repository.configs))
self.assertEqual(
_SCHEDULE_TEMPLATE % '1w',
self._repository.configs['/workflow/some_workflow/schedule'])
def test_delete_schedule(self):
schedule_config = WorkflowScheduleConfig.from_json(
_SCHEDULE_TEMPLATE % 100)
self._repository.put_schedule(schedule_config)
self._repository.delete_schedule('some_workflow')
self.assertEqual({}, self._repository.configs)
def test_get_job(self):
job_config = self._repository.get_job('some_workflow', 'some_job')
self.assertEqual('some_job', job_config.job)
self.assertEqual('some_workflow', job_config.workflow)
self.assertEqual(10, job_config.max_attempts)
def test_put_job(self):
job_config = JobConfig.from_json(_JOB_TEMPLATE % 100)
self._repository.put_job(job_config)
self.assertEqual(1, len(self._repository.configs))
self.assertEqual(
_JOB_TEMPLATE % 100,
self._repository.configs['/workflow/some_workflow/job/some_job'])
def test_delete_job(self):
job_config = JobConfig.from_json(_JOB_TEMPLATE % 100)
self._repository.put_job(job_config)
self._repository.delete_job('some_workflow', 'some_job')
self.assertEqual({}, self._repository.configs)
def test_get_workflow_names(self):
self.assertEqual(['some_workflow', 'some_other_workflow'],
self._repository.get_workflow_names())
def test_get_job_names(self):
self.assertEqual(['some_job'],
self._repository.get_job_names('some_workflow'))
self.assertEqual(['some_other_job', 'yet_another_job'],
self._repository.get_job_names('some_other_workflow'))
| apache-2.0 |
RAPD/RAPD | src/plugins/subcontractors/aimless.py | 1 | 39406 | """Functions for parsing aimless logs"""
"""
This file is part of RAPD
Copyright (C) 2017, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2017-05-09"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import argparse
# from collections import OrderedDict
# import datetime
# import glob
# import json
# import logging
# import multiprocessing
import os
from pprint import pprint
# import pymongo
# import re
# import redis
# import shutil
# import subprocess
import sys
# import time
# import unittest
# import urllib2
# import uuid
# RAPD imports
# import commandline_utils
# import detectors.detector_utils as detector_utils
# import utils
# import utils.credits as credits
from utils.r_numbers import try_int, try_float
# Import smartie.py from the installed CCP4 package
# smartie.py is a python script for parsing log files from CCP4
try:
sys.path.append(os.path.join(os.environ["CCP4"], "share", "smartie"))
except KeyError as e:
print "\nError importing smartie from CCP4."
print "Environmental variable %s not set. Exiting." % e
exit(9)
import smartie
# Software dependencies
VERSIONS = {
# "eiger2cbf": ("160415",)
}
def parse_aimless(logfile):
"""
Parses the aimless logfile in order to pull out data for
graphing and the results summary table.
logfile should be input as the name of the log file
Relevant values for the summary table are stored in a dict.
key = name of result value
value = list of three numbers, 1 - Overall
2 - Inner Shell
3 - Outer Shell
Relevant information for creating plots are stored in a dict,
with the following format for each entry (i.e. each plot):
{"<*plot label*>":{
"data":{
"parameters":{<*line parameters*>},
"series":[
{xs : [],
ys : []
}
]
}
"parameters" : {<*plot parameters*>}
}
...
...
}
"""
log = smartie.parselog(logfile)
# print log.nfragments()
# print dir(log.fragment(0))
# Pull out information for the results summary table.
flag = True
summary = log.keytext(0).message().split("\n")
# print summary
# For some reason "Anomalous flag switched ON" is not always
# found, so the line below creates a blank entry for the
# the variable that should be created when that phrase is
# found, eliminating the problem where the program reports that
# the variable anomalous_report is referenced before assignment.
# anomalous_report = ""
int_results = {"anomalous_report": ""}
for line in summary:
# print line, len(line)
if "Space group" in line:
int_results["scaling_spacegroup"] = line.strip().split(": ")[-1]
elif "Average unit cell" in line:
int_results["scaling_unit_cell"] = [try_float(x) for x in line.split()[3:]]
elif "Anomalous flag switched ON" in line:
int_results["text2"] = line
elif "Low resolution limit" in line:
int_results["bins_low"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "High resolution limit" in line:
int_results["bins_high"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmerge" in line and "within" in line:
int_results["rmerge_anom"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmerge" in line and "all" in line:
int_results["rmerge_norm"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmeas" in line and "within" in line:
int_results["rmeas_anom"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmeas" in line and "all" in line:
int_results["rmeas_norm"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rpim" in line and "within" in line:
int_results["rpim_anom"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rpim" in line and "all" in line:
int_results["rpim_norm"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmerge in top intensity bin" in line:
int_results["rmerge_top"] = try_float(line.split()[-3], 0)
elif "Total number of observations" in line:
int_results["total_obs"] = [try_int(x, 0) for x in line.split()[-3:]]
elif "Total number unique" in line:
int_results["unique_obs"] = [try_int(x, 0) for x in line.split()[-3:]]
elif "Mean((I)/sd(I))" in line:
int_results["isigi"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Mn(I) half-set correlation CC(1/2)" in line:
int_results["cc-half"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Completeness" in line:
int_results["completeness"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Multiplicity" in line:
int_results["multiplicity"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Anomalous completeness" in line:
int_results["anom_completeness"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Anomalous multiplicity" in line:
int_results["anom_multiplicity"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "DelAnom correlation between half-sets" in line:
int_results["anom_correlation"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Mid-Slope of Anom Normal Probability" in line:
int_results["anom_slope"] = [try_float(x, 0) for x in line.split()[-3:]]
# This now unused due to shifting output
# int_results = {
# "bins_low": [try_float(x, 0) for x in summary[3].split()[-3:]],
# "bins_high": [try_float(x, 0) for x in summary[4].split()[-3:]],
# "rmerge_anom": [try_float(x, 0) for x in summary[6].split()[-3:]],
# "rmerge_norm": [try_float(x, 0) for x in summary[7].split()[-3:]],
# "rmeas_anom": [try_float(x, 0) for x in summary[8].split()[-3:]],
# "rmeas_norm": [try_float(x, 0) for x in summary[9].split()[-3:]],
# "rpim_anom": [try_float(x, 0) for x in summary[10].split()[-3:]],
# "rpim_norm": [try_float(x, 0) for x in summary[11].split()[-3:]],
# "rmerge_top": float(summary[12].split()[-3]),
# "total_obs": [try_int(x) for x in summary[13].split()[-3:]],
# "unique_obs": [try_int(x) for x in summary[14].split()[-3:]],
# "isigi": [try_float(x, 0) for x in summary[15].split()[-3:]],
# "cc-half": [try_float(x, 0) for x in summary[16].split()[-3:]],
# "completeness": [try_float(x, 0) for x in summary[17].split()[-3:]],
# "multiplicity": [try_float(x, 0) for x in summary[18].split()[-3:]],
# "anom_completeness": [try_float(x, 0) for x in summary[21].split()[-3:]],
# "anom_multiplicity": [try_float(x, 0) for x in summary[22].split()[-3:]],
# "anom_correlation": [try_float(x, 0) for x in summary[23].split()[-3:]],
# "anom_slope": [try_float(summary[24].split()[-3])],
# "scaling_spacegroup": space_group,
# "scaling_unit_cell": unit_cell,
# "text2": anomalous_report,
# }
# Smartie can pull table information based on a regular
# expression pattern that matches the table title from
# the aimless log file.
# NOTE : the regular expression must match the beginning
# of the table's title, but does not need to be the entire
# title.
#
# We will use this to pull out the data from tables we are
# interested in.
#
# The beginning of the titles for all common tables in the
# aimless log file are given below, but not all of them
# are currently used to generate a plot.
# scales = "=== Scales v rotation"
rfactor = "Analysis against all Batches"
cchalf = "Correlations CC(1/2)"
cc = "Run pair correlations by resolution"
# anisotropy = "Anisotropy analysis"
vresolution = "Analysis against resolution,"
# anomalous = "Analysis against resolution, with & without"
# rresolution = "Analysis against resolution for each run"
# intensity = "Analysis against intensity"
completeness = "Completeness & multiplicity"
# deviation = "Run 1, standard deviation" # and 2, 3, ...
# all_deviation = "All runs, standard deviation"
# effect = "Effect of parameter variance on sd(I)"
rcp = "Radiation damage"
# pprint(dir(log))
# for table in log.tables():
# print table.title()
# Grab plots - None is plot missing
plots = {}
try:
plots["Rmerge vs Frame"] = {
"x_data": [int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("Rmerge")],
"label": "Rmerge",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("SmRmerge")],
"label": "Smoothed",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "Rmerge",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("Rmerge")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "SmRmerge",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("SmRmerge")]
}
]
},
{
"parameters": {
"linecolor": "3",
"linelabel": "Rmerge",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("Rmerge")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "SmRmerge",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("SmRmerge")]
}
]
},
],
"parameters": {
"selectlabel": "Rmerge",
"toplabel": "Rmerge vs Batch for all Runs",
"xlabel": "Batch #",
"ylabel": "Rmerge",
},
}
# Plot not present
except IndexError:
plots["Rmerge vs Frame"] = None
try:
plots["Imean/RMS scatter"] = {
"x_data": [int(x) for x in log.tables(rfactor)[0].col("N")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("I/rms")],
"label": "I/rms",
"pointRadius": 0
}
],
# "data": [
# {
# "parameters": {
# "linecolor": "3",
# "linelabel": "I/rms",
# "linetype": "11",
# "linewidth": "3",
# },
# "series": [
# {
# "xs" : [int(x) for x in log.tables(rfactor)[0].col("N")],
# "ys" : [try_float(x, 0.0) for x in \
# log.tables(rfactor)[0].col("I/rms")],
# }
# ]
# }
# ],
"parameters": {
"selectlabel": "Imean/RMS",
"toplabel": "Imean / RMS scatter",
"xlabel": "Batch Number",
"ylabel": "Imean/RMS"
}
}
# Plot not present
except IndexError:
plots["Imean/RMS scatter"] = None
try:
plots["Anomalous & Imean CCs vs Resolution"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("CCanom")],
"label": "CCanom",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("CC1/2")],
"label": "CC1/2",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "CCanom",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("CCanom")],
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "CC1/2",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("CC1/2")],
}
]
}
],
"parameters": {
"selectlabel": "CC",
"toplabel": "Anomalous & Imean CCs vs. Resolution",
"xlabel": "Dmid (Angstroms)",
"ylabel": "CC"
}
}
# Plot not present
except IndexError:
plots["Anomalous & Imean CCs vs Resolution"] = None
try:
plots["RMS correlation ratio"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("RCRanom")],
"label": "RCRanom",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "RCRanom",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("RCRanom")]
}
]
}
],
"parameters": {
"selectlabel": "RCR",
"toplabel": "RMS correlation ratio",
"xlabel": "1/d^2",
"ylabel": "RCR"
}
}
# Plot not present
except IndexError:
plots["RMS correlation ratio"] = None
try:
plots["I/sigma, Mean Mn(I)/sd(Mn(I))"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Mn(I/sd)")],
"label": "Mn(I/sd)",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("I/RMS")],
"label": "I/RMS",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "I/RMS",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("I/RMS")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "Mn(I/sd)",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Mn(I/sd)")]
}
]
}
],
"parameters": {
"selectlabel": "I/σI",
"toplabel": "I/sigma, Mean Mn(I)/sd(Mn(I))",
"xlabel": "1/d^2",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["I/sigma, Mean Mn(I)/sd(Mn(I))"] = None
try:
plots["rs_vs_res"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rmrg")],
"label": "Rmerge",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rfull")],
"label": "Rfull",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rmeas")],
"label": "Rmeas",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rpim")],
"label": "Rpim",
"pointRadius": 0
},
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "Remerge",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rmrg")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "Rfull",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rfull")]
}
]
},
{
"parameters": {
"linecolor": "5",
"linelabel": "Rmeas",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rmeas")]
}
]
},
{
"parameters": {
"linecolor": "6",
"linelabel": "Rpim",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rpim")]
}
]
}
],
"parameters": {
"selectlabel": "R Factors",
"toplabel": "Rmerge, Rfull, Rmeas, Rpim vs. Resolution",
"xlabel": "Dmid (Angstroms)",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["rs_vs_res"] = None
try:
plots["Average I, RMS deviation, and Sd"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("RMSdev")],
"label": "RMSdev",
"pointRadius": 0
},
{
"data": [try_int(x, 0) for x in \
log.tables(vresolution)[0].col("AvI")],
"label": "AvgI",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("sd")],
"label": "SD",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "Average I",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_int(x, 0) for x in log.tables(vresolution)[0].col("AvI")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "RMS deviation",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("RMSdev")]
}
]
},
{
"parameters": {
"linecolor": "5",
"linelabel": "std. dev.",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("sd")]
}
]
}
],
"parameters": {
"selectlabel": "I vs Res",
"toplabel": "Average I, RMS dev., and std. dev.",
"xlabel": "Dmid (Ansgstroms)",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["Average I, RMS deviation, and Sd"] = None
try:
plots["Completeness"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("%poss")],
"label": "All",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("C%poss")],
"label": "C%poss",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoCmp")],
"label": "AnoCmp",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoFrc")],
"label": "AnoFrc",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "%poss",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("%poss")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "C%poss",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("C%poss")]
}
]
},
{
"parameters": {
"linecolor": "5",
"linelabel": "AnoCmp",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoCmp")]
}
]
},
{
"parameters": {
"linecolor": "6",
"linelabel": "AnoFrc",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoFrc")]
}
]
}
],
"parameters": {
"selectlabel": "Completeness",
"toplabel": "Completeness vs. Resolution",
"xlabel": "Dmid (Angstroms)",
"ylabel": "Percent"
}
}
# Plot not present
except IndexError:
plots["Completeness"] = None
try:
plots["Redundancy"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("Mlplct")],
"label": "All",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoMlt")],
"label": "Anomalous",
"pointRadius": 0
},
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "multiplicity",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("Mlplct")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "anomalous multiplicity",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoMlt")]
}
]
}
],
"parameters": {
"selectlabel": "Redundancy",
"toplabel": "Redundancy",
"xlabel": "Dmid (Angstroms)",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["Redundancy"] = None
try:
plots["Radiation Damage"] = {
"x_data": [int(x) for x in \
log.tables(rcp)[0].col("Batch")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(rcp)[0].col("Rcp")],
"label": "RCP",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "Rcp",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [int(x) for x in \
log.tables(rcp)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rcp)[0].col("Rcp")]
}
]
}
],
"parameters": {
"selectlabel": "RCP",
"toplabel": "Rcp vs. Batch",
"xlabel": "Relative frame difference",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["Radiation Damage"] = None
# Return to the main program.
return (plots, int_results)
def get_cc(log_file):
"""
Returns the CCs in the section 'Matrix of correlations of E^2 between runs'
"""
# Handle filename
if isinstance(log_file, str):
log_lines = open(log_file, "r").readlines()
elif isinstance(log_file, list):
log_lines = log_file
else:
raise TypeError("Function takes a file name or list of lines from log file")
results = {
"cc": {},
"maximum_resolution": {},
"number": {},
"runs": []
}
# Look through for the lines of interest
in_range = False
for log_line in log_lines:
if "Matrix of correlations of E^2 between runs" in log_line:
in_range = True
in_body = False
if in_range:
# print log_line.rstrip()
if "$TABLE:" in log_line:
in_range = False
# In the body of the table
if in_body:
# print "body>>", log_line.rstrip()
body_split = log_line.split()
if body_split:
# The CC
if body_split[0] == "Run":
# print body_split
from_run = int(body_split[1])
to_run = from_run + 1
for f in body_split[3:]:
results["cc"][(from_run, to_run)] = float(f)
to_run += 1
# Number of reflections used in the CC
elif body_split[0] == "N":
# print from_run, body_split
to_run = from_run + 1
for f in body_split[1:]:
results["number"][(from_run, to_run)] = int(f)
to_run += 1
# Not in the body
else:
# Maximum resolution of the runs
if "maximum resolution" in log_line:
run = int(log_line.split()[1])
max_res = float(log_line.split()[4])
results["maximum_resolution"][run] = max_res
# Header of the table
elif " Run " in log_line:
# print "header", log_line.rstrip()
in_body = True
header_split = log_line.split()
runs = [1] + [ int(i) for i in header_split[1:] ]
results["runs"] = runs
return results
def main():
"""
The main process docstring
This function is called when this module is invoked from
the commandline
"""
print "main"
args = get_commandline()
print args
# res = parse_aimless(args.file)
# pprint(res)
get_cc(args.file)
def get_commandline():
"""
Grabs the commandline
"""
print "get_commandline"
# Parse the commandline arguments
commandline_description = "Parse an aimless log file"
parser = argparse.ArgumentParser(description=commandline_description)
# Directory or files
parser.add_argument(action="store",
dest="file",
default=False,
help="Template for image files")
# Print help message is no arguments
if len(sys.argv[1:])==0:
parser.print_help()
parser.exit()
return parser.parse_args()
if __name__ == "__main__":
# Execute code
main()
| agpl-3.0 |
elky/django | django/templatetags/static.py | 122 | 4502 | from urllib.parse import quote, urljoin
from django import template
from django.apps import apps
from django.utils.encoding import iri_to_uri
from django.utils.html import conditional_escape
register = template.Library()
class PrefixNode(template.Node):
def __repr__(self):
return "<PrefixNode for %r>" % self.name
def __init__(self, varname=None, name=None):
if name is None:
raise template.TemplateSyntaxError(
"Prefix nodes must be given a name to return.")
self.varname = varname
self.name = name
@classmethod
def handle_token(cls, parser, token, name):
"""
Class method to parse prefix node and return a Node.
"""
        # token.split_contents() isn't useful here because tags using this method don't accept variables as arguments
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != 'as':
raise template.TemplateSyntaxError(
"First argument in '%s' must be 'as'" % tokens[0])
if len(tokens) > 1:
varname = tokens[2]
else:
varname = None
return cls(varname, name)
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, name, ''))
return prefix
def render(self, context):
prefix = self.handle_simple(self.name)
if self.varname is None:
return prefix
context[self.varname] = prefix
return ''
@register.tag
def get_static_prefix(parser, token):
"""
Populate a template variable with the static prefix,
``settings.STATIC_URL``.
Usage::
{% get_static_prefix [as varname] %}
Examples::
{% get_static_prefix %}
{% get_static_prefix as static_prefix %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
"""
Populate a template variable with the media prefix,
``settings.MEDIA_URL``.
Usage::
{% get_media_prefix [as varname] %}
Examples::
{% get_media_prefix %}
{% get_media_prefix as media_prefix %}
"""
return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(template.Node):
def __init__(self, varname=None, path=None):
if path is None:
raise template.TemplateSyntaxError(
"Static template nodes must be given a path to return.")
self.path = path
self.varname = varname
def url(self, context):
path = self.path.resolve(context)
return self.handle_simple(path)
def render(self, context):
url = self.url(context)
if context.autoescape:
url = conditional_escape(url)
if self.varname is None:
return url
context[self.varname] = url
return ''
@classmethod
def handle_simple(cls, path):
if apps.is_installed('django.contrib.staticfiles'):
from django.contrib.staticfiles.storage import staticfiles_storage
return staticfiles_storage.url(path)
else:
return urljoin(PrefixNode.handle_simple("STATIC_URL"), quote(path))
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse prefix node and return a Node.
"""
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError(
"'%s' takes at least one argument (path to file)" % bits[0])
path = parser.compile_filter(bits[1])
if len(bits) >= 2 and bits[-2] == 'as':
varname = bits[3]
else:
varname = None
return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
"""
Join the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
return StaticNode.handle_token(parser, token)
def static(path):
"""
Given a relative path to a static asset, return the absolute path to the
asset.
"""
return StaticNode.handle_simple(path)
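# Illustrative use of the static() helper above from plain Python code, e.g.
# when building asset URLs outside a template. The STATIC_URL value shown is
# an assumed setting, not part of this module:
#
#   >>> from django.templatetags.static import static
#   >>> static('myapp/css/base.css')   # with STATIC_URL = '/static/'
#   '/static/myapp/css/base.css'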
| bsd-3-clause |
igboyes/virtool | tests/fixtures/history.py | 2 | 10972 | import pytest
import datetime
@pytest.fixture
def test_change(static_time):
return {
"_id": "6116cba1.1",
"method_name": "edit",
"description": "Edited Prunus virus E",
"created_at": static_time.datetime,
"diff": [
["change", "abbreviation", ["PVF", ""]],
["change", "name", ["Prunus virus F", "Prunus virus E"]],
["change", "version", [0, 1]]
],
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"reference": {
"id": "hxn167"
},
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 1
}
}
@pytest.fixture
def test_changes(test_change):
return [
test_change,
dict(test_change, _id="foobar.1"),
dict(test_change, _id="foobar.2")
]
@pytest.fixture
def test_otu_edit():
"""
    A :class:`tuple` containing old and new OTU documents for testing history diffing.
"""
return (
{
"_id": "6116cba1",
"abbreviation": "PVF",
"imported": True,
"isolates": [
{
"default": True,
"isolate_id": "cab8b360",
"sequences": [
{
"_id": "KX269872",
"definition": "Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete "
"cds.",
"host": "sweet cherry",
"isolate_id": "cab8b360",
"sequence": "TGTTTAAGAGATTAAACAACCGCTTTC",
"segment": None
}
],
"source_name": "8816-v2",
"source_type": "isolate"
}
],
"reference": {
"id": "hxn167"
},
"last_indexed_version": 0,
"lower_name": "prunus virus f",
"name": "Prunus virus F",
"schema": [],
"version": 0
},
{
"_id": "6116cba1",
"abbreviation": "",
"imported": True,
"isolates": [
{
"default": True,
"isolate_id": "cab8b360",
"sequences": [
{
"_id": "KX269872",
"definition": "Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete "
"cds.",
"host": "sweet cherry",
"isolate_id": "cab8b360",
"sequence": "TGTTTAAGAGATTAAACAACCGCTTTC",
"segment": None
}
],
"source_name": "8816-v2",
"source_type": "isolate"
}
],
"reference": {
"id": "hxn167"
},
"last_indexed_version": 0,
"lower_name": "prunus virus f",
"name": "Prunus virus E",
"schema": [],
"version": 1
}
)
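# Illustrative sketch (not a fixture): the "diff" lists stored in the change
# documents above follow the dictdiffer format, so the (old, new) pair that
# test_otu_edit returns can round-trip through it. That dictdiffer is the
# library used to produce these diffs is an assumption here.
#
#   import dictdiffer
#   old, new = pair                      # the tuple returned above
#   diff = list(dictdiffer.diff(old, new))
#   assert dictdiffer.patch(diff, old) == new
#   assert dictdiffer.revert(diff, new) == old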
@pytest.fixture
def create_mock_history(dbi):
async def func(remove):
documents = [
{
"_id": "6116cba1.0",
"created_at": datetime.datetime(2017, 7, 12, 16, 0, 50, 495000),
"description": "Description",
"diff": {
"_id": "6116cba1",
"abbreviation": "PVF",
"imported": True,
"isolates": [
{
"source_name": "8816-v2",
"source_type": "isolate",
"default": True,
"id": "cab8b360",
"sequences": [
{
"_id": "KX269872",
"definition": "Prunus virus F isolate "
"8816-s2 segment RNA2 "
"polyprotein 2 gene, "
"complete cds.",
"host": "sweet cherry",
"isolate_id": "cab8b360",
"sequence": "TGTTTAAGAGATTAAACAACCGCTTTC",
"otu_id": "6116cba1",
"segment": None
}
]
}
],
"reference": {
"id": "hxn167"
},
"schema": [],
"last_indexed_version": 0,
"lower_name": "prunus virus f",
"verified": False,
"name": "Prunus virus F",
"version": 0
},
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"method_name": "create",
"user": {
"id": "test"
},
"reference": {
"id": "hxn167"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 0
}
},
{
"_id": "6116cba1.1",
"created_at": datetime.datetime(2017, 7, 12, 16, 0, 50, 600000),
"description": "Description",
"diff": [
["change", "version", [0, 1]],
["change", "abbreviation", ["PVF", "TST"]]
],
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"method_name": "update",
"user": {
"id": "test"
},
"reference": {
"id": "hxn167"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 1
}
},
{
"_id": "6116cba1.2",
"created_at": datetime.datetime(2017, 7, 12, 16, 0, 50, 602000),
"description": "Description",
"diff": [
["change", "version", [1, 2]],
["change", "name", ["Prunus virus F", "Test Virus"]]
],
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"method_name": "update",
"user": {
"id": "test"
},
"reference": {
"id": "hxn167"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 2
}
},
{
"_id": "6116cba1.3",
"created_at": datetime.datetime(2017, 7, 12, 16, 0, 50, 603000),
"description": "Description",
"diff": [
["change", "version", [2, 3]],
["remove", "isolates", [[0, {
"default": True,
"id": "cab8b360",
"sequences": [{
"_id": "KX269872",
"definition": "Prunus virus F isolate 8816-s2 segment RNA2 polyprotein 2 gene, complete "
"cds.",
"host": "sweet cherry",
"isolate_id": "cab8b360",
"sequence": "TGTTTAAGAGATTAAACAACCGCTTTC",
"otu_id": "6116cba1",
"segment": None
}],
"source_name": "8816-v2",
"source_type": "isolate"}]
]]],
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"method_name": "remove_isolate",
"user": {
"id": "test"
},
"reference": {
"id": "hxn167"
},
"otu": {
"id": "6116cba1",
"name": "Test Virus",
"version": 3
}
}
]
otu = None
if remove:
documents.append({
"_id": "6116cba1.removed",
"created_at": datetime.datetime(2017, 7, 12, 16, 0, 50, 605000),
"description": "Description",
"diff": {
"_id": "6116cba1",
"abbreviation": "TST",
"imported": True,
"isolates": [],
"last_indexed_version": 0,
"lower_name": "prunus virus f",
"verified": False,
"name": "Test Virus",
"reference": {
"id": "hxn167"
},
"version": 3,
"schema": [],
},
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"reference": {
"id": "hxn167"
},
"method_name": "remove",
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Test Virus",
"version": "removed"
}
})
else:
otu = {
"_id": "6116cba1",
"abbreviation": "TST",
"imported": True,
"isolates": [],
"last_indexed_version": 0,
"lower_name": "prunus virus f",
"verified": False,
"name": "Test Virus",
"reference": {
"id": "hxn167"
},
"version": 3,
"schema": [],
}
await dbi.otus.insert_one(otu)
await dbi.history.insert_many(documents)
return otu
return func
| mit |
double12gzh/nova | nova/api/openstack/compute/contrib/consoles.py | 60 | 7022 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'consoles')
class ConsolesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
self.compute_api = compute.API()
super(ConsolesController, self).__init__(*args, **kwargs)
@wsgi.action('os-getVNCConsole')
def get_vnc_console(self, req, id, body):
"""Get vnc connection information to access a server."""
context = req.environ['nova.context']
authorize(context)
# If type is not supplied or unknown, get_vnc_console below will cope
console_type = body['os-getVNCConsole'].get('type')
instance = common.get_instance(self.compute_api, context, id)
try:
output = self.compute_api.get_vnc_console(context,
instance,
console_type)
except exception.InstanceNotReady:
raise webob.exc.HTTPConflict(
explanation=_('Instance not yet ready'))
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except (exception.ConsoleTypeUnavailable,
exception.ConsoleTypeInvalid) as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to get vnc console, functionality not implemented")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {'console': {'type': console_type, 'url': output['url']}}
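    # Illustrative request/response for the action above (values are examples
    # only; "novnc" is one valid console type):
    #
    #   POST /v2/{tenant_id}/servers/{server_id}/action
    #   {"os-getVNCConsole": {"type": "novnc"}}
    #
    #   -> {"console": {"type": "novnc", "url": "http://.../vnc_auto.html?token=..."}}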
@wsgi.action('os-getSPICEConsole')
def get_spice_console(self, req, id, body):
"""Get spice connection information to access a server."""
context = req.environ['nova.context']
authorize(context)
# If type is not supplied or unknown, get_spice_console below will cope
console_type = body['os-getSPICEConsole'].get('type')
instance = common.get_instance(self.compute_api, context, id)
try:
output = self.compute_api.get_spice_console(context,
instance,
console_type)
except (exception.ConsoleTypeUnavailable,
exception.ConsoleTypeInvalid) as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to get spice console, "
"functionality not implemented")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {'console': {'type': console_type, 'url': output['url']}}
@wsgi.action('os-getRDPConsole')
def get_rdp_console(self, req, id, body):
"""Get text console output."""
context = req.environ['nova.context']
authorize(context)
# If type is not supplied or unknown, get_rdp_console below will cope
console_type = body['os-getRDPConsole'].get('type')
instance = common.get_instance(self.compute_api, context, id)
try:
output = self.compute_api.get_rdp_console(context,
instance,
console_type)
except (exception.ConsoleTypeUnavailable,
exception.ConsoleTypeInvalid) as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to get rdp console, functionality not implemented")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {'console': {'type': console_type, 'url': output['url']}}
@wsgi.action('os-getSerialConsole')
def get_serial_console(self, req, id, body):
"""Get connection to a serial console."""
context = req.environ['nova.context']
authorize(context)
        # If type is not supplied or unknown, get_serial_console below will cope
console_type = body['os-getSerialConsole'].get('type')
instance = common.get_instance(self.compute_api, context, id)
try:
output = self.compute_api.get_serial_console(context,
instance,
console_type)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except (exception.ConsoleTypeUnavailable,
exception.ConsoleTypeInvalid,
exception.ImageSerialPortNumberInvalid,
exception.ImageSerialPortNumberExceedFlavorValue,
exception.SocketPortRangeExhaustedException) as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to get serial console, "
"functionality not implemented")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {'console': {'type': console_type, 'url': output['url']}}
class Consoles(extensions.ExtensionDescriptor):
"""Interactive Console support."""
name = "Consoles"
alias = "os-consoles"
namespace = "http://docs.openstack.org/compute/ext/os-consoles/api/v2"
updated = "2011-12-23T00:00:00Z"
def get_controller_extensions(self):
controller = ConsolesController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 |
amarlearning/InternetBots | Internet Bots/InstaFollowerHike.py | 1 | 4950 | # Importing all library that are used!
from random import randint
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
import time
def main():
# Yeah, I know it consumes a lot of RAM.
    # But still I don't know why I always use this.
driver = webdriver.Chrome()
# Yes, an INSTAGRAM follower bot.
driver.get("https://www.instagram.com")
# Resizing the window so that you can see what's going on!
driver.maximize_window()
# It will open a signup page, so we redirect to login page.
Redirect_Login = driver.find_element_by_link_text('Log in')
driver.implicitly_wait(5)
Redirect_Login.click()
# Yes, we need your Instagram username && Password.
    # You can modify this to take user input instead.
    # But you know I am too lazy to do that.
instagram_username = "iamarpandey"
instagram_password = "***********"
# Getting the username and password's Xpath
# you know we have to enter the credentials, that's why!
username_xpath = "//*[@id=\"react-root\"]/section/main/article/div[2]/div[1]/div/form/div[1]/input"
password_xpath = "//*[@id=\"react-root\"]/section/main/article/div[2]/div[1]/div/form/div[2]/input"
# Getting the Login Button Xpath and Search bar Xpath.
login_button_xpath = "//*[@id=\"react-root\"]/section/main/article/div[2]/div[1]/div/form/span/button"
search_xpath = "//*[@id=\"react-root\"]/section/nav/div/div/div/div[1]/input"
    # Getting the followers and following button Xpath.
    # Don't worry, I will not make any mess.
followers_xpath = "//*[@id=\"react-root\"]/section/main/article/header/div[2]/ul/li[2]/a"
following_xpath = "//*[@id=\"react-root\"]/section/main/article/header/div[2]/ul/li[3]/a"
# Getting the Xpath of First search result of dropdown.
dropdown_xpath = "//*[@id=\"react-root\"]/section/nav/div/div/div/div[1]/div[2]/div[2]/div/a[1]/div/div[1]/span"
# I need this for the scrolling of users!
scroll_xpath = "/html/body/div[2]/div/div[2]/div/div[2]"
# Trying to focus on username and password, so that i can enter the credentials.
username_field_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(username_xpath))
password_field_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(password_xpath))
    # Trying to focus on the Login button as well, we need this later!
login_button_field_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(login_button_xpath))
    # Clearing the username and password fields!
username_field_element.clear()
username_field_element.send_keys(instagram_username)
password_field_element.clear()
password_field_element.send_keys(instagram_password)
# And here we go, BOOM your account is here!
# Let's began the process.
login_button_field_element.click()
# Search for a Verified profile to follow as many people as we want!
random_users = ["iamsrk", "selenagomez", "cristiano", "jlo", "leomessi"]
search_field_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(search_xpath))
search_field_element.send_keys(random_users[randint(0,4)])
    # We need to wait for some time, as it's performing the search.
driver.implicitly_wait(5000)
    # Selecting the first result from the search dropdown.
dropdown_field_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(dropdown_xpath))
dropdown_field_element.click()
# Looking onto all the people that follow this verified profile.
followers_field_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(followers_xpath))
followers_field_element.click()
    # Defining the variables that will be used during the whole following process!
    run_script = False
    i = 1
    # Keep clicking follow buttons; when none is left, the WebDriverWait
    # below times out and raises, which is what finally stops this loop.
    while not run_script:
follow_xpath = "/html/body/div[2]/div/div[2]/div/div[2]/ul/li["+str(i)+"]/div/div[2]/span/button"
follow_field_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_xpath(follow_xpath))
follow_field_element.click()
driver.implicitly_wait(15)
centerPanel = driver.find_element_by_css_selector("body > div:nth-child(9) > div > div._g1ax7 > div > div._4gt3b")
        # This was the hard part: scrolling down, since the content is loaded with AJAX.
# Yeah, I know Javascript is awesome. It again helped me.
jsScript = """
function move_down(element) {
element.scrollTop = element.scrollTop + 50;
}
move_down(arguments[0]);
"""
driver.execute_script(jsScript, centerPanel)
        # Crude rate limit between follow clicks; time.sleep() is far
        # cheaper than the original busy-wait counter loop.
        time.sleep(2)
        i = i + 1
    # It's time to close this, a lot of work is done today!
driver.close()
# I am the one who controls all this, Hell yeah!
if __name__ == '__main__':
main()
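# Sketch of a more idiomatic wait (an alternative, not used above): selenium's
# expected_conditions module expresses the "wait for element" pattern without
# the raw lambda passed to WebDriverWait; follow_xpath is the variable from
# main() above:
#
#   from selenium.webdriver.support import expected_conditions as EC
#   from selenium.webdriver.common.by import By
#
#   element = WebDriverWait(driver, 10).until(
#       EC.presence_of_element_located((By.XPATH, follow_xpath)))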
| mit |
jjspierx/imuduino-btle | Arduino/libraries/FreeIMU/debug/log.py | 25 | 2898 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
log.py - Logs data to a text file. Load the Arduino with the FreeIMU_serial program.
Copyright (C) 2012 Fabio Varesano <fvaresano@yahoo.it>
Development of this code has been supported by the Department of Computer Science,
Universita' degli Studi di Torino, Italy within the Piemonte Project
http://www.piemonte.di.unito.it/
This program is free software: you can redistribute it and/or modify
it under the terms of the version 3 GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import time, serial
from struct import unpack
from binascii import unhexlify
from subprocess import call
print "\n\nWelcome to the FreeIMU logger routine!\nCopyright © Fabio Varesano 2012.\nReleased under GPL v3 - See http://www.gnu.org/copyleft/gpl.html\n\n"
print "Please load the FreeIMU_serial program from the FreeIMU library examples on your Arduino. Once you correctly installed the FreeIMU library, the examples are available from File->Examples->FreeIMU in the Arduino IDE.\nWhen done, close the Arduino IDE and its serial monitor."
raw_input('Hit Enter to continue.')
arduino_port = raw_input('Insert the serial port which connects to the Arduino (See in the Arduino IDE Tools->Serial Port if in doubt): ')
# instantiate a serial port object. port gets opened by default, no need to explicitly open it.
ser = serial.Serial(
port= arduino_port,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
if ser.isOpen():
print "Arduino serial port opened correctly"
# we rely on the unhandled serial exception which will stop the program in case of problems during serial opening
ser.write('v') # ask version
print "\nFreeIMU library version informations:",
print ser.readline()
print "\nThe program will now start sampling debugging values and logging them to the log.txt file.\n"
raw_input('Hit Enter to continue.')
count = 30
buff = [0.0 for i in range(9)]
filename = 'log.txt'
tot_readings = 0
try:
print "Sampling from FreeIMU and logging to %s.\nHit CTRL+C to interrupt." % (filename)
f = open(filename, 'w')
ser.write('d')
while True:
f.write(ser.read()) # let's just log everything into the log file
tot_readings = tot_readings + 1
if(tot_readings % 10000 == 0):
print "%d bytes logged. Hit CTRL+C to interrupt." % (tot_readings)
except KeyboardInterrupt:
ser.close()
f.close()
print "\n%d bytes logged to %s" % (tot_readings, filename)
| apache-2.0 |
Hackplayers/Empire-mod-Hpys-tests | lib/modules/powershell/privesc/sherlock.py | 1 | 3385 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Sherlock',
# list of one or more authors for the module
'Author': ['@_RastaMouse'],
# more verbose multi-line description of the module
            'Description': ('Powershell script to quickly find missing Microsoft patches '
                            'for local privilege escalation vulnerabilities'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
# list of any references/other comments
'Comments': [
'https://github.com/rasta-mouse/Sherlock'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
                'Description'   :   'Agent to run module on.',
'Required' : True,
'Value' : ''
},
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# the PowerShell script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
"""
        # if you're reading in a large, external script that might be updated,
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/privesc/Sherlock.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += "Find-AllVulns | out-string"
return script
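# Illustrative use of this module class (Empire menu internals assumed; the
# agent name below is a placeholder): options arrive as [Name, Value] pairs
# and generate() returns the PowerShell to task the agent with:
#
#   m = Module(mainMenu, params=[['Agent', 'WIN10X64_1']])
#   ps = m.generate()   # Sherlock.ps1 source plus "Find-AllVulns | out-string"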
| bsd-3-clause |
Opshun/API | venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.py | 442 | 5732 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from . import win32
# from wincon.h
class WinColor(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
class WinStyle(object):
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
BRIGHT_BACKGROUND = 0x80 # dim text, bright background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
def get_attrs(self):
return self._fore + self._back * 16 + self._style
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
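    # Example of the attribute byte round-trip above: the console packs the
    # foreground color in bits 0-2, the background in bits 4-6, and the
    # brightness flags in bits 3 and 7 (values below are illustrative, and a
    # real Windows console handle is assumed):
    #
    #   term = WinTerm()
    #   attrs = WinColor.RED + WinColor.BLUE * 16 + WinStyle.BRIGHT
    #   term.set_attrs(attrs)
    #   assert term.get_attrs() == attrs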
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
if light:
self._style |= WinStyle.BRIGHT
self.set_console(on_stderr=on_stderr)
def back(self, back=None, light=False, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
if light:
self._style |= WinStyle.BRIGHT_BACKGROUND
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
#I'm not currently tracking the position, so there is no default.
#position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_adjust(self, x, y, on_stderr=False):
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y + y, position.X + x)
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
def erase_screen(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen, and move cursor to (1,1)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
# get number of character cells before current cursor position
cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
        elif mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
if mode == 2:
# put the cursor where needed
win32.SetConsoleCursorPosition(handle, (1, 1))
def erase_line(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the line.
# 1 should clear from the cursor to the beginning of the line.
# 2 should clear the entire line.
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
        elif mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
        # fill the selected part of the line with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
def set_title(self, title):
win32.SetConsoleTitle(title)
| mit |
kholidfu/django | django/contrib/gis/gdal/prototypes/geom.py | 450 | 4735 | from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.errcheck import check_envelope
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, geom_output, int_output, srs_output,
string_output, void_output,
)
# ### Generation routines specific to this module ###
def env_func(f, argtypes):
"For getting OGREnvelopes."
f.argtypes = argtypes
f.restype = None
f.errcheck = check_envelope
return f
def pnt_func(f):
"For accessing point information."
return double_output(f, [c_void_p, c_int])
def topology_func(f):
    f.argtypes = [c_void_p, c_void_p]
    f.restype = c_int
    # ctypes calls errcheck callbacks as errcheck(result, func, args), so the
    # original misspelled "f.errchck = bool" was never installed and a bare
    # bool would not have worked anyway; wrap it properly instead.
    f.errcheck = lambda result, func, cargs: bool(result)
    return f
# ### OGR_G ctypes function prototypes ###
# GeoJSON routines.
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True, decoding='ascii')
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True, decoding='ascii')
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True, decoding='ascii')
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p], decoding='ascii')
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint,
[c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False
)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
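# Illustrative call sketch (the ".ptr" geometry handles are how the
# OGRGeometry wrapper exposes its C pointer; treat the names here as
# assumptions):
#
#   hit = ogr_contains(poly.ptr, point.ptr)   # returns a C int
#   if hit:
#       ...   # poly contains point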
| bsd-3-clause |
tweakoz/micro_ork | ork.build/scripts/ork/build/localopts.py | 1 | 3830 | #!python
###############################################################################
# Orkid SCONS Build System
# Copyright 2010, Michael T. Mayers
# email: michael@tweakoz.com
# The Orkid Build System is published under the GPL 2.0 license
# see http://www.gnu.org/licenses/gpl-2.0.html
###############################################################################
# Orkid Build Machine Local Options
# feel free to edit localopts.py, but NOT localopts.py.template
###############################################################################
import os
import imp
import configparser
import ork.build.common as common
def IsOsx():
return common.IsOsx
def IsWindows():
return os.name == "nt"
if IsWindows():
import win32api
def IsIx():
return common.IsIx
#print "os.name<%s>" % os.name
################################################################
__all__ = [ "XCODEDIR", "VST_SDK_DIR", "VST_INST_DIR", "CXX", "AQSISDIR", "ARCH", "ConfigFileName", "ConfigData", "dump" ]
################################################################
################################################################
def GetDefault( varname, default ):
ret = default
if varname in os.environ:
ret = os.environ[varname]
        if not os.path.isdir(ret):
print("<localopts.py> Warning: path<%s> <ret %s> does not exist" % (varname,ret))
if os.path.isdir(ret):
if IsWindows():
ret = win32api.GetShortPathName(ret)
return os.path.normpath(ret)
################################################################
def ConfigFileName():
return "%s/../ork.build.ini"%os.environ["ORKDOTBUILD_ROOT"]
ConfigData = configparser.ConfigParser()
if os.path.isfile( ConfigFileName() ):
print("LOCALOPTS: Found %s" % ConfigFileName())
ConfigData.read( ConfigFileName() )
print(ConfigData)
else:
print("LOCALOPTS: Cannot find %s : using default options" % ConfigFileName())
ConfigData.add_section( "PATHS" )
ConfigData.add_section( "CONFIG" )
if IsOsx():
ConfigData.set( "PATHS", "VST_INST_DIR", GetDefault("VST_INST_DIR", "~/.vst") )
ConfigData.set( "PATHS", "XCODEDIR", GetDefault("XCODEDIR", "/Applications/Xcode.app") )
ConfigData.set( "CONFIG", "ARCH", GetDefault("ARCH", "x86_64") )
ConfigData.set( "CONFIG", "CXX", GetDefault("CXX", "clang++") )
elif IsIx():
ConfigData.set( "CONFIG", "CXX", "clang++" )
ConfigData.set( "CONFIG", "STD", "c++11" )
ConfigData.set( "PATHS", "VST_SDK_DIR", GetDefault("VST_SDK_DIR", "/sdk/vstsdk2.4") )
cfgfile = open(ConfigFileName(),'w')
ConfigData.write(cfgfile)
cfgfile.close()
#print ConfigData.sections()
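# Illustrative ork.build.ini produced by the defaults above on OSX (paths are
# examples only; note that configparser lower-cases option names when it
# writes them out):
#
#   [PATHS]
#   vst_inst_dir = ~/.vst
#   xcodedir = /Applications/Xcode.app
#
#   [CONFIG]
#   arch = x86_64
#   cxx = clang++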
################################################################
def GetEnv( sect, varname ):
#print "/////////////////////"
#print "sect<%s> varname<%s>" % (sect,varname)
ret = ""
if ConfigData.has_option( sect, varname ):
ret = ConfigData.get( sect, varname )
print(ret)
    if os.path.isdir(ret) and IsWindows():
        ret = win32api.GetShortPathName(ret)
#if False==os.path.isdir(ret):
# print "<localopts.py> Warning: path<%s> <ret %s> does not exist" % (varname,ret)
#print "/////////////////////"
return os.path.normpath(ret)
################################################################
def XCODEDIR():
return GetEnv( "PATHS", "XCODEDIR" )
def VST_INST_DIR():
return GetEnv( "PATHS", "VST_INST_DIR" )
def VST_SDK_DIR():
return GetEnv( "PATHS", "VST_SDK_DIR" )
def AQSISDIR():
return GetEnv( "PATHS", "AQSISDIR" )
def ARCH():
return GetEnv( "CONFIG", "ARCH" )
def CXX():
return GetEnv("CONFIG","CXX")
def STD():
return GetEnv("CONFIG","STD")
################################################################
def dump():
return None
# print "XCODEDIR<%s>" % XCODEDIR()
| mit |
google/glome | python/test/test_vectors.py | 1 | 2619 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module includes test vectors from the protocol reference.
"""
class TestVector:
"""Class that encapsulate needed components for testing.
Consider a use case where an user A sends a message to user B.
Attributes:
kap: A's private key.
ka: A's public key.
kbp: B's private key.
kb: B's public key.
counter: number of messages already shared.
msg: message to share.
sk: shared secret betweens A and B.
tag: tag that matches ka, kb, counter and msg.
"""
def __init__(self, kap: str, ka: str, kbp: str, kb: str, counter: int,
msg: str, sk: str, tag: str):
"""Constructor for TestVector Class."""
self.kap = bytes.fromhex(kap)
self.ka = bytes.fromhex(ka)
self.kbp = bytes.fromhex(kbp)
self.kb = bytes.fromhex(kb)
self.counter = counter
self.msg = msg.encode(encoding="ascii")
self.sk = bytes.fromhex(sk)
self.tag = bytes.fromhex(tag)
TEST1 = TestVector(
kap='77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a',
ka='8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a',
kbp='5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb',
kb='de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f',
counter=0,
msg='The quick brown fox',
sk='4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742',
tag='9c44389f462d35d0672faf73a5e118f8b9f5c340bbe8d340e2b947c205ea4fa3')
TEST2 = TestVector(
kap='b105f00db105f00db105f00db105f00db105f00db105f00db105f00db105f00d',
ka='d1b6941bba120bcd131f335da15778d9c68dadd398ae61cf8e7d94484ee65647',
kbp='fee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1deadfee1dead',
kb='872f435bb8b89d0e3ad62aa2e511074ee195e1c39ef6a88001418be656e3c376',
counter=100,
msg='The quick brown fox',
sk='4b1ee05fcd2ae53ebe4c9ec94915cb057109389a2aa415f26986bddebf379d67',
tag='06476f1f314b06c7f96e5dc62b2308268cbdb6140aefeeb55940731863032277')
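# Illustrative check of a vector (the PyGLOME class and method names here are
# assumptions, not defined in this file):
#
#   import glome
#   sender = glome.Glome(TEST1.kb, TEST1.kap)
#   assert sender.tag(TEST1.msg, TEST1.counter) == TEST1.tag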
| apache-2.0 |
sgerhart/ansible | lib/ansible/utils/module_docs_fragments/url.py | 66 | 2627 | # (c) 2018, John Barker<gundalow@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
force:
description:
- If C(yes) do not get a cached copy.
aliases:
- thirsty
type: bool
default: no
http_agent:
description:
- Header to identify as, generally appears in web server logs.
default: ansible-httpget
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: yes
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
default: yes
type: bool
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without I(url_password) for sites that allow empty passwords
url_password:
description:
- The password for use in HTTP basic authentication.
- If the I(url_username) parameter is not specified, the I(url_password) parameter will not be used.
force_basic_auth:
description:
- Credentials specified with I(url_username) and I(url_password) should be passed in HTTP Header.
default: no
type: bool
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client
authentication. This file can also include the key as well, and if
the key is included, C(client_key) is not required.
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL
client authentication. If C(client_cert) contains both the certificate
and key, this option is not required.
"""
| mit |
dxwu/BinderFilter | resources/android-toolchain-16/lib/python2.7/test/test_typechecks.py | 136 | 3166 | """Unit tests for __instancecheck__ and __subclasscheck__."""
import unittest
from test import test_support
class ABC(type):
def __instancecheck__(cls, inst):
"""Implement isinstance(inst, cls)."""
return any(cls.__subclasscheck__(c)
for c in set([type(inst), inst.__class__]))
def __subclasscheck__(cls, sub):
"""Implement issubclass(sub, cls)."""
candidates = cls.__dict__.get("__subclass__", set()) | set([cls])
return any(c in candidates for c in sub.mro())
class Integer:
__metaclass__ = ABC
__subclass__ = set([int])
class SubInt(Integer):
pass
class TypeChecksTest(unittest.TestCase):
def testIsSubclassInternal(self):
self.assertEqual(Integer.__subclasscheck__(int), True)
self.assertEqual(Integer.__subclasscheck__(float), False)
def testIsSubclassBuiltin(self):
self.assertEqual(issubclass(int, Integer), True)
self.assertEqual(issubclass(int, (Integer,)), True)
self.assertEqual(issubclass(float, Integer), False)
self.assertEqual(issubclass(float, (Integer,)), False)
def testIsInstanceBuiltin(self):
self.assertEqual(isinstance(42, Integer), True)
self.assertEqual(isinstance(42, (Integer,)), True)
self.assertEqual(isinstance(3.14, Integer), False)
self.assertEqual(isinstance(3.14, (Integer,)), False)
def testIsInstanceActual(self):
self.assertEqual(isinstance(Integer(), Integer), True)
self.assertEqual(isinstance(Integer(), (Integer,)), True)
def testIsSubclassActual(self):
self.assertEqual(issubclass(Integer, Integer), True)
self.assertEqual(issubclass(Integer, (Integer,)), True)
def testSubclassBehavior(self):
self.assertEqual(issubclass(SubInt, Integer), True)
self.assertEqual(issubclass(SubInt, (Integer,)), True)
self.assertEqual(issubclass(SubInt, SubInt), True)
self.assertEqual(issubclass(SubInt, (SubInt,)), True)
self.assertEqual(issubclass(Integer, SubInt), False)
self.assertEqual(issubclass(Integer, (SubInt,)), False)
self.assertEqual(issubclass(int, SubInt), False)
self.assertEqual(issubclass(int, (SubInt,)), False)
self.assertEqual(isinstance(SubInt(), Integer), True)
self.assertEqual(isinstance(SubInt(), (Integer,)), True)
self.assertEqual(isinstance(SubInt(), SubInt), True)
self.assertEqual(isinstance(SubInt(), (SubInt,)), True)
self.assertEqual(isinstance(42, SubInt), False)
self.assertEqual(isinstance(42, (SubInt,)), False)
def test_oldstyle(self):
# These should just be ignored.
class X:
def __instancecheck__(self, inst):
return True
def __subclasscheck__(self, cls):
return True
class Sub(X): pass
self.assertNotIsInstance(3, X)
self.assertIsInstance(X(), X)
self.assertFalse(issubclass(int, X))
self.assertTrue(issubclass(Sub, X))
def test_main():
test_support.run_unittest(TypeChecksTest)
if __name__ == "__main__":
unittest.main()
| mit |
nelmiux/CarnotKE | jyhton/lib-python/2.7/__future__.py | 146 | 4380 | """Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Future instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
all_feature_names = [
"nested_scopes",
"generators",
"division",
"absolute_import",
"with_statement",
"print_function",
"unicode_literals",
]
__all__ = ["all_feature_names"] + all_feature_names
# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here. However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010 # nested_scopes
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000 # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "_Feature" + repr((self.optional,
self.mandatory,
self.compiler_flag))
nested_scopes = _Feature((2, 1, 0, "beta", 1),
(2, 2, 0, "alpha", 0),
CO_NESTED)
generators = _Feature((2, 2, 0, "alpha", 1),
(2, 3, 0, "final", 0),
CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1),
(3, 0, 0, "alpha", 0),
CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
(2, 6, 0, "alpha", 0),
CO_FUTURE_WITH_STATEMENT)
print_function = _Feature((2, 6, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_PRINT_FUNCTION)
unicode_literals = _Feature((2, 6, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_UNICODE_LITERALS)
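# Example (illustrative): feature metadata can be read back at runtime.
#
#   >>> import __future__
#   >>> __future__.division.getOptionalRelease()
#   (2, 2, 0, 'alpha', 2)
#   >>> hex(__future__.division.compiler_flag)
#   '0x2000'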
| apache-2.0 |
simondlevy/RealtimePlotter | serialplot.py | 1 | 1736 | #!/usr/bin/env python3
'''
Real-time plot demo using serial input
Copyright (C) 2015 Simon D. Levy
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import serial
from realtime_plot import RealtimePlotter
from threading import Thread
# Change these to suit your needs
PORT = '/dev/ttyACM0'
BAUD = 115200
RANGE = (-1,+1)
class SerialPlotter(RealtimePlotter):
def __init__(self):
RealtimePlotter.__init__(self, [RANGE],
window_name='Serial input',
yticks = [RANGE],
styles = ['b-'])
self.xcurr = 0
self.ycurr = 0
def getValues(self):
return (self.ycurr,)
def _update(port, plotter):
msg = ''
while True:
c = port.read().decode()
if c == '\n':
try:
plotter.ycurr = float(msg)
            except ValueError:
pass
msg = ''
else:
msg += c
plotter.xcurr += 1
if __name__ == '__main__':
try:
port = serial.Serial(PORT, BAUD)
except serial.SerialException:
print('Unable to access device on port %s' % PORT)
exit(1)
plotter = SerialPlotter()
thread = Thread(target=_update, args = (port, plotter))
thread.daemon = True
thread.start()
plotter.start()
| lgpl-3.0 |
jpodeszwik/linux_screenshoter | linux_screenshoter.py | 1 | 1512 | import sys
from PyQt5.QtWidgets import QApplication, QSystemTrayIcon, QMenu, QAction
from PyQt5.QtGui import QGuiApplication, QIcon
from paramiko import SSHClient
from scp import SCPClient
import paramiko
from datetime import date
import time
import configparser
class Screenshoter:
def __init__(self):
config = configparser.ConfigParser()
config.read('linux_screenshoter.cfg')
self.ssh_hostname=config.get('ssh', 'hostname')
self.ssh_port=int(config.get('ssh', 'port'))
self.ssh_username=config.get('ssh', 'username')
self.ssh_remote_path=config.get('ssh', 'remote_path')
def handle_click(self, reason):
if reason != QSystemTrayIcon.Trigger:
return
QGuiApplication.primaryScreen().grabWindow(0).save('scr.jpg', 'jpg')
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect(hostname=self.ssh_hostname, port=self.ssh_port, username=self.ssh_username)
scp = SCPClient(ssh.get_transport())
dest_name = time.strftime("screenshot_%Y%m%d_%H%M%S.jpg", time.localtime())
scp.put('scr.jpg', self.ssh_remote_path + '/' + dest_name)
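# Illustrative linux_screenshoter.cfg matching the keys read above (host and
# path values are examples only):
#
#   [ssh]
#   hostname = example.com
#   port = 22
#   username = screenshots
#   remote_path = /var/www/screenshots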
app = QApplication(sys.argv)
icon = QIcon('icon.png')
tray = QSystemTrayIcon(icon)
tray.show()
trayMenu = QMenu()
quitAction = QAction("&Quit", trayMenu, triggered=QApplication.instance().quit)
trayMenu.addAction(quitAction)
tray.setContextMenu(trayMenu)
screenshoter = Screenshoter()
tray.activated.connect(screenshoter.handle_click)
app.exec()
| mit |
atodorov/blivet | blivet/devicelibs/edd.py | 1 | 31887 | #
# edd.py
# BIOS EDD data parsing functions
#
# Copyright 2010-2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s):
# Peter Jones <pjones@redhat.com>
# Hans de Goede <hdegoede@redhat.com>
# Ales Kozumplik <akozumpl@redhat.com>
#
import logging
import os
import re
import struct
import copy
from .. import util
log = logging.getLogger("blivet")
testdata_log = logging.getLogger("testdata")
testdata_log.setLevel(logging.DEBUG)
re_bios_device_number = re.compile(r'.*/int13_dev(\d+)/*$')
re_host_bus_pci = re.compile(r'^(PCIX|PCI|XPRS|HTPT)\s*(\S*)\s*channel: (\S*)\s*$')
re_interface_atapi = re.compile(r'^ATAPI\s*device: (\S*)\s*lun: (\S*)\s*$')
re_interface_ata = re.compile(r'^ATA\s*device: (\S*)\s*$')
re_interface_scsi = re.compile(r'^SCSI\s*id: (\S*)\s*lun: (\S*)\s*$')
re_interface_usb = re.compile(r'^USB\s*serial_number: (\S*)\s*$')
re_interface_1394 = re.compile(r'^1394\s*eui: (\S*)\s*$')
re_interface_fibre = re.compile(r'^FIBRE\s*wwid: (\S*)\s*lun: (\S*)\s*$')
re_interface_i2o = re.compile(r'^I2O\s*identity_tag: (\S*)\s*$')
# pretty sure the RAID definition using "identity_tag" is basically a kernel
# bug, but it's part of the ABI now, so it sticks. The format of the
# scnprintf() is at least correct.
re_interface_raid = re.compile(r'^RAID\s*identity_tag: (\S*)\s*$')
re_interface_edd3_sata = re.compile(r'^SATA\s*device: (\S*)\s*$')
# EDD 4 features from 2010 and later. Awesomely, the "version" output from
# int 13 AH=41h says: AH Version of extensions. Shall be set to 30h,
# so there's no way to distinguish these from EDD 3, even though SATA does
# differ. In theory, if we're on <4.0, pmp should always be all 0's.
re_interface_edd4_sata = re.compile(r'^SATA\s*device: (\S*)\s*pmp: (\S*)\s*$')
re_interface_sas = re.compile(r'^SAS\s*sas_address: (\S*)\s*lun: \(\S*\)\s*$')
# to make life difficult, when it finds an unknown interface type string,
# the kernel prints the values without the string. But it does print the
# anchoring tab that would go between them...
re_interface_unknown = re.compile(r'^(\S*)\s*unknown: (\S*) (\S*)\s*$')
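# Example sysfs "host_bus"/"interface" strings the patterns above are written
# against (whitespace and values are illustrative):
#
#   "PCI     00:1f.2  channel: 0"   -> re_host_bus_pci
#   "ATA     device: 0"             -> re_interface_ata
#   "SCSI    id: 0  lun: 0"         -> re_interface_scsi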
class EddEntry(object):
""" This object merely collects what the /sys/firmware/edd/* entries can
provide.
"""
def __init__(self, sysfspath, root=None):
self.root = util.Path(root or "", root="")
# some misc data from various files...
self.sysfspath = util.Path(sysfspath, root=self.root)
""" sysfspath is the path we're probing
"""
match = re_bios_device_number.match(sysfspath)
self.bios_device_number = int(match.group(1), base=16)
""" The device number from the EDD path """
self.sysfslink = None
""" The path /sys/block/BLAH is a symlink link to once we've resolved
that this is a particular device. Used for logging later.
"""
self.version = util.get_sysfs_attr(self.sysfspath, "version")
""" The edd version this entry claims conformance with, from
/sys/firmware/edd/int13_devXX/version """
self.mbr_sig = None
""" The MBR signature data from edd/int13_devXX/mbr_signature """
self.sectors = None
""" The number of sectors on the device from edd/int13_devXX/sectors """
# Now the data from edd/int13_devXX/host_bus
self.host_bus = None
""" The ID string for the host bus type, from
edd/int13_devXX/host_bus.
"""
self.pci_dev = None
""" The host bus bus:device.function, from edd/int13_devXX/host_bus.
"""
self.channel = None
""" The host bus device's channel number edd/int13_devXX/host_bus.
The spec says:
Channel number. If more than one interface of the same type is
accessed through a single Bus, Slot, Function, then the channel
number shall identify each interface. If there is only one
interface, the content of this field shall be cleared to zero. If
there are two interfaces, such as an ATA Primary and Secondary
interface, the primary interface shall be zero, and the secondary
interface shall be one.
Values 00h through FEh shall represent a valid Channel Number.
Value FFh shall indicate that this field is not used.
If the device is connected to a SATA controller functioning in
non-PATA emulation mode, this byte shall be FFh.
"""
# And now the various data from different formats of
# edd/int13_devXX/interface .
self.interface = None
""" interface is the actual contents of the interface file,
preserved for logging later.
"""
self.type = None
""" The device type from edd/int13_devXX/interface.
"""
self.atapi_device = None
""" The device number of the ATAPI device from
edd/int13_devXX/interface when self.type is ATAPI.
"""
self.atapi_lun = None
""" The LUN of the ATAPI device from edd/int13_devXX/interface when
self.type is ATAPI.
"""
self.ata_device = None
""" The device number from edd/int13_devXX/interface when self.type
is ATA or SATA (because Linux treats these the same.)
"""
self.ata_pmp = None
""" The ATA port multiplier ID from edd/int13_devXX/interface when
self.type is SATA.
"""
self.scsi_id = None
""" The SCSI device ID from edd/int13_devXX/interface when
self.type is SCSI
"""
self.scsi_lun = None
""" The SCSI device LUN from edd/int13_devXX/interface when
self.type is SCSI
"""
self.usb_serial = None
""" The USB storage device's serial number from
edd/int13_devXX/interface when self.type is USB.
"""
self.ieee1394_eui64 = None
""" The Firewire/IEEE-1394 EUI-64 ID from edd/int13_devXX/interface
when self.type is 1394.
"""
self.fibre_wwid = None
""" The FibreChannel WWID from edd/int13_devXX/interface when
self.type is FIBRE.
"""
self.fibre_lun = None
""" The FibreChannel LUN from edd/int13_devXX/interface when
self.type is FIBRE.
"""
self.i2o_identity = None
""" The I2O Identity from edd/int13_devXX/interface when self.type
is I2O.
"""
self.sas_address = None
""" The SAS Address from edd/int13_devXX/interface when self.type
is SAS.
"""
self.sas_lun = None
""" The SAS LUN from edd/int13_devXX/interface when self.type is SAS.
"""
self.load()
def _fmt(self, line_pad, separator):
s = "%(t)spath: %(sysfspath)s version: %(version)s %(nl)s" \
"%(t)smbr_signature: %(mbr_sig)s sectors: %(sectors)s"
if self.type is not None:
s += " %(type)s"
if self.sysfslink is not None:
s += "%(nl)s%(t)ssysfs pci path: %(sysfslink)s"
if any([self.host_bus, self.pci_dev, self.channel is not None]):
s += "%(nl)s%(t)shost_bus: %(host_bus)s pci_dev: %(pci_dev)s "\
"channel: %(channel)s"
if self.interface is not None:
s += "%(nl)s%(t)sinterface: \"%(interface)s\""
if any([self.atapi_device is not None, self.atapi_lun is not None]):
s += "%(nl)s%(t)satapi_device: %(atapi_device)s " \
"atapi_lun: %(atapi_lun)s"
if self.ata_device is not None:
s += "%(nl)s%(t)sata_device: %(ata_device)s"
if self.ata_pmp is not None:
s += ", ata_pmp: %(ata_pmp)s"
if any([self.scsi_id is not None, self.scsi_lun is not None]):
s += "%(nl)s%(t)sscsi_id: %(scsi_id)s, scsi_lun: %(scsi_lun)s"
if self.usb_serial is not None:
s += "%(nl)s%(t)susb_serial: %(usb_serial)s"
if self.ieee1394_eui64 is not None:
s += "%(nl)s%(t)s1394_eui: %(ieee1394_eui64)s"
if any([self.fibre_wwid, self.fibre_lun]):
s += "%(nl)s%(t)sfibre wwid: %(fibre_wwid)s lun: %s(fibre_lun)s"
if self.i2o_identity is not None:
s += "%(nl)s%(t)si2o_identity: %(i2o_identity)s"
if any([self.sas_address, self.sas_lun]):
s += "%(nl)s%(t)ssas_address: %(sas_address)s sas_lun: %(sas_lun)s"
d = copy.copy(self.__dict__)
d['t'] = line_pad
d['nl'] = separator
return s % d
def __gt__(self, other):
if not isinstance(self, other.__class__) and \
not isinstance(other, self.__class__):
return self.__class__ > other.__class__
ldict = copy.copy(self.__dict__)
rdict = copy.copy(other.__dict__)
del ldict["root"]
del rdict["root"]
return ldict > rdict
def __eq__(self, other):
if not isinstance(self, other.__class__) and \
not isinstance(other, self.__class__):
return self.__class__ == other.__class__
ldict = copy.copy(self.__dict__)
rdict = copy.copy(other.__dict__)
del ldict["root"]
del rdict["root"]
return ldict == rdict
def __lt__(self, other):
if not isinstance(self, other.__class__) and \
not isinstance(other, self.__class__):
return self.__class__ < other.__class__
ldict = copy.copy(self.__dict__)
rdict = copy.copy(other.__dict__)
del ldict["root"]
del rdict["root"]
return ldict < rdict
def __str__(self):
return self._fmt('\t', '\n')
def __repr__(self):
return "<EddEntry%s>" % (self._fmt(' ', ''),)
def load(self):
interface = util.get_sysfs_attr(self.sysfspath, "interface")
# save this so we can log it from the matcher.
self.interface = interface
if interface:
try:
self.type = interface.split()[0]
if self.type == "ATAPI":
match = re_interface_atapi.match(interface)
self.atapi_device = int(match.group(1))
self.atapi_lun = int(match.group(2))
elif self.type == "ATA":
match = re_interface_ata.match(interface)
self.ata_device = int(match.group(1))
elif self.type == "SCSI":
match = re_interface_scsi.match(interface)
self.scsi_id = int(match.group(1))
self.scsi_lun = int(match.group(2))
elif self.type == "USB":
match = re_interface_usb.match(interface)
self.usb_serial = int(match.group(1), base=16)
elif self.type == "1394":
match = re_interface_1394.match(interface)
self.ieee1394_eui64 = int(match.group(1), base=16)
elif self.type == "FIBRE":
match = re_interface_fibre.match(interface)
self.fibre_wwid = int(match.group(1), base=16)
self.fibre_lun = int(match.group(2), base=16)
elif self.type == "I2O":
match = re_interface_i2o.match(interface)
self.i2o_identity = int(match.group(1), base=16)
elif self.type == "RAID":
match = re_interface_raid.match(interface)
self.raid_array = int(match.group(1), base=16)
elif self.type == "SATA":
match = re_interface_edd4_sata.match(interface)
if match:
self.ata_device = int(match.group(1))
self.ata_pmp = int(match.group(2))
else:
match = re_interface_edd3_sata.match(interface)
self.ata_device = int(match.group(1))
elif self.type == "SAS":
sas_match = re_interface_sas.match(interface)
unknown_match = re_interface_unknown.match(interface)
if sas_match:
self.sas_address = int(sas_match.group(1), base=16)
self.sas_lun = int(sas_match.group(2), base=16)
elif unknown_match:
self.sas_address = int(unknown_match.group(1), base=16)
self.sas_lun = int(unknown_match.group(2), base=16)
else:
log.warning("edd: can not match interface for %s: %s",
self.sysfspath, interface)
else:
log.warning("edd: can not match interface for %s: %s",
self.sysfspath, interface)
except AttributeError as e:
                if e.args and e.args[0] == "'NoneType' object has no attribute 'group'":
log.warning("edd: can not match interface for %s: %s",
self.sysfspath, interface)
else:
raise e
self.mbr_sig = util.get_sysfs_attr(self.sysfspath, "mbr_signature")
sectors = util.get_sysfs_attr(self.sysfspath, "sectors")
if sectors:
self.sectors = int(sectors)
hbus = util.get_sysfs_attr(self.sysfspath, "host_bus")
if hbus:
match = re_host_bus_pci.match(hbus)
if match:
self.host_bus = match.group(1)
self.pci_dev = match.group(2)
self.channel = int(match.group(3))
else:
log.warning("edd: can not match host_bus for %s: %s",
self.sysfspath, hbus)
class EddMatcher(object):
""" This object tries to match given entry to a disk device name.
Assuming, heuristic analysis and guessing hapens here.
"""
def __init__(self, edd_entry, root=None):
self.edd = edd_entry
self.root = root
def devname_from_ata_pci_dev(self):
pattern = util.Path('/sys/block/*', root=self.root)
retries = []
def match_port(components, ata_port, ata_port_idx, path, link):
fn = util.Path(util.join_paths(components[0:6] +
['ata_port', ata_port]), root=self.root)
port_no = int(util.get_sysfs_attr(fn, 'port_no'))
if self.edd.type == "ATA":
# On ATA, port_no is kernel's ata_port->local_port_no, which
# should be the same as the ata device number.
if port_no != self.edd.ata_device:
return
else:
# On SATA, "port_no" is the kernel's ata_port->print_id, which
# is awesomely ata_port->id + 1, where ata_port->id is edd's
# ata_device
if port_no != self.edd.ata_device + 1:
return
fn = components[0:6] + ['link%d' % (ata_port_idx,), ]
exp = [r'.*'] + fn + [r'dev%d\.(\d+)(\.(\d+)){0,1}$' % (ata_port_idx,)]
exp = util.join_paths(exp)
expmatcher = re.compile(exp)
pmp = util.join_paths(fn + ['dev%d.*.*' % (ata_port_idx,)])
pmp = util.Path(pmp, root=self.root)
dev = util.join_paths(fn + ['dev%d.*' % (ata_port_idx,)])
dev = util.Path(dev, root=self.root)
for ataglob in [pmp, dev]:
for atapath in ataglob.glob():
match = expmatcher.match(atapath.ondisk)
if match is None:
continue
# so at this point it's devW.X.Y or devW.Z as such:
# dev_set_name(dev, "dev%d.%d",
# ap->print_id,ata_dev->devno); dev_set_name(dev,
# "dev%d.%d.0", ap->print_id, link->pmp); we care about
# print_id and pmp for matching and the ATA channel if
# applicable. We already checked print_id above.
if match.group(3) is None:
channel = int(match.group(1))
if (self.edd.channel == 255 and channel == 0) or \
(self.edd.channel == channel):
yield ({'link': util.Path(link, root=self.root),
'path': path.split('/')[-1]})
else:
pmp = int(match.group(1))
if self.edd.ata_pmp == pmp:
yield ({'link': util.Path(link, root=self.root),
'path': path.split('/')[-1]})
answers = []
for path in pattern.glob():
emptyslash = util.Path("/", self.root)
path = util.Path(path, self.root)
link = util.sysfs_readlink(path=emptyslash, link=path)
testdata_log.debug("sysfs link: \"%s\" -> \"%s\"", path, link)
# just add /sys/block/ at the beginning so it's always valid
# paths in the filesystem...
components = ['/sys/block'] + link.split('/')
if len(components) != 11:
continue
# ATA and SATA paths look like:
# ../devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda
# where literally the only pieces of data here are
# "pci0000:00:1f.2", "ata1", and "sda".
#
# EDD 3's "channel" doesn't really mean anything at all on SATA,
# and 255 means "not in use". Any other value should be an ATA
# device (but might be a SATA device in compat mode), and that
# matches N in devM.N . So basically "channel" means master/slave
# for ATA (non-SATA) devices. Also in EDD 3, SATA port multipliers
# aren't represented in any way.
#
# In EDD 4, which unfortunately says to leave 0x30 as the version
# number, the port multiplier id is an additional field on the
# interface. So basically we should try to use the value the
# kernel gives us*, but we can't trust it. Thankfully there
# won't be a devX.Y.Z (i.e. a port multiplier device) in sysfs
# that collides with devX.Z (a non-port-multiplied device),
# so if we get a value from the kernel, we can try with and
# without it.
#
# * When the kernel finally learns of these facts...
#
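            # A hypothetical worked example (all values illustrative only):
            #   link == '../devices/pci0000:00/0000:00:1f.2/ata1/host0/'
            #           'target0:0:0/0:0:0:0/block/sda'
            #   components[4] == '0000:00:1f.2', components[5] == 'ata1'
            #   a plain device appears as e.g. 'dev1.0' (print_id 1, devno 0),
            #   one behind a port multiplier as e.g. 'dev1.2.0' (pmp id 2).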
if components[4] != '0000:%s' % (self.edd.pci_dev,):
continue
if not components[5].startswith('ata'):
continue
ata_port = components[5]
ata_port_idx = int(components[5][3:])
# strictly this should always be required, but #!@#!@#!@ seabios
# iterates the sata device number /independently/ of the host
            # bridge it claims things are attached to. In that case the
            # scsi target will always have "0" as the ID component.
args = {'device': self.edd.ata_device}
exp = r"target\d+:0:%(device)s/\d+:0:%(device)s:0/block/.*" % args
matcher = re.compile(exp)
match = matcher.match("/".join(components[7:]))
if not match:
retries.append({
'components': components,
'ata_port': ata_port,
'ata_port_idx': ata_port_idx,
'path': path,
'link': link,
})
continue
for port in match_port(components, ata_port, ata_port_idx, path,
link):
answers.append(port)
# now handle the ones we discarded because libata's scsi id doesn't
# match the ata_device.
for retry in retries:
for port in match_port(**retry):
if answers:
log.warning("edd: ignoring possible extra match for ATA device %s channel %s ata %d pmp %s: %s",
self.edd.pci_dev, self.edd.channel,
self.edd.ata_device, self.edd.ata_pmp,
retry['path'])
else:
log.warning("edd: using possible extra match for ATA device %s channel %s ata %d pmp %s: %s",
self.edd.pci_dev, self.edd.channel,
self.edd.ata_device, self.edd.ata_pmp,
retry['path'])
answers.append(port)
if len(answers) > 1:
log.error("edd: Found too many ATA devices for EDD device 0x%x: %s",
self.edd.bios_device_number,
[a['link'] for a in answers])
if len(answers) > 0:
self.edd.sysfslink = answers[0]['link']
return answers[0]['path']
else:
log.warning(
"edd: Could not find ATA device for pci dev %s channel %s ata %d pmp %s",
self.edd.pci_dev, self.edd.channel,
self.edd.ata_device, self.edd.ata_pmp)
return None
def devname_from_virtio_scsi_pci_dev(self):
if self.edd.scsi_id is None or self.edd.scsi_lun is None:
return None
# Virtio SCSI looks like scsi but with a virtio%d/ stuck in the middle
# channel appears to be a total lie on VirtIO SCSI devices.
tmpl = "../devices/pci0000:00/0000:%(pci_dev)s/virtio*/" \
"host*/target*:0:%(dev)d/*:0:%(dev)d:%(lun)d/block/"
args = {
'pci_dev': self.edd.pci_dev,
'dev': self.edd.scsi_id,
'lun': self.edd.scsi_lun,
}
pattern = util.Path(tmpl % args, self.root + "/sys/block/")
answers = []
for mp in pattern.glob():
# Normal VirtIO devices just have the block link right there...
block_entries = os.listdir(mp.ondisk)
for be in block_entries:
link = mp + be
answers.append({'link': link, 'path': be})
if len(answers) > 1:
log.error("Found too many VirtIO SCSI devices for EDD device 0x%x: %s",
self.edd.bios_device_number,
[a['link'] for a in answers])
if len(answers) > 0:
self.edd.sysfslink = answers[0]['link']
return answers[0]['path']
else:
log.info("edd: Could not find VirtIO SCSI device for pci dev %s "
"channel %s scsi id %s lun %s", self.edd.pci_dev,
self.edd.channel, self.edd.scsi_id, self.edd.scsi_lun)
def devname_from_scsi_pci_dev(self):
tmpl = "../devices/pci0000:00/0000:%(pci_dev)s/" \
"host%(chan)d/target%(chan)d:0:%(dev)d/" \
"%(chan)d:0:%(dev)d:%(lun)d/block/"
args = {
'pci_dev': self.edd.pci_dev,
'chan': self.edd.channel,
'dev': self.edd.scsi_id,
'lun': self.edd.scsi_lun,
}
pattern = util.Path(tmpl % args, root=self.root + "/sys/block/")
answers = []
for mp in pattern.glob():
# Normal VirtIO devices just have the block link right there...
block_entries = os.listdir(mp.ondisk)
for be in block_entries:
link = mp + be
answers.append({'link': link, 'path': be})
if len(answers) > 1:
log.error("Found too many SCSI devices for EDD device 0x%x: %s",
self.edd.bios_device_number,
[a['link'] for a in answers])
if len(answers) > 0:
self.edd.sysfslink = answers[0]['link']
return answers[0]['path']
else:
log.warning("edd: Could not find SCSI device for pci dev %s "
"channel %s scsi id %s lun %s", self.edd.pci_dev,
self.edd.channel, self.edd.scsi_id, self.edd.scsi_lun)
return None
def devname_from_virt_pci_dev(self):
pattern = util.Path("../devices/pci0000:00/0000:%s/virtio*/block/" %
(self.edd.pci_dev,), root=self.root + "/sys/block/")
answers = []
for mp in pattern.glob():
# Normal VirtIO devices just have the block link right there...
block_entries = os.listdir(mp.ondisk)
for be in block_entries:
link = mp + be
answers.append({'link': link, 'path': be})
if len(answers) > 1:
log.error("Found too many VirtIO devices for EDD device 0x%x: %s",
self.edd.bios_device_number,
[a['link'] for a in answers])
if len(answers) > 0:
self.edd.sysfslink = answers[0]['link']
return answers[0]['path']
else:
log.info(
"edd: Could not find Virtio device for pci dev %s channel %s",
self.edd.pci_dev, self.edd.channel)
return None
def devname_from_pci_dev(self):
if self.edd.pci_dev is None:
return None
name = self.devname_from_virt_pci_dev()
if name is not None:
return name
name = self.devname_from_virtio_scsi_pci_dev()
if name is not None:
return name
unsupported = ("ATAPI", "USB", "1394", "I2O", "RAID", "FIBRE", "SAS")
if self.edd.type in unsupported:
log.warning("edd: interface type %s is not implemented (%s)",
self.edd.type, self.edd.sysfspath)
log.warning("edd: interface details: %s", self.edd.interface)
if self.edd.type in ("ATA", "SATA") and \
self.edd.ata_device is not None:
name = self.devname_from_ata_pci_dev()
elif self.edd.type == "SCSI":
name = self.devname_from_scsi_pci_dev()
if self.edd.sysfslink:
path = util.Path("/sys/block/", root=self.root) \
+ self.edd.sysfslink \
+ "/device"
link = os.readlink(path.ondisk)
testdata_log.debug("sysfs link: \"%s\" -> \"%s\"", path, link)
return name
def match_via_mbrsigs(self, mbr_dict):
""" Try to match the edd entry based on its mbr signature.
This will obviously fail for a fresh drive/image, but in extreme
cases can also show false positives for randomly matching data.
"""
sysblock = util.Path("/sys/block/", root=self.root)
for (name, mbr_sig) in mbr_dict.items():
if mbr_sig == self.edd.mbr_sig:
self.edd.sysfslink = util.sysfs_readlink(sysblock, link=name)
return name
return None
def collect_edd_data(root=None):
edd_data_dict = {}
globstr = util.Path("/sys/firmware/edd/int13_dev*/", root=root)
for path in globstr.glob():
match = re_bios_device_number.match(path)
biosdev = int("0x%s" % (match.group(1),), base=16)
log.debug("edd: found device 0x%x at %s", biosdev, path)
edd_data_dict[biosdev] = EddEntry(path, root=root)
return edd_data_dict
def collect_mbrs(devices, root=None):
""" Read MBR signatures from devices.
    Returns a dict mapping device names to their MBR signatures. This is
    not guaranteed to succeed; a brand-new disk, for instance, has no
    signature yet.
"""
mbr_dict = {}
for dev in devices:
try:
path = util.Path("/dev", root=root) + dev.name
fd = os.open(path.ondisk, os.O_RDONLY)
# The signature is the unsigned integer at byte 440:
os.lseek(fd, 440, 0)
data = os.read(fd, 4)
mbrsig = struct.unpack('I', data)
sdata = struct.unpack("BBBB", data)
sdata = "".join(["%02x" % (x,) for x in sdata])
os.close(fd)
testdata_log.debug("device %s data[440:443] = %s", path, sdata)
except OSError as e:
testdata_log.debug("device %s data[440:443] raised %s", path, e)
log.error("edd: could not read mbrsig from disk %s: %s",
dev.name, str(e))
continue
mbrsig_str = "0x%08x" % mbrsig
# sanity check
if mbrsig_str == '0x00000000':
log.info("edd: MBR signature on %s is zero. new disk image?",
dev.name)
continue
else:
for (dev_name, mbrsig_str_old) in mbr_dict.items():
if mbrsig_str_old == mbrsig_str:
log.error("edd: dupicite MBR signature %s for %s and %s",
mbrsig_str, dev_name, dev.name)
# this actually makes all the other data useless
return {}
# update the dictionary
mbr_dict[dev.name] = mbrsig_str
log.info("edd: collected mbr signatures: %s", mbr_dict)
return mbr_dict
def get_edd_dict(devices, root=None):
""" Generates the 'device name' -> 'edd number' mapping.
The EDD kernel module that exposes /sys/firmware/edd is thoroughly
broken, the information there is incomplete and sometimes downright
wrong. So after we mine out all useful information that the files under
/sys/firmware/edd/int13_*/ can provide, we resort to heuristics and
    guessing. Our first attempt is, by looking at the device type in
    'interface', attempting to map pci device number, channel number etc. to
    a sysfs path, check that the path really exists, then read the device
    name (e.g. 'sda') from there. Should this fail we try to match contents
of 'mbr_signature' to a real MBR signature found on the existing block
devices.
"""
mbr_dict = collect_mbrs(devices, root=root)
edd_entries_dict = collect_edd_data(root=root)
edd_dict = {}
for (edd_number, edd_entry) in edd_entries_dict.items():
matcher = EddMatcher(edd_entry, root=root)
# first try to match through the pci dev etc.
name = matcher.devname_from_pci_dev()
log.debug("edd: data extracted from 0x%x:%r", edd_number, edd_entry)
if name:
log.info("edd: matched 0x%x to %s using PCI dev", edd_number, name)
# next try to compare mbr signatures
else:
name = matcher.match_via_mbrsigs(mbr_dict)
if name:
log.info("edd: matched 0x%x to %s using MBR sig", edd_number, name)
if name:
old_edd_number = edd_dict.get(name)
if old_edd_number:
log.info("edd: both edd entries 0x%x and 0x%x seem to map to %s",
old_edd_number, edd_number, name)
# this means all the other data can be confused and useless
return {}
edd_dict[name] = edd_number
else:
log.error("edd: unable to match edd entry 0x%x", edd_number)
return edd_dict
| gpl-2.0 |
wzbozon/statsmodels | statsmodels/stats/tabledist.py | 34 | 11643 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 01 20:20:16 2011
Author: Josef Perktold
License: BSD-3
TODO:
 - check orientation: size and alpha should be increasing for interp1d,
   but what is alpha? It can be either an sf or a cdf probability;
   change it to use one consistent notation.
 - check: instead of bound checking I could use the fill-value of the
   interpolators.
"""
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy.interpolate import interp1d, interp2d, Rbf
from statsmodels.tools.decorators import cache_readonly
class TableDist(object):
'''Distribution, critical values and p-values from tables
currently only 1 extra parameter, e.g. sample size
Parameters
----------
alpha : array_like, 1d
        probability in the table, could be either sf (right tail) or cdf (left
tail)
size : array_like, 1d
        second parameter in the table
crit_table : array_like, 2d
array with critical values for sample size in rows and probability in
columns
Notes
-----
size and alpha should be increasing
'''
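    # Minimal usage sketch (assumes alpha, size and crit_table as described
    # above; see the Lilliefors example under __main__ for concrete values):
    #   td = TableDist(alpha, size, crit_table)
    #   p = td.prob(x, n)   #interpolated tail probability of statistic x
    #   c = td.crit(p, n)   #interpolated critical value for probability p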
def __init__(self, alpha, size, crit_table):
self.alpha = np.asarray(alpha)
self.size = np.asarray(size)
self.crit_table = np.asarray(crit_table)
self.n_alpha = len(alpha)
self.signcrit = np.sign(np.diff(self.crit_table, 1).mean())
if self.signcrit > 0: #increasing
self.critv_bounds = self.crit_table[:,[0,1]]
else:
self.critv_bounds = self.crit_table[:,[1,0]]
@cache_readonly
def polyn(self):
polyn = [interp1d(self.size, self.crit_table[:,i])
for i in range(self.n_alpha)]
return polyn
@cache_readonly
def poly2d(self):
#check for monotonicity ?
#fix this, interp needs increasing
poly2d = interp2d(self.size, self.alpha, self.crit_table)
return poly2d
@cache_readonly
def polyrbf(self):
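        # radial basis function interpolator over the full (size, alpha)
        # grid; crit3 uses it for smooth 2d interpolation of critical values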
xs, xa = np.meshgrid(self.size.astype(float), self.alpha)
        polyrbf = Rbf(xs.ravel(), xa.ravel(), self.crit_table.T.ravel(),
                      function='linear')
return polyrbf
def _critvals(self, n):
'''rows of the table, linearly interpolated for given sample size
Parameters
----------
n : float
sample size, second parameter of the table
Returns
-------
critv : ndarray, 1d
critical values (ppf) corresponding to a row of the table
Notes
-----
This is used in two step interpolation, or if we want to know the
critical values for all alphas for any sample size that we can obtain
through interpolation
'''
return np.array([p(n) for p in self.polyn])
def prob(self, x, n):
        '''find pvalues by interpolation, either cdf(x) or sf(x)
        returns extreme probabilities, 0.001 and 0.2, for out of range
Parameters
----------
x : array_like
observed value, assumed to follow the distribution in the table
n : float
sample size, second parameter of the table
Returns
-------
        prob : array_like
            Probability for each value of x: the p-value of x in the
            underlying distribution, as used in a statistical test.
'''
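        # e.g. (hedged; values from the Lilliefors table in __main__ below):
        #   lf.prob(0.166, 20) -> approximately 0.15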
critv = self._critvals(n)
alpha = self.alpha
# if self.signcrit == 1:
# if x < critv[0]: #generalize: ? np.sign(x - critvals[0]) == self.signcrit:
# return alpha[0]
# elif x > critv[-1]:
# return alpha[-1]
# elif self.signcrit == -1:
# if x > critv[0]:
# return alpha[0]
# elif x < critv[-1]:
# return alpha[-1]
if self.signcrit < 1:
#reverse if critv is decreasing
critv, alpha = critv[::-1], alpha[::-1]
#now critv is increasing
if np.size(x) == 1:
if x < critv[0]:
return alpha[0]
elif x > critv[-1]:
return alpha[-1]
return interp1d(critv, alpha)(x)[()]
else:
#vectorized
cond_low = (x < critv[0])
cond_high = (x > critv[-1])
cond_interior = ~np.logical_or(cond_low, cond_high)
probs = np.nan * np.ones(x.shape) #mistake if nan left
probs[cond_low] = alpha[0]
            probs[cond_high] = alpha[-1]
probs[cond_interior] = interp1d(critv, alpha)(x[cond_interior])
return probs
def crit2(self, prob, n):
'''returns interpolated quantiles, similar to ppf or isf
this can be either cdf or sf depending on the table, twosided?
        note: this doesn't work; interp2d gives a "no more knots" warning
'''
return self.poly2d(n, prob)
def crit(self, prob, n):
'''returns interpolated quantiles, similar to ppf or isf
use two sequential 1d interpolation, first by n then by prob
Parameters
----------
prob : array_like
probabilities corresponding to the definition of table columns
n : int or float
sample size, second parameter of the table
Returns
-------
ppf : array_like
critical values with same shape as prob
'''
prob = np.asarray(prob)
alpha = self.alpha
critv = self._critvals(n)
#vectorized
        cond_ilow = (prob >= alpha[0])
        cond_ihigh = (prob <= alpha[-1])
        cond_interior = np.logical_and(cond_ilow, cond_ihigh) #nan outside
#scalar
if prob.size == 1:
if cond_interior:
return interp1d(alpha, critv)(prob)
else:
return np.nan
#vectorized
quantile = np.nan * np.ones(prob.shape) #nans for outside
quantile[cond_interior] = interp1d(alpha, critv)(prob[cond_interior])
return quantile
def crit3(self, prob, n):
'''returns interpolated quantiles, similar to ppf or isf
uses Rbf to interpolate critical values as function of `prob` and `n`
Parameters
----------
prob : array_like
probabilities corresponding to the definition of table columns
n : int or float
sample size, second parameter of the table
Returns
-------
ppf : array_like
critical values with same shape as prob, returns nan for arguments
that are outside of the table bounds
'''
prob = np.asarray(prob)
alpha = self.alpha
#vectorized
        cond_ilow = (prob >= alpha[0])
        cond_ihigh = (prob <= alpha[-1])
        cond_interior = np.logical_and(cond_ilow, cond_ihigh) #nan outside
#scalar
if prob.size == 1:
if cond_interior:
return self.polyrbf(n, prob)
else:
return np.nan
#vectorized
quantile = np.nan * np.ones(prob.shape) #nans for outside
quantile[cond_interior] = self.polyrbf(n, prob[cond_interior])
return quantile
if __name__ == '__main__':
'''
    example: Lilliefors test for normality
    Gerard E. Dallal and Leland Wilkinson, "An Analytic Approximation to the
    Distribution of Lilliefors's Test Statistic for Normality", The American
    Statistician, Vol. 40, No. 4 (Nov. 1986), pp. 294-296.
    Stable URL: http://www.jstor.org/stable/2684607
'''
#for this test alpha is sf probability, i.e. right tail probability
alpha = np.array([ 0.2 , 0.15 , 0.1 , 0.05 , 0.01 , 0.001])[::-1]
size = np.array([ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 25, 30, 40, 100, 400, 900], float)
#critical values, rows are by sample size, columns are by alpha
crit_lf = np.array( [[303, 321, 346, 376, 413, 433],
[289, 303, 319, 343, 397, 439],
[269, 281, 297, 323, 371, 424],
[252, 264, 280, 304, 351, 402],
[239, 250, 265, 288, 333, 384],
[227, 238, 252, 274, 317, 365],
[217, 228, 241, 262, 304, 352],
[208, 218, 231, 251, 291, 338],
[200, 210, 222, 242, 281, 325],
[193, 202, 215, 234, 271, 314],
[187, 196, 208, 226, 262, 305],
[181, 190, 201, 219, 254, 296],
[176, 184, 195, 213, 247, 287],
[171, 179, 190, 207, 240, 279],
[167, 175, 185, 202, 234, 273],
[163, 170, 181, 197, 228, 266],
[159, 166, 176, 192, 223, 260],
[143, 150, 159, 173, 201, 236],
[131, 138, 146, 159, 185, 217],
[115, 120, 128, 139, 162, 189],
[ 74, 77, 82, 89, 104, 122],
[ 37, 39, 41, 45, 52, 61],
[ 25, 26, 28, 30, 35, 42]])[:,::-1] / 1000.
lf = TableDist(alpha, size, crit_lf)
print(lf.prob(0.166, 20), 'should be:', 0.15)
print('')
print(lf.crit2(0.15, 20), 'should be:', 0.166, 'interp2d bad')
print(lf.crit(0.15, 20), 'should be:', 0.166, 'two 1d')
print(lf.crit3(0.15, 20), 'should be:', 0.166, 'Rbf')
print('')
print(lf.crit2(0.17, 20), 'should be in:', (.159, .166), 'interp2d bad')
print(lf.crit(0.17, 20), 'should be in:', (.159, .166), 'two 1d')
print(lf.crit3(0.17, 20), 'should be in:', (.159, .166), 'Rbf')
print('')
print(lf.crit2(0.19, 20), 'should be in:', (.159, .166), 'interp2d bad')
print(lf.crit(0.19, 20), 'should be in:', (.159, .166), 'two 1d')
print(lf.crit3(0.19, 20), 'should be in:', (.159, .166), 'Rbf')
print('')
print(lf.crit2(0.199, 20), 'should be in:', (.159, .166), 'interp2d bad')
print(lf.crit(0.199, 20), 'should be in:', (.159, .166), 'two 1d')
print(lf.crit3(0.199, 20), 'should be in:', (.159, .166), 'Rbf')
#testing
print(np.max(np.abs(np.array([lf.prob(c, size[i]) for i in range(len(size)) for c in crit_lf[i]]).reshape(-1,lf.n_alpha) - lf.alpha)))
#1.6653345369377348e-16
print(np.max(np.abs(np.array([lf.crit(c, size[i]) for i in range(len(size)) for c in lf.alpha]).reshape(-1,lf.n_alpha) - crit_lf)))
#6.9388939039072284e-18)
print(np.max(np.abs(np.array([lf.crit3(c, size[i]) for i in range(len(size)) for c in lf.alpha]).reshape(-1,lf.n_alpha) - crit_lf)))
#4.0615705243496336e-12)
print((np.array([lf.crit3(c, size[i]) for i in range(len(size)) for c in lf.alpha[:-1]*1.1]).reshape(-1,lf.n_alpha-1) < crit_lf[:,:-1]).all())
print((np.array([lf.crit3(c, size[i]) for i in range(len(size)) for c in lf.alpha[:-1]*1.1]).reshape(-1,lf.n_alpha-1) > crit_lf[:,1:]).all())
print((np.array([lf.prob(c*0.9, size[i]) for i in range(len(size)) for c in crit_lf[i,:-1]]).reshape(-1,lf.n_alpha-1) > lf.alpha[:-1]).all())
print((np.array([lf.prob(c*1.1, size[i]) for i in range(len(size)) for c in crit_lf[i,1:]]).reshape(-1,lf.n_alpha-1) < lf.alpha[1:]).all())
#start at size_idx=2 because of non-monotonicity of lf_crit
print((np.array([lf.prob(c, size[i]*0.9) for i in range(2,len(size)) for c in crit_lf[i,:-1]]).reshape(-1,lf.n_alpha-1) > lf.alpha[:-1]).all())
| bsd-3-clause |
hshindo/POS-Tagging-benchmark | Theano/lstm.py | 1 | 3354 | __author__ = 'hiroki'
import numpy as np
import theano
import theano.tensor as T
from nn_utils import sigmoid, tanh, sample_weights
class LSTM(object):
def __init__(self,
w,
d,
n_layer,
vocab_size,
n_in,
n_hidden = 50,
n_i = 50,
n_c = 50,
n_o = 50,
n_f = 50,
n_y = 45,
activation=tanh):
self.w = w
self.d = d
self.activation = activation
"""embeddings"""
self.emb = theano.shared(sample_weights(vocab_size, n_in))
"""input gate parameters"""
self.W_xi = theano.shared(sample_weights(n_in, n_i))
self.W_hi = theano.shared(sample_weights(n_hidden, n_i))
"""forget gate parameters"""
self.W_xf = theano.shared(sample_weights(n_in, n_f))
self.W_hf = theano.shared(sample_weights(n_hidden, n_f))
"""cell parameters"""
self.W_xc = theano.shared(sample_weights(n_in, n_c))
self.W_hc = theano.shared(sample_weights(n_hidden, n_c))
"""output gate parameters"""
self.W_xo = theano.shared(sample_weights(n_in, n_o))
self.W_ho = theano.shared(sample_weights(n_hidden, n_o))
"""output parameters"""
self.W_hy = theano.shared(sample_weights(n_hidden, n_y))
self.c0 = theano.shared(np.zeros(n_hidden, dtype=theano.config.floatX))
self.h0 = self.activation(self.c0)
self.params = [self.W_xi, self.W_hi, self.W_xf, self.W_hf, self.W_xc,
self.W_hc, self.W_xo, self.W_ho, self.W_hy, self.c0]
self.x = self.emb[self.w]
self.layer_output = self.layers(n_layers=n_layer)
self.y, _ = theano.scan(fn=self.output_forward,
sequences=self.layer_output[-1],
outputs_info=[None])
self.y = self.y[::-1]
self.p_y_given_x = self.y.reshape((self.y.shape[0], self.y.shape[2]))
self.nll = -T.mean(T.log(self.p_y_given_x)[T.arange(d.shape[0]), d])
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
self.errors = T.neq(self.y_pred, d)
def layers(self, n_layers=2):
layer_output = []
for i in xrange(n_layers):
if i == 0:
layer_input = self.x
else:
layer_input = layer_output[-1][::-1]
[h, c], _ = theano.scan(fn=self.forward,
sequences=layer_input,
outputs_info=[self.h0, self.c0])
layer_output.append(h)
return layer_output
def forward(self, x_t, h_tm1, c_tm1):
'''
sequences: x_t
prior results: h_tm1, c_tm1
'''
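        # Gate equations implemented below. Note this is a peephole-style
        # variant in which the cell state enters the gates unweighted (this
        # mirrors the code as written; standard peephole LSTMs would use
        # extra weight vectors):
        #   i_t = sigmoid(x_t W_xi + h_{t-1} W_hi + c_{t-1})
        #   f_t = sigmoid(x_t W_xf + h_{t-1} W_hf + c_{t-1})
        #   c_t = f_t * c_{t-1} + i_t * tanh(x_t W_xc + h_{t-1} W_hc)
        #   o_t = sigmoid(x_t W_xo + h_{t-1} W_ho + c_t)
        #   h_t = o_t * tanh(c_t)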
i_t = sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tm1, self.W_hi) + c_tm1)
f_t = sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tm1, self.W_hf) + c_tm1)
c_t = f_t * c_tm1 + i_t * self.activation(T.dot(x_t, self.W_xc) + T.dot(h_tm1, self.W_hc))
o_t = sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tm1, self.W_ho) + c_t)
h_t = o_t * self.activation(c_t)
return h_t, c_t
def output_forward(self, h_t):
y_t = T.nnet.softmax(T.dot(h_t, self.W_hy))
return y_t
| mit |
t-woerner/firewalld | src/firewall/core/io/zone.py | 1 | 39512 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2016 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = [ "Zone", "zone_reader", "zone_writer" ]
import xml.sax as sax
import os
import io
import shutil
from firewall import config
from firewall.functions import checkIP, checkIP6, checkIPnMask, checkIP6nMask, checkInterface, uniqify, max_zone_name_len, u2b_if_py2, check_mac, portStr
from firewall.core.base import DEFAULT_ZONE_TARGET, ZONE_TARGETS
from firewall.core.io.io_object import PY2, IO_Object, \
IO_Object_ContentHandler, IO_Object_XMLGenerator, check_port, \
check_tcpudp, check_protocol
from firewall.core import rich
from firewall.core.logger import log
from firewall import errors
from firewall.errors import FirewallError
class Zone(IO_Object):
""" Zone class """
IMPORT_EXPORT_STRUCTURE = (
( "version", "" ), # s
( "short", "" ), # s
( "description", "" ), # s
( "UNUSED", False ), # b
( "target", "" ), # s
( "services", [ "", ], ), # as
( "ports", [ ( "", "" ), ], ), # a(ss)
( "icmp_blocks", [ "", ], ), # as
( "masquerade", False ), # b
( "forward_ports", [ ( "", "", "", "" ), ], ), # a(ssss)
( "interfaces", [ "" ] ), # as
( "sources", [ "" ] ), # as
( "rules_str", [ "" ] ), # as
( "protocols", [ "", ], ), # as
( "source_ports", [ ( "", "" ), ], ), # a(ss)
( "icmp_block_inversion", False ), # b
)
DBUS_SIGNATURE = '(sssbsasa(ss)asba(ssss)asasasasa(ss)b)'
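    # The D-Bus signature above is the per-field type codes from
    # IMPORT_EXPORT_STRUCTURE concatenated in order (s = string, b = boolean,
    # as = array of strings, a(ss)/a(ssss) = arrays of string tuples).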
ADDITIONAL_ALNUM_CHARS = [ "_", "-", "/" ]
PARSER_REQUIRED_ELEMENT_ATTRS = {
"short": None,
"description": None,
"zone": None,
"service": [ "name" ],
"port": [ "port", "protocol" ],
"icmp-block": [ "name" ],
"icmp-type": [ "name" ],
"forward-port": [ "port", "protocol" ],
"interface": [ "name" ],
"rule": None,
"source": None,
"destination": [ "address" ],
"protocol": [ "value" ],
"source-port": [ "port", "protocol" ],
"log": None,
"audit": None,
"accept": None,
"reject": None,
"drop": None,
"mark": [ "set" ],
"limit": [ "value" ],
"icmp-block-inversion": None,
}
PARSER_OPTIONAL_ELEMENT_ATTRS = {
"zone": [ "name", "immutable", "target", "version" ],
"masquerade": [ "enabled" ],
"forward-port": [ "to-port", "to-addr" ],
"rule": [ "family" ],
"source": [ "address", "mac", "invert", "family", "ipset" ],
"destination": [ "invert" ],
"log": [ "prefix", "level" ],
"reject": [ "type" ],
}
@staticmethod
def index_of(element):
for i, (el, dummy) in enumerate(Zone.IMPORT_EXPORT_STRUCTURE):
if el == element:
return i
raise FirewallError(errors.UNKNOWN_ERROR, "index_of()")
def __init__(self):
super(Zone, self).__init__()
self.version = ""
self.short = ""
self.description = ""
self.UNUSED = False
self.target = DEFAULT_ZONE_TARGET
self.services = [ ]
self.ports = [ ]
self.protocols = [ ]
self.icmp_blocks = [ ]
self.masquerade = False
self.forward_ports = [ ]
self.source_ports = [ ]
self.interfaces = [ ]
self.sources = [ ]
        self.fw_config = None # to be able to check services and icmp_blocks
self.rules = [ ]
self.icmp_block_inversion = False
self.combined = False
self.applied = False
def cleanup(self):
self.version = ""
self.short = ""
self.description = ""
self.UNUSED = False
self.target = DEFAULT_ZONE_TARGET
del self.services[:]
del self.ports[:]
del self.protocols[:]
del self.icmp_blocks[:]
self.masquerade = False
del self.forward_ports[:]
del self.source_ports[:]
del self.interfaces[:]
del self.sources[:]
        self.fw_config = None # to be able to check services and icmp_blocks
del self.rules[:]
self.icmp_block_inversion = False
self.combined = False
self.applied = False
def encode_strings(self):
""" HACK. I haven't been able to make sax parser return
strings encoded (because of python 2) instead of in unicode.
Get rid of it once we throw out python 2 support."""
self.version = u2b_if_py2(self.version)
self.short = u2b_if_py2(self.short)
self.description = u2b_if_py2(self.description)
self.target = u2b_if_py2(self.target)
self.services = [u2b_if_py2(s) for s in self.services]
self.ports = [(u2b_if_py2(po),u2b_if_py2(pr)) for (po,pr) in self.ports]
self.protocols = [u2b_if_py2(pr) for pr in self.protocols]
self.icmp_blocks = [u2b_if_py2(i) for i in self.icmp_blocks]
self.forward_ports = [(u2b_if_py2(p1),u2b_if_py2(p2),u2b_if_py2(p3),u2b_if_py2(p4)) for (p1,p2,p3,p4) in self.forward_ports]
self.source_ports = [(u2b_if_py2(po),u2b_if_py2(pr)) for (po,pr)
in self.source_ports]
self.interfaces = [u2b_if_py2(i) for i in self.interfaces]
self.sources = [u2b_if_py2(s) for s in self.sources]
self.rules = [u2b_if_py2(s) for s in self.rules]
def __getattr__(self, name):
if name == "rules_str":
rules_str = [str(rule) for rule in self.rules]
return rules_str
else:
return getattr(super(Zone, self), name)
def __setattr__(self, name, value):
if name == "rules_str":
self.rules = [rich.Rich_Rule(rule_str=s) for s in value]
else:
super(Zone, self).__setattr__(name, value)
def _check_config(self, config, item):
if item == "services" and self.fw_config:
existing_services = self.fw_config.get_services()
for service in config:
if service not in existing_services:
raise FirewallError(errors.INVALID_SERVICE,
"'%s' not among existing services" % \
service)
elif item == "ports":
for port in config:
check_port(port[0])
check_tcpudp(port[1])
elif item == "protocols":
for proto in config:
check_protocol(proto)
elif item == "icmp_blocks" and self.fw_config:
existing_icmptypes = self.fw_config.get_icmptypes()
for icmptype in config:
if icmptype not in existing_icmptypes:
raise FirewallError(errors.INVALID_ICMPTYPE,
"'%s' not among existing icmp types" % \
icmptype)
elif item == "forward_ports":
for fwd_port in config:
check_port(fwd_port[0])
check_tcpudp(fwd_port[1])
if not fwd_port[2] and not fwd_port[3]:
raise FirewallError(
errors.INVALID_FORWARD,
"'%s' is missing to-port AND to-addr " % fwd_port)
if fwd_port[2]:
check_port(fwd_port[2])
if fwd_port[3]:
if not checkIP(fwd_port[3]) and not checkIP6(fwd_port[3]):
raise FirewallError(
errors.INVALID_ADDR,
"to-addr '%s' is not a valid address" % fwd_port[3])
elif item == "source_ports":
for port in config:
check_port(port[0])
check_tcpudp(port[1])
elif item == "target":
if config not in ZONE_TARGETS:
raise FirewallError(errors.INVALID_TARGET, config)
elif item == "interfaces":
for interface in config:
if not checkInterface(interface):
raise FirewallError(errors.INVALID_INTERFACE, interface)
elif item == "sources":
for source in config:
if not checkIPnMask(source) and not checkIP6nMask(source) and \
not check_mac(source) and not source.startswith("ipset:"):
raise FirewallError(errors.INVALID_ADDR, source)
elif item == "rules_str":
for rule in config:
rich.Rich_Rule(rule_str=rule)
def check_name(self, name):
super(Zone, self).check_name(name)
if name.startswith('/'):
raise FirewallError(errors.INVALID_NAME,
"'%s' can't start with '/'" % name)
elif name.endswith('/'):
raise FirewallError(errors.INVALID_NAME,
"'%s' can't end with '/'" % name)
elif name.count('/') > 1:
raise FirewallError(errors.INVALID_NAME,
"more than one '/' in '%s'" % name)
else:
if "/" in name:
checked_name = name[:name.find('/')]
else:
checked_name = name
if len(checked_name) > max_zone_name_len():
raise FirewallError(errors.INVALID_NAME,
"Zone of '%s' has %d chars, max is %d %s" % (
name, len(checked_name),
max_zone_name_len(),
self.combined))
def combine(self, zone):
self.combined = True
self.filename = None
self.version = ""
self.short = ""
self.description = ""
for interface in zone.interfaces:
if interface not in self.interfaces:
self.interfaces.append(interface)
for source in zone.sources:
if source not in self.sources:
self.sources.append(source)
for service in zone.services:
if service not in self.services:
self.services.append(service)
for port in zone.ports:
if port not in self.ports:
self.ports.append(port)
for proto in zone.protocols:
if proto not in self.protocols:
self.protocols.append(proto)
for icmp in zone.icmp_blocks:
if icmp not in self.icmp_blocks:
self.icmp_blocks.append(icmp)
if zone.masquerade:
self.masquerade = True
for forward in zone.forward_ports:
if forward not in self.forward_ports:
self.forward_ports.append(forward)
for port in zone.source_ports:
if port not in self.source_ports:
self.source_ports.append(port)
for rule in zone.rules:
self.rules.append(rule)
if zone.icmp_block_inversion:
self.icmp_block_inversion = True
# PARSER
class zone_ContentHandler(IO_Object_ContentHandler):
def __init__(self, item):
IO_Object_ContentHandler.__init__(self, item)
self._rule = None
self._rule_error = False
self._limit_ok = None
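    # For reference, a rich rule as it appears in zone XML (hypothetical
    # example covering the elements handled below):
    #   <rule family="ipv4">
    #     <source address="192.0.2.0/24"/>
    #     <service name="ssh"/>
    #     <log prefix="ssh " level="info"><limit value="1/m"/></log>
    #     <accept/>
    #   </rule>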
def startElement(self, name, attrs):
IO_Object_ContentHandler.startElement(self, name, attrs)
if self._rule_error:
return
self.item.parser_check_element_attrs(name, attrs)
if name == "zone":
if "name" in attrs:
log.warning("Ignoring deprecated attribute name='%s'",
attrs["name"])
if "version" in attrs:
self.item.version = attrs["version"]
if "immutable" in attrs:
log.warning("Ignoring deprecated attribute immutable='%s'",
attrs["immutable"])
if "target" in attrs:
target = attrs["target"]
if target not in ZONE_TARGETS:
raise FirewallError(errors.INVALID_TARGET, target)
if target != "" and target != DEFAULT_ZONE_TARGET:
self.item.target = target
elif name == "short":
pass
elif name == "description":
pass
elif name == "service":
if self._rule:
if self._rule.element:
log.warning("Invalid rule: More than one element in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.element = rich.Rich_Service(attrs["name"])
return
if attrs["name"] not in self.item.services:
self.item.services.append(attrs["name"])
else:
log.warning("Service '%s' already set, ignoring.",
attrs["name"])
elif name == "port":
if self._rule:
if self._rule.element:
log.warning("Invalid rule: More than one element in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.element = rich.Rich_Port(attrs["port"],
attrs["protocol"])
return
check_port(attrs["port"])
check_tcpudp(attrs["protocol"])
entry = (portStr(attrs["port"], "-"), attrs["protocol"])
if entry not in self.item.ports:
self.item.ports.append(entry)
else:
log.warning("Port '%s/%s' already set, ignoring.",
attrs["port"], attrs["protocol"])
elif name == "protocol":
if self._rule:
if self._rule.element:
log.warning("Invalid rule: More than one element in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.element = rich.Rich_Protocol(attrs["value"])
else:
check_protocol(attrs["value"])
if attrs["value"] not in self.item.protocols:
self.item.protocols.append(attrs["value"])
else:
log.warning("Protocol '%s' already set, ignoring.",
attrs["value"])
elif name == "icmp-block":
if self._rule:
if self._rule.element:
log.warning("Invalid rule: More than one element in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.element = rich.Rich_IcmpBlock(attrs["name"])
return
if attrs["name"] not in self.item.icmp_blocks:
self.item.icmp_blocks.append(attrs["name"])
else:
log.warning("icmp-block '%s' already set, ignoring.",
attrs["name"])
elif name == "icmp-type":
if self._rule:
if self._rule.element:
log.warning("Invalid rule: More than one element in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.element = rich.Rich_IcmpType(attrs["name"])
return
else:
log.warning("Invalid rule: icmp-block '%s' outside of rule",
attrs["name"])
elif name == "masquerade":
if "enabled" in attrs and \
attrs["enabled"].lower() in [ "no", "false" ] :
log.warning("Ignoring deprecated attribute enabled='%s'",
attrs["enabled"])
return
if self._rule:
if self._rule.element:
log.warning("Invalid rule: More than one element in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.element = rich.Rich_Masquerade()
else:
if self.item.masquerade:
log.warning("Masquerade already set, ignoring.")
else:
self.item.masquerade = True
elif name == "forward-port":
to_port = ""
if "to-port" in attrs:
to_port = attrs["to-port"]
to_addr = ""
if "to-addr" in attrs:
to_addr = attrs["to-addr"]
if self._rule:
if self._rule.element:
log.warning("Invalid rule: More than one element in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.element = rich.Rich_ForwardPort(attrs["port"],
attrs["protocol"],
to_port, to_addr)
return
check_port(attrs["port"])
check_tcpudp(attrs["protocol"])
if to_port:
check_port(to_port)
if to_addr:
if not checkIP(to_addr) and not checkIP6(to_addr):
raise FirewallError(errors.INVALID_ADDR,
"to-addr '%s' is not a valid address" \
% to_addr)
entry = (portStr(attrs["port"], "-"), attrs["protocol"],
portStr(to_port, "-"), str(to_addr))
if entry not in self.item.forward_ports:
self.item.forward_ports.append(entry)
else:
log.warning("Forward port %s/%s%s%s already set, ignoring.",
attrs["port"], attrs["protocol"],
" >%s" % to_port if to_port else "",
" @%s" % to_addr if to_addr else "")
elif name == "source-port":
if self._rule:
if self._rule.element:
log.warning("Invalid rule: More than one element in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.element = rich.Rich_SourcePort(attrs["port"],
attrs["protocol"])
return
check_port(attrs["port"])
check_tcpudp(attrs["protocol"])
entry = (portStr(attrs["port"], "-"), attrs["protocol"])
if entry not in self.item.source_ports:
self.item.source_ports.append(entry)
else:
log.warning("Source port '%s/%s' already set, ignoring.",
attrs["port"], attrs["protocol"])
elif name == "interface":
if self._rule:
log.warning('Invalid rule: interface use in rule.')
self._rule_error = True
return
# zone bound to interface
if "name" not in attrs:
log.warning('Invalid interface: Name missing.')
self._rule_error = True
return
if attrs["name"] not in self.item.interfaces:
self.item.interfaces.append(attrs["name"])
else:
log.warning("Interface '%s' already set, ignoring.",
attrs["name"])
elif name == "source":
if self._rule:
if self._rule.source:
log.warning("Invalid rule: More than one source in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
invert = False
if "invert" in attrs and \
attrs["invert"].lower() in [ "yes", "true" ]:
invert = True
addr = mac = ipset = None
if "address" in attrs:
addr = attrs["address"]
if "mac" in attrs:
mac = attrs["mac"]
if "ipset" in attrs:
ipset = attrs["ipset"]
self._rule.source = rich.Rich_Source(addr, mac, ipset,
invert=invert)
return
# zone bound to source
if "address" not in attrs and "ipset" not in attrs:
                log.warning('Invalid source: No address and no ipset.')
return
if "address" in attrs and "ipset" in attrs:
                log.warning('Invalid source: Both address and ipset given.')
return
if "family" in attrs:
log.warning("Ignoring deprecated attribute family='%s'",
attrs["family"])
if "invert" in attrs:
                log.warning('Invalid source: Inversion not allowed here.')
return
if "address" in attrs:
if not checkIPnMask(attrs["address"]) and \
not checkIP6nMask(attrs["address"]) and \
not check_mac(attrs["address"]):
raise FirewallError(errors.INVALID_ADDR, attrs["address"])
if "ipset" in attrs:
entry = "ipset:%s" % attrs["ipset"]
if entry not in self.item.sources:
self.item.sources.append(entry)
else:
log.warning("Source '%s' already set, ignoring.",
attrs["address"])
if "address" in attrs:
entry = attrs["address"]
if entry not in self.item.sources:
self.item.sources.append(entry)
else:
log.warning("Source '%s' already set, ignoring.",
attrs["address"])
elif name == "destination":
if not self._rule:
log.warning('Invalid rule: Destination outside of rule')
self._rule_error = True
return
if self._rule.destination:
log.warning("Invalid rule: More than one destination in rule '%s', ignoring.",
str(self._rule))
return
invert = False
if "invert" in attrs and \
attrs["invert"].lower() in [ "yes", "true" ]:
invert = True
self._rule.destination = rich.Rich_Destination(attrs["address"],
invert)
elif name in [ "accept", "reject", "drop", "mark" ]:
if not self._rule:
log.warning('Invalid rule: Action outside of rule')
self._rule_error = True
return
if self._rule.action:
log.warning('Invalid rule: More than one action')
self._rule_error = True
return
if name == "accept":
self._rule.action = rich.Rich_Accept()
elif name == "reject":
_type = None
if "type" in attrs:
_type = attrs["type"]
self._rule.action = rich.Rich_Reject(_type)
elif name == "drop":
self._rule.action = rich.Rich_Drop()
elif name == "mark":
_set = attrs["set"]
self._rule.action = rich.Rich_Mark(_set)
self._limit_ok = self._rule.action
elif name == "log":
if not self._rule:
log.warning('Invalid rule: Log outside of rule')
return
if self._rule.log:
log.warning('Invalid rule: More than one log')
return
level = None
if "level" in attrs:
level = attrs["level"]
if level not in [ "emerg", "alert", "crit", "error",
"warning", "notice", "info", "debug" ]:
log.warning('Invalid rule: Invalid log level')
self._rule_error = True
return
prefix = attrs["prefix"] if "prefix" in attrs else None
self._rule.log = rich.Rich_Log(prefix, level)
self._limit_ok = self._rule.log
elif name == "audit":
if not self._rule:
log.warning('Invalid rule: Audit outside of rule')
return
if self._rule.audit:
log.warning("Invalid rule: More than one audit in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
self._rule.audit = rich.Rich_Audit()
self._limit_ok = self._rule.audit
elif name == "rule":
family = None
if "family" in attrs:
family = attrs["family"]
if family not in [ "ipv4", "ipv6" ]:
log.warning('Invalid rule: Rule family "%s" invalid',
attrs["family"])
self._rule_error = True
return
self._rule = rich.Rich_Rule(family)
elif name == "limit":
if not self._limit_ok:
log.warning('Invalid rule: Limit outside of action, log and audit')
self._rule_error = True
return
if self._limit_ok.limit:
log.warning("Invalid rule: More than one limit in rule '%s', ignoring.",
str(self._rule))
self._rule_error = True
return
value = attrs["value"]
self._limit_ok.limit = rich.Rich_Limit(value)
elif name == "icmp-block-inversion":
if self.item.icmp_block_inversion:
log.warning("Icmp-Block-Inversion already set, ignoring.")
else:
self.item.icmp_block_inversion = True
else:
log.warning("Unknown XML element '%s'", name)
return
def endElement(self, name):
IO_Object_ContentHandler.endElement(self, name)
if name == "rule":
if not self._rule_error:
try:
self._rule.check()
except Exception as e:
log.warning("%s: %s", e, str(self._rule))
else:
if str(self._rule) not in \
[ str(x) for x in self.item.rules ]:
self.item.rules.append(self._rule)
else:
log.warning("Rule '%s' already set, ignoring.",
str(self._rule))
self._rule = None
self._rule_error = False
elif name in [ "accept", "reject", "drop", "mark", "log", "audit" ]:
self._limit_ok = None
def zone_reader(filename, path, no_check_name=False):
zone = Zone()
if not filename.endswith(".xml"):
raise FirewallError(errors.INVALID_NAME,
"'%s' is missing .xml suffix" % filename)
zone.name = filename[:-4]
if not no_check_name:
zone.check_name(zone.name)
zone.filename = filename
zone.path = path
zone.builtin = False if path.startswith(config.ETC_FIREWALLD) else True
zone.default = zone.builtin
handler = zone_ContentHandler(zone)
parser = sax.make_parser()
parser.setContentHandler(handler)
name = "%s/%s" % (path, filename)
with open(name, "r") as f:
try:
parser.parse(f)
except sax.SAXParseException as msg:
raise FirewallError(errors.INVALID_ZONE,
"not a valid zone file: %s" % \
msg.getException())
del handler
del parser
if PY2:
zone.encode_strings()
return zone
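# Minimal usage sketch for the reader/writer pair (paths are illustrative
# and assume a standard firewalld installation):
#   zone = zone_reader("public.xml", config.ETC_FIREWALLD + "/zones")
#   zone.services.append("https")
#   zone_writer(zone)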
def zone_writer(zone, path=None):
_path = path if path else zone.path
if zone.filename:
name = "%s/%s" % (_path, zone.filename)
else:
name = "%s/%s.xml" % (_path, zone.name)
if os.path.exists(name):
try:
shutil.copy2(name, "%s.old" % name)
except Exception as msg:
log.error("Backup of file '%s' failed: %s", name, msg)
dirpath = os.path.dirname(name)
if dirpath.startswith(config.ETC_FIREWALLD) and not os.path.exists(dirpath):
if not os.path.exists(config.ETC_FIREWALLD):
os.mkdir(config.ETC_FIREWALLD, 0o750)
os.mkdir(dirpath, 0o750)
f = io.open(name, mode='wt', encoding='UTF-8')
handler = IO_Object_XMLGenerator(f)
handler.startDocument()
# start zone element
attrs = {}
if zone.version and zone.version != "":
attrs["version"] = zone.version
if zone.target != DEFAULT_ZONE_TARGET:
attrs["target"] = zone.target
handler.startElement("zone", attrs)
handler.ignorableWhitespace("\n")
# short
if zone.short and zone.short != "":
handler.ignorableWhitespace(" ")
handler.startElement("short", { })
handler.characters(zone.short)
handler.endElement("short")
handler.ignorableWhitespace("\n")
# description
if zone.description and zone.description != "":
handler.ignorableWhitespace(" ")
handler.startElement("description", { })
handler.characters(zone.description)
handler.endElement("description")
handler.ignorableWhitespace("\n")
# interfaces
for interface in uniqify(zone.interfaces):
handler.ignorableWhitespace(" ")
handler.simpleElement("interface", { "name": interface })
handler.ignorableWhitespace("\n")
# source
for source in uniqify(zone.sources):
handler.ignorableWhitespace(" ")
if "ipset:" in source:
handler.simpleElement("source", { "ipset": source[6:] })
else:
handler.simpleElement("source", { "address": source })
handler.ignorableWhitespace("\n")
# services
for service in uniqify(zone.services):
handler.ignorableWhitespace(" ")
handler.simpleElement("service", { "name": service })
handler.ignorableWhitespace("\n")
# ports
for port in uniqify(zone.ports):
handler.ignorableWhitespace(" ")
handler.simpleElement("port", { "port": port[0], "protocol": port[1] })
handler.ignorableWhitespace("\n")
# protocols
for protocol in uniqify(zone.protocols):
handler.ignorableWhitespace(" ")
handler.simpleElement("protocol", { "value": protocol })
handler.ignorableWhitespace("\n")
# icmp-block-inversion
if zone.icmp_block_inversion:
handler.ignorableWhitespace(" ")
handler.simpleElement("icmp-block-inversion", { })
handler.ignorableWhitespace("\n")
# icmp-blocks
for icmp in uniqify(zone.icmp_blocks):
handler.ignorableWhitespace(" ")
handler.simpleElement("icmp-block", { "name": icmp })
handler.ignorableWhitespace("\n")
# masquerade
if zone.masquerade:
handler.ignorableWhitespace(" ")
handler.simpleElement("masquerade", { })
handler.ignorableWhitespace("\n")
# forward-ports
for forward in uniqify(zone.forward_ports):
handler.ignorableWhitespace(" ")
attrs = { "port": forward[0], "protocol": forward[1] }
if forward[2] and forward[2] != "" :
attrs["to-port"] = forward[2]
if forward[3] and forward[3] != "" :
attrs["to-addr"] = forward[3]
handler.simpleElement("forward-port", attrs)
handler.ignorableWhitespace("\n")
# source-ports
for port in uniqify(zone.source_ports):
handler.ignorableWhitespace(" ")
handler.simpleElement("source-port", { "port": port[0],
"protocol": port[1] })
handler.ignorableWhitespace("\n")
# rules
for rule in zone.rules:
attrs = { }
if rule.family:
attrs["family"] = rule.family
handler.ignorableWhitespace(" ")
handler.startElement("rule", attrs)
handler.ignorableWhitespace("\n")
# source
if rule.source:
attrs = { }
if rule.source.addr:
attrs["address"] = rule.source.addr
if rule.source.mac:
attrs["mac"] = rule.source.mac
if rule.source.ipset:
attrs["ipset"] = rule.source.ipset
if rule.source.invert:
attrs["invert"] = "True"
handler.ignorableWhitespace(" ")
handler.simpleElement("source", attrs)
handler.ignorableWhitespace("\n")
# destination
if rule.destination:
attrs = { "address": rule.destination.addr }
if rule.destination.invert:
attrs["invert"] = "True"
handler.ignorableWhitespace(" ")
handler.simpleElement("destination", attrs)
handler.ignorableWhitespace("\n")
# element
if rule.element:
element = ""
attrs = { }
if type(rule.element) == rich.Rich_Service:
element = "service"
attrs["name"] = rule.element.name
elif type(rule.element) == rich.Rich_Port:
element = "port"
attrs["port"] = rule.element.port
attrs["protocol"] = rule.element.protocol
elif type(rule.element) == rich.Rich_Protocol:
element = "protocol"
attrs["value"] = rule.element.value
elif type(rule.element) == rich.Rich_Masquerade:
element = "masquerade"
elif type(rule.element) == rich.Rich_IcmpBlock:
element = "icmp-block"
attrs["name"] = rule.element.name
elif type(rule.element) == rich.Rich_IcmpType:
element = "icmp-type"
attrs["name"] = rule.element.name
elif type(rule.element) == rich.Rich_ForwardPort:
element = "forward-port"
attrs["port"] = rule.element.port
attrs["protocol"] = rule.element.protocol
if rule.element.to_port != "":
attrs["to-port"] = rule.element.to_port
if rule.element.to_address != "":
attrs["to-addr"] = rule.element.to_address
elif type(rule.element) == rich.Rich_SourcePort:
element = "source-port"
attrs["port"] = rule.element.port
attrs["protocol"] = rule.element.protocol
else:
raise FirewallError(
errors.INVALID_OBJECT,
"Unknown element '%s' in zone_writer" % type(rule.element))
handler.ignorableWhitespace(" ")
handler.simpleElement(element, attrs)
handler.ignorableWhitespace("\n")
# rule.element
# log
if rule.log:
attrs = { }
if rule.log.prefix:
attrs["prefix"] = rule.log.prefix
if rule.log.level:
attrs["level"] = rule.log.level
if rule.log.limit:
handler.ignorableWhitespace(" ")
handler.startElement("log", attrs)
handler.ignorableWhitespace("\n ")
handler.simpleElement("limit",
{ "value": rule.log.limit.value })
handler.ignorableWhitespace("\n ")
handler.endElement("log")
else:
handler.ignorableWhitespace(" ")
handler.simpleElement("log", attrs)
handler.ignorableWhitespace("\n")
# audit
if rule.audit:
attrs = {}
if rule.audit.limit:
handler.ignorableWhitespace(" ")
handler.startElement("audit", { })
handler.ignorableWhitespace("\n ")
handler.simpleElement("limit",
{ "value": rule.audit.limit.value })
handler.ignorableWhitespace("\n ")
handler.endElement("audit")
else:
handler.ignorableWhitespace(" ")
handler.simpleElement("audit", attrs)
handler.ignorableWhitespace("\n")
# action
if rule.action:
action = ""
attrs = { }
if type(rule.action) == rich.Rich_Accept:
action = "accept"
elif type(rule.action) == rich.Rich_Reject:
action = "reject"
if rule.action.type:
attrs["type"] = rule.action.type
elif type(rule.action) == rich.Rich_Drop:
action = "drop"
elif type(rule.action) == rich.Rich_Mark:
action = "mark"
attrs["set"] = rule.action.set
else:
log.warning("Unknown action '%s'", type(rule.action))
if rule.action.limit:
handler.ignorableWhitespace(" ")
handler.startElement(action, attrs)
handler.ignorableWhitespace("\n ")
handler.simpleElement("limit",
{ "value": rule.action.limit.value })
handler.ignorableWhitespace("\n ")
handler.endElement(action)
else:
handler.ignorableWhitespace(" ")
handler.simpleElement(action, attrs)
handler.ignorableWhitespace("\n")
handler.ignorableWhitespace(" ")
handler.endElement("rule")
handler.ignorableWhitespace("\n")
# end zone element
handler.endElement("zone")
handler.ignorableWhitespace("\n")
handler.endDocument()
f.close()
del handler
| gpl-2.0 |
soarpenguin/ansible | lib/ansible/modules/network/aos/aos_rack_type.py | 36 | 7617 | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_rack_type
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Rack Type
description:
    - Apstra AOS Rack Type module lets you manage your Rack Types easily.
      You can create and delete Rack Types by name, ID, or by using a JSON file.
      This module is idempotent and supports the I(check) mode.
      It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Rack Type to manage.
Only one of I(name), I(id) or I(content) can be set.
id:
description:
      - AOS ID of the Rack Type to manage (can't be used to create a new Rack Type).
        Only one of I(name), I(id) or I(content) can be set.
content:
description:
      - Datastructure of the Rack Type to create. The data can be in YAML or JSON,
        or passed directly as a variable. It is the same datastructure that is
        returned on success in I(value).
state:
description:
- Indicate what is the expected state of the Rack Type (present or not).
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Delete a Rack Type by name"
aos_rack_type:
session: "{{ aos_session }}"
name: "my-rack-type"
state: absent
- name: "Delete a Rack Type by id"
aos_rack_type:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
# Save a Rack Type to a file
- name: "Access Rack Type 1/3"
aos_rack_type:
session: "{{ aos_session }}"
name: "my-rack-type"
state: present
register: rack_type
- name: "Save Rack Type into a JSON file 2/3"
copy:
content: "{{ rack_type.value | to_nice_json }}"
dest: rack_type_saved.json
- name: "Save Rack Type into a YAML file 3/3"
copy:
content: "{{ rack_type.value | to_nice_yaml }}"
dest: rack_type_saved.yaml
- name: "Load Rack Type from a JSON file"
aos_rack_type:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/rack_type_saved.json') }}"
state: present
- name: "Load Rack Type from a YAML file"
aos_rack_type:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/rack_type_saved.yaml') }}"
state: present
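
# Sketch: create a Rack Type from an inline variable. Assumes
# my_rack_type_def is a hypothetical variable holding a complete rack type
# datastructure (including display_name), e.g. one saved earlier from this
# module's I(value) output.
- name: "Create a Rack Type from an inline definition"
  aos_rack_type:
    session: "{{ aos_session }}"
    content: "{{ my_rack_type_def | to_nice_json }}"
    state: present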
'''
RETURN = '''
name:
description: Name of the Rack Type
returned: always
type: str
sample: AOS-1x25-1
id:
description: AOS unique ID assigned to the Rack Type
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
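# Helpers from ansible.module_utils.aos wrap the aos-pyez client: session setup,
# collection lookups, resource loading, content parsing, and version checks.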
#########################################################
# State Processing
#########################################################
def rack_type_absent(module, aos, my_rack_type):
margs = module.params
    # If the Rack Type does not exist, return directly
if my_rack_type.exists is False:
module.exit_json(changed=False,
name=margs['name'],
id=margs['id'],
value={})
# If not in check mode, delete Rack Type
if not module.check_mode:
try:
my_rack_type.delete()
        except Exception:
            module.fail_json(msg="An error occurred while trying to delete the Rack Type")
module.exit_json( changed=True,
name=my_rack_type.name,
id=my_rack_type.id,
value={} )
def rack_type_present(module, aos, my_rack_type):
margs = module.params
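    # When 'content' is provided, creation is delegated to the do_load_resource helper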
if margs['content'] is not None:
if 'display_name' in module.params['content'].keys():
do_load_resource(module, aos.RackTypes, module.params['content']['display_name'])
else:
module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
    # If the Rack Type does not exist yet, 'content' is required to create it.
    # Note: 'content' is always a key of module.params, so test its value, not its presence.
    if my_rack_type.exists is False and margs['content'] is None:
        module.fail_json(msg="'content' is mandatory for a Rack Type that does not exist yet")
module.exit_json( changed=False,
name=my_rack_type.name,
id=my_rack_type.id,
value=my_rack_type.value )
#########################################################
# Main Function
#########################################################
def rack_type(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
    except Exception:
        module.fail_json(msg="Unable to log in to the AOS server")
item_name = False
item_id = False
if margs['content'] is not None:
content = content_to_dict(module, margs['content'] )
if 'display_name' in content.keys():
item_name = content['display_name']
else:
module.fail_json(msg="Unable to extract 'display_name' from 'content'")
elif margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
#----------------------------------------------------
# Find Object if available based on ID or Name
#----------------------------------------------------
my_rack_type = find_collection_item(aos.RackTypes,
item_name=item_name,
item_id=item_id)
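    # The looked-up item exposes an .exists flag telling whether it is already present on the AOS server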
#----------------------------------------------------
# Proceed based on State value
#----------------------------------------------------
if margs['state'] == 'absent':
rack_type_absent(module, aos, my_rack_type)
elif margs['state'] == 'present':
rack_type_present(module, aos, my_rack_type)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
            name=dict(required=False),
            id=dict(required=False),
            content=dict(required=False, type="json"),
            state=dict(required=False,
                       choices=['present', 'absent'],
                       default="present")
        ),
        mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
rack_type(module)
if __name__ == "__main__":
main()
| gpl-3.0 |